text stringlengths 0 1.05M | meta dict |
|---|---|
"""A helper function for parsing and executing wit.ai skills."""
import logging
import json
import aiohttp
from voluptuous import Required
from opsdroid.const import WITAI_DEFAULT_VERSION
from opsdroid.const import WITAI_API_ENDPOINT
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = {Required("token"): str, "min-score": float}
async def call_witai(message, config):
    """Call the wit.ai api and return the response.

    Args:
        message: The opsdroid message being parsed; only ``message.text``
            is sent to wit.ai.
        config: The parser configuration, containing at least ``token``.

    Returns:
        dict: The JSON body decoded from the wit.ai response.

    """
    # The user's text is interpolated straight into the query string, so it
    # must be percent-encoded or spaces/&/#/etc. would corrupt the URL.
    from urllib.parse import quote

    async with aiohttp.ClientSession(trust_env=True) as session:
        headers = {"Authorization": "Bearer " + config["token"]}
        payload = {"v": WITAI_DEFAULT_VERSION, "q": message.text}
        resp = await session.get(
            WITAI_API_ENDPOINT
            + "v={}&q={}".format(payload["v"], quote(payload["q"])),
            headers=headers,
        )
        result = await resp.json()
        _LOGGER.info(_("wit.ai response - %s."), json.dumps(result))
        return result
async def parse_witai(opsdroid, skills, message, config):
    """Parse a message against all witai skills.

    Returns a list of dicts describing every skill whose ``witai_intent``
    matcher matched one of the intents wit.ai detected; empty on any error.
    """
    matched_skills = []
    if "token" not in config:
        return matched_skills
    try:
        result = await call_witai(message, config)
    except aiohttp.ClientOSError:
        _LOGGER.error(_("No response from wit.ai, check your network."))
        return matched_skills
    if "code" in result:
        _LOGGER.error(
            _("wit.ai error - %s %s"), str(result["code"]), str(result["error"])
        )
        return matched_skills
    # Guard the lookup: a response without an "entities" key previously
    # raised KeyError here instead of being reported.
    entities = result.get("entities") or {}
    if not entities:
        _LOGGER.error(
            _("wit.ai error - No intent found. Did you forget to create one?")
        )
        return matched_skills
    # "intent" may be absent even when other entities exist; the original
    # code then raised KeyError while building the intent-value list below.
    intents = entities.get("intent", [])
    try:
        confidence = intents[0]["confidence"]
    except (IndexError, KeyError):
        confidence = 0.0
    if "min-score" in config and confidence < config["min-score"]:
        _LOGGER.info(_("wit.ai score lower than min-score."))
        return matched_skills
    intent_values = [i["value"] for i in intents]
    for skill in skills:
        for matcher in skill.matchers:
            if "witai_intent" in matcher and matcher["witai_intent"] in intent_values:
                message.witai = result
                # Attach every non-intent entity to the message. Only the
                # first candidate of each entity list is used, as before.
                for key, entity in entities.items():
                    if key != "intent":
                        message.update_entity(
                            key, entity[0]["value"], entity[0]["confidence"]
                        )
                matched_skills.append(
                    {
                        "score": confidence,
                        "skill": skill,
                        "config": skill.config,
                        "message": message,
                    }
                )
    return matched_skills
| {
"repo_name": "FabioRosado/opsdroid",
"path": "opsdroid/parsers/witai.py",
"copies": "3",
"size": "3059",
"license": "apache-2.0",
"hash": 3737355968324776400,
"line_mean": 36.3048780488,
"line_max": 88,
"alpha_frac": 0.4923177509,
"autogenerated": false,
"ratio": 4.698924731182796,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005779821063653991,
"num_lines": 82
} |
"""A helper function for parsing event type skills."""
import logging
from opsdroid.events import Event
_LOGGER = logging.getLogger(__name__)
async def match_event(event, event_opts):
    """Filter and matches the event.

    Returns True when the event is exactly of the requested type and every
    non-"type" key in ``event_opts`` equals the corresponding entity value
    on the event. List values match irrespective of element order.
    """
    event_type = event_opts.get("type", None)
    if event_type:
        # The event type can be specified with a string
        if isinstance(event_type, str):
            # pylint: disable=invalid-name
            et = Event.event_registry.get(event_type, None)
            if et is None:
                raise ValueError(
                    "{event_type} is not a valid opsdroid"
                    " event representation.".format(event_type=event_type)
                )
            event_type = et
    # TODO: Add option to match all subclasses as well
    # if isinstance(event, event_type):
    # pylint: disable=unidiomatic-typecheck
    if type(event) is event_type:
        for key in event_opts:
            if key == "type":
                continue
            event_value = event_opts.get(key, None)
            entity_value = event.entities.get(key, {}).get("value", None)
            if isinstance(event_value, list) and isinstance(entity_value, list):
                # Lists compare order-insensitively. Previously a sorted-equal
                # pair still fell through to the strict != below and was
                # wrongly rejected when the ordering differed.
                if sorted(event_value) != sorted(entity_value):
                    return False
            elif event_value != entity_value:
                return False
        return True
    return False
async def parse_event_type(opsdroid, event):
    """Parse an event if it's of a certain type.

    Runs every skill whose ``event_type`` matcher matches the event.
    """
    for skill in opsdroid.skills:
        # NOTE(review): a single failing constraint aborts parsing for ALL
        # remaining skills (behaviour preserved from the original) — confirm
        # this is intended rather than a per-skill skip.
        if not all(constraint(event) for constraint in skill.constraints):
            return
        for matcher in skill.matchers:
            options = matcher.get("event_type", {})
            if await match_event(event, options):
                await opsdroid.run_skill(skill, skill.config, event)
| {
"repo_name": "opsdroid/opsdroid",
"path": "opsdroid/parsers/event_type.py",
"copies": "3",
"size": "2024",
"license": "apache-2.0",
"hash": 7033986564143498000,
"line_mean": 32.1803278689,
"line_max": 81,
"alpha_frac": 0.5385375494,
"autogenerated": false,
"ratio": 4.487804878048781,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00019992003198720512,
"num_lines": 61
} |
"""A helper function for training, parsing and executing Rasa NLU skills."""
import logging
import json
import unicodedata
from hashlib import sha256
import aiohttp
import arrow
from opsdroid.const import RASANLU_DEFAULT_URL, RASANLU_DEFAULT_MODELS_PATH
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = {"url": str, "token": str, "models-path": str, "min-score": float}
async def _get_all_intents(skills):
"""Get all skill intents and concatenate into a single markdown string."""
intents = [skill["intents"] for skill in skills if skill["intents"] is not None]
if not intents:
return None
intents = "\n\n".join(intents)
return unicodedata.normalize("NFKD", intents).encode("ascii")
async def _get_intents_fingerprint(intents):
"""Return a hash of the intents."""
return sha256(intents).hexdigest()
async def _build_training_url(config):
"""Build the url for training a Rasa NLU model."""
url = "{}/model/train".format(
config.get("url", RASANLU_DEFAULT_URL),
)
if "token" in config:
url += "?&token={}".format(config["token"])
return url
async def _build_status_url(config):
"""Build the url for getting the status of Rasa NLU."""
url = "{}/status".format(config.get("url", RASANLU_DEFAULT_URL))
if "token" in config:
url += "?&token={}".format(config["token"])
return url
async def _init_model(config):
    """Force Rasa NLU to load the model by issuing an empty parse request.

    Returns True when the server answered, False when the request failed.
    """
    _LOGGER.info(_("Initialising Rasa NLU model."))
    started_at = arrow.now()
    response = await call_rasanlu("", config)
    if response is None:
        _LOGGER.error(_("Initialisation failed, training failed.."))
        return False
    elapsed = int((arrow.now() - started_at).total_seconds())
    _LOGGER.info(_("Initialisation complete in %s seconds."), elapsed)
    return True
async def _get_rasa_nlu_version(config):
    """Get Rasa NLU version data.

    Returns the decoded JSON dict on HTTP 200, the raw response text on any
    other status, or None when the server is unreachable.
    """
    async with aiohttp.ClientSession(trust_env=True) as session:
        endpoint = config.get("url", RASANLU_DEFAULT_URL) + "/version"
        try:
            resp = await session.get(endpoint)
        except aiohttp.client_exceptions.ClientConnectorError:
            _LOGGER.error(_("Unable to connect to Rasa NLU."))
            return None
        if resp.status != 200:
            result = await resp.text()
            _LOGGER.error(_("Bad Rasa NLU response - %s."), result)
        else:
            result = await resp.json()
            _LOGGER.debug(_("Rasa NLU response - %s."), json.dumps(result))
        return result
async def _check_rasanlu_compatibility(config):
    """Check if Rasa NLU is compatible with the API we implement.

    Returns True for servers whose minimum compatible version is >= 2,
    False otherwise or when the version cannot be determined.
    """
    _LOGGER.debug(_("Checking Rasa NLU version."))
    json_object = await _get_rasa_nlu_version(config)
    # _get_rasa_nlu_version returns None on a connection failure and the raw
    # response text (a str) on a non-200 reply; subscripting either crashed.
    if not isinstance(json_object, dict):
        _LOGGER.error(_("Unable to determine Rasa NLU version."))
        return False
    version = json_object["version"]
    minimum_compatible_version = json_object["minimum_compatible_version"]
    # Make sure we don't run against a 1.x.x Rasa NLU because it has a different API
    if int(minimum_compatible_version[0:1]) >= 2:
        _LOGGER.debug(_("Rasa NLU version {}.".format(version)))
        return True
    _LOGGER.error(
        _(
            "Incompatible Rasa NLU version ({}). Use Rasa Version >= 2.X.X.".format(
                version
            )
        )
    )
    return False
async def _load_model(config):
    """Load model from the filesystem of the Rasa NLU environment.

    Issues a PUT to /model pointing at the trained model file; returns the
    decoded response on 204, the error text otherwise, or None when the
    server is unreachable.
    """
    async with aiohttp.ClientSession(trust_env=True) as session:
        model_file = "{}/{}".format(
            config.get("models-path", RASANLU_DEFAULT_MODELS_PATH),
            config["model_filename"],
        )
        payload = {"model_file": model_file}
        endpoint = config.get("url", RASANLU_DEFAULT_URL) + "/model"
        if "token" in config:
            endpoint += "?token={}".format(config["token"])
        try:
            resp = await session.put(endpoint, data=json.dumps(payload), headers={})
        except aiohttp.client_exceptions.ClientConnectorError:
            _LOGGER.error(_("Unable to connect to Rasa NLU."))
            return None
        if resp.status == 204:
            # NOTE(review): a 204 reply normally carries no body, so
            # resp.json() may fail here — behaviour preserved from the
            # original; confirm against the Rasa server's actual reply.
            result = await resp.json()
        else:
            result = await resp.text()
            _LOGGER.error(_("Bad Rasa NLU response - %s."), result)
        return result
async def _is_model_loaded(config):
    """Check whether our trained model is currently loaded in Rasa NLU.

    Returns True/False from the /status reply, or None when the server is
    unreachable.
    """
    async with aiohttp.ClientSession(trust_env=True) as session:
        # The original also built a local status URL here but never used it
        # (dead code) — the request already goes through _build_status_url.
        try:
            resp = await session.get(await _build_status_url(config))
        except aiohttp.client_exceptions.ClientConnectorError:
            _LOGGER.error(_("Unable to connect to Rasa NLU."))
            return None
        if resp.status == 200:
            result = await resp.json()
            # str.find returns 0 (falsy) for a match at the start and -1
            # (truthy) when absent, so the previous truthiness test was
            # inverted; use substring membership instead.
            return config["model_filename"] in result["model_file"]
    return False
async def train_rasanlu(config, skills):
    """Train a Rasa NLU model based on the loaded skills.

    Posts the concatenated skill intents (as YAML) to the /model/train
    endpoint, records the returned model filename, asks the server to load
    it, and verifies the server answers a parse request.

    Returns:
        bool: True when training, loading and the status check all succeed.
    """
    _LOGGER.info(_("Starting Rasa NLU training."))
    intents = await _get_all_intents(skills)
    if intents is None:
        _LOGGER.warning(_("No intents found, skipping training."))
        return False
    await _check_rasanlu_compatibility(config)
    """
    TODO: think about how to correlate intent with trained model
    so we can just load the model without training it again if it wasn't changed
    """
    async with aiohttp.ClientSession(trust_env=True) as session:
        _LOGGER.info(_("Now training the model. This may take a while..."))
        url = await _build_training_url(config)
        headers = {"Content-Type": "application/x-yaml"}
        try:
            training_start = arrow.now()
            resp = await session.post(url, data=intents, headers=headers)
        except aiohttp.client_exceptions.ClientConnectorError:
            _LOGGER.error(_("Unable to connect to Rasa NLU, training failed."))
            return False
        if resp.status == 200:
            # A successful training run answers with the model tarball as a
            # "attachment" content-disposition; anything else is an error.
            if (
                resp.content_type == "application/x-tar"
                and resp.content_disposition.type == "attachment"
            ):
                time_taken = (arrow.now() - training_start).total_seconds()
                _LOGGER.info(
                    _("Rasa NLU training completed in %s seconds."), int(time_taken)
                )
                # Only the filename is kept; later calls reference the model
                # on the server's own filesystem.
                config["model_filename"] = resp.content_disposition.filename
                # close the connection and don't retrieve the model tar
                # because using it is currently not implemented
                resp.close()
                """
                model_path = "/tmp/{}".format(resp.content_disposition.filename)
                try:
                    output_file = open(model_path,"wb")
                    data = await resp.read()
                    output_file.write(data)
                    output_file.close()
                    _LOGGER.debug("Rasa taining model file saved to {}", model_path)
                except:
                    _LOGGER.error("Cannot save rasa taining model file to {}", model_path)
                """
                await _load_model(config)
                # Check if the current trained model is loaded
                if await _is_model_loaded(config):
                    _LOGGER.info(_("Successfully loaded Rasa NLU model."))
                else:
                    _LOGGER.error(_("Failed getting Rasa NLU server status."))
                    return False
                # Check if we will get a valid response from Rasa
                await call_rasanlu("", config)
                return True
        # Either a non-200 status or a 200 without the expected tar payload.
        _LOGGER.error(_("Bad Rasa NLU response - %s."), await resp.text())
        _LOGGER.error(_("Rasa NLU training failed."))
        return False
async def call_rasanlu(text, config):
    """Call the Rasa NLU api and return the response.

    Args:
        text: The raw message text to parse.
        config: Parser config; may carry "url" and "token".

    Returns:
        The decoded JSON dict on HTTP 200, the raw response text on any
        other status, or None when the server is unreachable.
    """
    async with aiohttp.ClientSession(trust_env=True) as session:
        data = {"text": text}
        url = config.get("url", RASANLU_DEFAULT_URL) + "/model/parse"
        if "token" in config:
            # Was "?&token=", which emits an empty first query parameter; use
            # the well-formed "?token=" (consistent with _load_model).
            url += "?token={}".format(config["token"])
        try:
            resp = await session.post(url, data=json.dumps(data), headers={})
        except aiohttp.client_exceptions.ClientConnectorError:
            _LOGGER.error(_("Unable to connect to Rasa NLU."))
            return None
        if resp.status == 200:
            result = await resp.json()
            _LOGGER.debug(_("Rasa NLU response - %s."), json.dumps(result))
        else:
            result = await resp.text()
            _LOGGER.error(_("Bad Rasa NLU response - %s."), result)
        return result
async def parse_rasanlu(opsdroid, skills, message, config):
    """Parse a message against all Rasa NLU skills.

    Returns a list of dicts describing every skill whose ``rasanlu_intent``
    matcher equals the detected intent name; empty on any error.
    """
    matched_skills = []
    try:
        result = await call_rasanlu(message.text, config)
    except aiohttp.ClientOSError:
        _LOGGER.error(_("No response from Rasa NLU, check your network."))
        return matched_skills
    if result == "unauthorized":
        _LOGGER.error(_("Rasa NLU error - Unauthorised request. Check your 'token'."))
        return matched_skills
    if result is None or "intent" not in result or result["intent"] is None:
        _LOGGER.error(
            _("Rasa NLU error - No intent found. Did you forget to create one?")
        )
        return matched_skills
    confidence = result["intent"]["confidence"]
    if "min-score" in config and confidence < config["min-score"]:
        _LOGGER.info(_("Rasa NLU score lower than min-score"))
        return matched_skills
    intent_name = result["intent"]["name"]
    for skill in skills:
        for matcher in skill.matchers:
            if "rasanlu_intent" in matcher and matcher["rasanlu_intent"] == intent_name:
                message.rasanlu = result
                for entity in result["entities"]:
                    message.update_entity(
                        entity["entity"], entity["value"], entity["confidence"]
                    )
                matched_skills.append(
                    {
                        "score": confidence,
                        "skill": skill,
                        "config": skill.config,
                        "message": message,
                    }
                )
    return matched_skills
| {
"repo_name": "jacobtomlinson/opsdroid",
"path": "opsdroid/parsers/rasanlu.py",
"copies": "2",
"size": "10676",
"license": "apache-2.0",
"hash": -3132005126475477500,
"line_mean": 35.6872852234,
"line_max": 90,
"alpha_frac": 0.5721243912,
"autogenerated": false,
"ratio": 4.204805041354864,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5776929432554864,
"avg_score": null,
"num_lines": null
} |
"""A helper function that executes a series of List queries for many APIs."""
import copy
__all__ = [
'YieldFromList',
]
def YieldFromList(
    service, request, limit=None, batch_size=100,
    method='List', field='items', predicate=None,
    current_token_attribute='pageToken',
    next_token_attribute='nextPageToken',
    batch_size_attribute='maxResults'):
    """Make a series of List requests, keeping track of page tokens.

    Args:
      service: apitools_base.BaseApiService, A service with a .List() method.
      request: protorpc.messages.Message, The request message corresponding to
          the service's .List() method, with all the attributes populated
          except the .maxResults and .pageToken attributes.
      limit: int, The maximum number of records to yield. None if all
          available records should be yielded.
      batch_size: int, The number of items to retrieve per request.
      method: str, The name of the method used to fetch resources.
      field: str, The field in the response that will be a list of items.
      predicate: lambda, A function that returns true for items to be yielded.
      current_token_attribute: str, The name of the attribute in a request
          message holding the page token for the page being requested.
      next_token_attribute: str, The name of the attribute in a response
          message holding the page token for the next page.
      batch_size_attribute: str, The name of the attribute in a request
          message holding the maximum number of results to be returned.

    Yields:
      protorpc.message.Message, The resources listed by the service.
    """
    # Work on a private copy so the caller's request is never mutated.
    page_request = copy.deepcopy(request)
    setattr(page_request, batch_size_attribute, batch_size)
    setattr(page_request, current_token_attribute, None)
    remaining = limit
    while remaining is None or remaining:
        response = getattr(service, method)(page_request)
        page_items = getattr(response, field)
        if predicate:
            page_items = [item for item in page_items if predicate(item)]
        for item in page_items:
            yield item
            if remaining is not None:
                remaining -= 1
                if not remaining:
                    return
        next_token = getattr(response, next_token_attribute)
        if not next_token:
            return
        setattr(page_request, current_token_attribute, next_token)
| {
"repo_name": "wemanuel/smry",
"path": "smry/server-auth/ls/google-cloud-sdk/.install/.backup/lib/googlecloudapis/apitools/base/py/list_pager.py",
"copies": "4",
"size": "2208",
"license": "apache-2.0",
"hash": -4528370331874206700,
"line_mean": 37.0689655172,
"line_max": 80,
"alpha_frac": 0.7028985507,
"autogenerated": false,
"ratio": 4.246153846153846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008133881187831817,
"num_lines": 58
} |
"""A helper module for customization."""
import collections
from typing import Any, Dict, List
import fnmatch
import voluptuous as vol
from homeassistant.const import CONF_ENTITY_ID
from homeassistant.core import HomeAssistant, split_entity_id
import homeassistant.helpers.config_validation as cv
# hass.data key templates for the per-domain customize overrides and the
# per-entity lookup cache built from them.
_OVERWRITE_KEY_FORMAT = '{}.overwrite'
_OVERWRITE_CACHE_KEY_FORMAT = '{}.overwrite_cache'
# Each customize entry must carry a non-empty, lower-cased entity_id list
# (CSV strings are split); any other keys are passed through untouched.
_CUSTOMIZE_SCHEMA_ENTRY = vol.Schema({
    vol.Required(CONF_ENTITY_ID): vol.All(
        cv.ensure_list_csv, vol.Length(min=1), [vol.Schema(str)], [vol.Lower])
}, extra=vol.ALLOW_EXTRA)
def _convert_old_config(inp: Any) -> List:
    """Normalise legacy customize config into a list of entry dicts."""
    if not isinstance(inp, dict):
        return cv.ensure_list(inp)
    if CONF_ENTITY_ID in inp:
        return [inp]  # single entry
    entries = []
    inp = vol.Schema({cv.match_all: dict})(inp)
    for entity, attrs in inp.items():
        entry = dict(attrs)
        entry[CONF_ENTITY_ID] = entity
        entries.append(entry)
    return entries


CUSTOMIZE_SCHEMA = vol.All(_convert_old_config, [_CUSTOMIZE_SCHEMA_ENTRY])
def set_customize(
        hass: HomeAssistant, domain: str, customize: List[Dict]) -> None:
    """Overwrite all current customize settings.

    Async friendly.
    """
    overwrite_key = _OVERWRITE_KEY_FORMAT.format(domain)
    cache_key = _OVERWRITE_CACHE_KEY_FORMAT.format(domain)
    hass.data[overwrite_key] = customize
    # Drop cached lookups; they were computed from the old settings.
    hass.data[cache_key] = {}
def get_overrides(hass: HomeAssistant, domain: str, entity_id: str) -> Dict:
    """Return a dictionary of overrides related to entity_id.

    Whole-domain overrides are of lowest priorities,
    then glob on entity ID, and finally exact entity_id
    matches are of highest priority.

    The lookups are cached.
    """
    # collections.Mapping was removed in Python 3.10; the ABC has lived in
    # collections.abc since Python 3.3.
    from collections.abc import Mapping

    cache_key = _OVERWRITE_CACHE_KEY_FORMAT.format(domain)
    if cache_key in hass.data and entity_id in hass.data[cache_key]:
        return hass.data[cache_key][entity_id]
    overwrite_key = _OVERWRITE_KEY_FORMAT.format(domain)
    if overwrite_key not in hass.data:
        return {}
    domain_result = {}  # type: Dict[str, Any]
    glob_result = {}  # type: Dict[str, Any]
    exact_result = {}  # type: Dict[str, Any]
    domain = split_entity_id(entity_id)[0]

    def clean_entry(entry: Dict) -> Dict:
        """Clean up entity-matching keys."""
        entry.pop(CONF_ENTITY_ID, None)
        return entry

    def deep_update(target: Dict, source: Dict) -> None:
        """Deep update a dictionary."""
        for key, value in source.items():
            if isinstance(value, Mapping):
                updated_value = target.get(key, {})
                # If the new value is map, but the old value is not -
                # overwrite the old value.
                if not isinstance(updated_value, Mapping):
                    updated_value = {}
                deep_update(updated_value, value)
                target[key] = updated_value
            else:
                target[key] = source[key]

    for rule in hass.data[overwrite_key]:
        if CONF_ENTITY_ID in rule:
            entities = rule[CONF_ENTITY_ID]
            if domain in entities:
                deep_update(domain_result, rule)
            if entity_id in entities:
                deep_update(exact_result, rule)
            for entity_id_glob in entities:
                if entity_id_glob == entity_id:
                    continue
                if fnmatch.fnmatchcase(entity_id, entity_id_glob):
                    deep_update(glob_result, rule)
                    break
    # Merge in priority order: domain-wide, then glob matches, then exact.
    result = {}
    deep_update(result, clean_entry(domain_result))
    deep_update(result, clean_entry(glob_result))
    deep_update(result, clean_entry(exact_result))
    if cache_key not in hass.data:
        hass.data[cache_key] = {}
    hass.data[cache_key][entity_id] = result
    return result
| {
"repo_name": "kyvinh/home-assistant",
"path": "homeassistant/helpers/customize.py",
"copies": "3",
"size": "3752",
"license": "apache-2.0",
"hash": -162111667948171900,
"line_mean": 34.0654205607,
"line_max": 78,
"alpha_frac": 0.6199360341,
"autogenerated": false,
"ratio": 3.793731041456016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 107
} |
"""A helper module to create opsdroid events from matrix events."""
import logging
from collections import defaultdict
from opsdroid import events
from . import events as matrix_events
_LOGGER = logging.getLogger(__name__)
__all__ = ["MatrixEventCreator"]
def trim_reply_fallback_text(text):
    # Copyright (C) 2018 Tulir Asokan
    # Borrowed from https://github.com/tulir/mautrix-telegram
    # Having been given explicit permission to include it "under the terms of any OSI approved licence"
    # https://matrix.to/#/!FPUfgzXYWTKgIrwKxW:matrix.org/$15365871364925maRqg:maunium.net
    """Strip the leading "> "-quoted fallback lines from a rich-reply body."""
    if not text.startswith("> ") or "\n" not in text:
        return text  # pragma: no cover
    remaining = text.split("\n")
    while remaining and remaining[0].startswith("> "):
        remaining.pop(0)
    return "\n".join(remaining).strip()
class MatrixEventCreator(events.EventCreator):
    """Create opsdroid events from matrix ones."""

    async def create_event_from_eventid(self, eventid, roomid):
        """Return an ``Event`` based on an event id in a room."""
        # Pull the raw event JSON via the room-context API, then run it
        # through the normal dispatch in create_event.
        room_context = await self.connector.connection.room_context(roomid, eventid, 1)
        event_json = room_context.event.source
        return await self.create_event(event_json, roomid)

    def __init__(self, connector, *args, **kwargs):
        """Initialise the event creator.

        Registers a handler per matrix event type, plus a per-msgtype
        dispatch table for m.room.message.
        """
        super().__init__(connector, *args, **kwargs)
        self.event_types["m.room.message"] = self.create_room_message
        self.event_types["m.room.topic"] = self.create_room_description
        self.event_types["m.room.name"] = self.create_room_name
        self.event_types["m.reaction"] = self.create_reaction
        self.event_types["m.room.member"] = self.create_join_room
        # Unknown msgtypes fall back to self.skip, which still yields a
        # generic event rather than dropping the message.
        self.message_events = defaultdict(lambda: self.skip)
        self.message_events.update(
            {
                "m.text": self.create_message,
                "m.image": self.create_image,
                "m.file": self.create_file,
                # 'm.emote':
                # 'm.notice':
                # 'm.video':
                # 'm.audio':
                # 'm.location':
            }
        )

    async def skip(self, event, roomid):
        """Generate a generic event (state event if appropriate)."""
        kwargs = dict(
            content=event["content"],
            event_type=event["type"],
            user_id=event["sender"],
            user=await self.connector.get_nick(roomid, event["sender"]),
            target=roomid,
            connector=self.connector,
            raw_event=event,
            event_id=event["event_id"],
        )
        # The presence of state_key marks a matrix state event.
        event_type = matrix_events.GenericMatrixRoomEvent
        if "state_key" in event:
            event_type = matrix_events.MatrixStateEvent
            kwargs["state_key"] = event["state_key"]
        try:
            event = event_type(**kwargs)
            return event
        except Exception:  # pragma: nocover
            _LOGGER.exception(
                f"Matrix connector failed to parse event {event} as a room event."
            )
            return None

    async def create_room_message(self, event, roomid):
        """Dispatch a m.room.message event."""
        msgtype = event["content"]["msgtype"]
        return await self.message_events[msgtype](event, roomid)

    async def create_message(self, event, roomid):
        """Send a Message event.

        Also detects edits (m.replace) and rich replies (m.in_reply_to)
        and emits EditedMessage/Reply instead.
        """
        kwargs = dict(
            text=event["content"]["body"],
            user_id=event["sender"],
            user=await self.connector.get_nick(roomid, event["sender"]),
            target=roomid,
            connector=self.connector,
            event_id=event["event_id"],
            raw_event=event,
        )
        if "m.relates_to" in event["content"]:
            relates_to = event["content"]["m.relates_to"]
            # Detect an edit.
            if relates_to.get("rel_type", "") == "m.replace":
                kwargs["text"] = event["content"]["m.new_content"]["body"]
                kwargs["linked_event"] = await self.create_event_from_eventid(
                    relates_to["event_id"], roomid
                )
                return events.EditedMessage(**kwargs)
            # Detect a reply
            if relates_to.get("m.in_reply_to"):
                # Strip the "> "-quoted fallback so only the reply body remains.
                kwargs["text"] = trim_reply_fallback_text(kwargs["text"])
                kwargs["linked_event"] = await self.create_event_from_eventid(
                    relates_to["m.in_reply_to"]["event_id"], roomid
                )
                return events.Reply(**kwargs)
        return events.Message(**kwargs)

    async def _file_kwargs(self, event, roomid):
        """Build the shared kwargs for File/Image events, resolving mxc:// URLs."""
        # Unencrypted events carry "url" directly; otherwise it is nested
        # under "file".
        if "url" in event["content"]:
            url = event["content"]["url"]
        else:
            url = event["content"]["file"]["url"]
        url = await self.connector.connection.mxc_to_http(url)
        user = await self.connector.get_nick(roomid, event["sender"])
        return dict(
            url=url,
            name=event["content"]["body"],
            user_id=event["sender"],
            user=user,
            target=roomid,
            connector=self.connector,
            event_id=event["event_id"],
            raw_event=event,
        )

    async def create_file(self, event, roomid):
        """Send a File event."""
        kwargs = await self._file_kwargs(event, roomid)
        return events.File(**kwargs)

    async def create_image(self, event, roomid):
        """Send a Image event."""
        kwargs = await self._file_kwargs(event, roomid)
        return events.Image(**kwargs)

    async def create_room_description(self, event, roomid):
        """Send a RoomDescription event."""
        return events.RoomDescription(
            description=event["content"]["topic"],
            user=await self.connector.get_nick(roomid, event["sender"]),
            user_id=event["sender"],
            target=roomid,
            connector=self.connector,
            event_id=event["event_id"],
            raw_event=event,
        )

    async def create_room_name(self, event, roomid):
        """Send a RoomName event."""
        return events.RoomName(
            name=event["content"]["name"],
            user=await self.connector.get_nick(roomid, event["sender"]),
            user_id=event["sender"],
            target=roomid,
            connector=self.connector,
            event_id=event["event_id"],
            raw_event=event,
        )

    async def create_reaction(self, event, roomid):
        """Send a Reaction event."""
        # Resolve the message being reacted to so it can be linked.
        parent_event_id = event["content"]["m.relates_to"]["event_id"]
        parent_event = await self.create_event_from_eventid(parent_event_id, roomid)
        return events.Reaction(
            emoji=event["content"]["m.relates_to"]["key"],
            user=await self.connector.get_nick(roomid, event["sender"]),
            user_id=event["sender"],
            target=roomid,
            connector=self.connector,
            event_id=event["event_id"],
            linked_event=parent_event,
            raw_event=event,
        )

    async def create_join_room(self, event, roomid):
        """Send a JoinRoomEvent.

        Other membership changes (leave, invite, ban) are ignored and
        implicitly return None.
        """
        if event["content"]["membership"] == "join":
            return events.JoinRoom(
                user=await self.connector.get_nick(roomid, event["sender"]),
                user_id=event["sender"],
                target=roomid,
                connector=self.connector,
                event_id=event["event_id"],
                raw_event=event,
            )
| {
"repo_name": "FabioRosado/opsdroid",
"path": "opsdroid/connector/matrix/create_events.py",
"copies": "3",
"size": "7538",
"license": "apache-2.0",
"hash": -3481010709602380000,
"line_mean": 36.5024875622,
"line_max": 103,
"alpha_frac": 0.5619527726,
"autogenerated": false,
"ratio": 3.9947005829358773,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6056653355535877,
"avg_score": null,
"num_lines": null
} |
"""A helper module to create opsdroid events from Slack events."""
import logging
from collections import defaultdict
from opsdroid import events
from opsdroid.connector.slack import events as slack_events
_LOGGER = logging.getLogger(__name__)
class SlackEventCreator(events.EventCreator):
"""Create opsdroid events from Slack ones."""
def __init__(self, connector, *args, **kwargs):
"""Initialise the event creator."""
super().__init__(connector, *args, **kwargs)
self.connector = connector
self.event_types["message"] = self.create_room_message
self.event_types["channel_created"] = self.create_newroom
self.event_types["channel_archive"] = self.archive_room
self.event_types["channel_unarchive"] = self.unarchive_room
self.event_types["team_join"] = self.create_join_group
self.event_types["channel_rename"] = self.channel_name_changed
self.event_types["pin_added"] = self.message_pinned
self.event_types["pin_removed"] = self.message_unpinned
self.event_types["block_actions"] = self.block_actions_triggered
self.event_types["message_action"] = self.message_action_triggered
self.event_types["view_submission"] = self.view_submission_triggered
self.event_types["view_closed"] = self.view_closed_triggered
self.message_subtypes = defaultdict(lambda: self.create_message)
self.message_subtypes.update(
{
"message": self.create_message,
"bot_message": self.handle_bot_message,
"message_changed": self.edit_message,
"channel_join": self.handle_channel_join,
}
)
async def create_room_message(self, event, channel):
"""Dispatch a message event of arbitrary subtype."""
channel = event["channel"]
msgtype = event["subtype"] if "subtype" in event.keys() else "message"
return await self.message_subtypes[msgtype](event, channel)
async def _get_user_name(self, event):
try:
user_info = await self.connector.lookup_username(event["user"])
except (ValueError, KeyError) as error:
_LOGGER.error(_("Username lookup failed for %s."), error)
return
return user_info["name"]
async def handle_bot_message(self, event, channel):
"""Check that a bot message is opsdroid if not create the message"""
if event["bot_id"] != self.connector.bot_id:
return await self.create_message(event, channel)
async def create_message(self, event, channel):
"""Send a Message event."""
user_name = await self._get_user_name(event)
if user_name is None:
return
_LOGGER.debug("Replacing userids in message with usernames")
text = await self.connector.replace_usernames(event["text"])
return events.Message(
text,
user=user_name,
user_id=event["user"],
target=event["channel"],
connector=self.connector,
event_id=event["ts"],
raw_event=event,
)
async def edit_message(self, event, channel):
"""Send an EditedMessage event."""
user_name = await self._get_user_name(event["message"])
if user_name is None:
return
_LOGGER.debug("Replacing userids in message with usernames")
text = await self.connector.replace_usernames(event["message"]["text"])
return events.EditedMessage(
text,
user=user_name,
user_id=event["message"]["user"],
target=event["channel"],
connector=self.connector,
event_id=event["ts"],
linked_event=event["message"]["ts"],
raw_event=event,
)
async def handle_channel_join(self, event, channel):
"""Send a JoinRoom event when a user joins the channel."""
user_id = event["user"]
user_info = await self.connector.lookup_username(user_id)
return events.JoinRoom(
user_id=user_id,
user=user_info["name"],
target=event["channel"],
connector=self.connector,
event_id=event["event_ts"],
raw_event=event,
)
async def create_newroom(self, event, channel):
"""Send a NewRoom event."""
user_id = event["channel"]["creator"]
user_info = await self.connector.lookup_username(user_id)
name = event["channel"].get("name_normalized", event["channel"].get("name"))
return events.NewRoom(
name=name,
params=None,
user=user_info["name"],
user_id=user_id,
target=event["channel"]["id"],
connector=self.connector,
event_id=event["event_ts"],
raw_event=event,
)
async def archive_room(self, event, channel):
"""Send a ChannelArchived event."""
return slack_events.ChannelArchived(
target=event["channel"],
connector=self.connector,
event_id=event["event_ts"],
raw_event=event,
)
async def unarchive_room(self, event, channel):
"""Send a ChannelUnarchived event."""
return slack_events.ChannelUnarchived(
target=event["channel"],
connector=self.connector,
event_id=event["event_ts"],
raw_event=event,
)
async def create_join_group(self, event, channel):
"""Send a JoinGroup event."""
user_info = await self.connector.lookup_username(event["user"]["id"])
return events.JoinGroup(
target=event["user"]["team_id"],
connector=self.connector,
event_id=event["event_ts"],
raw_event=event,
user_id=event["user"]["id"],
user=user_info["name"],
)
async def channel_name_changed(self, event, channel):
"""Send a RoomName event."""
return events.RoomName(
name=event["channel"]["name"],
target=event["channel"]["id"],
connector=self.connector,
event_id=event["event_ts"],
raw_event=event,
)
async def message_pinned(self, event, channel):
"""Send a PinMessage event."""
return events.PinMessage(
linked_event=event["item"],
target=event["channel_id"],
connector=self.connector,
event_id=event["event_ts"],
raw_event=event,
)
async def message_unpinned(self, event, channel):
"""Send an UnpinMessage event."""
return events.UnpinMessage(
linked_event=event["item"],
target=event["channel_id"],
connector=self.connector,
event_id=event["event_ts"],
raw_event=event,
)
async def block_actions_triggered(self, event, channel):
"""Send a BlockActions event."""
block_actions = []
for action in event["actions"]:
block_action = slack_events.BlockActions(
event,
user=event["user"]["id"],
target=event["channel"]["id"],
connector=self.connector,
)
action_value = None
if action["type"] == "button":
action_value = action["value"]
elif action["type"] in ["overflow", "static_select"]:
action_value = action["selected_option"]["value"]
elif action["type"] == "datepicker":
action_value = action["selected_date"]
elif action["type"] == "multi_static_select":
action_value = [v["value"] for v in action["selected_options"]]
if action_value:
block_action.update_entity("value", action_value)
block_actions.append(block_action)
return block_actions
async def message_action_triggered(self, event, channel):
"""Send a MessageAction event."""
return slack_events.MessageAction(
event,
user=event["user"]["id"],
target=event["channel"]["id"],
connector=self.connector,
)
async def view_submission_triggered(self, event, channel):
    """Build a ViewSubmission event, targeted back at the submitting user."""
    submitter = event["user"]["id"]
    return slack_events.ViewSubmission(
        event, user=submitter, target=submitter, connector=self.connector
    )
async def view_closed_triggered(self, event, channel):
    """Build a ViewClosed event, targeted back at the closing user."""
    closer = event["user"]["id"]
    return slack_events.ViewClosed(
        event, user=closer, target=closer, connector=self.connector
    )
| {
"repo_name": "jacobtomlinson/opsdroid",
"path": "opsdroid/connector/slack/create_events.py",
"copies": "2",
"size": "8878",
"license": "apache-2.0",
"hash": 3450214042172813300,
"line_mean": 33.277992278,
"line_max": 84,
"alpha_frac": 0.5698355485,
"autogenerated": false,
"ratio": 4.286817962337035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5856653510837034,
"avg_score": null,
"num_lines": null
} |
"""A helper module to generate fake records for testing.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import sys
import re
import json
import argparse
import random
from faker import Faker
# Shared Faker instance used by create_population() below.
fake = Faker()

# Parse arguments
# NOTE(review): --N defaults to the int 100, but a value given on the command
# line arrives as a str; create_population's caller converts with int().
argparser = argparse.ArgumentParser()
argparser.add_argument("f", metavar="filename",
                       help="JSON filename (file automatically saved to test dir)")
argparser.add_argument("--N", metavar="num_records",
                       help="number of records to create",
                       default=100)
argparser.add_argument("--overwrite", required=False,
                       metavar="True/False", help="overwrite existing files (True/False)",
                       default='True')
argparser.add_argument("--start_date", required=False,
                       metavar="START_DATE",
                       help="earliest year for birthdate (+/-<number of years>y)",
                       default="-99y")
argparser.add_argument("--end_date", required=False,
                       metavar="END_DATE", help="latest year for birthdate",
                       default="-1y")
args = argparser.parse_args()
def create_population(N, start_date, end_date):
    """Create a fake population of ``N`` profiles.

    A population consists of N profiles of various individuals, where each
    profile includes at least a name, address, and SSN. Generation is not
    100% reproducible, so each new population will essentially be random.

    Args:
        N (int): Number of profiles to create.
        start_date (str): Earliest date to use for birthdate. In the form of
            ``+/-<number of years>y``.
        end_date (str): Latest date to use for birthdate, in the same format
            as ``start_date``.

    Returns:
        tuple: A tuple of dictionaries, each representing an individual profile,
            sorted by profile name.
    """
    profile_fields = ['address', 'ssn', 'blood_group']
    population = [fake.profile(fields=profile_fields) for i in range(N)]
    gender_change = {'M': 'F', 'F': 'M'}
    for i in range(N):
        record = population[i]
        # Birthdate -- bug fix: the range was previously hard-coded to
        # "-99y"/"-1y", silently ignoring both function arguments.
        birthdate = fake.date_time_between(start_date=start_date,
                                           end_date=end_date)
        record['birthdate'] = birthdate.strftime("%Y-%m-%d")
        # Name, Sex, and Gender
        record['sex'] = 'F' if random.random() <= 0.60 else 'M'
        has_middle_name = random.random() <= 0.50
        is_married = random.random() <= 0.49
        sex = record['sex']
        if sex == 'F':
            record['forename'] = fake.first_name_female()
            if has_middle_name:
                record['mid_forename'] = fake.last_name()
            record['current_surname'] = fake.last_name()
            if is_married:
                record['birth_surname'] = fake.last_name_female()
        else:
            record['forename'] = fake.first_name_male()
            record['mid_forename'] = fake.last_name() if has_middle_name else ''
            record['current_surname'] = fake.last_name_male()
        # A small fraction of records get a gender differing from recorded sex.
        record['gender'] = gender_change[sex] if random.random() < 0.05 else sex
        # Do some manipulation of keys to match expected Profile fields.
        # Faker addresses split on newline/comma into street / city / state+zip.
        address = record.pop('address')
        address = re.split(r'[\n,]', address)
        record['state_province'] = next(s for s in address[-1].split(' ') if s)
        record['postal_code'] = address[-1].split(' ')[-1]
        record['city'] = address[1]
        record['address1'] = address[0]  # First consider address to be one field
        # But then try to split address into two fields and overwrite the field
        # if necessary.
        delimiters = ['Suite', 'Ste', 'Unit', 'Apartment', 'Apt', 'Department',
                      'Dpt']
        for delimiter in delimiters:
            split_address = address[0].split(delimiter)
            if len(split_address) > 1:
                record['address1'] = split_address[0].rstrip()
                record['address2'] = delimiter + split_address[1]
        # Fix SSN key to match Profile fields
        ssn = record.pop('ssn')
        record['national_id1'] = ssn
        # Split birthdate to match Profile fields
        birthdate = record.pop('birthdate')
        birthdate = birthdate.split('-')
        record['birth_year'] = birthdate[0]
        record['birth_month'] = birthdate[1]
        record['birth_day'] = birthdate[2]
        # Fix 0+/- blood type to be O+/-
        blood = record.pop('blood_group')
        record['blood_type'] = blood.replace('0', 'O')
    return tuple(sorted(population, key=lambda profile: profile['forename']))
if __name__ == '__main__':
    # Abort if file exists and overwrite is False
    # NOTE(review): --overwrite is compared against the literal string 'True',
    # not a boolean -- any other spelling (e.g. "true") counts as False.
    this_dir = os.path.dirname(os.path.realpath(__file__))
    if args.f in os.listdir(this_dir) and args.overwrite != 'True':
        print("Filename exists...aborting.")
    elif args.f not in os.listdir(this_dir) or args.overwrite == 'True':
        # Generate records
        records = create_population(int(args.N), args.start_date, args.end_date)
        # Write records (always saved next to this script)
        filepath = os.path.join(this_dir, args.f)
        with open(filepath, 'w+') as data_file:
            json.dump(records, data_file)
        # Confirm success
        print("Data written to {}".format(filepath))
| {
"repo_name": "nsh87/ehrcorral",
"path": "tests/gen_records.py",
"copies": "1",
"size": "5213",
"license": "isc",
"hash": 4628451986145361000,
"line_mean": 39.7265625,
"line_max": 81,
"alpha_frac": 0.6182620372,
"autogenerated": false,
"ratio": 3.774800868935554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9837841657711479,
"avg_score": 0.011044249684814942,
"num_lines": 128
} |
"""A helper script to generate the external examples gallery"""
import os
def format_icon(title, description, link, image):
    """Render one gallery thumbnail plus its hidden toctree entry as rST."""
    template = r"""
.. raw:: html
<div class="sphx-glr-thumbcontainer" tooltip="{}">
.. only:: html
.. figure:: ./images/external-examples/{}
:target: {}
{}
.. raw:: html
</div>
.. toctree::
:hidden:
{} <{}>
"""
    return template.format(description, image, link, title, title, link)
class Example():
    """One external-example card: title, blurb, target URL and thumbnail."""

    def __init__(self, title, description, link, image):
        self.title, self.description = title, description
        self.link, self.image = link, image

    def format(self):
        """Return the rST markup for this example's gallery icon."""
        return format_icon(self.title, self.description, self.link, self.image)
###############################################################################
# Registry of external examples shown in the gallery, keyed by a short slug.
# Entries are rendered latest-first (the writer iterates in reverse order).
articles = dict(
    omf=Example(title="3D visualization for the Open Mining Format (omf)",
                description="3D visualization for the Open Mining Format (omf)",
                link="https://opengeovis.github.io/omfvista/examples/index.html",
                image="omfvista.png"),
    discretize=Example(title="3D Rendering with Discretize",
                       description="3D Rendering with Discretize",
                       link="http://discretize.simpeg.xyz/en/master/examples/plot_pyvista_laguna.html",
                       image="discretize.png"),
    open_foam=Example(title="OpenFOAM Rendering",
                      description="OpenFOAM Rendering",
                      link="https://pswpswpsw.github.io/posts/2018/09/blog-post-modify-vtk-openfoam/",
                      image="open-foam.png"),
    aero_sandbox=Example(title="AeroSandbox",
                         description="AeroSandbox",
                         link="https://peterdsharpe.github.io/AeroSandbox/",
                         image="AeroSandbox.png"),
    forge=Example(title="FORGE Geothermal Project",
                  description="FORGE Geothermal Project",
                  link="https://forge.pvgeo.org/project/index.html",
                  image="forge.png"),
    pvgeo=Example(title="PVGeo's example gallery",
                  description="PVGeo's example gallery",
                  link="https://pvgeo.org/examples/index.html",
                  image="pvgeo.png"),
    tetgen=Example(title="TetGen's example gallery",
                   description="TetGen's example gallery",
                   link="http://tetgen.pyvista.org/examples/index.html",
                   image="tetgen.png"),
    mesh_fix=Example(title="PyMeshFix's example gallery",
                     description="PyMeshFix's example gallery",
                     link="http://pymeshfix.pyvista.org/examples/index.html",
                     image="pymeshfix.png"),
    orvisu=Example(title="Orvisu Demo Application",
                   description="Orvisu Demo Application",
                   link="https://github.com/BartheG/Orvisu",
                   image="orvisu.gif"),
    flem=Example(title="FLEM: A diffusive landscape evolution model",
                 description="FLEM: A diffusive landscape evolution model",
                 link="https://github.com/johnjarmitage/flem",
                 image="flem.png"),
    optimization=Example(title="Optimization visualization with PyVista",
                         description="Optimization visualization with PyVista",
                         link="https://gist.github.com/hichamjanati/6668d91848283c31ac18d801552fb582",
                         image="optimization.gif"),
    anvil_cirrus_plumes=Example(title="Anvil Cirrus Plumes",
                                description="Dr. Morgan O'Neill at Stanford University is researching Above Anvil Cirrus Plumes and its dynamics as a hydraulic jump.",
                                link="https://www.youtube.com/watch?v=cCPjnF_vHxw&feature=youtu.be",
                                image="anvil_cirrus_plumes.png"),
    damavand=Example(title="Damavand Volcano",
                     description="Visualize 3D models of Damavand Volcano, Alborz, Iran.",
                     link="https://nbviewer.jupyter.org/github/banesullivan/damavand-volcano/blob/master/Damavand_Volcano.ipynb",
                     image="damavand_volcano.gif"),
    atmos_conv=Example(title="Atmospheric Convection",
                       description="Atmospheric convection plays a key role in the climate of tidally-locked terrestrial exoplanets: insights from high-resolution simulations",
                       link="https://dennissergeev.github.io/exoconvection-apj-2020/",
                       image="atmospheric_convection.jpeg"),
    # Template for adding a new entry:
    # entry=Example(title="",
    #               description="",
    #               link="",
    #               image=""),
)
###############################################################################
def make_example_gallery():
    """Write ``external_examples.rst``: the page listing external examples.

    Removes any stale output file, writes the page header, then appends one
    icon per ``articles`` entry (latest entry first) and a trailing clear div.
    """
    filename = "./external_examples.rst"
    # Start from a clean slate so stale content never survives a rebuild.
    if os.path.exists(filename):
        os.remove(filename)
    with open(filename, "w") as f:
        f.write("""
External Examples
=================
Here are a list of longer, more technical examples of what PyVista can do!
.. caution::
Please note that these examples link to external websites.
If any of these links are broken, please raise an issue on the repository.
Do you have a technical processing workflow or visualization routine you
would like to share?
If so, please consider sharing your work here submitting a PR at
https://github.com/pyvista and we would be glad to add it!
""")
        # Reverse to put the latest items at the top.
        # Bug fix: the loop variable was previously named ``Example``,
        # shadowing the Example class defined above in this module.
        for example in list(articles.values())[::-1]:
            f.write(example.format())
        f.write("""
.. raw:: html
<div class="sphx-glr-clear"></div>
""")
    return
| {
"repo_name": "akaszynski/vtkInterface",
"path": "docs/make_external_gallery.py",
"copies": "1",
"size": "5208",
"license": "mit",
"hash": 2098677476674136600,
"line_mean": 32.3846153846,
"line_max": 161,
"alpha_frac": 0.643625192,
"autogenerated": false,
"ratio": 3.5917241379310343,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47353493299310345,
"avg_score": null,
"num_lines": null
} |
"""A helper to make an HTML table from a list of dicts, objects, or sequences.
A set of CSS styles complementing this helper is in
"webhelpers2_grid/html/public/stylesheets/grid.css". To use them, include the
stylesheet in your application and set your <table> class to "stylized".
"""
from webhelpers2.html.builder import HTML, literal
# Sentinel returned when attribute lookup on a record finds nothing.
GRID_NOT_FOUND = "__GRID_NO_ATTR_FOUND__"


def dummy_value(*args):
    """Fallback accessor: accept any arguments and yield ``None``."""
    return None
class Grid(object):
    """
    This class is designed to aid the programmer in the task of creating
    tables/grids - structures that are mostly built from datasets.
    """

    def __init__(
        self,
        itemlist,
        columns,
        column_labels=None,
        column_formats=None,
        start_number=1,
        order_column=None,
        order_direction=None,
        request=None,
        url_generator=None,
        exclude_ordering=None,
        **kw
    ):
        """
        :param itemlist: items to render
        :param columns: names used to for column information
        :param column_labels: mapping of column label overrides
        :param column_formats: mapping of callables for column rendering
        :param start_number: number of first item in grid
        :param order_column: column that is being used for ordering
        :param order_direction: [asc|desc] string informing of order direction
        :param request: request object
        :param url_generator: url generator function
        :param exclude_ordering: columns that must NOT render as order links;
            defaults to *all* columns, i.e. no column is clickable
        :param kw: additional keyword parameters will be stored as ``additional_kw``
        :return: Grid
        """
        self.labels = column_labels or {}
        # Default: every column excluded from ordering (nothing clickable).
        self.exclude_ordering = (
            exclude_ordering if exclude_ordering is not None else columns
        )
        self.itemlist = itemlist
        self.columns = columns
        self.column_formats = column_formats or {}
        # "_numbered" is a synthetic column rendering the running row number.
        if "_numbered" in columns:
            self.labels["_numbered"] = "#"
        if "_numbered" not in self.column_formats:
            self.column_formats["_numbered"] = self.numbered_column_format
        self.start_number = start_number
        self.order_dir = order_direction
        self.order_column = order_column
        # backward compatibility with old pylons grid
        if not hasattr(self, "request"):
            self.request = request
        self.url_generator = url_generator
        self.additional_kw = kw

    def calc_row_no(self, i):
        """Return the displayed row number for item index *i*.

        Counts up from ``start_number`` for ascending (or no) ordering,
        and down for descending ordering.
        """
        if self.order_dir in ("asc", None):
            return self.start_number + i
        else:
            return self.start_number - i

    def make_headers(self):
        """Render all header cells, one per column."""
        header_columns = []
        for i, column in enumerate(self.columns):
            # let"s generate header column contents
            if column in self.labels:
                label_text = self.labels[column]
            else:
                # Fall back to a title-cased version of the column name.
                label_text = column.replace("_", " ").title()
            # handle non clickable columns
            if column in self.exclude_ordering:
                header = self.default_header_column_format(i + 1, column, label_text)
            # handle clickable columns
            else:
                header = self.generate_header_link(i + 1, column, label_text)
            header_columns.append(header)
        return HTML(*header_columns)

    def make_columns(self, i, record):
        """Render the cells of one data row (item index *i*)."""
        columns = []
        row_no = self.calc_row_no(i)
        for col_num, column in enumerate(self.columns):
            # A per-column formatter callable overrides the default renderer.
            if column in self.column_formats:
                r = self.column_formats[column](col_num + 1, row_no, record)
            else:
                r = self.default_column_format(col_num + 1, row_no, record, column)
            columns.append(r)
        return HTML(*columns)

    def __html__(self):
        """ renders the grid """
        records = []
        # first render headers record
        headers = self.make_headers()
        r = self.default_header_record_format(headers)
        records.append(r)
        # now lets render the actual item grid
        for i, record in enumerate(self.itemlist):
            columns = self.make_columns(i, record)
            # Subclasses may define custom_record_format to control row markup.
            if hasattr(self, "custom_record_format"):
                r = self.custom_record_format(i + 1, record, columns)
            else:
                r = self.default_record_format(i + 1, record, columns)
            records.append(r)
        return HTML(*records)

    def __str__(self):
        # str() of a grid is its rendered markup.
        return self.__html__()

    def generate_header_link(self, column_number, column, label_text):
        """ This handles generation of link and then decides to call
        ``self.default_header_ordered_column_format``
        or
        ``self.default_header_column_format``
        based on whether current column is the one that is used for sorting.
        """
        # Is the current column the one we're ordering on?
        if column == self.order_column:
            return self.default_header_ordered_column_format(
                column_number, column, label_text
            )
        else:
            return self.default_header_column_format(column_number, column, label_text)

    def default_column_format(self, column_number, i, record, column_name):
        """Render a single cell; supports both attribute- and dict-style records."""
        class_name = "c%s" % column_number
        # first try to lookup property
        col_value = getattr(record, column_name, GRID_NOT_FOUND)
        # if this fails lookup via __getattr__
        if col_value is GRID_NOT_FOUND:
            # Dict-like records expose .get(); otherwise dummy_value yields None.
            col_value = getattr(record, "get", dummy_value)(column_name)
        return HTML.tag("td", col_value, class_=class_name)

    def numbered_column_format(self, column_number, i, record):
        """Render the synthetic "_numbered" column: the row number itself."""
        class_name = "c%s" % column_number
        return HTML.tag("td", i, class_=class_name)

    def default_record_format(self, i, record, columns):
        """Wrap a row's cells in a <tr> with odd/even zebra-stripe classes."""
        if i % 2 == 0:
            class_name = "even r%s" % i
        else:
            class_name = "odd r%s" % i
        return HTML.tag("tr", columns, class_=class_name)

    def default_header_record_format(self, headers):
        """Wrap the header cells in a <tr class="header">."""
        return HTML.tag("tr", headers, class_="header")

    def default_header_ordered_column_format(
        self, column_number, column_name, header_label
    ):
        """Render the header cell of the column currently ordered on."""
        # Unicode triangles indicate the active sort direction.
        dir_char = "▲" if self.order_dir == "asc" else "▼"
        header_label = HTML(
            header_label, HTML.tag("span", literal(dir_char), class_="marker")
        )
        if column_name == "_numbered":
            column_name = "numbered"
        class_name = "c%s ordering %s %s" % (column_number, self.order_dir, column_name)
        return HTML.tag("td", header_label, class_=class_name)

    def default_header_column_format(self, column_number, column_name, header_label):
        """Render a header cell for a column that is not currently ordered."""
        if column_name == "_numbered":
            column_name = "numbered"
        if column_name in self.exclude_ordering:
            class_name = "c%s %s" % (column_number, column_name)
            return HTML.tag("td", header_label, class_=class_name)
        else:
            # Orderable but not active: reserve an empty marker span.
            header_label = HTML(header_label, HTML.tag("span", class_="marker"))
            class_name = "c%s ordering %s" % (column_number, column_name)
            return HTML.tag("td", header_label, class_=class_name)
class ObjectGrid(Grid):
    """Backwards-compatibility alias of :class:`Grid` (old pylons-era name)."""
class ListGrid(Grid):
    """ A grid class for a sequence of lists.

    This grid class assumes that the rows are lists rather than dicts, and
    uses subscript access to retrieve the column values. Some constructor args
    are also different.

    If ``columns`` is not specified in the constructor, it will examine
    ``itemlist[0]`` to determine the number of columns, and display them in
    order. This works only if ``itemlist`` is a sequence and not just an
    iterable. Alternatively, you can pass an int to specify the number of
    columns, or a list of int subscripts to override the column order.

    Examples::

        grid = ListGrid(list_data)
        grid = ListGrid(list_data, columns=4)
        grid = ListGrid(list_data, columns=[1, 3, 2, 0])

    ``column_labels`` may be a list of strings. The class will calculate the
    appropriate subscripts for the superclass dict.
    """

    def __init__(self, itemlist, columns=None, column_labels=None, *args, **kw):
        """
        :param itemlist: sequence of list-like rows
        :param columns: int (column count), list of subscripts, or None to
            derive the columns from the first row
        :param column_labels: list of header labels (converted to the dict
            form the superclass expects)
        :param args: positional args forwarded to :class:`Grid`
        :param kw: keyword args forwarded to :class:`Grid`
        :return:
        """
        if columns is None:
            # Robustness fix: an empty itemlist used to raise IndexError here;
            # fall back to zero columns instead.
            columns = range(len(itemlist[0])) if itemlist else []
        elif isinstance(columns, int):
            columns = range(columns)
        # The superclass requires the ``columns`` elements to be strings.
        super_columns = [str(x) for x in columns]
        # The superclass requires ``column_labels`` to be a dict.
        super_labels = column_labels
        if isinstance(column_labels, (list, tuple)):
            super_labels = dict(zip(super_columns, column_labels))
        Grid.__init__(self, itemlist, super_columns, super_labels, *args, **kw)

    def default_column_format(self, column_number, i, record, column_name):
        """Render one cell by subscripting the row with the column index."""
        class_name = "c%s" % (column_number)
        return HTML.tag("td", record[int(column_name)], class_=class_name)
| {
"repo_name": "ergo/webhelpers_grid2",
"path": "webhelpers2_grid/__init__.py",
"copies": "1",
"size": "9159",
"license": "bsd-3-clause",
"hash": -8955709325842078000,
"line_mean": 36.5368852459,
"line_max": 88,
"alpha_frac": 0.6029042472,
"autogenerated": false,
"ratio": 4.029476462824461,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5132380710024461,
"avg_score": null,
"num_lines": null
} |
"""A helper utility to automatically create a database for the DLI App
Author: Logan Gore
This file is responsible for (at the bare minimum) creating the database and
all associated tables for the DLI App. It will import all appropriate models
and ensure that a table for each model exists. If given the command-line
option "--populate" though, it will even populate the database with common
default values.
"""
import argparse
import os
import sys
from dli_app import db
from dli_app.mod_auth.models import Department
from dli_app.mod_auth.models import Location
from dli_app.mod_auth.models import RegisterCandidate
from dli_app.mod_auth.models import User
from dli_app.mod_admin.models import ErrorReport
from dli_app.mod_reports.models import Chart
from dli_app.mod_reports.models import ChartType
from dli_app.mod_reports.models import Field
from dli_app.mod_reports.models import FieldData
from dli_app.mod_reports.models import FieldType
from dli_app.mod_reports.models import Report
from dli_app.mod_reports.models import Tag
from dli_app.mod_wiki.models import WikiPage
# Default markdown content for the wiki's landing page.
WIKIPAGE_HOME_CONTENT = """
# DLI Policies Wiki
Welcome to DLI's Policies Wiki!
"""

# Command-line interface for this DB creation tool.
PARSER = argparse.ArgumentParser(description='DLI App DB Creation Tool')
PARSER.add_argument(
    '-d', '--drop', action='store_true',
    help='Drop existing DB tables before recreation'
)
PARSER.add_argument(
    '-p', '--populate', action='store_true',
    help='Populate the DB with default values after creation'
)
PARSER.add_argument(
    '-v', '--verbose', action='store_true',
    help='Show extra output about which stage the script is executing'
)
ARGS = PARSER.parse_args()
def vprint(s='', endl='\n'):
    """Emit *s* followed by *endl* on stderr, but only when --verbose is set."""
    if ARGS.verbose:
        print(s, end=endl, file=sys.stderr)
def populate_db_departments():
    """Insert the default set of DLI departments."""
    names = [
        'Account Development',
        'Composition',
        'Customer Service',
        'Electronic Art',
        'Order Processing',
        'Plates',
        'Press',
        'Process Color',
        'Shipping',
        'General Administrative / Other',
    ]
    db.session.add_all([Department(name) for name in names])
    db.session.commit()
def populate_db_locations():
    """Insert the default set of DLI locations."""
    names = ('New Albany', 'Omaha')
    db.session.add_all([Location(name) for name in names])
    db.session.commit()
def populate_db_users():
    """Insert the placeholder "Nobody" user.

    The password comes from the DLI_REPORTS_ADMIN_PASSWORD environment
    variable; the user is attached to the first location and department.
    """
    nobody = User(
        name='Nobody',
        email='nobody@dlireports.com',
        password=os.environ['DLI_REPORTS_ADMIN_PASSWORD'],
        location=Location.query.first(),
        department=Department.query.first(),
    )
    # The "Nobody" user doubles as the default administrator.
    nobody.is_admin = True
    db.session.add_all([nobody])
    db.session.commit()
def populate_db_fieldtypes():
    """Insert the set of supported report field types."""
    names = ('currency', 'double', 'integer', 'string', 'time')
    db.session.add_all([FieldType(name) for name in names])
    db.session.commit()
def populate_db_fields():
"""Populate the database Field model
Populate the database Field model by adding all fields that DLI currently
uses on its 8:40 morning report. This will allow us to quickly have a
populated and complete database whenever we have to drop all tables.
"""
from dli_app.mod_reports.models import FieldTypeConstants
FieldTypeConstants.reload()
ad = Department.query.filter_by(name='Account Development').first()
comp = Department.query.filter_by(name='Composition').first()
cs = Department.query.filter_by(name='Customer Service').first()
ea = Department.query.filter_by(name='Electronic Art').first()
op = Department.query.filter_by(name='Order Processing').first()
plates = Department.query.filter_by(name='Plates').first()
press = Department.query.filter_by(name='Press').first()
pc = Department.query.filter_by(name='Process Color').first()
shipping = Department.query.filter_by(name='Shipping').first()
fields = [
# Department: Account Development
Field(
name='Orders Entered',
ftype=FieldTypeConstants.INTEGER,
department=ad,
),
Field(
name='Sales Entered',
ftype=FieldTypeConstants.CURRENCY,
department=ad,
),
Field(
name='Orders Pending',
ftype=FieldTypeConstants.INTEGER,
department=ad,
),
Field(
name='Sales Pending',
ftype=FieldTypeConstants.CURRENCY,
department=ad,
),
Field(
name='Total In House Sales',
ftype=FieldTypeConstants.CURRENCY,
department=ad,
),
Field(
name='New Albany Sales',
ftype=FieldTypeConstants.CURRENCY,
department=ad,
),
Field(
name='Omaha Sales',
ftype=FieldTypeConstants.CURRENCY,
department=ad,
),
Field(
name='Quotes Given',
ftype=FieldTypeConstants.INTEGER,
department=ad,
),
Field(
name='Quotes Pending',
ftype=FieldTypeConstants.INTEGER,
department=ad,
),
# Department: Composition
Field(
name='Orders',
ftype=FieldTypeConstants.INTEGER,
department=comp,
),
Field(
name='Copies',
ftype=FieldTypeConstants.INTEGER,
department=comp,
),
Field(
name='In Department',
ftype=FieldTypeConstants.INTEGER,
department=comp,
),
# Department: Customer Service
Field(
name='In House',
ftype=FieldTypeConstants.INTEGER,
department=cs,
),
Field(
name='Due',
ftype=FieldTypeConstants.INTEGER,
department=cs,
),
Field(
name='CMYK',
ftype=FieldTypeConstants.INTEGER,
department=cs,
),
Field(
name='Redos',
ftype=FieldTypeConstants.INTEGER,
department=cs,
),
Field(
name='DLI On Time Percentage',
ftype=FieldTypeConstants.DOUBLE,
department=cs,
),
Field(
name='CMYK On Time Percentage',
ftype=FieldTypeConstants.DOUBLE,
department=cs,
),
Field(
name='Omaha On Time Percentage',
ftype=FieldTypeConstants.DOUBLE,
department=cs,
),
Field(
name='Total On Time Percentage',
ftype=FieldTypeConstants.DOUBLE,
department=cs,
),
Field(
name='Omaha Redos',
ftype=FieldTypeConstants.INTEGER,
department=cs,
),
Field(
name='Total Lates',
ftype=FieldTypeConstants.INTEGER,
department=cs,
),
Field(
name='2-Day Lates',
ftype=FieldTypeConstants.INTEGER,
department=cs,
),
Field(
name='3-Day Lates',
ftype=FieldTypeConstants.INTEGER,
department=cs,
),
Field(
name='CMYK Lates',
ftype=FieldTypeConstants.INTEGER,
department=cs,
),
Field(
name='Omaha Lates',
ftype=FieldTypeConstants.INTEGER,
department=cs,
),
Field(
name='CS Number of Calls',
ftype=FieldTypeConstants.INTEGER,
department=cs,
),
Field(
name='CS Number Answered',
ftype=FieldTypeConstants.INTEGER,
department=cs,
),
Field(
name='CS Percentage Within Service',
ftype=FieldTypeConstants.DOUBLE,
department=cs,
),
Field(
name='CS Average Delay',
ftype=FieldTypeConstants.TIME,
department=cs,
),
Field(
name='Status Number of Calls',
ftype=FieldTypeConstants.INTEGER,
department=cs,
),
Field(
name='Status Number Answered',
ftype=FieldTypeConstants.INTEGER,
department=cs,
),
Field(
name='Status Percentage Within Service',
ftype=FieldTypeConstants.DOUBLE,
department=cs,
),
Field(
name='Status Average Delay',
ftype=FieldTypeConstants.TIME,
department=cs,
),
# Department: Electronic Art
Field(
name='Orders',
ftype=FieldTypeConstants.INTEGER,
department=ea,
),
Field(
name='Copies',
ftype=FieldTypeConstants.INTEGER,
department=ea,
),
Field(
name='In Department',
ftype=FieldTypeConstants.INTEGER,
department=ea,
),
# Department: Order Processing
Field(
name='Adjusted Sales',
ftype=FieldTypeConstants.CURRENCY,
department=op,
),
Field(
name='Backlog',
ftype=FieldTypeConstants.CURRENCY,
department=op,
),
Field(
name='Total Labels Entered',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Manual Entered',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='EW Entered',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Total Stamps Entered',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Total Entered',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='EW Pending',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='EW OE Average',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Percentage in EW',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='EW Reorders',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Manual to Composition',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Manual to Shipping',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Manual to Press',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Total Manual',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Total DLI EW',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Total Omaha EW',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Total into Production',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Quotes Received (#)',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Quotes Received ($)',
ftype=FieldTypeConstants.CURRENCY,
department=op,
),
Field(
name='Screenprint Received (#)',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Screenprint Received ($)',
ftype=FieldTypeConstants.CURRENCY,
department=op,
),
Field(
name='Magnets Received (#)',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Magnets Received ($)',
ftype=FieldTypeConstants.CURRENCY,
department=op,
),
Field(
name='Online Quotes',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Total Quotes',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Online Quotes Percentage',
ftype=FieldTypeConstants.DOUBLE,
department=op,
),
Field(
name='Web Order Status',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='LabelNet Orders',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='LabelNet Sales',
ftype=FieldTypeConstants.CURRENCY,
department=op,
),
Field(
name='Manual Left Over',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='EW Left Over',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Quotes Left Over',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Omaha Left Over',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='OP Catalog Requests Given',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='OP Catalog Requests Pending',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='CQ Requests Given',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='CQ Requests Pending',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='OP QA Pending',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Quote QA Pending',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='CMYK QA Pending',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='OP Number of Calls',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='OP Number Answered',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='OP Percentage Within Service',
ftype=FieldTypeConstants.DOUBLE,
department=op,
),
Field(
name='OP Average Delay',
ftype=FieldTypeConstants.TIME,
department=op,
),
Field(
name='CQ Number of Calls',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='CQ Number Answered',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='CQ Percentage Within Service',
ftype=FieldTypeConstants.DOUBLE,
department=op,
),
Field(
name='CQ Average Delay',
ftype=FieldTypeConstants.TIME,
department=op,
),
Field(
name='DLI EW Projection',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Omaha EW Projection',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Manual Projection',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Saturday Projection',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
Field(
name='Total Projection',
ftype=FieldTypeConstants.INTEGER,
department=op,
),
# Department: Plates
Field(
name='To Press Due (New Albany)',
ftype=FieldTypeConstants.INTEGER,
department=plates,
),
Field(
name='To Press Late (New Albany)',
ftype=FieldTypeConstants.INTEGER,
department=plates,
),
Field(
name='To Press Total (New Albany)',
ftype=FieldTypeConstants.INTEGER,
department=plates,
),
Field(
name='To Press Due (Omaha)',
ftype=FieldTypeConstants.INTEGER,
department=plates,
),
Field(
name='To Press Late (Omaha)',
ftype=FieldTypeConstants.INTEGER,
department=plates,
),
Field(
name='To Press Total (Omaha)',
ftype=FieldTypeConstants.INTEGER,
department=plates,
),
Field(
name='To Press Due (Process)',
ftype=FieldTypeConstants.INTEGER,
department=plates,
),
Field(
name='To Press Late (Process)',
ftype=FieldTypeConstants.INTEGER,
department=plates,
),
Field(
name='To Press Total (Process)',
ftype=FieldTypeConstants.INTEGER,
department=plates,
),
Field(
name='Total Orders',
ftype=FieldTypeConstants.INTEGER,
department=plates,
),
Field(
name='Total Copies',
ftype=FieldTypeConstants.INTEGER,
department=plates,
),
Field(
name='Saturday Orders',
ftype=FieldTypeConstants.INTEGER,
department=plates,
),
Field(
name='Saturday Copies',
ftype=FieldTypeConstants.INTEGER,
department=plates,
),
Field(
name='In Department Due',
ftype=FieldTypeConstants.INTEGER,
department=plates,
),
Field(
name='In Department Late',
ftype=FieldTypeConstants.INTEGER,
department=plates,
),
Field(
name='In Department Total',
ftype=FieldTypeConstants.INTEGER,
department=plates,
),
Field(
name='Press Returns',
ftype=FieldTypeConstants.INTEGER,
department=plates,
),
Field(
name='Prepress Returns',
ftype=FieldTypeConstants.INTEGER,
department=plates,
),
# Department: Press
Field(
name='Day Orders',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Day Copies',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Night Orders',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Night Copies',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='3rd Shift Orders',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='3rd Shift Copies',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Saturday Orders',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Saturday Copies',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Total Orders',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Total Copies',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Average Imp',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Average Tri',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Average Process',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Average Laser',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Average 650',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Average 650C',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Average 450',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Average HP',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Average HP Total',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Total Lates',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Imp Lates',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Tri Lates',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='450 Lates',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='650 Lates',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Digital Spot Lates',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Ditial CMYK Lates',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Flexo CMYK Lates',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='In Department',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='To Print',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Projected',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Days',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Nights',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='3rd Shift',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Saturdays',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Orders on Press: Imp',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Orders on Press: Imp Status',
ftype=FieldTypeConstants.STRING,
department=press,
),
Field(
name='Orders on Press: C#',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Orders on Press: C# Status',
ftype=FieldTypeConstants.STRING,
department=press,
),
Field(
name='Orders on Press: FF/EMB',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Orders on Press: FF/EMB Status',
ftype=FieldTypeConstants.STRING,
department=press,
),
Field(
name='Orders on Press: 650',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Orders on Press: 650 Status',
ftype=FieldTypeConstants.STRING,
department=press,
),
Field(
name='Orders on Press: 650C',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Orders on Press: 650C Status',
ftype=FieldTypeConstants.STRING,
department=press,
),
Field(
name='Orders on Press: 450',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Orders on Press: 450 Status',
ftype=FieldTypeConstants.STRING,
department=press,
),
Field(
name='Orders on Press: REW',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Orders on Press: REW Status',
ftype=FieldTypeConstants.STRING,
department=press,
),
Field(
name='Orders on Press: P Laser',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Orders on Press: P Laser Status',
ftype=FieldTypeConstants.STRING,
department=press,
),
Field(
name='Orders on Press: BLASER',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Orders on Press: BLASER Status',
ftype=FieldTypeConstants.STRING,
department=press,
),
Field(
name='Orders on Press: Process',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Orders on Press: Process Status',
ftype=FieldTypeConstants.STRING,
department=press,
),
Field(
name='Orders on Press: TRI/UB',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Orders on Press: TRI/UB Status',
ftype=FieldTypeConstants.STRING,
department=press,
),
Field(
name='Orders on Press: HP',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Orders on Press: HP Status',
ftype=FieldTypeConstants.STRING,
department=press,
),
Field(
name='Orders on Press: ABG',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Orders on Press: ABG Status',
ftype=FieldTypeConstants.STRING,
department=press,
),
Field(
name='Orders on Press: OCE',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Orders on Press: OCE Status',
ftype=FieldTypeConstants.STRING,
department=press,
),
Field(
name='Orders on Press: Jet',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Orders on Press: Jet Status',
ftype=FieldTypeConstants.STRING,
department=press,
),
Field(
name='Orders on Press: RDC',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Orders on Press: RDC Status',
ftype=FieldTypeConstants.STRING,
department=press,
),
Field(
name='Orders on Press: LDC',
ftype=FieldTypeConstants.INTEGER,
department=press,
),
Field(
name='Orders on Press: LDC Status',
ftype=FieldTypeConstants.STRING,
department=press,
),
# Department: Process Color
Field(
name='Digital in Production',
ftype=FieldTypeConstants.INTEGER,
department=pc,
),
Field(
name='Digital In Department',
ftype=FieldTypeConstants.INTEGER,
department=pc,
),
Field(
name='Flexo in Production',
ftype=FieldTypeConstants.INTEGER,
department=pc,
),
Field(
name='Flexo In Department',
ftype=FieldTypeConstants.INTEGER,
department=pc,
),
# Department: Shipping
Field(
name='Day Orders',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='Day Boxes',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='Night Orders',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='Night Boxes',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='Total Orders',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='Total Copies',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='Saturday Orders',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='Saturday Copies',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='Week Orders',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='Week Copies',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='Shipped Early',
ftype=FieldTypeConstants.CURRENCY,
department=shipping,
),
Field(
name='In Department',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='Late Orders',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='Orders Packed',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='SWOG Holding',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='New Albany Rejects',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='Omaha Rejects',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='Projected',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='Days',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='Nights',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='Saturday',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='Number of Trucks',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='Number of Boxes',
ftype=FieldTypeConstants.INTEGER,
department=shipping,
),
Field(
name='Total Truck Sales',
ftype=FieldTypeConstants.CURRENCY,
department=shipping,
),
]
db.session.add_all(fields)
db.session.commit()
def populate_db_tags():
    """Seed the Tag model with the default report tags."""
    for tag_name in ('all', 'dli', 'morning'):
        db.session.add(Tag(tag_name))
    db.session.commit()
def populate_db_reports():
    """Seed the Report model with a default report owned by the first user.

    The report references every Field and every Tag currently in the db,
    so the Field and Tag models must already be populated.
    """
    default_report = Report(
        user=User.query.first(),
        name='8:40 Report',
        fields=Field.query.all(),
        tags=Tag.query.all(),
    )
    db.session.add(default_report)
    db.session.commit()
def populate_db_charttypes():
    """Seed the ChartType model with the supported chart renderings."""
    for ctype_name in ('line', 'bar', 'pie', 'table only'):
        db.session.add(ChartType(ctype_name))
    db.session.commit()
def populate_db_charts():
    """Seed the Chart model with a few example charts.

    Requires the User, Field, Tag, and ChartType models to already be
    populated.
    """
    from dli_app.mod_reports.models import ChartTypeConstants
    ChartTypeConstants.reload()
    owner = User.query.first()
    all_tags = Tag.query.all()
    # Every "Orders on Press" count field, excluding the "... Status" ones.
    press_fields = Field.query.filter(
        Field.name.like('Orders on Press%')).filter(
        ~Field.name.like('%Status')).all()
    charts = [
        Chart(
            name='Adjusted Sales (from start of week)',
            user=owner,
            with_table=True,
            ctype=ChartTypeConstants.LINE,
            fields=[Field.query.filter_by(name='Adjusted Sales').first()],
            tags=all_tags,
        ),
        Chart(
            name='Customer Service On Time Percentage (DLI vs. Omaha)',
            user=owner,
            with_table=True,
            ctype=ChartTypeConstants.BAR,
            fields=[
                Field.query.filter_by(name='DLI On Time Percentage').first(),
                Field.query.filter_by(name='Omaha On Time Percentage').first(),
            ],
            tags=all_tags,
        ),
        Chart(
            name='Press Breakdown',
            user=owner,
            with_table=False,
            ctype=ChartTypeConstants.PIE,
            fields=press_fields,
            tags=all_tags,
        ),
    ]
    db.session.add_all(charts)
    db.session.commit()
def populate_db_wikipages():
    """Seed the WikiPage model with the default home page."""
    home_page = WikiPage(
        name='home',
        content=WIKIPAGE_HOME_CONTENT,
    )
    db.session.add(home_page)
    db.session.commit()
def populate_db_all():
    """Completely populate a basic db for DLI.

    Returns True on success, or False when the required
    DLI_REPORTS_ADMIN_PASSWORD environment variable is missing.
    """
    if 'DLI_REPORTS_ADMIN_PASSWORD' not in os.environ:
        print('Please set env variable DLI_REPORTS_ADMIN_PASSWORD first.')
        return False
    vprint('Starting DB population script...')
    # Order matters: later models reference rows created by earlier ones.
    steps = (
        (populate_db_departments, 'Department'),
        (populate_db_locations, 'Location'),
        (populate_db_users, 'User'),
        (populate_db_fieldtypes, 'FieldType'),
        (populate_db_fields, 'Field'),
        (populate_db_tags, 'Tag'),
        (populate_db_reports, 'Report'),
        (populate_db_charttypes, 'ChartType'),
        (populate_db_charts, 'Chart'),
        (populate_db_wikipages, 'Wikipage'),
    )
    for populate, model_name in steps:
        populate()
        vprint('%s model populated.' % model_name)
    vprint()
    vprint('DB population script complete.')
    return True
if __name__ == '__main__':
    # Entry point: optionally drop existing data, always (re)create the
    # schema, and optionally populate it with the default DLI data set.
    vprint('CreateDB script loaded.')
    if ARGS.drop:
        # --drop: close the session and wipe every table before recreating.
        vprint('Dropping all existing data first!')
        db.session.close()
        db.drop_all()
        vprint('DB dropped.')
    db.create_all()
    vprint('All database models created.')
    res = True
    if ARGS.populate:
        # populate_db_all() returns False when required env vars are missing.
        res = populate_db_all()
    if res:
        vprint('CreateDB script exiting successfully.')
    else:
        vprint('CreateDB script exited with failure!')
| {
"repo_name": "gorel/dli-reports",
"path": "create_db.py",
"copies": "1",
"size": "37018",
"license": "mit",
"hash": -1513442793696105700,
"line_mean": 26.875,
"line_max": 79,
"alpha_frac": 0.5201793722,
"autogenerated": false,
"ratio": 4.6516712741894946,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00008002833615629627,
"num_lines": 1328
} |
#A helpful little script by A. Colwell (2014).
#Demonstrates the basic graphics.py shapes (Circle, Rectangle, Oval,
#Polygon), each drawn with a text label and red arrows pointing at it.
from graphics import *
#A 600x400 pixel window that every example shape is drawn into.
win1 = GraphWin('graphics.py Guide',600,400)
#--- Circle: centered at (50,30) with radius 25 ---
circ = Circle(Point(50,30),25)
circ.draw(win1)
labelCirc = Text(Point(200,25),'Circle at 50,30 with radius of 25')
labelCirc.draw(win1)
#Red arrow pointing from below-right toward the circle's center.
lineCirc = Line(Point(75,55),Point(50,30))
lineCirc.setFill('red')
lineCirc.setArrow('last')
lineCirc.draw(win1)
#--- Rectangle: corners (20,100) and (60,140), with two pointer arrows ---
rect = Rectangle(Point(20,100),Point(60,140))
rect.draw(win1)
labelRect = Text(Point(250,120),'Rectangle UpLeft(20,100),LowRight(60,140)')
labelRect.draw(win1)
line1Rect = Line(Point(20,70),Point(20,100))
line1Rect.setFill('red')
line1Rect.setArrow('last')
line1Rect.draw(win1)
line2Rect = Line(Point(90,140),Point(60,140))
line2Rect.setFill('red')
line2Rect.setArrow('last')
line2Rect.draw(win1)
#--- Oval: bounding box (20,200)-(60,280); a red rectangle shows the box ---
labelOval = Text(Point(250,240),'Oval UpLeft(20,200),LowRight(60,280)')
labelOval.draw(win1)
line1Oval = Line(Point(20,180),Point(20,200))
line1Oval.setFill('red')
line1Oval.setArrow('last')
line1Oval.draw(win1)
line2Oval = Line(Point(90,280),Point(60,280))
line2Oval.setFill('red')
line2Oval.setArrow('last')
line2Oval.draw(win1)
#The red outline visualizes the oval's bounding rectangle.
rectOval = Rectangle(Point(20,200),Point(60,280))
rectOval.setOutline('red')
rectOval.draw(win1)
oval = Oval(Point(20,200),Point(60,280))
oval.draw(win1)
#--- Polygon: three explicit vertices ---
poly = Polygon(Point(40,320),Point(80,360),Point(20,340))
poly.draw(win1)
labelPoly = Text(Point(250,340),'Polygon - (40,320),(80,360),(20,340) ')
labelPoly.draw(win1)
line1Poly = Line(Point(40,290),Point(40,320))
line1Poly.setFill('red')
line1Poly.setArrow('last')
line1Poly.draw(win1)
#This part just keeps the window open when not run from interactive shell
def main():
    """Block until the window is clicked; return the click's (x, y)."""
    click = win1.getMouse()
    return click.getX(), click.getY()
main()
| {
"repo_name": "MrColwell/PythonProfessionalLearning",
"path": "PythonForTeachers/studentExercises/graphics_guide.py",
"copies": "1",
"size": "1784",
"license": "mit",
"hash": 6244709449616433000,
"line_mean": 25.875,
"line_max": 76,
"alpha_frac": 0.7012331839,
"autogenerated": false,
"ratio": 2.4404924760601916,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36417256599601916,
"avg_score": null,
"num_lines": null
} |
"""A Henson plugin to integrate Sentry."""
import asyncio
import os as _os
import pkg_resources as _pkg_resources
from henson import Extension
from raven import Client
from raven.conf import defaults
from raven.utils.imports import import_string
from raven_aiohttp import AioHttpTransport
__all__ = ('Sentry',)
try:
    # Derive __version__ from the installed distribution's metadata.
    _dist = _pkg_resources.get_distribution(__name__)
    if not __file__.startswith(_os.path.join(_dist.location, __name__)):
        # Manually raise the exception if there is a distribution but
        # it's installed from elsewhere (i.e. this module is not the one
        # the distribution metadata describes).
        raise _pkg_resources.DistributionNotFound
except _pkg_resources.DistributionNotFound:
    # Running from a source checkout rather than an installed package.
    __version__ = 'development'
else:
    __version__ = _dist.version
class Sentry(Extension):
    """A Henson extension that reports application errors to Sentry.

    Registers an error callback on the application so exceptions raised
    while processing a message are captured, and clears the Sentry
    context after each message is acknowledged.
    """

    # Defaults merged into the application's settings; each can be
    # overridden by the application's own configuration.
    DEFAULT_SETTINGS = {
        'RAVEN_IGNORE_EXCEPTIONS': (),
        'SENTRY_AUTO_LOG_STACKS': defaults.AUTO_LOG_STACKS,
        'SENTRY_DSN': None,
        'SENTRY_EXCLUDE_PATHS': (),
        'SENTRY_INCLUDE_PATHS': (),
        'SENTRY_MAX_LENGTH_LIST': defaults.MAX_LENGTH_LIST,
        'SENTRY_MAX_LENGTH_STRING': defaults.MAX_LENGTH_STRING,
        'SENTRY_NAME': None,
        'SENTRY_PROCESSORS': None,
        'SENTRY_RELEASE': None,
        'SENTRY_SITE_NAME': None,
        'SENTRY_TAGS': None,
        'SENTRY_TRANSPORT': AioHttpTransport,
    }

    # Lazily-created raven Client, built on the first init_app() call.
    _client = None

    def init_app(self, app):
        """Initialize an ``Application`` instance.

        When no ``SENTRY_DSN`` is configured, integration is disabled and
        no callbacks are registered.

        Args:
            app (henson.base.Application): The application instance to
                be initialized.
        """
        super().init_app(app)
        if not app.settings['SENTRY_DSN']:
            app.logger.info('sentry.disabled')
            return
        if not app.settings['SENTRY_NAME']:
            # Fall back to the application's own name.
            app.settings['SENTRY_NAME'] = app.name
        if not self._client:
            self._client = _make_client(app)
        # Report unhandled message-processing exceptions to Sentry.
        app.error(self._handle_exception)
        # Reset the Sentry context once each message is acknowledged.
        app.message_acknowledgement(self._after_message)

    @asyncio.coroutine
    def capture_exception(self, exc_info=None, **kwargs):
        """Create an event from an exception.

        Args:
            exc_info: Optional exception info; ``None`` means the
                currently handled exception.
            **kwargs: Extra data forwarded to ``Client.captureException``.
        """
        self._client.captureException(exc_info, **kwargs)

    @asyncio.coroutine
    def capture_message(self, message, **kwargs):
        """Create an event from ``message``.

        Args:
            message: The message to record in Sentry.
            **kwargs: Extra data forwarded to ``Client.captureMessage``.
        """
        self._client.captureMessage(message, **kwargs)

    @asyncio.coroutine
    def _after_message(self, app, message):
        """Clear the Sentry context after a message is acknowledged."""
        self._client.context.clear()

    @asyncio.coroutine
    def _handle_exception(self, app, message, exc):
        """Capture ``exc`` unless its type is configured to be ignored."""
        if isinstance(exc, self.app.settings['RAVEN_IGNORE_EXCEPTIONS']):
            return
        yield from self.capture_exception(message=message)
def _make_client(app):
    """Build a raven ``Client`` from the application's SENTRY_* settings.

    Args:
        app (henson.base.Application): The application whose settings
            configure the client.

    Returns:
        raven.Client: The configured Sentry client.
    """
    settings = app.settings
    transport = settings['SENTRY_TRANSPORT']
    if isinstance(transport, str):
        # The transport may be configured as a dotted import path.
        transport = import_string(transport)
    return Client(
        dsn=settings['SENTRY_DSN'],
        transport=transport,
        include_paths=settings['SENTRY_INCLUDE_PATHS'],
        exclude_paths=settings['SENTRY_EXCLUDE_PATHS'],
        name=settings['SENTRY_NAME'],
        site_name=settings['SENTRY_SITE_NAME'],
        processors=settings['SENTRY_PROCESSORS'],
        string_max_length=settings['SENTRY_MAX_LENGTH_STRING'],
        list_max_length=settings['SENTRY_MAX_LENGTH_LIST'],
        auto_log_stacks=settings['SENTRY_AUTO_LOG_STACKS'],
        tags=settings['SENTRY_TAGS'],
        release=settings['SENTRY_RELEASE'],
        extra={'app': app},
    )
| {
"repo_name": "dirn/Henson-Sentry",
"path": "henson_sentry.py",
"copies": "1",
"size": "3544",
"license": "mit",
"hash": 3719142768433842700,
"line_mean": 30.3628318584,
"line_max": 73,
"alpha_frac": 0.6286681716,
"autogenerated": false,
"ratio": 3.810752688172043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49394208597720424,
"avg_score": null,
"num_lines": null
} |
# Ahhh requests w/ freegeoip API
from optparse import OptionParser
import logging
import requests
import json
def queryFreeGeo(query):
    """Look up ``query`` (an IP or hostname) via the freegeoip.net JSON API.

    Returns one "key: value" line per response field, or an apology string
    when the service does not answer with HTTP 200.
    """
    response = requests.get('http://www.freegeoip.net/json/' + query)
    if response.status_code != 200:
        return "Apologies but freegeoip.net is not responding right now."
    body = json.loads(response.text)
    lines = [str(key) + ': ' + str(value) for key, value in body.iteritems()]
    return '\n'.join(lines)
# Main function with options for running script directly
def main():
    """Parse command-line options, query freegeoip, and print the result."""
    # Setup the command line arguments.
    optp = OptionParser()
    # Output verbosity options
    optp.add_option('-q', '--quiet', help='set logging to ERROR',
                    action='store_const', dest='loglevel',
                    const=logging.ERROR, default=logging.INFO)
    optp.add_option('-d', '--debug', help='set logging to DEBUG',
                    action='store_const', dest='loglevel',
                    const=logging.DEBUG, default=logging.INFO)
    optp.add_option('-v', '--verbose', help='set logging to COMM',
                    action='store_const', dest='loglevel',
                    const=5, default=logging.INFO)
    # Option for the IP address (or hostname) to look up
    optp.add_option("-i", "--ip", dest="query",
                    help="The IP address you want to lookup")
    opts, args = optp.parse_args()
    # Prompt interactively when no query was given on the command line
    if opts.query is None:
        opts.query = raw_input("What's your IP query? ")
    results = queryFreeGeo(opts.query)
    print results
if __name__ == '__main__':
    main()
| {
"repo_name": "ahhh/SPSE",
"path": "requests_freegeoip_API.py",
"copies": "1",
"size": "1548",
"license": "mit",
"hash": -4217524779756073500,
"line_mean": 29.96,
"line_max": 67,
"alpha_frac": 0.6279069767,
"autogenerated": false,
"ratio": 3.8034398034398036,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9526906479225742,
"avg_score": 0.08088806018281235,
"num_lines": 50
} |
# Ahhh requests w/ Malwr API
from optparse import OptionParser
import logging
import requests
from requests.auth import HTTPBasicAuth
# Function to run our upload
def uploadToMalwr(file_upload, apikey):
    """Submit ``file_upload`` to the Malwr analysis API (shared, forced).

    Returns the ``requests`` Response from the add-analysis endpoint.
    """
    url = 'https://malwr.com/api/analysis/add/'
    attachment = {
        'file': (file_upload, open(file_upload, 'rb'),
                 'application/octet-stream', {'Expires': '0'}),
    }
    payload = {'api_key': apikey, 'shared': 'yes', 'force': 'True'}
    return requests.post(url, files=attachment, data=payload, verify=True)
# Function to run our check
def checkMalwr(uuid, apikey):
    """Query the Malwr API for the analysis status of ``uuid``.

    Returns the ``requests`` Response from the status endpoint.
    """
    url = 'https://malwr.com/api/analysis/status/'
    data = {'api_key': apikey, 'uuid': uuid}
    # BUG FIX: ``data`` was previously built but never passed to the POST,
    # so the API never received the api_key/uuid parameters.
    return requests.post(url, data=data, verify=True)
# Main function with options for running script directly
def main():
    """Parse command-line options and upload a file or check a UUID on Malwr."""
    # Setup the command line arguments.
    optp = OptionParser()
    # Output verbosity options
    optp.add_option('-q', '--quiet', help='set logging to ERROR',
                    action='store_const', dest='loglevel',
                    const=logging.ERROR, default=logging.INFO)
    optp.add_option('-d', '--debug', help='set logging to DEBUG',
                    action='store_const', dest='loglevel',
                    const=logging.DEBUG, default=logging.INFO)
    optp.add_option('-v', '--verbose', help='set logging to COMM',
                    action='store_const', dest='loglevel',
                    const=5, default=logging.INFO)
    # Option for file to upload
    optp.add_option("-f", "--file", dest="file_upload",
                    help="The file you want to upload")
    # Option for the analysis UUID whose status should be checked
    optp.add_option("-i", "--id", dest="uuid",
                    help="The hash of the file you want to download")
    # Option for the Malwr API key
    optp.add_option("-k", "--key", dest="apikey",
                    help="The apikey for the service")
    opts, args = optp.parse_args()
    # Prompt if the user didn't give an apikey
    if opts.apikey is None:
        opts.apikey = raw_input("What's your Malwr API key? ")
    # Default message when neither action option was supplied
    results = "You need to upload a file (-f) or look up a uuid (-c)"
    # Run upload if the user gave us a file to upload
    if opts.file_upload is not None:
        # Run our upload function
        results = uploadToMalwr(opts.file_upload, opts.apikey)
    # Run status check if the user gave us a uuid to query
    if opts.uuid is not None:
        # Run our status-check function
        results = checkMalwr(opts.uuid, opts.apikey)
    # NOTE(review): iterating a requests.Response yields content chunks,
    # not lines; presumably the intent is to print the body — verify.
    for result in results:
        print result
if __name__ == '__main__':
    main()
| {
"repo_name": "ahhh/SPSE",
"path": "requests_malwr_API.py",
"copies": "1",
"size": "2473",
"license": "mit",
"hash": -1946090112263411500,
"line_mean": 33.3472222222,
"line_max": 104,
"alpha_frac": 0.6328346138,
"autogenerated": false,
"ratio": 3.615497076023392,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4748331689823392,
"avg_score": null,
"num_lines": null
} |
# Ahhh requests w/ ShadowServer API
from optparse import OptionParser
import logging
import requests
# Function to run our check
def checkSS(ioc):
    """Query the Shadow Server API for one IOC, or several comma-separated.

    Returns a "Shadow Server Results: " string containing the raw API
    response text (responses joined by blank lines for multiple IOCs).
    """
    base = "https://innocuous.shadowserver.org/api/?query="
    if "," in ioc:
        responses = [requests.get(base + part).text
                     for part in ioc.split(",")]
        return "Shadow Server Results: " + "\n\n".join(responses)
    return "Shadow Server Results: " + requests.get(base + ioc).text
# Main function with options for running script directly
def main():
    """Parse command-line options, query Shadow Server, and print results."""
    # Setup the command line arguments.
    optp = OptionParser()
    # Output verbosity options
    optp.add_option('-q', '--quiet', help='set logging to ERROR',
                    action='store_const', dest='loglevel',
                    const=logging.ERROR, default=logging.INFO)
    optp.add_option('-d', '--debug', help='set logging to DEBUG',
                    action='store_const', dest='loglevel',
                    const=logging.DEBUG, default=logging.INFO)
    optp.add_option('-v', '--verbose', help='set logging to COMM',
                    action='store_const', dest='loglevel',
                    const=5, default=logging.INFO)
    # Option for the IOC (hash, IP, or domain) to check
    optp.add_option("-i", "--ioc", dest="ioc",
                    help="The hash, ip, or domain of the ioc you want to check")
    opts, args = optp.parse_args()
    # Prompt if the user didn't give an ioc
    if opts.ioc is None:
        opts.ioc = raw_input("What's your IOC (Hash, ip, domain)? ")
    results = checkSS(opts.ioc)
    print results
if __name__ == '__main__':
    main()
| {
"repo_name": "ahhh/SPSE",
"path": "requests_shadowserver_API.py",
"copies": "1",
"size": "1659",
"license": "mit",
"hash": -4461268757921220000,
"line_mean": 29.7222222222,
"line_max": 78,
"alpha_frac": 0.6202531646,
"autogenerated": false,
"ratio": 3.638157894736842,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9417430538544963,
"avg_score": 0.06819610415837586,
"num_lines": 54
} |
# Ahhh requests w/ Viper API
from optparse import OptionParser
import logging
import requests
from requests.auth import HTTPBasicAuth
# Function to run our upload
def uploadToViper(user, passw, file_upload, endpoint):
    """POST ``file_upload`` to the Viper instance at ``endpoint`` (host:port).

    Uses HTTP basic auth; certificate verification is disabled.
    Returns the ``requests`` Response from the /file/add endpoint.
    """
    url = 'http://'+endpoint+'/file/add'
    attachment = {
        'file': (file_upload, open(file_upload, 'rb'),
                 'application/octet-stream', {'Expires': '0'}),
    }
    return requests.post(url, auth=HTTPBasicAuth(user, passw),
                         files=attachment, data={'file_name': file_upload},
                         verify=False)
# Function to run our download
def downloadFromViper(user, passw, hash, endpoint):
    """GET the file stored under ``hash`` from the Viper instance at
    ``endpoint`` (host:port), using HTTP basic auth without cert checks.
    """
    return requests.get('http://'+endpoint+'/file/get/'+hash,
                        auth=HTTPBasicAuth(user, passw), verify=False)
# Main function with options for running script directly
def main():
    """Parse command-line options and upload or download a file via Viper."""
    # Setup the command line arguments.
    optp = OptionParser()
    # Output verbosity options
    optp.add_option('-q', '--quiet', help='set logging to ERROR',
                    action='store_const', dest='loglevel',
                    const=logging.ERROR, default=logging.INFO)
    optp.add_option('-d', '--debug', help='set logging to DEBUG',
                    action='store_const', dest='loglevel',
                    const=logging.DEBUG, default=logging.INFO)
    optp.add_option('-v', '--verbose', help='set logging to COMM',
                    action='store_const', dest='loglevel',
                    const=5, default=logging.INFO)
    # Option for user for basic auth
    optp.add_option("-u", "--user", dest="user",
                    help="The user for the basic auth")
    # Option for password for basic auth
    optp.add_option("-p", "--passw", dest="passw",
                    help="The password for the basic auth")
    # Option for file to upload
    optp.add_option("-f", "--file", dest="file_upload",
                    help="The file you want to upload")
    # Option for hash to download
    optp.add_option("-m", "--md5", dest="file_hash",
                    help="The hash of the file you want to download")
    # Option for the Viper API host (host:port) to talk to
    optp.add_option("-e", "--endpoint", dest="api_endpoint",
                    help="The url you want to upload to")
    opts, args = optp.parse_args()
    # Prompt if the user doesn't give creds for basic auth
    if opts.user is None:
        opts.user = raw_input("What user are you giving to log in? ")
    # Prompt if the user doesn't give creds for basic auth
    if opts.passw is None:
        opts.passw = raw_input("What pass are you giving to log in? ")
    # Prompt if the user didn't give a Viper API host
    if opts.api_endpoint is None:
        opts.api_endpoint = raw_input("What's the host of the Viper API endpoint? ")
    # Example: opts.api_endpoint = 'viperhost:9002'
    # Default message when neither action option was supplied
    results = "You need to upload a file (-f) or look up a hash (-m)"
    # Run upload if the user gave us a file to upload
    if opts.file_upload is not None:
        # Run our upload function
        results = uploadToViper(opts.user, opts.passw, opts.file_upload, opts.api_endpoint)
    # Run Download if the user gave us a hash to get
    if opts.file_hash is not None:
        # Run our download function
        results = downloadFromViper(opts.user, opts.passw, opts.file_hash, opts.api_endpoint)
    # NOTE(review): iterating a requests.Response yields content chunks,
    # not lines; presumably the intent is to print the body — verify.
    for result in results:
        print result
if __name__ == '__main__':
    main()
| {
"repo_name": "ahhh/SPSE",
"path": "requests_viper_API.py",
"copies": "1",
"size": "3263",
"license": "mit",
"hash": -5327346186200123000,
"line_mean": 35.6629213483,
"line_max": 104,
"alpha_frac": 0.6444989274,
"autogenerated": false,
"ratio": 3.629588431590656,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9319806789399898,
"avg_score": 0.09085611391815165,
"num_lines": 89
} |
"""A Hiero task and preset for sending shots to a Stone FS via Wiretap.
@author Brendan Holt
@date March 2014
@see \ref modStonify
@defgroup modFnStonify FnStonify
@{
"""
import os.path
import platform
from Queue import Queue, Empty
import re
import subprocess
import sys
from threading import Thread
import hiero.core
from hiero.core import Timecode # only accessible when Hiero is running
import Path
from wiretap import WireTapException
from WiretapBrowser import SplitNodePath
from WiretapTools import SplitHostname
## \c{(re.RegexObject)} A cached regular expression object for parsing the
#  progress reported in the Stonify script's standard output, e.g.
#  "Wrote frame 12 of 240." -> groups ('12', '240').
PROGRESS_REGEX = re.compile(r'^Wrote frame (\d+) of (\d+)\.$')
class StonifyTask(hiero.core.TaskBase):
"""A custom Hiero task that copies the frames from the selected shot to the
designated Wiretap clip node.
@details This task launches an external process through the Stonify script.
"""
    def __init__(self, initDict):
        """Constructs a Stonify task given an initialization dictionary.

        @param[in] initDict \c{(FnExporterBase.TaskData)} Task initialization
                            data.
        """
        super(StonifyTask, self).__init__(initDict)
        ## \c{(Queue)} Stores messages from the Stonify script's standard
        #  output (filled by a reader thread, drained in taskStep()).
        self.__stdoutQueue = Queue()
        ## \c{(int)} The index of the current frame being copied.
        self.__frameIndex = 0
        ## \c{(int)} The starting frame's file number in the output range.
        self.__start = 0
        ## \c{(int)} The ending frame's file number in the output range.
        self.__end = 0
        # outputRange() yields the (first, last) file numbers of the export.
        self.__start, self.__end = self.outputRange()
        ## \c{(int)} The total number of frames in the selected shot
        #  (inclusive of both endpoints).
        self.__numFrames = self.__end - self.__start + 1
        ## \c{(hiero.core.TimeBase)} The sequence frame rate.
        self.__timeBase = self._sequence.framerate()
        ## \c{(bool)} Whether the sequence timecode is displayed in drop frame
        #  format.
        #
        #  @see Documentation was copied from
        #  \c{hiero.core.Sequence.dropFrame()}.
        self.__isDropFrame = self._sequence.dropFrame()
        ## \c{(bool)} Whether the associated media is a movie container
        #  (decided from the source file extension).
        self.__isVideoFile = hiero.core.isVideoFileExtension(self.fileext())
    def startTask(self):
        """Analyzes the source media and collects the necessary Wiretap
        parameters in order to execute the Stonify script.

        @details Called when task reaches head of the export queue and begins
                 execution. Launches the Stonify script as a subprocess and a
                 daemon thread that monitors its standard output.

        @see Documentation was partially copied from
             \c{hiero.core.FnExporterBase.TaskBase.startTask()}.
        """
        # Don't call TaskBase.startTask() because it will try to make
        # directories. Instead, just run the pre-sequence method.
        self.preSequence()
        if not self._preset.properties()['isWiretapShotProcess']:
            print("WARNING: Stonify tasks should be executed with the Wiretap "
                  "Shot Processor.")
        # The following checks for media presence and cut handles were borrowed
        # from hiero.exporters.FnFrameExporter
        if not self._source.isMediaPresent() and self._skipOffline:
            return
        if self._cutHandles is not None:
            if self._retime:
                raise NotImplementedError("Retimes are not yet supported when "
                                          "exporting via Wiretap.")
        # Stonify.py lives in the sibling "scripts" directory of this module.
        scriptPath = os.path.join(os.path.dirname(__file__),
                                  '../scripts/Stonify.py')
        scriptPath = os.path.normpath(scriptPath)
        # Format the source and destination node paths
        hostname, dstParentNodeID, dstClipName = self.__ResolvePaths()
        if self.__isVideoFile:  # TO DO: only necessary if specifically ProRes
            srcHost = self._preset.propertiesValue('osxGatewayHost')
        else:
            srcHost = hostname + ':Gateway'
        dstHost = hostname + ':IFFFS'
        srcNodeID = self.FormatSourcePath()
        # LIBRARY/REEL/CLIP creation parameters set to '1' or str(int(True))
        useDisplayName = '1'  # TO DO: provide a toggle for this case
        createParentNodeID = '1'
        overwriteClip = '1'
        # Format the frame and time information
        # NOTE: Like Hiero, Wiretap interprets 23.98 FPS as 23.976 FPS; can use
        # string labels instead of float conversion
        frameRate = self.__timeBase.toString()
        dropMode = 'DF' if self.__isDropFrame else 'NDF'
        startTimecode = self.__GetStartTimecode()
        # All arguments are strings: they become the script's command line.
        command = [
            GetBinaryPath('python'), scriptPath,
            srcHost, srcNodeID,
            dstHost, dstParentNodeID, dstClipName,
            useDisplayName, createParentNodeID, overwriteClip,
            frameRate, dropMode, startTimecode
        ]
        # Wiretap cannot reference a sub-clip of a video file with "@CLIP"
        # syntax, so pass the frame range as well
        if self.__isVideoFile:
            command.extend(map(str, [self.__start, self.__end]))
        # close_fds is only supported together with piped stdio on POSIX.
        ON_POSIX = 'posix' in sys.builtin_module_names
        self.process = subprocess.Popen(command, shell=False,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE,
                                        bufsize=1,
                                        close_fds=ON_POSIX)
        # Monitor stdout in a separate (daemon) thread to track upload
        # progress without blocking taskStep().
        stdoutMonitor = Thread(target=self.EnqueueOutput,
                               args=(self.process.stdout, self.__stdoutQueue))
        stdoutMonitor.daemon = True
        stdoutMonitor.start()
def taskStep(self):
    """Polls the Stonify subprocess and parses its standard output to
    determine task progress.
    @details Called every frame until task completes. The task may
             continue to run in the background.
    @retval True Indicates that the task requires more steps.
    @retval False Indicates synchronous processing of the task is complete.
    @see Documentation was partially copied from
         \c{hiero.core.FnExporterBase.TaskBase.taskStep()}.
    """
    # poll() returns None while the subprocess is running and 0 on a
    # clean exit; both are falsy, so only a non-zero exit code raises.
    returncode = self.process.poll()
    if returncode:  # there are errors
        raise WireTapException(self.__CollectErrors())
    # Attempt to read output from the queue without blocking; the queue is
    # fed by the stdout monitor thread started in startTask().
    try:
        line = self.__stdoutQueue.get_nowait()
    except Empty:  # no output yet
        pass
    else:  # update the current frame index based on the subprocess output
        try:
            self.__frameIndex = self.ParseProgressOutput(line.strip())[0]
        except IndexError:  # output didn't contain frame information
            pass
    # NOTE(review): if the subprocess exits 0 before reporting the final
    # frame, this keeps returning True -- confirm upstream handles that.
    return self.__frameIndex < self.__numFrames
def progress(self):
    """Reports the fraction of completed work as a value between 0 and 1.
    @details The task is considered complete once this returns 1.
    @return \c{(float)} The fraction of work completed.
    @see Documentation was partially copied from
         \c{hiero.core.FnExporterBase.TaskBase.progress()}.
    """
    if self._finished:
        return 1.0
    done = float(self.__frameIndex)
    return done / self.__numFrames
def forcedAbort(self):
"""Terminates the Stonify subprocess if it is still running.
@details Called by Hiero when the user presses the Abort
button.
@see Documentation was partially copied from \c{ITask.forcedAbort()} in
the <em>Hiero Python Developer Guide</em>.
"""
if hasattr(self, 'process') and self.process.poll() is None:
try:
self.process.terminate()
except OSError as why:
print "Failed to terminate the Wiretap process: " + str(why)
def __ResolvePaths(self):
    """Extracts the Wiretap server hostname, parent node ID, and clip name
    from this task's shot path.
    @throws Raises a ValueError if the shot path does not have the minimum
            number of segments (i.e. a path in the form of
            \c{/VOLUME/PROJECT/LIBRARY/[REEL]/CLIP}).
    @return \c{(tuple)} The resolved Wiretap server hostname, parent node
            ID, and clip name.
    """
    # Split the shot path into its parent node path and clip node name,
    # then pull the hostname out of the parent node path. The hostname
    # should not contain the product name (e.g. Vid_WS17:IFFFS), but just
    # in case, drop the colon and everything after it.
    parentNodePath, clipName = os.path.split(self._shotPath)
    hostname, parentNodeID = SplitNodePath(parentNodePath)
    hostname = SplitHostname(hostname)[0]
    # Limited path validation: require at least volume/project/library.
    # NOTE: Node paths with blank display names are not supported because
    # Hiero does not let you set blank folder names in the export
    # structure viewer.
    nonEmptySegments = [seg for seg in parentNodeID.split('/') if seg]
    if len(nonEmptySegments) < 3:
        raise ValueError("Shots/clips sent to a Stone FS require a "
                         "volume, project, library, and optionally a reel "
                         "node in the parent node path: "
                         "/VOLUME/PROJECT/LIBRARY/[REEL]/CLIP")
    # Resolve export tokens in every component before returning.
    return tuple(self.resolvePath(part)
                 for part in [hostname, parentNodeID, clipName])
def EnqueueOutput(self, fileHandle, queue):
    """Enqueues each line of the standard output from an external process.
    @param[in] fileHandle \c{(file)} The standard output stream from the
               external process.
    @param[in] queue \c{(Queue.Queue)} The object for holding the queued
               output lines.
    @see <a href="http://stackoverflow.com/a/4896288" target="_blank">
         Non-blocking read on a subprocess.PIPE</a>
    """
    # Read until EOF (readline() returns an empty byte string), pushing
    # every line onto the queue for the main thread to consume.
    while True:
        line = fileHandle.readline()
        if line == b'':
            break
        queue.put(line)
    fileHandle.close()
def ParseProgressOutput(self, text):
    """Parses the Stonify subprocess output to determine the index of the
    frame that was copied.
    @param[in] text \c{(str)} An output line in the form "Wrote frame
               [index] of [total]."
    @return \c{(tuple)} The current integer frame index and total number of
            frames, or an empty tuple if the line did not match.
    @see \ref FnStonify.PROGRESS_REGEX() "FnStonify.PROGRESS_REGEX"
    """
    # BUGFIX: removed a stray debug `print text` that echoed every
    # subprocess output line to the console on each poll.
    matchObj = PROGRESS_REGEX.match(text)
    if matchObj:
        return tuple([int(ii) for ii in matchObj.groups()])
    return ()
def __CollectErrors(self):
    """Collects the exception(s) from the Stonify subprocess' standard
    error stream.
    @details \c{stderr} is only read if the Stonify subprocess has ended.
    @return \c{(str)} The exception trace and message, if any.
    """
    # It is unknown whether reading stderr blocks, so only read once the
    # subprocess has exited (poll() returns a code). If the return code
    # is 0 there probably are no errors anyway; either way an empty
    # string may be returned.
    if self.process.poll() is None:
        return ''
    return '\n'.join(self.process.stderr.readlines())
def FormatSourcePath(self):
    """Formats the path to the media source for Wiretap Gateway server
    access.
    @details If the source is an image sequence, the frame index is removed
             and substituted with a frame range denoted by square brackets
             before the file extension. For both images and video files,
             "@CLIP" is appended after the file extension.
    @return \c{(str)} The formatted path to the media source.
    """
    # Only the first fileinfo is consulted -- assumes a single-file-info
    # source; TODO confirm for multi-file sources.
    fileInfo = self._source.fileinfos()[0]
    # Format a single frame or a movie file.
    # Note: This check doesn't work on filenames ending in a number.
    if self._source.singleFile():
        # NOTE: Movie files do not support specific frame ranges, so the
        # start and end frames must be forwarded to the subprocess in
        # startTask().
        sourcePath = Path.Normalize(fileInfo.filename() + '@CLIP')
    # Format an image sequence
    else:
        # Hiero identifies a standalone frame ending in a number as part of
        # a sequence, so do not use a bracketed range in this case
        if self.__start == self.__end:
            frameRange = str(self.__start)
        else:  # only add range brackets if there is more than one frame
            # filepadding() yields a printf-style pattern (used with %) to
            # zero-pad both ends of the bracketed range.
            frameRange = '[{0}-{1}]'.format(
                self.filepadding() % self.__start,
                self.filepadding() % self.__end)
        parentPath = os.path.dirname(fileInfo.filename())
        clipName = (self.filehead() + frameRange +
                    '.' + self.fileext() + '@CLIP')
        sourcePath = Path.Normalize(parentPath, clipName)
    # Replace leading double slash with single slash, if necessary
    if sourcePath[0:2] == '//':
        sourcePath = sourcePath[1:]
    return sourcePath
def __GetStartTimecode(self):
    """Retrieves the starting timecode for the shot, accounting for the
    sequence timecode offset, the shot's position on the timeline, and the
    cut handles.
    @details Although semicolons indicate drop frame mode, the Wiretap
             server will ignore that syntax unless the XML \c{DropMode}
             element is set to \c{DF}.
    @return \c{(str)} The shot's starting timecode in the form of
            \c{hh:mm:ss:ff}.
    @warning Shot retimes are not yet supported.
    """
    # Pick the timecode display mode matching the sequence's drop-frame
    # flag so Timecode formats the string appropriately.
    if self.__isDropFrame:
        displayType = Timecode.kDisplayDropFrameTimecode
    else:
        displayType = Timecode.kDisplayTimecode
    # Sum of the sequence start offset, the shot's in-point on the
    # timeline, and the leading output handle.
    time = (self._sequence.timecodeStart() + self._item.timelineIn() +
            self.outputHandles()[0])  # time is in units of frames
    tc = Timecode.timeToString(time, self.__timeBase,
                               displayType, False, 0)
    return tc
class StonifyPreset(hiero.core.TaskPresetBase):
    """Holds the user-configurable settings shared by the Stonify task and
    task UI classes, as edited in the Hiero export dialog.
    """

    def __init__(self, name, properties):
        """Sets up the Stonify task preset with default properties.
        @param[in] name \c{(str)} The preset name, usually handled
                   automatically by Hiero.
        @param[in] properties \c{(dict)} Preset properties to be updated.
        """
        super(StonifyPreset, self).__init__(StonifyTask, name)
        # NOTE: Flame 2014 and newer natively support importing ProRes
        # movies, so Mac Wiretap Gateways should not be necessary in the
        # future.
        self.properties().update({
            'isWiretapShotProcess': False,  # Wiretap processor will enable
            'sourceHost': '',  # TO DO: allow overrides for source (read) host
            'osxGatewayHost': 'io-server:Gateway',  # N/A for Flame 2014+
        })
        # Loaded data takes precedence over the defaults above.
        self.properties().update(properties)

    ## @cond NOT_IMPLEMENTED
    def addCustomResolveEntries(self, resolver):
        """Add resolvers for Wiretap servers, volumes, projects, and
        libraries.
        """
        pass
    ## @endcond

    def supportedItems(self):
        """Establishes that the associated task supports shots (track
        items).
        @return \c{(core.Hiero.Python.ITaskPreset.ItemTypes)} or \c{(long)}
                The supported item types, combinable with bitwise \c{OR}.
        @todo Add support for \c{TaskPresetBase.kSequence}.
        """
        return hiero.core.TaskPresetBase.kTrackItem
def GetBinaryPath(executable):
    """Retrieves the absolute path to the platform-appropriate Hiero
    executable contained within the application folder.
    @details The three possible executables are \c{hiero}, \c{HieroNuke},
             and \c{python}. The Python interpreter returned belongs to the
             embedded copy of Nuke, not Hiero.
    @param[in] executable \c{(str)} The name of the Hiero command-line
               program without the extension.
    @throws Raises a ValueError if given an invalid executable name.
    @throws Raises an OSError if running an unsupported OS.
    @return \c{(str)} The absolute path to a Hiero executable.
    @see \c{hiero.core.hieroNukePath()}
    """
    hieroNames = ['hiero']
    hieroNukeNames = ['HieroNuke', 'python']
    allNames = set(hieroNames) | set(hieroNukeNames)
    # Reject unknown binaries before doing any path work.
    if executable not in allNames:
        raise ValueError(
            'The executable name "{0}" is unsupported. Valid Hiero binaries '
            "include: {1}".format(executable, ', '.join(allNames)))
    # Only the Windows layout is known at this time.
    if platform.system() != 'Windows':
        raise OSError("UNIX path information is not available at this time.")
    hieroPath = os.path.dirname(sys.executable)
    if executable in hieroNames:
        return os.path.join(hieroPath, executable) + '.exe'
    # HieroNuke binaries live in a subfolder of the application directory.
    return os.path.join(hieroPath, 'HieroNuke', executable) + '.exe'
# Register the preset/task pair so Hiero's export system offers Stonify.
hiero.core.taskRegistry.registerTask(StonifyPreset, StonifyTask)
## @}
| {
"repo_name": "CBSDigital/Hiero-Wiretap",
"path": "plugins/FnStonify.py",
"copies": "1",
"size": "19083",
"license": "bsd-3-clause",
"hash": 5688566619608152000,
"line_mean": 38.3463917526,
"line_max": 124,
"alpha_frac": 0.5889535188,
"autogenerated": false,
"ratio": 4.436875145315043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5525828664115043,
"avg_score": null,
"num_lines": null
} |
"""A high level class that simplifies emulated protocol hosts.
Example: find all bgp sessions that are up and flap them
protocol_mgmt = IxnProtocolManagement(ixnhttp)
bgp_ipv4_peer = protocol_mgmt.find(type='bgpipv4peer', name='BGP Peer 1', session_state='up')
bgp_ipv4_peer  # exposes operations specific to the matched sessions (e.g. to flap them)
"""
from ixnetwork.IxnQuery import IxnQuery
class IxnProtocolManagement(object):
    """Manage emulated protocol hosts."""

    def __init__(self, ixnhttp):
        # Connection object used to issue queries against the server.
        self._ixnhttp = ixnhttp

    def find(self, type, name, **kwargs):
        """Find emulated protocol host objects.
        type: the node type
              see the IxNetwork API Doc Browser for all possible node names
              under /topology
        name: the node name (the content of the name property for the node
              type)
        **kwargs: remaining search criteria of name/value pairs; each pair
              is a node property name/node property value -- see the
              IxNetwork API Doc Browser for all possible property names for
              a given node name, e.g. 'address', '1.1.1.1',
              'gatewayIp', '^(1.1.2.)'
        returns: an IxnNgpfObject that matches the supplied find criteria
              or None. The IxnNgpfObject has operations that are specific
              to the matched sessions; if only a single session is matched
              then operations will only be executed against that session.
              The base IxnObject is available to allow for modifying the
              entire node.
        """
        # Not yet implemented.
        pass
| {
"repo_name": "OpenIxia/ixnetwork_client_python",
"path": "ixnetwork/IxnProtocolManagement.py",
"copies": "1",
"size": "1372",
"license": "mit",
"hash": -4229922102414817300,
"line_mean": 38.3529411765,
"line_max": 98,
"alpha_frac": 0.7201166181,
"autogenerated": false,
"ratio": 3.8757062146892656,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5095822832789265,
"avg_score": null,
"num_lines": null
} |
"""A high level class that simplifies the process of
adding physical ports annd mapping virtual ports to physical ports
Example:
port_mgmt = IxnPortManagement(ixnhttp)
port_mgmt \
.map('10.200.109.3', '1', '1') \
.map('10.200.109.3', '1', '2') \
.map('Ethernet - 001', '10.200.109.3', '1', '3') \
.map('Ethernet - 002', '10.200.109.3', '1', '4') \
.apply(force_clear_ownership=True)
port_mgmt.release()
"""
from ixnetwork.IxnQuery import IxnQuery
class IxnPortManagement(object):
    """Manage virtual port and physical port connections."""

    def __init__(self, ixnhttp):
        self._ixnhttp = ixnhttp
        self.clear()

    def clear(self):
        """Clear all internal virtual port to physical port mappings in
        this object."""
        self._payload = {'arg1': [], 'arg2': [], 'arg3': [], 'arg4': True}

    def map(self, vport_name, chassis_ip, card_id, port_id):
        """Map an existing virtual port to a chassis/card/port."""
        if vport_name is None:
            # No name given: create a brand new vport on the server.
            href = self._ixnhttp.root.create_vport().href
        else:
            found = IxnQuery(self._ixnhttp, '/').node(
                'vport',
                properties=['name'],
                where=[{'property': 'name', 'regex': vport_name}]).go()
            if len(found.vport) != 1:
                raise Exception('vport %s does not exist on the server' % vport_name)
            href = found.vport[0].href
        self._payload['arg1'].append(
            {'arg1': chassis_ip, 'arg2': card_id, 'arg3': port_id})
        self._payload['arg3'].append(href)
        return self

    def apply(self, force_clear_ownership=False):
        """Apply all mappings by executing the assignPorts operation on the
        test tool."""
        self._payload['arg4'] = force_clear_ownership
        self._ixnhttp.root.operations.assignports(self._payload)

    def status(self):
        """Get the status of the mappings."""
        found = IxnQuery(self._ixnhttp, '/').node(
            'vport', properties=['name', 'connectionStatus']).go()
        return ['%s: %s' % (vport.attributes.name.value,
                            vport.attributes.connectionStatus.value)
                for vport in found.vport
                if vport.href in self._payload['arg3']]

    def unapply(self):
        """Unapply all mappings by executing the unassignPorts operation on
        the test tool."""
        self._ixnhttp.post('/vport/operations/unassignports',
                           {'arg1': self._payload['arg3'], 'arg2': False})
| {
"repo_name": "OpenIxia/ixnetwork_client_python",
"path": "ixnetwork/IxnPortManagement.py",
"copies": "1",
"size": "2624",
"license": "mit",
"hash": -1574607407959083800,
"line_mean": 38.1791044776,
"line_max": 146,
"alpha_frac": 0.584222561,
"autogenerated": false,
"ratio": 3.70098730606488,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.478520986706488,
"avg_score": null,
"num_lines": null
} |
# A high-level interface to the pycurl extension
#
# ** mfx NOTE: the CGI class uses "black magic" using COOKIEFILE in
# combination with a non-existant file name. See the libcurl docs
# for more info.
#
# By Eric S. Raymond, April 2003.
import sys, pycurl
py3 = sys.version_info[0] == 3
# python 2/3 compatibility
if py3:
import urllib.parse as urllib_parse
from urllib.parse import urljoin
from io import BytesIO
else:
import urllib as urllib_parse
from urlparse import urljoin
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
try:
import signal
from signal import SIGPIPE, SIG_IGN
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
except ImportError:
pass
class Curl:
    "High-level interface to pycurl functions."
    def __init__(self, base_url="", fakeheaders=[]):
        # NOTE: the mutable default for `fakeheaders` is kept for interface
        # compatibility; the list is never mutated by this class.
        self.handle = pycurl.Curl()
        # These members might be set.
        self.set_url(base_url)
        self.verbosity = 0
        self.fakeheaders = fakeheaders
        # Nothing past here should be modified by the caller.
        self.payload = None
        self.payload_io = BytesIO()
        # BUGFIX: was `self.hrd = ""` (typo); the header accumulator read
        # and written everywhere else in the class is `self.hdr`.
        self.hdr = ""
        # Verify that we've got the right site; harmless on a non-SSL connect.
        self.set_option(pycurl.SSL_VERIFYHOST, 2)
        # Follow redirects in case it wants to take us to a CGI...
        self.set_option(pycurl.FOLLOWLOCATION, 1)
        self.set_option(pycurl.MAXREDIRS, 5)
        self.set_option(pycurl.NOSIGNAL, 1)
        # Setting this option with even a nonexistent file makes libcurl
        # handle cookie capture and playback automatically.
        self.set_option(pycurl.COOKIEFILE, "/dev/null")
        # Set timeouts to avoid hanging too long
        self.set_timeout(30)
        # Use password identification from .netrc automatically
        self.set_option(pycurl.NETRC, 1)
        self.set_option(pycurl.WRITEFUNCTION, self.payload_io.write)
        def header_callback(x):
            # libcurl delivers header data as bytes; accumulate as text.
            self.hdr += x.decode('ascii')
        self.set_option(pycurl.HEADERFUNCTION, header_callback)
    def set_timeout(self, timeout):
        "Set timeout for a retrieving an object"
        self.set_option(pycurl.TIMEOUT, timeout)
    def set_url(self, url):
        "Set the base URL to be retrieved."
        self.base_url = url
        self.set_option(pycurl.URL, self.base_url)
    def set_option(self, *args):
        "Set an option on the retrieval."
        self.handle.setopt(*args)
    def set_verbosity(self, level):
        "Set verbosity to 1 to see transactions."
        self.set_option(pycurl.VERBOSE, level)
    def __request(self, relative_url=None):
        "Perform the pending request."
        if self.fakeheaders:
            self.set_option(pycurl.HTTPHEADER, self.fakeheaders)
        if relative_url:
            self.set_option(pycurl.URL, urljoin(self.base_url, relative_url))
        self.payload = None
        self.hdr = ""
        # BUGFIX: empty the body buffer before performing the request;
        # previously consecutive requests on one Curl instance accumulated
        # every earlier response body in self.payload.
        self.payload_io.seek(0)
        self.payload_io.truncate()
        self.handle.perform()
        self.payload = self.payload_io.getvalue()
        return self.payload
    def get(self, url="", params=None):
        "Ship a GET request for a specified URL, capture the response."
        if params:
            url += "?" + urllib_parse.urlencode(params)
        self.set_option(pycurl.HTTPGET, 1)
        return self.__request(url)
    def post(self, cgi, params):
        "Ship a POST request to a specified CGI, capture the response."
        self.set_option(pycurl.POST, 1)
        self.set_option(pycurl.POSTFIELDS, urllib_parse.urlencode(params))
        return self.__request(cgi)
    def body(self):
        "Return the body from the last response."
        return self.payload
    def header(self):
        "Return the header from the last response."
        return self.hdr
    def get_info(self, *args):
        "Get information about retrieval."
        return self.handle.getinfo(*args)
    def info(self):
        "Return a dictionary with all info on the last response."
        m = {}
        m['effective-url'] = self.handle.getinfo(pycurl.EFFECTIVE_URL)
        m['http-code'] = self.handle.getinfo(pycurl.HTTP_CODE)
        m['total-time'] = self.handle.getinfo(pycurl.TOTAL_TIME)
        m['namelookup-time'] = self.handle.getinfo(pycurl.NAMELOOKUP_TIME)
        m['connect-time'] = self.handle.getinfo(pycurl.CONNECT_TIME)
        m['pretransfer-time'] = self.handle.getinfo(pycurl.PRETRANSFER_TIME)
        m['redirect-time'] = self.handle.getinfo(pycurl.REDIRECT_TIME)
        m['redirect-count'] = self.handle.getinfo(pycurl.REDIRECT_COUNT)
        m['size-upload'] = self.handle.getinfo(pycurl.SIZE_UPLOAD)
        m['size-download'] = self.handle.getinfo(pycurl.SIZE_DOWNLOAD)
        m['speed-upload'] = self.handle.getinfo(pycurl.SPEED_UPLOAD)
        m['header-size'] = self.handle.getinfo(pycurl.HEADER_SIZE)
        m['request-size'] = self.handle.getinfo(pycurl.REQUEST_SIZE)
        m['content-length-download'] = self.handle.getinfo(pycurl.CONTENT_LENGTH_DOWNLOAD)
        m['content-length-upload'] = self.handle.getinfo(pycurl.CONTENT_LENGTH_UPLOAD)
        m['content-type'] = self.handle.getinfo(pycurl.CONTENT_TYPE)
        m['response-code'] = self.handle.getinfo(pycurl.RESPONSE_CODE)
        m['speed-download'] = self.handle.getinfo(pycurl.SPEED_DOWNLOAD)
        m['ssl-verifyresult'] = self.handle.getinfo(pycurl.SSL_VERIFYRESULT)
        m['filetime'] = self.handle.getinfo(pycurl.INFO_FILETIME)
        m['starttransfer-time'] = self.handle.getinfo(pycurl.STARTTRANSFER_TIME)
        # CLEANUP: duplicate redirect-time/redirect-count assignments removed.
        m['http-connectcode'] = self.handle.getinfo(pycurl.HTTP_CONNECTCODE)
        m['httpauth-avail'] = self.handle.getinfo(pycurl.HTTPAUTH_AVAIL)
        m['proxyauth-avail'] = self.handle.getinfo(pycurl.PROXYAUTH_AVAIL)
        m['os-errno'] = self.handle.getinfo(pycurl.OS_ERRNO)
        m['num-connects'] = self.handle.getinfo(pycurl.NUM_CONNECTS)
        m['ssl-engines'] = self.handle.getinfo(pycurl.SSL_ENGINES)
        m['cookielist'] = self.handle.getinfo(pycurl.INFO_COOKIELIST)
        m['lastsocket'] = self.handle.getinfo(pycurl.LASTSOCKET)
        m['ftp-entry-path'] = self.handle.getinfo(pycurl.FTP_ENTRY_PATH)
        return m
    def answered(self, check):
        "Did a given check string occur in the last payload?"
        # NOTE(review): on Python 3 the payload is bytes, so `check` must
        # also be bytes -- confirm against callers.
        return self.payload.find(check) >= 0
    def close(self):
        "Close a session, freeing resources."
        if self.handle:
            self.handle.close()
            self.handle = None
        self.hdr = ""
        self.payload = ""
    def __del__(self):
        self.close()
if __name__ == "__main__":
    # Smoke test: fetch a URL (default: the curl project site), dump the
    # response body, then print the transfer metadata dictionary.
    if len(sys.argv) < 2:
        url = 'http://curl.haxx.se'
    else:
        url = sys.argv[1]
    c = Curl()
    c.get(url)
    print(c.body())
    print('='*74 + '\n')
    import pprint
    pprint.pprint(c.info())
    print(c.get_info(pycurl.OS_ERRNO))
    print(c.info()['os-errno'])
    c.close()
| {
"repo_name": "yasoob/PythonRSSReader",
"path": "venv/lib/python2.7/dist-packages/curl/__init__.py",
"copies": "3",
"size": "7070",
"license": "mit",
"hash": -7697916630468215000,
"line_mean": 37.4239130435,
"line_max": 90,
"alpha_frac": 0.6398868458,
"autogenerated": false,
"ratio": 3.5815602836879434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5721447129487943,
"avg_score": null,
"num_lines": null
} |
'''A high-level interface to the pycurl extension'''
# ** mfx NOTE: the CGI class uses "black magic" using COOKIEFILE in
# combination with a non-existant file name. See the libcurl docs
# for more info.
import sys, pycurl
py3 = sys.version_info[0] == 3
# python 2/3 compatibility
if py3:
import urllib.parse as urllib_parse
from urllib.parse import urljoin
from io import BytesIO
else:
import urllib as urllib_parse
from urlparse import urljoin
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
try:
import signal
from signal import SIGPIPE, SIG_IGN
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
except ImportError:
pass
class Curl:
    "High-level interface to pycurl functions."
    def __init__(self, base_url="", fakeheaders=[]):
        # NOTE: the mutable default for `fakeheaders` is kept for interface
        # compatibility; the list is never mutated by this class.
        self.handle = pycurl.Curl()
        # These members might be set.
        self.set_url(base_url)
        self.verbosity = 0
        self.fakeheaders = fakeheaders
        # Nothing past here should be modified by the caller.
        self.payload = None
        self.payload_io = BytesIO()
        # BUGFIX: was `self.hrd = ""` (typo); the header accumulator read
        # and written everywhere else in the class is `self.hdr`.
        self.hdr = ""
        # Verify that we've got the right site; harmless on a non-SSL connect.
        self.set_option(pycurl.SSL_VERIFYHOST, 2)
        # Follow redirects in case it wants to take us to a CGI...
        self.set_option(pycurl.FOLLOWLOCATION, 1)
        self.set_option(pycurl.MAXREDIRS, 5)
        self.set_option(pycurl.NOSIGNAL, 1)
        # Setting this option with even a nonexistent file makes libcurl
        # handle cookie capture and playback automatically.
        self.set_option(pycurl.COOKIEFILE, "/dev/null")
        # Set timeouts to avoid hanging too long
        self.set_timeout(30)
        # Use password identification from .netrc automatically
        self.set_option(pycurl.NETRC, 1)
        self.set_option(pycurl.WRITEFUNCTION, self.payload_io.write)
        def header_callback(x):
            # libcurl delivers header data as bytes; accumulate as text.
            self.hdr += x.decode('ascii')
        self.set_option(pycurl.HEADERFUNCTION, header_callback)
    def set_timeout(self, timeout):
        "Set timeout for a retrieving an object"
        self.set_option(pycurl.TIMEOUT, timeout)
    def set_url(self, url):
        "Set the base URL to be retrieved."
        self.base_url = url
        self.set_option(pycurl.URL, self.base_url)
    def set_option(self, *args):
        "Set an option on the retrieval."
        self.handle.setopt(*args)
    def set_verbosity(self, level):
        "Set verbosity to 1 to see transactions."
        self.set_option(pycurl.VERBOSE, level)
    def __request(self, relative_url=None):
        "Perform the pending request."
        if self.fakeheaders:
            self.set_option(pycurl.HTTPHEADER, self.fakeheaders)
        if relative_url:
            self.set_option(pycurl.URL, urljoin(self.base_url, relative_url))
        self.payload = None
        self.hdr = ""
        # BUGFIX: empty the body buffer before performing the request;
        # previously consecutive requests on one Curl instance accumulated
        # every earlier response body in self.payload.
        self.payload_io.seek(0)
        self.payload_io.truncate()
        self.handle.perform()
        self.payload = self.payload_io.getvalue()
        return self.payload
    def get(self, url="", params=None):
        "Ship a GET request for a specified URL, capture the response."
        if params:
            url += "?" + urllib_parse.urlencode(params)
        self.set_option(pycurl.HTTPGET, 1)
        return self.__request(url)
    def post(self, cgi, params):
        "Ship a POST request to a specified CGI, capture the response."
        self.set_option(pycurl.POST, 1)
        self.set_option(pycurl.POSTFIELDS, urllib_parse.urlencode(params))
        return self.__request(cgi)
    def body(self):
        "Return the body from the last response."
        return self.payload
    def header(self):
        "Return the header from the last response."
        return self.hdr
    def get_info(self, *args):
        "Get information about retrieval."
        return self.handle.getinfo(*args)
    def info(self):
        "Return a dictionary with all info on the last response."
        m = {}
        m['effective-url'] = self.handle.getinfo(pycurl.EFFECTIVE_URL)
        m['http-code'] = self.handle.getinfo(pycurl.HTTP_CODE)
        m['total-time'] = self.handle.getinfo(pycurl.TOTAL_TIME)
        m['namelookup-time'] = self.handle.getinfo(pycurl.NAMELOOKUP_TIME)
        m['connect-time'] = self.handle.getinfo(pycurl.CONNECT_TIME)
        m['pretransfer-time'] = self.handle.getinfo(pycurl.PRETRANSFER_TIME)
        m['redirect-time'] = self.handle.getinfo(pycurl.REDIRECT_TIME)
        m['redirect-count'] = self.handle.getinfo(pycurl.REDIRECT_COUNT)
        m['size-upload'] = self.handle.getinfo(pycurl.SIZE_UPLOAD)
        m['size-download'] = self.handle.getinfo(pycurl.SIZE_DOWNLOAD)
        m['speed-upload'] = self.handle.getinfo(pycurl.SPEED_UPLOAD)
        m['header-size'] = self.handle.getinfo(pycurl.HEADER_SIZE)
        m['request-size'] = self.handle.getinfo(pycurl.REQUEST_SIZE)
        m['content-length-download'] = self.handle.getinfo(pycurl.CONTENT_LENGTH_DOWNLOAD)
        m['content-length-upload'] = self.handle.getinfo(pycurl.CONTENT_LENGTH_UPLOAD)
        m['content-type'] = self.handle.getinfo(pycurl.CONTENT_TYPE)
        m['response-code'] = self.handle.getinfo(pycurl.RESPONSE_CODE)
        m['speed-download'] = self.handle.getinfo(pycurl.SPEED_DOWNLOAD)
        m['ssl-verifyresult'] = self.handle.getinfo(pycurl.SSL_VERIFYRESULT)
        m['filetime'] = self.handle.getinfo(pycurl.INFO_FILETIME)
        m['starttransfer-time'] = self.handle.getinfo(pycurl.STARTTRANSFER_TIME)
        # CLEANUP: duplicate redirect-time/redirect-count assignments removed.
        m['http-connectcode'] = self.handle.getinfo(pycurl.HTTP_CONNECTCODE)
        m['httpauth-avail'] = self.handle.getinfo(pycurl.HTTPAUTH_AVAIL)
        m['proxyauth-avail'] = self.handle.getinfo(pycurl.PROXYAUTH_AVAIL)
        m['os-errno'] = self.handle.getinfo(pycurl.OS_ERRNO)
        m['num-connects'] = self.handle.getinfo(pycurl.NUM_CONNECTS)
        m['ssl-engines'] = self.handle.getinfo(pycurl.SSL_ENGINES)
        m['cookielist'] = self.handle.getinfo(pycurl.INFO_COOKIELIST)
        m['lastsocket'] = self.handle.getinfo(pycurl.LASTSOCKET)
        m['ftp-entry-path'] = self.handle.getinfo(pycurl.FTP_ENTRY_PATH)
        return m
    def answered(self, check):
        "Did a given check string occur in the last payload?"
        # NOTE(review): on Python 3 the payload is bytes, so `check` must
        # also be bytes -- confirm against callers.
        return self.payload.find(check) >= 0
    def close(self):
        "Close a session, freeing resources."
        if self.handle:
            self.handle.close()
            self.handle = None
        self.hdr = ""
        self.payload = ""
    def __del__(self):
        self.close()
if __name__ == "__main__":
    # Smoke test: fetch a URL (default: the curl project site), dump the
    # response body, then print the transfer metadata dictionary.
    if len(sys.argv) < 2:
        url = 'http://curl.haxx.se'
    else:
        url = sys.argv[1]
    c = Curl()
    c.get(url)
    print(c.body())
    print('='*74 + '\n')
    import pprint
    pprint.pprint(c.info())
    print(c.get_info(pycurl.OS_ERRNO))
    print(c.info()['os-errno'])
    c.close()
| {
"repo_name": "solome/jyp",
"path": "misc/virtenv/lib/python2.7/site-packages/curl/__init__.py",
"copies": "10",
"size": "7037",
"license": "mpl-2.0",
"hash": 8535137645339292000,
"line_mean": 37.6648351648,
"line_max": 90,
"alpha_frac": 0.6396191559,
"autogenerated": false,
"ratio": 3.5958099131323453,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9235429069032345,
"avg_score": null,
"num_lines": null
} |
"""A high-level wrapper around the whole ui.TableView system."""
import collections
import collections.abc

import ui
class Cell():
    """A single cell in a ui.TableView.

    This class "subclasses" ui.TableViewCell by wrapping it: every property
    below delegates to the wrapped cell instance.
    """

    def __init__(self):
        # The wrapped Pythonista ui.TableViewCell instance.
        self._cell = ui.TableViewCell()

    @property
    def accessory_type(self):
        """Delegates to the wrapped cell's accessory_type."""
        return self._cell.accessory_type

    @accessory_type.setter
    def accessory_type(self, value):
        self._cell.accessory_type = value

    @property
    def content_view(self):
        """Delegates to the wrapped cell's content_view (read-only)."""
        return self._cell.content_view

    @property
    def detail_text_label(self):
        """Delegates to the wrapped cell's detail_text_label (read-only)."""
        return self._cell.detail_text_label

    @property
    def image_view(self):
        """Delegates to the wrapped cell's image_view (read-only)."""
        return self._cell.image_view

    @property
    def selectable(self):
        """Delegates to the wrapped cell's selectable flag."""
        return self._cell.selectable

    @selectable.setter
    def selectable(self, value):
        self._cell.selectable = value

    @property
    def selected_background_view(self):
        """Delegates to the wrapped cell's selected_background_view."""
        return self._cell.selected_background_view

    @selected_background_view.setter
    def selected_background_view(self, value):
        self._cell.selected_background_view = value

    @property
    def text_label(self):
        """Delegates to the wrapped cell's text_label (read-only)."""
        return self._cell.text_label
class Section(collections.abc.MutableSet):
    """A section inside a TableView.

    This contains TableView cells.
    """
    # BUGFIX: base class was `collections.MutableSet`, an alias removed in
    # Python 3.10; `collections.abc.MutableSet` is the supported path.

    def __init__(self, tableview):
        # The cells belonging to this section.
        self.cells = set()
        # Back-reference to the owning TableView.
        # BUGFIX: was `self.tableview = tv` (undefined name `tv`).
        self.tableview = tableview

    def __contains__(self, item):
        return item in self.cells

    def __iter__(self):
        return iter(self.cells)

    def __len__(self):
        # BUGFIX: MutableSet requires __len__; without it the class was
        # abstract and could never be instantiated.
        return len(self.cells)

    def add(self, cell):
        # BUGFIX: was `self.cells.add(key)` (undefined name `key`).
        self.cells.add(cell)

    def discard(self, cell):
        self.cells.discard(cell)
class TableView(collections.abc.Container):
    """A view to display a list of items in a single column."""
    # BUGFIX: base class was `collections.Container`, an alias removed in
    # Python 3.10; `collections.abc.Container` is the supported path.

    def __init__(self):
        # Start with a single empty section; sections hold the cells.
        self.sections = [Section(self)]

    def __contains__(self, key):
        return key in self.sections
| {
"repo_name": "controversial/ui2",
"path": "ui2/view_classes/TableView.py",
"copies": "1",
"size": "1923",
"license": "mit",
"hash": 5480735436470973000,
"line_mean": 21.8928571429,
"line_max": 64,
"alpha_frac": 0.6307852314,
"autogenerated": false,
"ratio": 4.00625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002380952380952381,
"num_lines": 84
} |
"""A high-speed, production ready, thread pooled, generic HTTP server.
Simplest example on how to use this module directly
(without using CherryPy's application machinery)::
from cherrypy import wsgiserver
def my_crazy_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
return ['Hello world!']
server = wsgiserver.CherryPyWSGIServer(
('0.0.0.0', 8070), my_crazy_app,
server_name='www.cherrypy.example')
server.start()
The CherryPy WSGI server can serve as many WSGI applications
as you want in one instance by using a WSGIPathInfoDispatcher::
d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance.
This won't call the CherryPy engine (application side) at all, only the
HTTP server, which is independent from the rest of CherryPy. Don't
let the name "CherryPyWSGIServer" throw you; the name merely reflects
its origin, not its coupling.
For those of you wanting to understand internals of this module, here's the
basic call flow. The server's listening thread runs a very tight loop,
sticking incoming connections onto a Queue::
server = CherryPyWSGIServer(...)
server.start()
while True:
tick()
# This blocks until a request comes in:
child = socket.accept()
conn = HTTPConnection(child, ...)
server.requests.put(conn)
Worker threads are kept in a pool and poll the Queue, popping off and then
handling each connection in turn. Each connection can consist of an arbitrary
number of requests and their responses, so we run a nested loop::
while True:
conn = server.requests.get()
conn.communicate()
-> while True:
req = HTTPRequest(...)
req.parse_request()
-> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
req.rfile.readline()
read_headers(req.rfile, req.inheaders)
req.respond()
-> response = app(...)
try:
for chunk in response:
if chunk:
req.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
if req.close_connection:
return
"""
from __future__ import absolute_import

__all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer',
           'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile',
           'CP_fileobject',
           'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert',
           'WorkerThread', 'ThreadPool', 'SSLAdapter',
           'CherryPyWSGIServer',
           'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0',
           'WSGIPathInfoDispatcher']

import os
try:
    # Python 3 name.
    import queue
except ImportError:
    # Python 2 name. Catch ImportError specifically rather than a bare
    # except, which would also swallow KeyboardInterrupt/SystemExit.
    import Queue as queue
import re
import rfc822
import socket
import sys

# Some Windows Python builds lack this constant; 41 is the IANA-assigned
# protocol number for IPv6, as defined in ws2def.h.
if 'win' in sys.platform and not hasattr(socket, 'IPPROTO_IPV6'):
    socket.IPPROTO_IPV6 = 41

try:
    import cStringIO as StringIO
except ImportError:
    import StringIO

# Passed to makefile(); -1 selects the platform default buffer size.
DEFAULT_BUFFER_SIZE = -1

# True when socket._fileobject buffers reads in a plain byte string
# (older Python 2.x) rather than a StringIO; CP_fileobject below defines
# different read/readline implementations for each representation.
_fileobject_uses_str_type = isinstance(socket._fileobject(None)._rbuf, basestring)

import threading
import time
import traceback
def format_exc(limit=None):
    """Like print_exc() but return a string. Backport for Python 2.3."""
    exc_type, exc_value, exc_tb = sys.exc_info()
    try:
        lines = traceback.format_exception(exc_type, exc_value, exc_tb, limit)
        return ''.join(lines)
    finally:
        # Drop our references to the traceback to break the reference
        # cycle between the frame and the exception objects.
        exc_type = exc_value = exc_tb = None
# NOTE(review): these two imports are Python 2 locations (urllib.parse on
# Python 3); the sys.version_info branch below suggests dual-version intent,
# but these lines would fail on Python 3 -- confirm target interpreter.
from urllib import unquote
from urlparse import urlparse
import warnings

if sys.version_info >= (3, 0):
    bytestr = bytes
    unicodestr = str
    basestring = (bytes, str)
    def ntob(n, encoding='ISO-8859-1'):
        """Return the given native string as a byte string in the given encoding."""
        # In Python 3, the native string type is unicode
        return n.encode(encoding)
else:
    bytestr = str
    unicodestr = unicode
    basestring = basestring
    def ntob(n, encoding='ISO-8859-1'):
        """Return the given native string as a byte string in the given encoding."""
        # In Python 2, the native string type is bytes. Assume it's already
        # in the given encoding, which for ISO-8859-1 is almost always what
        # was intended.
        return n

# Byte-string constants used throughout the parser, built with ntob() so
# they are bytes on both Python 2 and Python 3.
LF = ntob('\n')
CRLF = ntob('\r\n')
TAB = ntob('\t')
SPACE = ntob(' ')
COLON = ntob(':')
SEMICOLON = ntob(';')
EMPTY = ntob('')
NUMBER_SIGN = ntob('#')
QUESTION_MARK = ntob('?')
ASTERISK = ntob('*')
FORWARD_SLASH = ntob('/')

# Case-insensitive "%2F": an escaped slash in a Request-URI path, which must
# NOT be unquoted to "/" (see read_request_line).
quoted_slash = re.compile(ntob("(?i)%2F"))
import errno
def plat_specific_errors(*errnames):
    """Return error numbers for all errors in errnames on this platform.

    The 'errno' module contains different global constants depending on
    the specific platform (OS). This function will return the list of
    numeric values for a given list of potential names.
    """
    available = dir(errno)
    numbers = [getattr(errno, name) for name in errnames if name in available]
    # Collapse aliases that map to the same number (dict.fromkeys de-dupes).
    return list(dict.fromkeys(numbers).keys())
# Errors that mean "interrupted system call; retry the operation".
socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")

# Errors that indicate the peer went away mid-conversation; the server
# treats these as a normal (if abrupt) end of the connection.
socket_errors_to_ignore = plat_specific_errors(
    "EPIPE",
    "EBADF", "WSAEBADF",
    "ENOTSOCK", "WSAENOTSOCK",
    "ETIMEDOUT", "WSAETIMEDOUT",
    "ECONNREFUSED", "WSAECONNREFUSED",
    "ECONNRESET", "WSAECONNRESET",
    "ECONNABORTED", "WSAECONNABORTED",
    "ENETRESET", "WSAENETRESET",
    "EHOSTDOWN", "EHOSTUNREACH",
)
# Some socket/SSL layers report timeouts with a message string in
# e.args[0] rather than a numeric errno, so these strings live in the
# same list and are compared against e.args[0] alongside the numbers.
socket_errors_to_ignore.append("timed out")
socket_errors_to_ignore.append("The read operation timed out")

# Errors that mean "no data available right now" on non-blocking sockets.
socket_errors_nonblocking = plat_specific_errors(
    'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')

# Headers whose repeated occurrences may be folded into a single
# comma-separated value (RFC 2616 sec 4.2); consumed by read_headers().
comma_separated_headers = [ntob(h) for h in
    ['Accept', 'Accept-Charset', 'Accept-Encoding',
     'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control',
     'Connection', 'Content-Encoding', 'Content-Language', 'Expect',
     'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE',
     'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning',
     'WWW-Authenticate']]

import logging
# Process-wide statistics registry shared with the rest of CherryPy.
if not hasattr(logging, 'statistics'): logging.statistics = {}
def read_headers(rfile, hdict=None):
    """Read headers from the given stream into the given header dict.

    If hdict is None, a new header dict is created. Returns the populated
    header dict.

    Headers which are repeated are folded together using a comma if their
    specification so dictates.

    This function raises ValueError when the read bytes violate the HTTP spec.
    You should probably return "400 Bad Request" if this happens.
    """
    if hdict is None:
        hdict = {}

    while True:
        line = rfile.readline()
        if not line:
            # No more data--illegal end of headers
            raise ValueError("Illegal end of headers.")

        if line == CRLF:
            # Normal end of headers
            break
        if not line.endswith(CRLF):
            raise ValueError("HTTP requires CRLF terminators")

        # NOTE(review): line[0] is a one-character str only on Python 2
        # (on Python 3 indexing bytes yields an int) -- confirm before
        # porting this module.
        if line[0] in (SPACE, TAB):
            # It's a continuation line.
            v = line.strip()
            # NOTE(review): 'k' retains its value from the previous
            # iteration, so the continuation is stored under the prior
            # header name; for non-comma-separated headers this *replaces*
            # the earlier value instead of extending it -- verify intended.
        else:
            try:
                k, v = line.split(COLON, 1)
            except ValueError:
                raise ValueError("Illegal header line.")
            # TODO: what about TE and WWW-Authenticate?
            # Normalize the header name for case-insensitive lookups.
            k = k.strip().title()
            v = v.strip()
            hname = k

        if k in comma_separated_headers:
            existing = hdict.get(hname)
            if existing:
                # Fold the repeated header into one comma-separated value.
                v = ", ".join((existing, v))
        hdict[hname] = v

    return hdict
class MaxSizeExceeded(Exception):
    """Raised when a request line, header block, or entity is too large."""
class SizeCheckWrapper(object):
    """Wraps a file-like object, raising MaxSizeExceeded if too large."""

    def __init__(self, rfile, maxlen):
        self.rfile = rfile
        # Maximum number of bytes allowed through; a falsy value disables
        # the check entirely (see _check_length).
        self.maxlen = maxlen
        # Running total of bytes consumed through this wrapper.
        self.bytes_read = 0

    def _check_length(self):
        # Raise once the running total exceeds the configured cap.
        if self.maxlen and self.bytes_read > self.maxlen:
            raise MaxSizeExceeded()

    def read(self, size=None):
        """Read up to 'size' bytes, counting them against maxlen."""
        data = self.rfile.read(size)
        self.bytes_read += len(data)
        self._check_length()
        return data

    def readline(self, size=None):
        """Read one line (at most 'size' bytes), counting against maxlen."""
        if size is not None:
            data = self.rfile.readline(size)
            self.bytes_read += len(data)
            self._check_length()
            return data

        # User didn't specify a size ...
        # We read the line in chunks to make sure it's not a 100MB line !
        res = []
        while True:
            data = self.rfile.readline(256)
            self.bytes_read += len(data)
            self._check_length()
            res.append(data)
            # See http://www.cherrypy.org/ticket/421
            # NOTE(review): "\n" is a native str literal; comparing it to a
            # byte-string slice assumes Python 2 semantics -- confirm
            # before porting.
            if len(data) < 256 or data[-1:] == "\n":
                return EMPTY.join(res)

    def readlines(self, sizehint=0):
        """Read all lines; stop early once sizehint bytes are accumulated."""
        # Shamelessly stolen from StringIO
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines

    def close(self):
        self.rfile.close()

    def __iter__(self):
        return self

    def __next__(self):
        # Python 3 iterator protocol.
        data = next(self.rfile)
        self.bytes_read += len(data)
        self._check_length()
        return data

    def next(self):
        # Python 2 iterator protocol (same logic as __next__).
        data = self.rfile.next()
        self.bytes_read += len(data)
        self._check_length()
        return data
class KnownLengthRFile(object):
    """Wraps a file-like object, returning an empty string when exhausted.

    Reads are capped at ``content_length`` bytes so a WSGI application
    can never read past the declared request entity into the next
    pipelined request.
    """

    def __init__(self, rfile, content_length):
        self.rfile = rfile
        # Bytes of the entity body still unread.
        self.remaining = content_length

    def read(self, size=None):
        """Read up to 'size' bytes (all remaining bytes if size is None)."""
        if self.remaining == 0:
            return ''
        if size is None:
            size = self.remaining
        else:
            size = min(size, self.remaining)

        data = self.rfile.read(size)
        self.remaining -= len(data)
        return data

    def readline(self, size=None):
        """Read one line, never exceeding the remaining entity bytes."""
        if self.remaining == 0:
            return ''
        if size is None:
            size = self.remaining
        else:
            size = min(size, self.remaining)

        data = self.rfile.readline(size)
        self.remaining -= len(data)
        return data

    def readlines(self, sizehint=0):
        """Read lines until EOF; stop early once sizehint bytes are read.

        BUGFIX: previously 'sizehint' was passed to readline() as a
        per-line *size*, so readlines() with the default sizehint of 0
        always returned []. Read whole lines instead, treating sizehint
        as a total-bytes hint (as SizeCheckWrapper.readlines does).
        """
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines

    def close(self):
        self.rfile.close()

    def __iter__(self):
        return self

    def __next__(self):
        # BUGFIX: iterate via readline() so iteration honors 'remaining';
        # the old body read the underlying stream directly and could
        # consume bytes past the declared Content-Length.
        data = self.readline()
        if not data:
            raise StopIteration
        return data

    # Python 2 iterator protocol (alias; SizeCheckWrapper does the same).
    next = __next__
class ChunkedRFile(object):
    """Wraps a file-like object, returning an empty string when exhausted.

    This class is intended to provide a conforming wsgi.input value for
    request entities that have been encoded with the 'chunked' transfer
    encoding (RFC 2616 sec 3.6.1).
    """

    def __init__(self, rfile, maxlen, bufsize=8192):
        self.rfile = rfile
        # Cap on total bytes read (sizes + data + trailers); falsy = no cap.
        self.maxlen = maxlen
        self.bytes_read = 0
        # Decoded-but-unconsumed entity bytes.
        self.buffer = EMPTY
        self.bufsize = bufsize
        # Set True once the terminating zero-size chunk has been seen.
        self.closed = False

    def _fetch(self):
        """Decode the next chunk from the stream into self.buffer."""
        if self.closed:
            return

        line = self.rfile.readline()
        self.bytes_read += len(line)

        if self.maxlen and self.bytes_read > self.maxlen:
            raise MaxSizeExceeded("Request Entity Too Large", self.maxlen)

        line = line.strip().split(SEMICOLON, 1)

        try:
            chunk_size = line.pop(0)
            chunk_size = int(chunk_size, 16)
        except ValueError:
            raise ValueError("Bad chunked transfer size: " + repr(chunk_size))

        if chunk_size <= 0:
            # The zero-size chunk terminates the entity.
            self.closed = True
            return

        ## if line: chunk_extension = line[0]

        if self.maxlen and self.bytes_read + chunk_size > self.maxlen:
            raise IOError("Request Entity Too Large")

        chunk = self.rfile.read(chunk_size)
        self.bytes_read += len(chunk)
        self.buffer += chunk

        # Each chunk's data is followed by a bare CRLF.
        crlf = self.rfile.read(2)
        if crlf != CRLF:
            raise ValueError(
                "Bad chunked transfer coding (expected '\\r\\n', "
                "got " + repr(crlf) + ")")

    def read(self, size=None):
        """Read up to 'size' bytes (until EOF when size is falsy)."""
        data = EMPTY
        while True:
            if size and len(data) >= size:
                return data

            if not self.buffer:
                self._fetch()
                if not self.buffer:
                    # EOF
                    return data

            if size:
                remaining = size - len(data)
                data += self.buffer[:remaining]
                self.buffer = self.buffer[remaining:]
            else:
                data += self.buffer
                # BUGFIX: clear the consumed buffer; previously it was left
                # intact, so this loop never reached _fetch()/EOF again and
                # spun forever, duplicating the buffer into 'data'.
                self.buffer = EMPTY

    def readline(self, size=None):
        """Read one line (at most 'size' bytes when size is truthy)."""
        data = EMPTY
        while True:
            if size and len(data) >= size:
                return data

            if not self.buffer:
                self._fetch()
                if not self.buffer:
                    # EOF
                    return data

            newline_pos = self.buffer.find(LF)
            if size:
                if newline_pos == -1:
                    remaining = size - len(data)
                    data += self.buffer[:remaining]
                    self.buffer = self.buffer[remaining:]
                else:
                    # BUGFIX: consume through the newline and return;
                    # previously the LF stayed at the head of the buffer,
                    # so the next pass consumed zero bytes forever.
                    remaining = min(size - len(data), newline_pos + 1)
                    data += self.buffer[:remaining]
                    self.buffer = self.buffer[remaining:]
                    if data[-1:] == LF:
                        return data
            else:
                if newline_pos == -1:
                    data += self.buffer
                    # BUGFIX: clear the consumed buffer (same hang as read).
                    self.buffer = EMPTY
                else:
                    # BUGFIX: include the newline and return the line.
                    data += self.buffer[:newline_pos + 1]
                    self.buffer = self.buffer[newline_pos + 1:]
                    return data

    def readlines(self, sizehint=0):
        """Read all lines; stop early once sizehint bytes are accumulated."""
        # Shamelessly stolen from StringIO
        total = 0
        lines = []
        # BUGFIX: sizehint is a total-bytes hint, not a per-line size limit
        # (matching SizeCheckWrapper.readlines).
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines

    def read_trailer_lines(self):
        """Yield the trailer lines that follow the terminating zero chunk."""
        if not self.closed:
            raise ValueError(
                "Cannot read trailers until the request body has been read.")

        while True:
            line = self.rfile.readline()
            if not line:
                # No more data--illegal end of headers
                raise ValueError("Illegal end of headers.")

            self.bytes_read += len(line)
            if self.maxlen and self.bytes_read > self.maxlen:
                raise IOError("Request Entity Too Large")

            if line == CRLF:
                # Normal end of headers
                break
            if not line.endswith(CRLF):
                raise ValueError("HTTP requires CRLF terminators")

            yield line

    def close(self):
        self.rfile.close()

    def __iter__(self):
        # BUGFIX: the previous generator referenced the undefined name
        # 'sizehint', raising NameError on the first iteration.
        line = self.readline()
        while line:
            yield line
            line = self.readline()
class HTTPRequest(object):
    """An HTTP Request (and response).

    A single HTTP connection may consist of multiple request/response pairs.
    """

    server = None
    """The HTTPServer object which is receiving this request."""

    conn = None
    """The HTTPConnection object on which this request connected."""

    inheaders = {}
    """A dict of request headers."""

    outheaders = []
    """A list of header tuples to write in the response."""

    ready = False
    """When True, the request has been parsed and is ready to begin generating
    the response. When False, signals the calling Connection that the response
    should not be generated and the connection should close."""

    close_connection = False
    """Signals the calling Connection that the request should close. This does
    not imply an error! The client and/or server may each request that the
    connection be closed."""

    chunked_write = False
    """If True, output will be encoded with the "chunked" transfer-coding.

    This value is set automatically inside send_headers."""

    def __init__(self, server, conn):
        self.server = server
        self.conn = conn

        self.ready = False
        self.started_request = False
        self.scheme = ntob("http")
        if self.server.ssl_adapter is not None:
            self.scheme = ntob("https")
        # Use the lowest-common protocol in case read_request_line errors.
        self.response_protocol = 'HTTP/1.0'
        self.inheaders = {}

        self.status = ""
        self.outheaders = []
        self.sent_headers = False
        self.close_connection = self.__class__.close_connection
        self.chunked_read = False
        self.chunked_write = self.__class__.chunked_write

    def parse_request(self):
        """Parse the next HTTP request start-line and message-headers."""
        # Cap the start-line + headers at max_request_header_size bytes.
        self.rfile = SizeCheckWrapper(self.conn.rfile,
                                      self.server.max_request_header_size)
        try:
            self.read_request_line()
        except MaxSizeExceeded:
            self.simple_response("414 Request-URI Too Long",
                "The Request-URI sent with the request exceeds the maximum "
                "allowed bytes.")
            return

        try:
            success = self.read_request_headers()
        except MaxSizeExceeded:
            self.simple_response("413 Request Entity Too Large",
                "The headers sent with the request exceed the maximum "
                "allowed bytes.")
            return
        else:
            if not success:
                return

        self.ready = True

    def read_request_line(self):
        """Read and parse the Request-Line, setting uri/method/path/qs.

        On any protocol violation this writes a simple error response and
        returns without setting self.ready.
        """
        # HTTP/1.1 connections are persistent by default. If a client
        # requests a page, then idles (leaves the connection open),
        # then rfile.readline() will raise socket.error("timed out").
        # Note that it does this based on the value given to settimeout(),
        # and doesn't need the client to request or acknowledge the close
        # (although your TCP stack might suffer for it: cf Apache's history
        # with FIN_WAIT_2).
        request_line = self.rfile.readline()

        # Set started_request to True so communicate() knows to send 408
        # from here on out.
        self.started_request = True
        if not request_line:
            # Force self.ready = False so the connection will close.
            self.ready = False
            return

        if request_line == CRLF:
            # RFC 2616 sec 4.1: "...if the server is reading the protocol
            # stream at the beginning of a message and receives a CRLF
            # first, it should ignore the CRLF."
            # But only ignore one leading line! else we enable a DoS.
            request_line = self.rfile.readline()
            if not request_line:
                self.ready = False
                return

        if not request_line.endswith(CRLF):
            self.simple_response("400 Bad Request", "HTTP requires CRLF terminators")
            return

        try:
            method, uri, req_protocol = request_line.strip().split(SPACE, 2)
            # Extract major/minor digits, e.g. "HTTP/1.1" -> (1, 1).
            rp = int(req_protocol[5]), int(req_protocol[7])
        except (ValueError, IndexError):
            self.simple_response("400 Bad Request", "Malformed Request-Line")
            return

        self.uri = uri
        self.method = method

        # uri may be an abs_path (including "http://host.domain.tld");
        scheme, authority, path = self.parse_request_uri(uri)
        if NUMBER_SIGN in path:
            self.simple_response("400 Bad Request",
                                 "Illegal #fragment in Request-URI.")
            return

        if scheme:
            self.scheme = scheme

        qs = EMPTY
        if QUESTION_MARK in path:
            path, qs = path.split(QUESTION_MARK, 1)

        # Unquote the path+params (e.g. "/this%20path" -> "/this path").
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
        #
        # But note that "...a URI must be separated into its components
        # before the escaped characters within those components can be
        # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
        # Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path".
        try:
            atoms = [unquote(x) for x in quoted_slash.split(path)]
        except ValueError:
            ex = sys.exc_info()[1]
            self.simple_response("400 Bad Request", ex.args[0])
            return
        path = "%2F".join(atoms)
        self.path = path

        # Note that, like wsgiref and most other HTTP servers,
        # we "% HEX HEX"-unquote the path but not the query string.
        self.qs = qs

        # Compare request and server HTTP protocol versions, in case our
        # server does not support the requested protocol. Limit our output
        # to min(req, server). We want the following output:
        #     request    server     actual written   supported response
        #     protocol   protocol  response protocol    feature set
        # a     1.0        1.0           1.0                1.0
        # b     1.0        1.1           1.1                1.0
        # c     1.1        1.0           1.0                1.0
        # d     1.1        1.1           1.1                1.1
        # Notice that, in (b), the response will be "HTTP/1.1" even though
        # the client only understands 1.0. RFC 2616 10.5.6 says we should
        # only return 505 if the _major_ version is different.
        sp = int(self.server.protocol[5]), int(self.server.protocol[7])

        if sp[0] != rp[0]:
            self.simple_response("505 HTTP Version Not Supported")
            return
        self.request_protocol = req_protocol
        self.response_protocol = "HTTP/%s.%s" % min(rp, sp)

    def read_request_headers(self):
        """Read self.rfile into self.inheaders. Return success."""

        # then all the http headers
        try:
            read_headers(self.rfile, self.inheaders)
        except ValueError:
            ex = sys.exc_info()[1]
            self.simple_response("400 Bad Request", ex.args[0])
            return False

        mrbs = self.server.max_request_body_size
        if mrbs and int(self.inheaders.get("Content-Length", 0)) > mrbs:
            self.simple_response("413 Request Entity Too Large",
                "The entity sent with the request exceeds the maximum "
                "allowed bytes.")
            return False

        # Persistent connection support
        if self.response_protocol == "HTTP/1.1":
            # Both server and client are HTTP/1.1
            if self.inheaders.get("Connection", "") == "close":
                self.close_connection = True
        else:
            # Either the server or client (or both) are HTTP/1.0
            if self.inheaders.get("Connection", "") != "Keep-Alive":
                self.close_connection = True

        # Transfer-Encoding support
        te = None
        if self.response_protocol == "HTTP/1.1":
            te = self.inheaders.get("Transfer-Encoding")
            if te:
                te = [x.strip().lower() for x in te.split(",") if x.strip()]

        self.chunked_read = False

        if te:
            for enc in te:
                if enc == "chunked":
                    self.chunked_read = True
                else:
                    # Note that, even if we see "chunked", we must reject
                    # if there is an extension we don't recognize.
                    self.simple_response("501 Unimplemented")
                    self.close_connection = True
                    return False

        # From PEP 333:
        # "Servers and gateways that implement HTTP 1.1 must provide
        # transparent support for HTTP 1.1's "expect/continue" mechanism.
        # This may be done in any of several ways:
        #   1. Respond to requests containing an Expect: 100-continue request
        #      with an immediate "100 Continue" response, and proceed normally.
        #   2. Proceed with the request normally, but provide the application
        #      with a wsgi.input stream that will send the "100 Continue"
        #      response if/when the application first attempts to read from
        #      the input stream. The read request must then remain blocked
        #      until the client responds.
        #   3. Wait until the client decides that the server does not support
        #      expect/continue, and sends the request body on its own.
        #      (This is suboptimal, and is not recommended.)
        #
        # We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
        # but it seems like it would be a big slowdown for such a rare case.
        if self.inheaders.get("Expect", "") == "100-continue":
            # Don't use simple_response here, because it emits headers
            # we don't want. See http://www.cherrypy.org/ticket/951
            msg = self.server.protocol + " 100 Continue\r\n\r\n"
            try:
                self.conn.wfile.sendall(msg)
            except socket.error:
                x = sys.exc_info()[1]
                if x.args[0] not in socket_errors_to_ignore:
                    raise
        return True

    def parse_request_uri(self, uri):
        """Parse a Request-URI into (scheme, authority, path).

        Note that Request-URI's must be one of::

            Request-URI    = "*" | absoluteURI | abs_path | authority

        Therefore, a Request-URI which starts with a double forward-slash
        cannot be a "net_path"::

            net_path      = "//" authority [ abs_path ]

        Instead, it must be interpreted as an "abs_path" with an empty first
        path segment::

            abs_path      = "/"  path_segments
            path_segments = segment *( "/" segment )
            segment       = *pchar *( ";" param )
            param         = *pchar
        """
        if uri == ASTERISK:
            return None, None, uri

        i = uri.find('://')
        if i > 0 and QUESTION_MARK not in uri[:i]:
            # An absoluteURI.
            # If there's a scheme (and it must be http or https), then:
            # http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query ]]
            scheme, remainder = uri[:i].lower(), uri[i + 3:]
            authority, path = remainder.split(FORWARD_SLASH, 1)
            path = FORWARD_SLASH + path
            return scheme, authority, path

        if uri.startswith(FORWARD_SLASH):
            # An abs_path.
            return None, None, uri
        else:
            # An authority.
            return None, uri, None

    def respond(self):
        """Call the gateway and write its iterable output."""
        mrbs = self.server.max_request_body_size
        if self.chunked_read:
            self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
        else:
            cl = int(self.inheaders.get("Content-Length", 0))
            if mrbs and mrbs < cl:
                if not self.sent_headers:
                    self.simple_response("413 Request Entity Too Large",
                        "The entity sent with the request exceeds the maximum "
                        "allowed bytes.")
                return
            self.rfile = KnownLengthRFile(self.conn.rfile, cl)

        self.server.gateway(self).respond()

        if (self.ready and not self.sent_headers):
            self.sent_headers = True
            self.send_headers()
        if self.chunked_write:
            # Terminating zero-length chunk (no trailers are emitted).
            self.conn.wfile.sendall("0\r\n\r\n")

    def simple_response(self, status, msg=""):
        """Write a simple response back to the client."""
        status = str(status)
        buf = [self.server.protocol + SPACE +
               status + CRLF,
               "Content-Length: %s\r\n" % len(msg),
               "Content-Type: text/plain\r\n"]

        if status[:3] in ("413", "414"):
            # Request Entity Too Large / Request-URI Too Long
            self.close_connection = True
            if self.response_protocol == 'HTTP/1.1':
                # This will not be true for 414, since read_request_line
                # usually raises 414 before reading the whole line, and we
                # therefore cannot know the proper response_protocol.
                buf.append("Connection: close\r\n")
            else:
                # HTTP/1.0 had no 413/414 status nor Connection header.
                # Emit 400 instead and trust the message body is enough.
                # NOTE(review): this assignment does not alter buf[0], which
                # was already built with the original status above -- the
                # wire still carries the 413/414 status line; confirm intent.
                status = "400 Bad Request"

        buf.append(CRLF)
        if msg:
            if isinstance(msg, unicodestr):
                msg = msg.encode("ISO-8859-1")
            buf.append(msg)

        try:
            self.conn.wfile.sendall("".join(buf))
        except socket.error:
            x = sys.exc_info()[1]
            if x.args[0] not in socket_errors_to_ignore:
                raise

    def write(self, chunk):
        """Write unbuffered data to the client."""
        if self.chunked_write and chunk:
            # Chunked transfer-coding frame: hex length, CRLF, data, CRLF.
            buf = [hex(len(chunk))[2:], CRLF, chunk, CRLF]
            self.conn.wfile.sendall(EMPTY.join(buf))
        else:
            self.conn.wfile.sendall(chunk)

    def send_headers(self):
        """Assert, process, and send the HTTP response message-headers.

        You must set self.status, and self.outheaders before calling this.
        """
        hkeys = [key.lower() for key, value in self.outheaders]
        status = int(self.status[:3])

        if status == 413:
            # Request Entity Too Large. Close conn to avoid garbage.
            self.close_connection = True
        elif "content-length" not in hkeys:
            # "All 1xx (informational), 204 (no content),
            # and 304 (not modified) responses MUST NOT
            # include a message-body." So no point chunking.
            if status < 200 or status in (204, 205, 304):
                pass
            else:
                if (self.response_protocol == 'HTTP/1.1'
                        and self.method != 'HEAD'):
                    # Use the chunked transfer-coding
                    self.chunked_write = True
                    self.outheaders.append(("Transfer-Encoding", "chunked"))
                else:
                    # Closing the conn is the only way to determine len.
                    self.close_connection = True

        if "connection" not in hkeys:
            if self.response_protocol == 'HTTP/1.1':
                # Both server and client are HTTP/1.1 or better
                if self.close_connection:
                    self.outheaders.append(("Connection", "close"))
            else:
                # Server and/or client are HTTP/1.0
                if not self.close_connection:
                    self.outheaders.append(("Connection", "Keep-Alive"))

        if (not self.close_connection) and (not self.chunked_read):
            # Read any remaining request body data on the socket.
            # "If an origin server receives a request that does not include an
            # Expect request-header field with the "100-continue" expectation,
            # the request includes a request body, and the server responds
            # with a final status code before reading the entire request body
            # from the transport connection, then the server SHOULD NOT close
            # the transport connection until it has read the entire request,
            # or until the client closes the connection. Otherwise, the client
            # might not reliably receive the response message. However, this
            # requirement is not be construed as preventing a server from
            # defending itself against denial-of-service attacks, or from
            # badly broken client implementations."
            remaining = getattr(self.rfile, 'remaining', 0)
            if remaining > 0:
                self.rfile.read(remaining)

        if "date" not in hkeys:
            self.outheaders.append(("Date", rfc822.formatdate()))

        if "server" not in hkeys:
            self.outheaders.append(("Server", self.server.server_name))

        buf = [self.server.protocol + SPACE + self.status + CRLF]
        for k, v in self.outheaders:
            buf.append(k + COLON + SPACE + v + CRLF)
        buf.append(CRLF)
        self.conn.wfile.sendall(EMPTY.join(buf))
class NoSSLError(Exception):
    """Raised when a client speaks plain HTTP to an HTTPS socket."""
class FatalSSLAlert(Exception):
    """Raised when the SSL implementation signals a fatal alert."""
class CP_fileobject(socket._fileobject):
    """Faux file object attached to a socket object."""

    def __init__(self, *args, **kwargs):
        # Byte counters consumed by the server's statistics machinery.
        self.bytes_read = 0
        self.bytes_written = 0
        socket._fileobject.__init__(self, *args, **kwargs)

    def sendall(self, data):
        """Sendall for non-blocking sockets."""
        while data:
            try:
                bytes_sent = self.send(data)
                data = data[bytes_sent:]
            except socket.error, e:
                # EAGAIN/EWOULDBLOCK: socket not ready yet; retry the send.
                if e.args[0] not in socket_errors_nonblocking:
                    raise

    def send(self, data):
        # Single send() attempt; tracks bytes written for statistics.
        bytes_sent = self._sock.send(data)
        self.bytes_written += bytes_sent
        return bytes_sent

    def flush(self):
        # Drain the write buffer inherited from socket._fileobject.
        if self._wbuf:
            buffer = "".join(self._wbuf)
            self._wbuf = []
            self.sendall(buffer)

    def recv(self, size):
        # Retry on EAGAIN/EWOULDBLOCK and EINTR; tracks bytes read.
        while True:
            try:
                data = self._sock.recv(size)
                self.bytes_read += len(data)
                return data
            except socket.error, e:
                if (e.args[0] not in socket_errors_nonblocking
                    and e.args[0] not in socket_error_eintr):
                    raise

    # socket._fileobject stores its read buffer either as a StringIO
    # (newer Python 2.x) or as a plain string (older releases); pick the
    # matching read/readline implementations at class-definition time.
    if not _fileobject_uses_str_type:
        def read(self, size=-1):
            """Read up to 'size' bytes (all data until EOF if size < 0)."""
            # Use max, disallow tiny reads in a loop as they are very inefficient.
            # We never leave read() with any leftover data from a new recv() call
            # in our internal buffer.
            rbufsize = max(self._rbufsize, self.default_bufsize)
            # Our use of StringIO rather than lists of string objects returned by
            # recv() minimizes memory usage and fragmentation that occurs when
            # rbufsize is large compared to the typical return value of recv().
            buf = self._rbuf
            buf.seek(0, 2)  # seek end
            if size < 0:
                # Read until EOF
                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    data = self.recv(rbufsize)
                    if not data:
                        break
                    buf.write(data)
                return buf.getvalue()
            else:
                # Read until size bytes or EOF seen, whichever comes first
                buf_len = buf.tell()
                if buf_len >= size:
                    # Already have size bytes in our buffer?  Extract and return.
                    buf.seek(0)
                    rv = buf.read(size)
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return rv

                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    left = size - buf_len
                    # recv() will malloc the amount of memory given as its
                    # parameter even though it often returns much less data
                    # than that.  The returned data string is short lived
                    # as we copy it into a StringIO and free it.  This avoids
                    # fragmentation issues on many platforms.
                    data = self.recv(left)
                    if not data:
                        break
                    n = len(data)
                    if n == size and not buf_len:
                        # Shortcut.  Avoid buffer data copies when:
                        # - We have no data in our buffer.
                        # AND
                        # - Our call to recv returned exactly the
                        #   number of bytes we were asked to read.
                        return data
                    if n == left:
                        buf.write(data)
                        del data  # explicit free
                        break
                    assert n <= left, "recv(%d) returned %d bytes" % (left, n)
                    buf.write(data)
                    buf_len += n
                    del data  # explicit free
                    #assert buf_len == buf.tell()
                return buf.getvalue()

        def readline(self, size=-1):
            """Read one line, at most 'size' bytes when size >= 0."""
            buf = self._rbuf
            buf.seek(0, 2)  # seek end
            if buf.tell() > 0:
                # check if we already have it in our buffer
                buf.seek(0)
                bline = buf.readline(size)
                if bline.endswith('\n') or len(bline) == size:
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return bline
                del bline
            if size < 0:
                # Read until \n or EOF, whichever comes first
                if self._rbufsize <= 1:
                    # Speed up unbuffered case
                    buf.seek(0)
                    buffers = [buf.read()]
                    self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                    data = None
                    recv = self.recv
                    while data != "\n":
                        data = recv(1)
                        if not data:
                            break
                        buffers.append(data)
                    return "".join(buffers)

                buf.seek(0, 2)  # seek end
                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    nl = data.find('\n')
                    if nl >= 0:
                        nl += 1
                        buf.write(data[:nl])
                        # Save the bytes after the newline for the next read.
                        self._rbuf.write(data[nl:])
                        del data
                        break
                    buf.write(data)
                return buf.getvalue()
            else:
                # Read until size bytes or \n or EOF seen, whichever comes first
                buf.seek(0, 2)  # seek end
                buf_len = buf.tell()
                if buf_len >= size:
                    buf.seek(0)
                    rv = buf.read(size)
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return rv
                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    left = size - buf_len
                    # did we just receive a newline?
                    nl = data.find('\n', 0, left)
                    if nl >= 0:
                        nl += 1
                        # save the excess data to _rbuf
                        self._rbuf.write(data[nl:])
                        if buf_len:
                            buf.write(data[:nl])
                            break
                        else:
                            # Shortcut.  Avoid data copy through buf when returning
                            # a substring of our first recv().
                            return data[:nl]
                    n = len(data)
                    if n == size and not buf_len:
                        # Shortcut.  Avoid data copy through buf when
                        # returning exactly all of our first recv().
                        return data
                    if n >= left:
                        buf.write(data[:left])
                        self._rbuf.write(data[left:])
                        break
                    buf.write(data)
                    buf_len += n
                    #assert buf_len == buf.tell()
                return buf.getvalue()
    else:
        def read(self, size=-1):
            """Read up to 'size' bytes (str-buffer variant)."""
            if size < 0:
                # Read until EOF
                buffers = [self._rbuf]
                self._rbuf = ""
                if self._rbufsize <= 1:
                    recv_size = self.default_bufsize
                else:
                    recv_size = self._rbufsize

                while True:
                    data = self.recv(recv_size)
                    if not data:
                        break
                    buffers.append(data)
                return "".join(buffers)
            else:
                # Read until size bytes or EOF seen, whichever comes first
                data = self._rbuf
                buf_len = len(data)
                if buf_len >= size:
                    self._rbuf = data[size:]
                    return data[:size]
                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    left = size - buf_len
                    recv_size = max(self._rbufsize, left)
                    data = self.recv(recv_size)
                    if not data:
                        break
                    buffers.append(data)
                    n = len(data)
                    if n >= left:
                        # Keep any surplus bytes for the next read.
                        self._rbuf = data[left:]
                        buffers[-1] = data[:left]
                        break
                    buf_len += n
                return "".join(buffers)

        def readline(self, size=-1):
            """Read one line, at most 'size' bytes (str-buffer variant)."""
            data = self._rbuf
            if size < 0:
                # Read until \n or EOF, whichever comes first
                if self._rbufsize <= 1:
                    # Speed up unbuffered case
                    assert data == ""
                    buffers = []
                    while data != "\n":
                        data = self.recv(1)
                        if not data:
                            break
                        buffers.append(data)
                    return "".join(buffers)
                nl = data.find('\n')
                if nl >= 0:
                    nl += 1
                    self._rbuf = data[nl:]
                    return data[:nl]
                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    buffers.append(data)
                    nl = data.find('\n')
                    if nl >= 0:
                        nl += 1
                        self._rbuf = data[nl:]
                        buffers[-1] = data[:nl]
                        break
                return "".join(buffers)
            else:
                # Read until size bytes or \n or EOF seen, whichever comes first
                nl = data.find('\n', 0, size)
                if nl >= 0:
                    nl += 1
                    self._rbuf = data[nl:]
                    return data[:nl]
                buf_len = len(data)
                if buf_len >= size:
                    self._rbuf = data[size:]
                    return data[:size]
                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    buffers.append(data)
                    left = size - buf_len
                    nl = data.find('\n', 0, left)
                    if nl >= 0:
                        nl += 1
                        self._rbuf = data[nl:]
                        buffers[-1] = data[:nl]
                        break
                    n = len(data)
                    if n >= left:
                        self._rbuf = data[left:]
                        buffers[-1] = data[:left]
                        break
                    buf_len += n
                return "".join(buffers)
class HTTPConnection(object):
    """An HTTP connection (active socket).

    server: the Server object which received this connection.
    socket: the raw socket object (usually TCP) for this connection.
    makefile: a fileobject class for reading from the socket.
    """

    remote_addr = None
    remote_port = None
    ssl_env = None
    rbufsize = DEFAULT_BUFFER_SIZE
    wbufsize = DEFAULT_BUFFER_SIZE
    RequestHandlerClass = HTTPRequest

    def __init__(self, server, sock, makefile=CP_fileobject):
        self.server = server
        self.socket = sock
        # Separate buffered file objects for reading and writing.
        self.rfile = makefile(sock, "rb", self.rbufsize)
        self.wfile = makefile(sock, "wb", self.wbufsize)
        self.requests_seen = 0

    def communicate(self):
        """Read each request and respond appropriately."""
        request_seen = False
        try:
            while True:
                # (re)set req to None so that if something goes wrong in
                # the RequestHandlerClass constructor, the error doesn't
                # get written to the previous request.
                req = None
                req = self.RequestHandlerClass(self.server, self)

                # This order of operations should guarantee correct pipelining.
                req.parse_request()
                if self.server.stats['Enabled']:
                    self.requests_seen += 1
                if not req.ready:
                    # Something went wrong in the parsing (and the server has
                    # probably already made a simple_response). Return and
                    # let the conn close.
                    return

                request_seen = True
                req.respond()
                if req.close_connection:
                    return
        except socket.error:
            e = sys.exc_info()[1]
            errnum = e.args[0]
            # sadly SSL sockets return a different (longer) time out string
            if errnum == 'timed out' or errnum == 'The read operation timed out':
                # Don't error if we're between requests; only error
                # if 1) no request has been started at all, or 2) we're
                # in the middle of a request.
                # See http://www.cherrypy.org/ticket/853
                if (not request_seen) or (req and req.started_request):
                    # Don't bother writing the 408 if the response
                    # has already started being written.
                    if req and not req.sent_headers:
                        try:
                            req.simple_response("408 Request Timeout")
                        except FatalSSLAlert:
                            # Close the connection.
                            return
            elif errnum not in socket_errors_to_ignore:
                if req and not req.sent_headers:
                    try:
                        req.simple_response("500 Internal Server Error",
                                            format_exc())
                    except FatalSSLAlert:
                        # Close the connection.
                        return
            return
        except (KeyboardInterrupt, SystemExit):
            raise
        except FatalSSLAlert:
            # Close the connection.
            return
        except NoSSLError:
            if req and not req.sent_headers:
                # Unwrap our wfile
                self.wfile = CP_fileobject(self.socket._sock, "wb", self.wbufsize)
                req.simple_response("400 Bad Request",
                    "The client sent a plain HTTP request, but "
                    "this server only speaks HTTPS on this port.")
                self.linger = True
        except Exception:
            if req and not req.sent_headers:
                try:
                    req.simple_response("500 Internal Server Error", format_exc())
                except FatalSSLAlert:
                    # Close the connection.
                    return

    # When True, close() skips the explicit shutdown so the client has a
    # chance to read our final response before the FIN is sent.
    linger = False

    def close(self):
        """Close the socket underlying this connection."""
        self.rfile.close()

        if not self.linger:
            # Python's socket module does NOT call close on the kernel socket
            # when you call socket.close(). We do so manually here because we
            # want this server to send a FIN TCP segment immediately. Note this
            # must be called *before* calling socket.close(), because the latter
            # drops its reference to the kernel socket.
            if hasattr(self.socket, '_sock'):
                self.socket._sock.close()
            self.socket.close()
        else:
            # On the other hand, sometimes we want to hang around for a bit
            # to make sure the client has a chance to read our entire
            # response. Skipping the close() calls here delays the FIN
            # packet until the socket object is garbage-collected later.
            # Someday, perhaps, we'll do the full lingering_close that
            # Apache does, but not today.
            pass
class TrueyZero(object):
"""An object which equals and does math like the integer '0' but evals True."""
def __add__(self, other):
return other
def __radd__(self, other):
return other
trueyzero = TrueyZero()
_SHUTDOWNREQUEST = None
class WorkerThread(threading.Thread):
    """Thread which continuously polls a Queue for Connection objects.

    Due to the timing issues of polling a Queue, a WorkerThread does not
    check its own 'ready' flag after it has started. To stop the thread,
    it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
    (one for each running WorkerThread).
    """

    conn = None
    """The current connection pulled off the Queue, or None."""

    server = None
    """The HTTP Server which spawned this thread, and which owns the
    Queue and is placing active connections into it."""

    ready = False
    """A simple flag for the calling server to know when this thread
    has begun polling the Queue."""

    def __init__(self, server):
        self.ready = False
        self.server = server

        # Running totals, folded in from each connection once it finishes.
        self.requests_seen = 0
        self.bytes_read = 0
        self.bytes_written = 0
        self.start_time = None
        self.work_time = 0
        # Each stat is "completed total" plus the in-flight connection's
        # contribution; 'trueyzero' keeps the and/or chain adding nothing
        # while idle (start_time is None) without reading as False.
        self.stats = {
            'Requests': lambda s: self.requests_seen + ((self.start_time is None) and trueyzero or self.conn.requests_seen),
            'Bytes Read': lambda s: self.bytes_read + ((self.start_time is None) and trueyzero or self.conn.rfile.bytes_read),
            'Bytes Written': lambda s: self.bytes_written + ((self.start_time is None) and trueyzero or self.conn.wfile.bytes_written),
            'Work Time': lambda s: self.work_time + ((self.start_time is None) and trueyzero or time.time() - self.start_time),
            'Read Throughput': lambda s: s['Bytes Read'](s) / (s['Work Time'](s) or 1e-6),
            'Write Throughput': lambda s: s['Bytes Written'](s) / (s['Work Time'](s) or 1e-6),
        }
        threading.Thread.__init__(self)

    def run(self):
        # Register this worker's stats with the owning server.
        self.server.stats['Worker Threads'][self.getName()] = self.stats
        try:
            self.ready = True
            while True:
                conn = self.server.requests.get()
                if conn is _SHUTDOWNREQUEST:
                    return

                self.conn = conn
                if self.server.stats['Enabled']:
                    self.start_time = time.time()
                try:
                    conn.communicate()
                finally:
                    # Always close the connection and fold its counters
                    # into our running totals, even on error.
                    conn.close()
                    if self.server.stats['Enabled']:
                        self.requests_seen += self.conn.requests_seen
                        self.bytes_read += self.conn.rfile.bytes_read
                        self.bytes_written += self.conn.wfile.bytes_written
                        self.work_time += time.time() - self.start_time
                        self.start_time = None
                    self.conn = None
        except (KeyboardInterrupt, SystemExit):
            # Hand the interrupt to the server's main loop.
            exc = sys.exc_info()[1]
            self.server.interrupt = exc
class ThreadPool(object):
    """A Request Queue for an HTTPServer which pools threads.

    ThreadPool objects must provide min, get(), put(obj), start()
    and stop(timeout) attributes.
    """

    def __init__(self, server, min=10, max=-1):
        self.server = server
        self.min = min
        self.max = max
        self._threads = []
        self._queue = queue.Queue()
        # get() is delegated straight to the queue.
        self.get = self._queue.get

    def start(self):
        """Start the pool of threads."""
        for i in range(self.min):
            self._threads.append(WorkerThread(self.server))
        for worker in self._threads:
            worker.setName("CP Server " + worker.getName())
            worker.start()
        # Block until every worker is actually polling the queue.
        for worker in self._threads:
            while not worker.ready:
                time.sleep(.1)

    def _get_idle(self):
        """Number of worker threads which are idle. Read-only."""
        return len([t for t in self._threads if t.conn is None])
    idle = property(_get_idle, doc=_get_idle.__doc__)

    def put(self, obj):
        """Place a connection (or shutdown sentinel) on the queue."""
        self._queue.put(obj)
        if obj is _SHUTDOWNREQUEST:
            return

    def grow(self, amount):
        """Spawn new worker threads (not above self.max)."""
        for i in range(amount):
            if self.max > 0 and len(self._threads) >= self.max:
                break
            worker = WorkerThread(self.server)
            worker.setName("CP Server " + worker.getName())
            self._threads.append(worker)
            worker.start()

    def shrink(self, amount):
        """Kill off worker threads (not below self.min)."""
        # Remove any dead threads from our list.
        # Bug fix: iterate over a snapshot of the list. The original
        # removed items from the list being iterated, which skips the
        # element following each removal and leaves dead threads behind.
        for t in self._threads[:]:
            if not t.isAlive():
                self._threads.remove(t)
                amount -= 1

        if amount > 0:
            for i in range(min(amount, len(self._threads) - self.min)):
                # Put a number of shutdown requests on the queue equal
                # to 'amount'. Once each of those is processed by a worker,
                # that worker will terminate and be culled from our list
                # the next time shrink() runs.
                self._queue.put(_SHUTDOWNREQUEST)

    def stop(self, timeout=5):
        """Shut down all workers, waiting up to *timeout* seconds each."""
        # Must shut down threads here so the code that calls
        # this method can know when all threads are stopped.
        for worker in self._threads:
            self._queue.put(_SHUTDOWNREQUEST)

        # Don't join currentThread (when stop is called inside a request).
        current = threading.currentThread()
        if timeout and timeout >= 0:
            endtime = time.time() + timeout
        while self._threads:
            worker = self._threads.pop()
            if worker is not current and worker.isAlive():
                try:
                    if timeout is None or timeout < 0:
                        worker.join()
                    else:
                        remaining_time = endtime - time.time()
                        if remaining_time > 0:
                            worker.join(remaining_time)
                        if worker.isAlive():
                            # We exhausted the timeout.
                            # Forcibly shut down the socket.
                            c = worker.conn
                            if c and not c.rfile.closed:
                                try:
                                    c.socket.shutdown(socket.SHUT_RD)
                                except TypeError:
                                    # pyOpenSSL sockets don't take an arg
                                    c.socket.shutdown()
                            worker.join()
                except (AssertionError,
                        # Ignore repeated Ctrl-C.
                        # See http://www.cherrypy.org/ticket/691.
                        KeyboardInterrupt):
                    pass

    def _get_qsize(self):
        return self._queue.qsize()
    qsize = property(_get_qsize)
# Pick the platform-appropriate implementation of
# prevent_socket_inheritance at import time: fcntl on POSIX, the Win32
# API via ctypes on Windows, or a no-op when neither is available.
try:
    import fcntl
except ImportError:
    try:
        from ctypes import windll, WinError
    except ImportError:
        def prevent_socket_inheritance(sock):
            """Dummy function, since neither fcntl nor ctypes are available."""
            pass
    else:
        def prevent_socket_inheritance(sock):
            """Mark the given socket fd as non-inheritable (Windows)."""
            # 1 == HANDLE_FLAG_INHERIT; clearing it stops child processes
            # from inheriting the listening socket.
            if not windll.kernel32.SetHandleInformation(sock.fileno(), 1, 0):
                raise WinError()
else:
    def prevent_socket_inheritance(sock):
        """Mark the given socket fd as non-inheritable (POSIX)."""
        fd = sock.fileno()
        old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
class SSLAdapter(object):
    """Base class for SSL driver library adapters.

    Required methods:

        * ``wrap(sock) -> (wrapped socket, ssl environ dict)``
        * ``makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE) -> socket file object``
    """

    def __init__(self, certificate, private_key, certificate_chain=None):
        # Paths (or driver-specific objects) for the server certificate,
        # its private key, and an optional intermediate chain; concrete
        # adapters decide how to interpret them.
        self.certificate = certificate
        self.private_key = private_key
        self.certificate_chain = certificate_chain

    def wrap(self, sock):
        """Wrap *sock* and return (wrapped socket, ssl environ dict)."""
        # Bug fix: the original did 'raise NotImplemented', which raises a
        # confusing TypeError (NotImplemented is a constant, not an
        # exception class). Raise the proper exception type instead.
        raise NotImplementedError

    def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
        """Return a file-like object wrapping the (SSL) socket."""
        raise NotImplementedError
class HTTPServer(object):
    """An HTTP server."""

    _bind_addr = "127.0.0.1"
    _interrupt = None

    gateway = None
    """A Gateway instance."""

    minthreads = None
    """The minimum number of worker threads to create (default 10)."""

    maxthreads = None
    """The maximum number of worker threads to create (default -1 = no limit)."""

    server_name = None
    """The name of the server; defaults to socket.gethostname()."""

    protocol = "HTTP/1.1"
    """The version string to write in the Status-Line of all HTTP responses.
    For example, "HTTP/1.1" is the default. This also limits the supported
    features used in the response."""

    request_queue_size = 5
    """The 'backlog' arg to socket.listen(); max queued connections (default 5)."""

    shutdown_timeout = 5
    """The total time, in seconds, to wait for worker threads to cleanly exit."""

    timeout = 10
    """The timeout in seconds for accepted connections (default 10)."""

    version = "CherryPy/3.2.1"
    """A version string for the HTTPServer."""

    software = None
    """The value to set for the SERVER_SOFTWARE entry in the WSGI environ.
    If None, this defaults to ``'%s Server' % self.version``."""

    ready = False
    """An internal flag which marks whether the socket is accepting connections."""

    max_request_header_size = 0
    """The maximum size, in bytes, for request headers, or 0 for no limit."""

    max_request_body_size = 0
    """The maximum size, in bytes, for request bodies, or 0 for no limit."""

    nodelay = True
    """If True (the default since 3.1), sets the TCP_NODELAY socket option."""

    ConnectionClass = HTTPConnection
    """The class to use for handling HTTP connections."""

    ssl_adapter = None
    """An instance of SSLAdapter (or a subclass).
    You must have the corresponding SSL driver library installed."""

    def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1,
                 server_name=None):
        self.bind_addr = bind_addr
        self.gateway = gateway
        self.requests = ThreadPool(self, min=minthreads or 1, max=maxthreads)
        if not server_name:
            server_name = socket.gethostname()
        self.server_name = server_name
        self.clear_stats()

    def clear_stats(self):
        # Reset all bookkeeping. Stats are stored as lambdas so they are
        # computed lazily and cost nothing while 'Enabled' is False.
        self._start_time = None
        self._run_time = 0
        self.stats = {
            'Enabled': False,
            'Bind Address': lambda s: repr(self.bind_addr),
            'Run time': lambda s: (not s['Enabled']) and -1 or self.runtime(),
            'Accepts': 0,
            'Accepts/sec': lambda s: s['Accepts'] / self.runtime(),
            'Queue': lambda s: getattr(self.requests, "qsize", None),
            'Threads': lambda s: len(getattr(self.requests, "_threads", [])),
            'Threads Idle': lambda s: getattr(self.requests, "idle", None),
            'Socket Errors': 0,
            'Requests': lambda s: (not s['Enabled']) and -1 or sum([w['Requests'](w) for w
                in s['Worker Threads'].values()], 0),
            'Bytes Read': lambda s: (not s['Enabled']) and -1 or sum([w['Bytes Read'](w) for w
                in s['Worker Threads'].values()], 0),
            'Bytes Written': lambda s: (not s['Enabled']) and -1 or sum([w['Bytes Written'](w) for w
                in s['Worker Threads'].values()], 0),
            'Work Time': lambda s: (not s['Enabled']) and -1 or sum([w['Work Time'](w) for w
                in s['Worker Threads'].values()], 0),
            'Read Throughput': lambda s: (not s['Enabled']) and -1 or sum(
                [w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6)
                 for w in s['Worker Threads'].values()], 0),
            'Write Throughput': lambda s: (not s['Enabled']) and -1 or sum(
                [w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6)
                 for w in s['Worker Threads'].values()], 0),
            'Worker Threads': {},
        }
        logging.statistics["CherryPy HTTPServer %d" % id(self)] = self.stats

    def runtime(self):
        # Cumulative seconds spent serving (across start/stop cycles).
        if self._start_time is None:
            return self._run_time
        else:
            return self._run_time + (time.time() - self._start_time)

    def __str__(self):
        return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
                              self.bind_addr)

    def _get_bind_addr(self):
        return self._bind_addr
    def _set_bind_addr(self, value):
        if isinstance(value, tuple) and value[0] in ('', None):
            # Despite the socket module docs, using '' does not
            # allow AI_PASSIVE to work. Passing None instead
            # returns '0.0.0.0' like we want. In other words:
            #     host    AI_PASSIVE     result
            #      ''         Y          192.168.x.y
            #      ''         N          192.168.x.y
            #     None        Y          0.0.0.0
            #     None        N          127.0.0.1
            # But since you can get the same effect with an explicit
            # '0.0.0.0', we deny both the empty string and None as values.
            raise ValueError("Host values of '' or None are not allowed. "
                             "Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
                             "to listen on all active interfaces.")
        self._bind_addr = value
    bind_addr = property(_get_bind_addr, _set_bind_addr,
        doc="""The interface on which to listen for connections.
        For TCP sockets, a (host, port) tuple. Host values may be any IPv4
        or IPv6 address, or any valid hostname. The string 'localhost' is a
        synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
        The string '0.0.0.0' is a special IPv4 entry meaning "any active
        interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
        IPv6. The empty string or None are not allowed.
        For UNIX sockets, supply the filename as a string.""")

    def start(self):
        """Run the server forever."""
        # We don't have to trap KeyboardInterrupt or SystemExit here,
        # because cherrypy.server already does so, calling self.stop() for us.
        # If you're using this server with another framework, you should
        # trap those exceptions in whatever code block calls start().
        self._interrupt = None

        if self.software is None:
            self.software = "%s Server" % self.version

        # SSL backward compatibility
        if (self.ssl_adapter is None and
            getattr(self, 'ssl_certificate', None) and
            getattr(self, 'ssl_private_key', None)):
            warnings.warn(
                "SSL attributes are deprecated in CherryPy 3.2, and will "
                "be removed in CherryPy 3.3. Use an ssl_adapter attribute "
                "instead.",
                DeprecationWarning
            )
            try:
                from cherrypy.wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter
            except ImportError:
                pass
            else:
                self.ssl_adapter = pyOpenSSLAdapter(
                    self.ssl_certificate, self.ssl_private_key,
                    getattr(self, 'ssl_certificate_chain', None))

        # Select the appropriate socket
        if isinstance(self.bind_addr, basestring):
            # AF_UNIX socket

            # So we can reuse the socket... (best-effort; ignore failure)
            try: os.unlink(self.bind_addr)
            except: pass

            # So everyone can access the socket... (best-effort)
            try: os.chmod(self.bind_addr, 511) # 0777
            except: pass

            info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
        else:
            # AF_INET or AF_INET6 socket
            # Get the correct address family for our host (allows IPv6 addresses)
            host, port = self.bind_addr
            try:
                info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                          socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
            except socket.gaierror:
                # Name resolution failed; fall back to a literal address.
                if ':' in self.bind_addr[0]:
                    info = [(socket.AF_INET6, socket.SOCK_STREAM,
                             0, "", self.bind_addr + (0, 0))]
                else:
                    info = [(socket.AF_INET, socket.SOCK_STREAM,
                             0, "", self.bind_addr)]

        self.socket = None
        msg = "No socket could be created"
        # Try each candidate address until one binds successfully.
        for res in info:
            af, socktype, proto, canonname, sa = res
            try:
                self.bind(af, socktype, proto)
            except socket.error:
                if self.socket:
                    self.socket.close()
                self.socket = None
                continue
            break
        if not self.socket:
            raise socket.error(msg)

        # Timeout so KeyboardInterrupt can be caught on Win32
        self.socket.settimeout(1)
        self.socket.listen(self.request_queue_size)

        # Create worker threads
        self.requests.start()

        self.ready = True
        self._start_time = time.time()
        while self.ready:
            self.tick()
            if self.interrupt:
                while self.interrupt is True:
                    # Wait for self.stop() to complete. See _set_interrupt.
                    time.sleep(0.1)
                if self.interrupt:
                    raise self.interrupt

    def bind(self, family, type, proto=0):
        """Create (or recreate) the actual socket object."""
        self.socket = socket.socket(family, type, proto)
        prevent_socket_inheritance(self.socket)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if self.nodelay and not isinstance(self.bind_addr, str):
            self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

        if self.ssl_adapter is not None:
            self.socket = self.ssl_adapter.bind(self.socket)

        # If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
        # activate dual-stack. See http://www.cherrypy.org/ticket/871.
        if (hasattr(socket, 'AF_INET6') and family == socket.AF_INET6
            and self.bind_addr[0] in ('::', '::0', '::0.0.0.0')):
            try:
                self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
            except (AttributeError, socket.error):
                # Apparently, the socket option is not available in
                # this machine's TCP stack
                pass

        self.socket.bind(self.bind_addr)

    def tick(self):
        """Accept a new connection and put it on the Queue."""
        try:
            s, addr = self.socket.accept()
            if self.stats['Enabled']:
                self.stats['Accepts'] += 1
            if not self.ready:
                return

            prevent_socket_inheritance(s)
            if hasattr(s, 'settimeout'):
                s.settimeout(self.timeout)

            makefile = CP_fileobject
            ssl_env = {}
            # if ssl cert and key are set, we try to be a secure HTTP server
            if self.ssl_adapter is not None:
                try:
                    s, ssl_env = self.ssl_adapter.wrap(s)
                except NoSSLError:
                    # Plain HTTP on the HTTPS port: answer 400 directly,
                    # since the SSL handshake never completed.
                    msg = ("The client sent a plain HTTP request, but "
                           "this server only speaks HTTPS on this port.")
                    buf = ["%s 400 Bad Request\r\n" % self.protocol,
                           "Content-Length: %s\r\n" % len(msg),
                           "Content-Type: text/plain\r\n\r\n",
                           msg]

                    wfile = makefile(s, "wb", DEFAULT_BUFFER_SIZE)
                    try:
                        wfile.sendall("".join(buf))
                    except socket.error:
                        x = sys.exc_info()[1]
                        if x.args[0] not in socket_errors_to_ignore:
                            raise
                    return
                if not s:
                    return
                makefile = self.ssl_adapter.makefile
                # Re-apply our timeout since we may have a new socket object
                if hasattr(s, 'settimeout'):
                    s.settimeout(self.timeout)

            conn = self.ConnectionClass(self, s, makefile)

            if not isinstance(self.bind_addr, basestring):
                # optional values
                # Until we do DNS lookups, omit REMOTE_HOST
                if addr is None: # sometimes this can happen
                    # figure out if AF_INET or AF_INET6.
                    if len(s.getsockname()) == 2:
                        # AF_INET
                        addr = ('0.0.0.0', 0)
                    else:
                        # AF_INET6
                        addr = ('::', 0)
                conn.remote_addr = addr[0]
                conn.remote_port = addr[1]

            conn.ssl_env = ssl_env

            self.requests.put(conn)
        except socket.timeout:
            # The only reason for the timeout in start() is so we can
            # notice keyboard interrupts on Win32, which don't interrupt
            # accept() by default
            return
        except socket.error:
            x = sys.exc_info()[1]
            if self.stats['Enabled']:
                self.stats['Socket Errors'] += 1
            if x.args[0] in socket_error_eintr:
                # I *think* this is right. EINTR should occur when a signal
                # is received during the accept() call; all docs say retry
                # the call, and I *think* I'm reading it right that Python
                # will then go ahead and poll for and handle the signal
                # elsewhere. See http://www.cherrypy.org/ticket/707.
                return
            if x.args[0] in socket_errors_nonblocking:
                # Just try again. See http://www.cherrypy.org/ticket/479.
                return
            if x.args[0] in socket_errors_to_ignore:
                # Our socket was closed.
                # See http://www.cherrypy.org/ticket/686.
                return
            raise

    def _get_interrupt(self):
        return self._interrupt
    def _set_interrupt(self, interrupt):
        # Use True as a transient marker so start() can tell "stopping"
        # apart from "stopped with this exception".
        self._interrupt = True
        self.stop()
        self._interrupt = interrupt
    interrupt = property(_get_interrupt, _set_interrupt,
                         doc="Set this to an Exception instance to "
                             "interrupt the server.")

    def stop(self):
        """Gracefully shutdown a server that is serving forever."""
        self.ready = False
        if self._start_time is not None:
            self._run_time += (time.time() - self._start_time)
        self._start_time = None

        sock = getattr(self, "socket", None)
        if sock:
            if not isinstance(self.bind_addr, basestring):
                # Touch our own socket to make accept() return immediately.
                try:
                    host, port = sock.getsockname()[:2]
                except socket.error:
                    x = sys.exc_info()[1]
                    if x.args[0] not in socket_errors_to_ignore:
                        # Changed to use error code and not message
                        # See http://www.cherrypy.org/ticket/860.
                        raise
                else:
                    # Note that we're explicitly NOT using AI_PASSIVE,
                    # here, because we want an actual IP to touch.
                    # localhost won't work if we've bound to a public IP,
                    # but it will if we bound to '0.0.0.0' (INADDR_ANY).
                    for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                                  socket.SOCK_STREAM):
                        af, socktype, proto, canonname, sa = res
                        s = None
                        try:
                            s = socket.socket(af, socktype, proto)
                            # See http://groups.google.com/group/cherrypy-users/
                            #        browse_frm/thread/bbfe5eb39c904fe0
                            s.settimeout(1.0)
                            s.connect((host, port))
                            s.close()
                        except socket.error:
                            if s:
                                s.close()
            if hasattr(sock, "close"):
                sock.close()
            self.socket = None

        self.requests.stop(self.shutdown_timeout)
class Gateway(object):
    """A base class to interface HTTPServer with other systems, such as WSGI."""

    def __init__(self, req):
        # The current HTTPRequest being serviced.
        self.req = req

    def respond(self):
        """Process the current request. Must be overridden in a subclass."""
        # Bug fix: the original did 'raise NotImplemented', which raises a
        # confusing TypeError (NotImplemented is a constant, not an
        # exception class). Raise the proper exception type instead.
        raise NotImplementedError
# These may either be wsgiserver.SSLAdapter subclasses or the string names
# of such classes (in which case they will be lazily loaded).
# Resolved via get_ssl_adapter_class() below.
ssl_adapters = {
    'builtin': 'cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter',
    'pyopenssl': 'cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter',
}
def get_ssl_adapter_class(name='pyopenssl'):
    """Return an SSL adapter class for the given name.

    *name* is a key of ``ssl_adapters`` (case-insensitive). A string
    value there is treated as a dotted path and imported lazily on
    first use.
    """
    adapter = ssl_adapters[name.lower()]
    if isinstance(adapter, basestring):
        last_dot = adapter.rfind(".")
        attr_name = adapter[last_dot + 1:]
        mod_path = adapter[:last_dot]

        try:
            # Reuse an already-imported module when possible.
            mod = sys.modules[mod_path]
            if mod is None:
                raise KeyError()
        except KeyError:
            # The last [''] is important.
            mod = __import__(mod_path, globals(), locals(), [''])

        # Let an AttributeError propagate outward.
        try:
            adapter = getattr(mod, attr_name)
        except AttributeError:
            raise AttributeError("'%s' object has no attribute '%s'"
                                 % (mod_path, attr_name))

    return adapter
# -------------------------------- WSGI Stuff -------------------------------- #
class CherryPyWSGIServer(HTTPServer):
    """A subclass of HTTPServer which calls a WSGI application."""

    wsgi_version = (1, 0)
    """The version of WSGI to produce."""

    def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
                 max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5):
        # NOTE: deliberately does not call HTTPServer.__init__; it repeats
        # the setup so the gateway can be chosen from wsgi_version.
        self.requests = ThreadPool(self, min=numthreads or 1, max=max)
        self.wsgi_app = wsgi_app
        self.gateway = wsgi_gateways[self.wsgi_version]

        self.bind_addr = bind_addr
        if not server_name:
            server_name = socket.gethostname()
        self.server_name = server_name
        self.request_queue_size = request_queue_size

        self.timeout = timeout
        self.shutdown_timeout = shutdown_timeout
        self.clear_stats()

    def _get_numthreads(self):
        """The thread pool's minimum size (alias for self.requests.min)."""
        return self.requests.min
    def _set_numthreads(self, value):
        self.requests.min = value
    numthreads = property(_get_numthreads, _set_numthreads)
class WSGIGateway(Gateway):
"""A base class to interface HTTPServer with WSGI."""
def __init__(self, req):
self.req = req
self.started_response = False
self.env = self.get_environ()
self.remaining_bytes_out = None
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
raise NotImplemented
def respond(self):
"""Process the current request."""
response = self.req.server.wsgi_app(self.env, self.start_response)
try:
for chunk in response:
# "The start_response callable must not actually transmit
# the response headers. Instead, it must store them for the
# server or gateway to transmit only after the first
# iteration of the application return value that yields
# a NON-EMPTY string, or upon the application's first
# invocation of the write() callable." (PEP 333)
if chunk:
if isinstance(chunk, unicodestr):
chunk = chunk.encode('ISO-8859-1')
self.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
def start_response(self, status, headers, exc_info = None):
"""WSGI callable to begin the HTTP response."""
# "The application may call start_response more than once,
# if and only if the exc_info argument is provided."
if self.started_response and not exc_info:
raise AssertionError("WSGI start_response called a second "
"time with no exc_info.")
self.started_response = True
# "if exc_info is provided, and the HTTP headers have already been
# sent, start_response must raise an error, and should raise the
# exc_info tuple."
if self.req.sent_headers:
try:
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None
self.req.status = status
for k, v in headers:
if not isinstance(k, bytestr):
raise TypeError("WSGI response header key %r is not a byte string." % k)
if not isinstance(v, bytestr):
raise TypeError("WSGI response header value %r is not a byte string." % v)
if k.lower() == 'content-length':
self.remaining_bytes_out = int(v)
self.req.outheaders.extend(headers)
return self.write
def write(self, chunk):
"""WSGI callable to write unbuffered data to the client.
This method is also used internally by start_response (to write
data from the iterable returned by the WSGI application).
"""
if not self.started_response:
raise AssertionError("WSGI write called before start_response.")
chunklen = len(chunk)
rbo = self.remaining_bytes_out
if rbo is not None and chunklen > rbo:
if not self.req.sent_headers:
# Whew. We can send a 500 to the client.
self.req.simple_response("500 Internal Server Error",
"The requested resource returned more bytes than the "
"declared Content-Length.")
else:
# Dang. We have probably already sent data. Truncate the chunk
# to fit (so the client doesn't hang) and raise an error later.
chunk = chunk[:rbo]
if not self.req.sent_headers:
self.req.sent_headers = True
self.req.send_headers()
self.req.write(chunk)
if rbo is not None:
rbo -= chunklen
if rbo < 0:
raise ValueError(
"Response body exceeds the declared Content-Length.")
class WSGIGateway_10(WSGIGateway):
    """A Gateway class to interface HTTPServer with WSGI 1.0.x."""

    def get_environ(self):
        """Return a new environ dict targeting the given wsgi.version"""
        req = self.req
        env = {
            # set a non-standard environ entry so the WSGI app can know what
            # the *real* server protocol is (and what features to support).
            # See http://www.faqs.org/rfcs/rfc2145.html.
            'ACTUAL_SERVER_PROTOCOL': req.server.protocol,
            'PATH_INFO': req.path,
            'QUERY_STRING': req.qs,
            'REMOTE_ADDR': req.conn.remote_addr or '',
            'REMOTE_PORT': str(req.conn.remote_port or ''),
            'REQUEST_METHOD': req.method,
            'REQUEST_URI': req.uri,
            'SCRIPT_NAME': '',
            'SERVER_NAME': req.server.server_name,
            # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
            'SERVER_PROTOCOL': req.request_protocol,
            'SERVER_SOFTWARE': req.server.software,
            'wsgi.errors': sys.stderr,
            'wsgi.input': req.rfile,
            'wsgi.multiprocess': False,
            'wsgi.multithread': True,
            'wsgi.run_once': False,
            'wsgi.url_scheme': req.scheme,
            'wsgi.version': (1, 0),
        }

        if isinstance(req.server.bind_addr, basestring):
            # AF_UNIX. This isn't really allowed by WSGI, which doesn't
            # address unix domain sockets. But it's better than nothing.
            env["SERVER_PORT"] = ""
        else:
            env["SERVER_PORT"] = str(req.server.bind_addr[1])

        # Request headers
        for k, v in req.inheaders.iteritems():
            env["HTTP_" + k.upper().replace("-", "_")] = v

        # CONTENT_TYPE/CONTENT_LENGTH have no HTTP_ prefix per CGI/WSGI.
        ct = env.pop("HTTP_CONTENT_TYPE", None)
        if ct is not None:
            env["CONTENT_TYPE"] = ct
        cl = env.pop("HTTP_CONTENT_LENGTH", None)
        if cl is not None:
            env["CONTENT_LENGTH"] = cl

        if req.conn.ssl_env:
            env.update(req.conn.ssl_env)

        return env
class WSGIGateway_u0(WSGIGateway_10):
    """A Gateway class to interface HTTPServer with WSGI u.0.

    WSGI u.0 is an experimental protocol, which uses unicode for keys and values
    in both Python 2 and Python 3.
    """

    def get_environ(self):
        """Return a new environ dict targeting the given wsgi.version"""
        req = self.req
        env_10 = WSGIGateway_10.get_environ(self)
        # Re-key the WSGI 1.0 environ with unicode keys.
        env = dict([(k.decode('ISO-8859-1'), v) for k, v in env_10.iteritems()])
        env[u'wsgi.version'] = ('u', 0)

        # Request-URI
        env.setdefault(u'wsgi.url_encoding', u'utf-8')
        try:
            for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
                env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
        except UnicodeDecodeError:
            # Fall back to latin 1 so apps can transcode if needed.
            env[u'wsgi.url_encoding'] = u'ISO-8859-1'
            for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
                env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])

        # Decode any remaining byte-string values (except the raw
        # REQUEST_URI and the input stream object).
        for k, v in sorted(env.items()):
            if isinstance(v, str) and k not in ('REQUEST_URI', 'wsgi.input'):
                env[k] = v.decode('ISO-8859-1')

        return env
# Map a wsgi.version tuple to the Gateway class that produces it.
wsgi_gateways = {
    (1, 0): WSGIGateway_10,
    ('u', 0): WSGIGateway_u0,
}
class WSGIPathInfoDispatcher(object):
    """Dispatch WSGI requests to sub-applications by PATH_INFO prefix.

    apps: a dict or list of (path_prefix, app) pairs.
    """

    def __init__(self, apps):
        # Accept either a mapping or a sequence of (prefix, app) pairs.
        try:
            apps = list(apps.items())
        except AttributeError:
            pass

        # Longest prefixes must be tried first: sort ascending by prefix
        # length, then flip the list (preserving tie order from the input).
        apps.sort(cmp=lambda a, b: cmp(len(a[0]), len(b[0])))
        apps.reverse()

        # Prefixes must start, but not end, with a slash; "" stands in
        # for the root prefix "/".
        self.apps = [(prefix.rstrip("/"), app) for prefix, app in apps]

    def __call__(self, environ, start_response):
        path = environ["PATH_INFO"] or "/"
        for prefix, app in self.apps:
            # self.apps is ordered longest-prefix-first, so the first hit
            # is the most specific mount point.
            if path == prefix or path.startswith(prefix + "/"):
                # Give the app a private environ with the matched prefix
                # shifted from PATH_INFO onto SCRIPT_NAME.
                env = environ.copy()
                env["SCRIPT_NAME"] = env["SCRIPT_NAME"] + prefix
                env["PATH_INFO"] = path[len(prefix):]
                return app(env, start_response)

        # No mount point matched: serve an empty 404.
        start_response('404 Not Found', [('Content-Type', 'text/plain'),
                                         ('Content-Length', '0')])
        return ['']
| {
"repo_name": "mapproxy/mapproxy-renderd",
"path": "mp_renderd/ext/wsgiserver.py",
"copies": "1",
"size": "86726",
"license": "apache-2.0",
"hash": 1431731323040410600,
"line_mean": 36.7069565217,
"line_max": 135,
"alpha_frac": 0.5290916219,
"autogenerated": false,
"ratio": 4.416458725874625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001572771697229737,
"num_lines": 2300
} |
"""A high-speed, production ready, thread pooled, generic WSGI server.
Simplest example on how to use this module directly
(without using CherryPy's application machinery):
from cherrypy import wsgiserver
def my_crazy_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
return ['Hello world!\n']
server = wsgiserver.CherryPyWSGIServer(
('0.0.0.0', 8070), my_crazy_app,
server_name='www.cherrypy.example')
The CherryPy WSGI server can serve as many WSGI applications
as you want in one instance by using a WSGIPathInfoDispatcher:
d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
Want SSL support? Just set these attributes:
server.ssl_certificate = <filename>
server.ssl_private_key = <filename>
if __name__ == '__main__':
try:
server.start()
except KeyboardInterrupt:
server.stop()
This won't call the CherryPy engine (application side) at all, only the
WSGI server, which is independent from the rest of CherryPy. Don't
let the name "CherryPyWSGIServer" throw you; the name merely reflects
its origin, not its coupling.
For those of you wanting to understand internals of this module, here's the
basic call flow. The server's listening thread runs a very tight loop,
sticking incoming connections onto a Queue:
server = CherryPyWSGIServer(...)
server.start()
while True:
tick()
# This blocks until a request comes in:
child = socket.accept()
conn = HTTPConnection(child, ...)
server.requests.put(conn)
Worker threads are kept in a pool and poll the Queue, popping off and then
handling each connection in turn. Each connection can consist of an arbitrary
number of requests and their responses, so we run a nested loop:
while True:
conn = server.requests.get()
conn.communicate()
-> while True:
req = HTTPRequest(...)
req.parse_request()
-> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
req.rfile.readline()
req.read_headers()
req.respond()
-> response = wsgi_app(...)
try:
for chunk in response:
if chunk:
req.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
if req.close_connection:
return
"""
import base64
import os
import Queue
import re
# Matches a percent-encoded slash ("%2F" or "%2f").  The request path is
# split on this pattern before unquoting so that encoded slashes survive
# (see RFC 2396 sec 2.4.2 and HTTPRequest._parse_request).
quoted_slash = re.compile("(?i)%2F")
import rfc822
import socket
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import sys
import threading
import time
import traceback
from urllib import unquote
from urlparse import urlparse
import warnings
try:
from OpenSSL import SSL
from OpenSSL import crypto
except ImportError:
SSL = None
import errno
def plat_specific_errors(*errnames):
    """Map symbolic errno names to the numeric codes on this platform.

    The 'errno' module exposes a platform-dependent (OS-specific) set of
    global constants; names not defined on the current platform are
    silently skipped.  Returns the de-duplicated error numbers.
    """
    available = dir(errno)
    # Dict keys give us de-duplication for free.
    unique = {}
    for name in errnames:
        if name in available:
            unique[getattr(errno, name)] = None
    return unique.keys()
# EINTR means a blocking call was interrupted by a signal; such calls are
# normally just retried.
socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")
# Errors that simply mean the client went away; connections hitting these
# are dropped quietly rather than reported as server failures.
socket_errors_to_ignore = plat_specific_errors(
    "EPIPE",
    "EBADF", "WSAEBADF",
    "ENOTSOCK", "WSAENOTSOCK",
    "ETIMEDOUT", "WSAETIMEDOUT",
    "ECONNREFUSED", "WSAECONNREFUSED",
    "ECONNRESET", "WSAECONNRESET",
    "ECONNABORTED", "WSAECONNABORTED",
    "ENETRESET", "WSAENETRESET",
    "EHOSTDOWN", "EHOSTUNREACH",
    )
# Not an errno: socket timeouts surface as socket.error('timed out'), so
# the string itself is matched against e.args[0] (see HTTPConnection).
socket_errors_to_ignore.append("timed out")
socket_errors_nonblocking = plat_specific_errors(
    'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
# Headers whose repeated occurrences may legally be joined with ", "
# (RFC 2616 sec 4.2); used by HTTPRequest.read_headers.
comma_separated_headers = ['ACCEPT', 'ACCEPT-CHARSET', 'ACCEPT-ENCODING',
    'ACCEPT-LANGUAGE', 'ACCEPT-RANGES', 'ALLOW', 'CACHE-CONTROL',
    'CONNECTION', 'CONTENT-ENCODING', 'CONTENT-LANGUAGE', 'EXPECT',
    'IF-MATCH', 'IF-NONE-MATCH', 'PRAGMA', 'PROXY-AUTHENTICATE', 'TE',
    'TRAILER', 'TRANSFER-ENCODING', 'UPGRADE', 'VARY', 'VIA', 'WARNING',
    'WWW-AUTHENTICATE']
class WSGIPathInfoDispatcher(object):
    """A WSGI dispatcher for dispatch based on the PATH_INFO.

    apps: a dict or list of (path_prefix, app) pairs.
    """
    def __init__(self, apps):
        try:
            apps = apps.items()
        except AttributeError:
            pass
        # Sort the apps by len(path), descending, so the most specific
        # prefix wins.  Sorting on a key (instead of sorting the
        # (path, app) tuples themselves, as plain sort()/reverse() did)
        # never compares two app callables when prefixes are equal, and
        # also accepts dict views, which have no .sort() method.
        apps = sorted(apps, key=lambda pair: len(pair[0]), reverse=True)
        # The path_prefix strings must start, but not end, with a slash.
        # Use "" instead of "/".
        self.apps = [(p.rstrip("/"), a) for p, a in apps]
    def __call__(self, environ, start_response):
        path = environ["PATH_INFO"] or "/"
        for p, app in self.apps:
            # The apps list is sorted by prefix length, descending, so the
            # first match is the longest (most specific) one.
            if path.startswith(p + "/") or path == p:
                # Copy so the matched app cannot clobber our caller's environ.
                environ = environ.copy()
                environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
                environ["PATH_INFO"] = path[len(p):]
                return app(environ, start_response)
        # No prefix matched: respond 404 with an empty body.
        start_response('404 Not Found', [('Content-Type', 'text/plain'),
                                         ('Content-Length', '0')])
        return ['']
class MaxSizeExceeded(Exception):
    """Raised by SizeCheckWrapper when a read exceeds its allowed maximum."""
    pass
class SizeCheckWrapper(object):
    """Wrap a file-like object, raising MaxSizeExceeded once more than
    ``maxlen`` bytes have been read.  A ``maxlen`` of 0 disables the check.
    """
    def __init__(self, rfile, maxlen):
        self.rfile = rfile
        self.maxlen = maxlen
        self.bytes_read = 0
    def _check_length(self):
        # A falsy maxlen (0/None) means "unlimited".
        if self.maxlen and self.bytes_read > self.maxlen:
            raise MaxSizeExceeded()
    def read(self, size=None):
        chunk = self.rfile.read(size)
        self.bytes_read += len(chunk)
        self._check_length()
        return chunk
    def readline(self, size=None):
        if size is not None:
            line = self.rfile.readline(size)
            self.bytes_read += len(line)
            self._check_length()
            return line
        # No size given: pull the line in bounded chunks so that a single
        # enormous (100MB) line cannot be buffered wholesale before the
        # length check gets a chance to fire.
        pieces = []
        while True:
            piece = self.rfile.readline(256)
            self.bytes_read += len(piece)
            self._check_length()
            pieces.append(piece)
            # See http://www.cherrypy.org/ticket/421
            if len(piece) < 256 or piece[-1:] == "\n":
                return ''.join(pieces)
    def readlines(self, sizehint=0):
        # Shamelessly stolen from StringIO
        consumed = 0
        collected = []
        line = self.readline()
        while line:
            collected.append(line)
            consumed += len(line)
            if 0 < sizehint <= consumed:
                break
            line = self.readline()
        return collected
    def close(self):
        self.rfile.close()
    def __iter__(self):
        return self
    def next(self):
        chunk = self.rfile.next()
        self.bytes_read += len(chunk)
        self._check_length()
        return chunk
class HTTPRequest(object):
    """An HTTP Request (and response).

    A single HTTP connection may consist of multiple request/response pairs.

    send: the 'send' method from the connection's socket object.
    wsgi_app: the WSGI application to call.
    environ: a partial WSGI environ (server and connection entries).
        The caller MUST set the following entries:
        * All wsgi.* entries, including .input
        * SERVER_NAME and SERVER_PORT
        * Any SSL_* entries
        * Any custom entries like REMOTE_ADDR and REMOTE_PORT
        * SERVER_SOFTWARE: the value to write in the "Server" response header.
        * ACTUAL_SERVER_PROTOCOL: the value to write in the Status-Line of
          the response. From RFC 2145: "An HTTP server SHOULD send a
          response version equal to the highest version for which the
          server is at least conditionally compliant, and whose major
          version is less than or equal to the one received in the
          request.  An HTTP server MUST NOT send a version for which
          it is not at least conditionally compliant."

    outheaders: a list of header tuples to write in the response.
    ready: when True, the request has been parsed and is ready to begin
        generating the response. When False, signals the calling Connection
        that the response should not be generated and the connection should
        close.
    close_connection: signals the calling Connection that the request
        should close. This does not imply an error! The client and/or
        server may each request that the connection be closed.
    chunked_write: if True, output will be encoded with the "chunked"
        transfer-coding. This value is set automatically inside
        send_headers.
    """
    # Per-request size limits in bytes; 0 disables the corresponding check.
    # Enforced via the SizeCheckWrapper around wsgi.input.
    max_request_header_size = 0
    max_request_body_size = 0
    def __init__(self, wfile, environ, wsgi_app):
        self.rfile = environ['wsgi.input']
        self.wfile = wfile
        # Copy so this request's mutations don't leak into the next one.
        self.environ = environ.copy()
        self.wsgi_app = wsgi_app
        self.ready = False
        self.started_response = False
        self.status = ""
        self.outheaders = []
        self.sent_headers = False
        self.close_connection = False
        self.chunked_write = False
    def parse_request(self):
        """Parse the next HTTP request start-line and message-headers."""
        # Arm the size check for the header section only; respond() re-arms
        # it for the body.
        self.rfile.maxlen = self.max_request_header_size
        self.rfile.bytes_read = 0
        try:
            self._parse_request()
        except MaxSizeExceeded:
            self.simple_response("413 Request Entity Too Large")
            return
    def _parse_request(self):
        """Do the actual parsing; parse_request wraps this for error handling."""
        # HTTP/1.1 connections are persistent by default. If a client
        # requests a page, then idles (leaves the connection open),
        # then rfile.readline() will raise socket.error("timed out").
        # Note that it does this based on the value given to settimeout(),
        # and doesn't need the client to request or acknowledge the close
        # (although your TCP stack might suffer for it: cf Apache's history
        # with FIN_WAIT_2).
        request_line = self.rfile.readline()
        if not request_line:
            # Force self.ready = False so the connection will close.
            self.ready = False
            return
        if request_line == "\r\n":
            # RFC 2616 sec 4.1: "...if the server is reading the protocol
            # stream at the beginning of a message and receives a CRLF
            # first, it should ignore the CRLF."
            # But only ignore one leading line! else we enable a DoS.
            request_line = self.rfile.readline()
            if not request_line:
                self.ready = False
                return
        environ = self.environ
        # Request-Line is "METHOD SP Request-URI SP HTTP-Version".
        method, path, req_protocol = request_line.strip().split(" ", 2)
        environ["REQUEST_METHOD"] = method
        # path may be an abs_path (including "http://host.domain.tld");
        scheme, location, path, params, qs, frag = urlparse(path)
        if frag:
            # RFC 2616 allows no fragment in a Request-URI.
            self.simple_response("400 Bad Request",
                                 "Illegal #fragment in Request-URI.")
            return
        if scheme:
            environ["wsgi.url_scheme"] = scheme
        if params:
            path = path + ";" + params
        environ["SCRIPT_NAME"] = ""
        # Unquote the path+params (e.g. "/this%20path" -> "this path").
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
        #
        # But note that "...a URI must be separated into its components
        # before the escaped characters within those components can be
        # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
        # Splitting on encoded slashes first keeps "%2F" distinct from "/".
        atoms = [unquote(x) for x in quoted_slash.split(path)]
        path = "%2F".join(atoms)
        environ["PATH_INFO"] = path
        # Note that, like wsgiref and most other WSGI servers,
        # we unquote the path but not the query string.
        environ["QUERY_STRING"] = qs
        # Compare request and server HTTP protocol versions, in case our
        # server does not support the requested protocol. Limit our output
        # to min(req, server). We want the following output:
        #     request    server     actual written   supported response
        #     protocol   protocol  response protocol    feature set
        # a     1.0        1.0           1.0                1.0
        # b     1.0        1.1           1.1                1.0
        # c     1.1        1.0           1.0                1.0
        # d     1.1        1.1           1.1                1.1
        # Notice that, in (b), the response will be "HTTP/1.1" even though
        # the client only understands 1.0. RFC 2616 10.5.6 says we should
        # only return 505 if the _major_ version is different.
        # "HTTP/x.y": characters 5 and 7 are the major/minor digits.
        rp = int(req_protocol[5]), int(req_protocol[7])
        server_protocol = environ["ACTUAL_SERVER_PROTOCOL"]
        sp = int(server_protocol[5]), int(server_protocol[7])
        if sp[0] != rp[0]:
            self.simple_response("505 HTTP Version Not Supported")
            return
        # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
        environ["SERVER_PROTOCOL"] = req_protocol
        self.response_protocol = "HTTP/%s.%s" % min(rp, sp)
        # If the Request-URI was an absoluteURI, use its location atom.
        if location:
            environ["SERVER_NAME"] = location
        # then all the http headers
        try:
            self.read_headers()
        except ValueError, ex:
            self.simple_response("400 Bad Request", repr(ex.args))
            return
        mrbs = self.max_request_body_size
        if mrbs and int(environ.get("CONTENT_LENGTH", 0)) > mrbs:
            self.simple_response("413 Request Entity Too Large")
            return
        # Set AUTH_TYPE, REMOTE_USER
        creds = environ.get("HTTP_AUTHORIZATION", "").split(" ", 1)
        environ["AUTH_TYPE"] = creds[0]
        if creds[0].lower() == 'basic':
            # Basic credentials are "base64(user:password)".
            user, pw = base64.decodestring(creds[1]).split(":", 1)
            environ["REMOTE_USER"] = user
        # Persistent connection support
        if self.response_protocol == "HTTP/1.1":
            # Both server and client are HTTP/1.1
            if environ.get("HTTP_CONNECTION", "") == "close":
                self.close_connection = True
        else:
            # Either the server or client (or both) are HTTP/1.0
            if environ.get("HTTP_CONNECTION", "") != "Keep-Alive":
                self.close_connection = True
        # Transfer-Encoding support
        te = None
        if self.response_protocol == "HTTP/1.1":
            te = environ.get("HTTP_TRANSFER_ENCODING")
            if te:
                te = [x.strip().lower() for x in te.split(",") if x.strip()]
        self.chunked_read = False
        if te:
            for enc in te:
                if enc == "chunked":
                    self.chunked_read = True
                else:
                    # Note that, even if we see "chunked", we must reject
                    # if there is an extension we don't recognize.
                    self.simple_response("501 Unimplemented")
                    self.close_connection = True
                    return
        # From PEP 333:
        # "Servers and gateways that implement HTTP 1.1 must provide
        # transparent support for HTTP 1.1's "expect/continue" mechanism.
        # This may be done in any of several ways:
        #   1. Respond to requests containing an Expect: 100-continue request
        #      with an immediate "100 Continue" response, and proceed normally.
        #   2. Proceed with the request normally, but provide the application
        #      with a wsgi.input stream that will send the "100 Continue"
        #      response if/when the application first attempts to read from
        #      the input stream. The read request must then remain blocked
        #      until the client responds.
        #   3. Wait until the client decides that the server does not support
        #      expect/continue, and sends the request body on its own.
        #      (This is suboptimal, and is not recommended.)
        #
        # We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
        # but it seems like it would be a big slowdown for such a rare case.
        if environ.get("HTTP_EXPECT", "") == "100-continue":
            self.simple_response(100)
        self.ready = True
    def read_headers(self):
        """Read header lines from the incoming stream.

        Raises ValueError on a malformed or truncated header section.
        """
        environ = self.environ
        while True:
            line = self.rfile.readline()
            if not line:
                # No more data--illegal end of headers
                raise ValueError("Illegal end of headers.")
            if line == '\r\n':
                # Normal end of headers
                break
            if line[0] in ' \t':
                # It's a continuation line.
                # NOTE(review): 'k'/'envname' carry over from the previous
                # header, so for non-comma-separated headers a continuation
                # line REPLACES the previous value rather than extending it.
                v = line.strip()
            else:
                k, v = line.split(":", 1)
                k, v = k.strip().upper(), v.strip()
                envname = "HTTP_" + k.replace("-", "_")
            if k in comma_separated_headers:
                # Fold repeated occurrences into one comma-joined value.
                existing = environ.get(envname)
                if existing:
                    v = ", ".join((existing, v))
            environ[envname] = v
        # CGI convention: these two lose their HTTP_ prefix.
        ct = environ.pop("HTTP_CONTENT_TYPE", None)
        if ct is not None:
            environ["CONTENT_TYPE"] = ct
        cl = environ.pop("HTTP_CONTENT_LENGTH", None)
        if cl is not None:
            environ["CONTENT_LENGTH"] = cl
    def decode_chunked(self):
        """Decode the 'chunked' transfer coding.

        Buffers the whole decoded body into wsgi.input and sets
        CONTENT_LENGTH.  Returns True on success; returns None (falsy)
        after writing a 400 response on a framing error.
        """
        cl = 0
        data = StringIO.StringIO()
        while True:
            # Chunk header is "<hex-size>[;extension]\r\n".
            line = self.rfile.readline().strip().split(";", 1)
            chunk_size = int(line.pop(0), 16)
            if chunk_size <= 0:
                # Size 0 marks the last chunk.
                break
            ## if line: chunk_extension = line[0]
            cl += chunk_size
            data.write(self.rfile.read(chunk_size))
            crlf = self.rfile.read(2)
            if crlf != "\r\n":
                self.simple_response("400 Bad Request",
                                     "Bad chunked transfer coding "
                                     "(expected '\\r\\n', got %r)" % crlf)
                return
        # Grab any trailer headers
        self.read_headers()
        data.seek(0)
        self.environ["wsgi.input"] = data
        # NOTE(review): str(cl) is always truthy (str(0) == "0"), so the
        # 'or ""' fallback here is dead code.
        self.environ["CONTENT_LENGTH"] = str(cl) or ""
        return True
    def respond(self):
        """Call the appropriate WSGI app and write its iterable output."""
        # Set rfile.maxlen to ensure we don't read past Content-Length.
        # This will also be used to read the entire request body if errors
        # are raised before the app can read the body.
        if self.chunked_read:
            # If chunked, Content-Length will be 0.
            self.rfile.maxlen = self.max_request_body_size
        else:
            cl = int(self.environ.get("CONTENT_LENGTH", 0))
            if self.max_request_body_size:
                self.rfile.maxlen = min(cl, self.max_request_body_size)
            else:
                self.rfile.maxlen = cl
        self.rfile.bytes_read = 0
        try:
            self._respond()
        except MaxSizeExceeded:
            # Only safe to report if we haven't started the response yet.
            if not self.sent_headers:
                self.simple_response("413 Request Entity Too Large")
            return
    def _respond(self):
        """Invoke the WSGI app and stream its response body."""
        if self.chunked_read:
            if not self.decode_chunked():
                # decode_chunked already sent a 400; just drop the conn.
                self.close_connection = True
                return
        response = self.wsgi_app(self.environ, self.start_response)
        try:
            for chunk in response:
                # "The start_response callable must not actually transmit
                # the response headers. Instead, it must store them for the
                # server or gateway to transmit only after the first
                # iteration of the application return value that yields
                # a NON-EMPTY string, or upon the application's first
                # invocation of the write() callable." (PEP 333)
                if chunk:
                    self.write(chunk)
        finally:
            # Per PEP 333, always close the app's iterable if it can be.
            if hasattr(response, "close"):
                response.close()
        if (self.ready and not self.sent_headers):
            # Empty body: headers were never triggered by write(); send now.
            self.sent_headers = True
            self.send_headers()
        if self.chunked_write:
            # Terminating zero-length chunk.
            self.wfile.sendall("0\r\n\r\n")
    def simple_response(self, status, msg=""):
        """Write a simple response back to the client."""
        status = str(status)
        buf = ["%s %s\r\n" % (self.environ['ACTUAL_SERVER_PROTOCOL'], status),
               "Content-Length: %s\r\n" % len(msg),
               "Content-Type: text/plain\r\n"]
        if status[:3] == "413" and self.response_protocol == 'HTTP/1.1':
            # Request Entity Too Large
            self.close_connection = True
            buf.append("Connection: close\r\n")
        buf.append("\r\n")
        if msg:
            buf.append(msg)
        self.wfile.sendall("".join(buf))
    def start_response(self, status, headers, exc_info = None):
        """WSGI callable to begin the HTTP response."""
        # "The application may call start_response more than once,
        # if and only if the exc_info argument is provided."
        if self.started_response and not exc_info:
            raise AssertionError("WSGI start_response called a second "
                                 "time with no exc_info.")
        # "if exc_info is provided, and the HTTP headers have already been
        # sent, start_response must raise an error, and should raise the
        # exc_info tuple."
        if self.sent_headers:
            try:
                # Python 2 three-argument raise: re-raise with traceback.
                raise exc_info[0], exc_info[1], exc_info[2]
            finally:
                # Avoid a traceback reference cycle (PEP 333).
                exc_info = None
        self.started_response = True
        self.status = status
        self.outheaders.extend(headers)
        return self.write
    def write(self, chunk):
        """WSGI callable to write unbuffered data to the client.

        This method is also used internally by start_response (to write
        data from the iterable returned by the WSGI application).
        """
        if not self.started_response:
            raise AssertionError("WSGI write called before start_response.")
        if not self.sent_headers:
            # First non-empty chunk triggers the headers (PEP 333).
            self.sent_headers = True
            self.send_headers()
        if self.chunked_write and chunk:
            # Chunked framing: "<hex-len>\r\n<data>\r\n".
            buf = [hex(len(chunk))[2:], "\r\n", chunk, "\r\n"]
            self.wfile.sendall("".join(buf))
        else:
            self.wfile.sendall(chunk)
    def send_headers(self):
        """Assert, process, and send the HTTP response message-headers."""
        hkeys = [key.lower() for key, value in self.outheaders]
        status = int(self.status[:3])
        if status == 413:
            # Request Entity Too Large. Close conn to avoid garbage.
            self.close_connection = True
        elif "content-length" not in hkeys:
            # "All 1xx (informational), 204 (no content),
            # and 304 (not modified) responses MUST NOT
            # include a message-body." So no point chunking.
            if status < 200 or status in (204, 205, 304):
                pass
            else:
                if self.response_protocol == 'HTTP/1.1':
                    # Use the chunked transfer-coding
                    self.chunked_write = True
                    self.outheaders.append(("Transfer-Encoding", "chunked"))
                else:
                    # Closing the conn is the only way to determine len.
                    self.close_connection = True
        if "connection" not in hkeys:
            if self.response_protocol == 'HTTP/1.1':
                # Both server and client are HTTP/1.1 or better
                if self.close_connection:
                    self.outheaders.append(("Connection", "close"))
            else:
                # Server and/or client are HTTP/1.0
                if not self.close_connection:
                    self.outheaders.append(("Connection", "Keep-Alive"))
        if (not self.close_connection) and (not self.chunked_read):
            # Read any remaining request body data on the socket.
            # "If an origin server receives a request that does not include an
            # Expect request-header field with the "100-continue" expectation,
            # the request includes a request body, and the server responds
            # with a final status code before reading the entire request body
            # from the transport connection, then the server SHOULD NOT close
            # the transport connection until it has read the entire request,
            # or until the client closes the connection. Otherwise, the client
            # might not reliably receive the response message. However, this
            # requirement is not to be construed as preventing a server from
            # defending itself against denial-of-service attacks, or from
            # badly broken client implementations."
            size = self.rfile.maxlen - self.rfile.bytes_read
            if size > 0:
                self.rfile.read(size)
        if "date" not in hkeys:
            self.outheaders.append(("Date", rfc822.formatdate()))
        if "server" not in hkeys:
            self.outheaders.append(("Server", self.environ['SERVER_SOFTWARE']))
        buf = [self.environ['ACTUAL_SERVER_PROTOCOL'], " ", self.status, "\r\n"]
        try:
            buf += [k + ": " + v + "\r\n" for k, v in self.outheaders]
        except TypeError:
            # 'k' and 'v' leak from the failed comprehension iteration
            # (Python 2 scoping), letting us report which part was bad.
            # NOTE(review): the "%r" in these messages is never filled in
            # with the offending value -- the format argument is missing.
            if not isinstance(k, str):
                raise TypeError("WSGI response header key %r is not a string.")
            if not isinstance(v, str):
                raise TypeError("WSGI response header value %r is not a string.")
            else:
                raise
        buf.append("\r\n")
        self.wfile.sendall("".join(buf))
class NoSSLError(Exception):
    """Exception raised when a client speaks plain HTTP to an HTTPS socket."""
    pass
class FatalSSLAlert(Exception):
    """Exception raised when the SSL implementation signals a fatal alert."""
    pass
class CP_fileobject(socket._fileobject):
    """Faux file object attached to a socket object.

    Extends socket._fileobject (Python 2 only) with retry-aware
    send/recv so the buffered wrapper also works on non-blocking
    sockets, and with fully buffered read/readline implementations.
    """
    def sendall(self, data):
        """Sendall for non-blocking sockets."""
        while data:
            try:
                bytes_sent = self.send(data)
                # Drop what the kernel accepted; retry the remainder.
                data = data[bytes_sent:]
            except socket.error, e:
                # EAGAIN/EWOULDBLOCK just mean "not ready, try again".
                if e.args[0] not in socket_errors_nonblocking:
                    raise
    def send(self, data):
        return self._sock.send(data)
    def flush(self):
        # Flush the write buffer inherited from socket._fileobject.
        if self._wbuf:
            buffer = "".join(self._wbuf)
            self._wbuf = []
            self.sendall(buffer)
    def recv(self, size):
        while True:
            try:
                return self._sock.recv(size)
            except socket.error, e:
                # Retry on would-block errors; propagate everything else.
                if e.args[0] not in socket_errors_nonblocking:
                    raise
    def read(self, size=-1):
        """Read up to 'size' bytes (all remaining data if size < 0)."""
        if size < 0:
            # Read until EOF
            buffers = [self._rbuf]
            self._rbuf = ""
            if self._rbufsize <= 1:
                recv_size = self.default_bufsize
            else:
                recv_size = self._rbufsize
            while True:
                data = self.recv(recv_size)
                if not data:
                    break
                buffers.append(data)
            return "".join(buffers)
        else:
            # Read until size bytes or EOF seen, whichever comes first
            data = self._rbuf
            buf_len = len(data)
            if buf_len >= size:
                # Already buffered enough; split the internal buffer.
                self._rbuf = data[size:]
                return data[:size]
            buffers = []
            if data:
                buffers.append(data)
            self._rbuf = ""
            while True:
                left = size - buf_len
                recv_size = max(self._rbufsize, left)
                data = self.recv(recv_size)
                if not data:
                    break
                buffers.append(data)
                n = len(data)
                if n >= left:
                    # Keep any overshoot buffered for the next read.
                    self._rbuf = data[left:]
                    buffers[-1] = data[:left]
                    break
                buf_len += n
            return "".join(buffers)
    def readline(self, size=-1):
        """Read one line, or at most 'size' bytes of it if size >= 0."""
        data = self._rbuf
        if size < 0:
            # Read until \n or EOF, whichever comes first
            if self._rbufsize <= 1:
                # Speed up unbuffered case
                assert data == ""
                buffers = []
                while data != "\n":
                    data = self.recv(1)
                    if not data:
                        break
                    buffers.append(data)
                return "".join(buffers)
            nl = data.find('\n')
            if nl >= 0:
                # Whole line already buffered.
                nl += 1
                self._rbuf = data[nl:]
                return data[:nl]
            buffers = []
            if data:
                buffers.append(data)
            self._rbuf = ""
            while True:
                data = self.recv(self._rbufsize)
                if not data:
                    break
                buffers.append(data)
                nl = data.find('\n')
                if nl >= 0:
                    # Found the newline; buffer the rest.
                    nl += 1
                    self._rbuf = data[nl:]
                    buffers[-1] = data[:nl]
                    break
            return "".join(buffers)
        else:
            # Read until size bytes or \n or EOF seen, whichever comes first
            nl = data.find('\n', 0, size)
            if nl >= 0:
                nl += 1
                self._rbuf = data[nl:]
                return data[:nl]
            buf_len = len(data)
            if buf_len >= size:
                self._rbuf = data[size:]
                return data[:size]
            buffers = []
            if data:
                buffers.append(data)
            self._rbuf = ""
            while True:
                data = self.recv(self._rbufsize)
                if not data:
                    break
                buffers.append(data)
                left = size - buf_len
                nl = data.find('\n', 0, left)
                if nl >= 0:
                    nl += 1
                    self._rbuf = data[nl:]
                    buffers[-1] = data[:nl]
                    break
                n = len(data)
                if n >= left:
                    self._rbuf = data[left:]
                    buffers[-1] = data[:left]
                    break
                buf_len += n
            return "".join(buffers)
class SSL_fileobject(CP_fileobject):
    """SSL file object attached to a socket object."""
    # Give up on a retried SSL operation after this many seconds.
    ssl_timeout = 3
    # Sleep interval between retries of Want{Read,Write}Error.
    ssl_retry = .01
    def _safe_call(self, is_reader, call, *args, **kwargs):
        """Wrap the given call with SSL error-trapping.

        is_reader: if False EOF errors will be raised. If True, EOF errors
        will return "" (to emulate normal sockets).
        """
        start = time.time()
        while True:
            try:
                return call(*args, **kwargs)
            except SSL.WantReadError:
                # Sleep and try again. This is dangerous, because it means
                # the rest of the stack has no way of differentiating
                # between a "new handshake" error and "client dropped".
                # Note this isn't an endless loop: there's a timeout below.
                time.sleep(self.ssl_retry)
            except SSL.WantWriteError:
                time.sleep(self.ssl_retry)
            except SSL.SysCallError, e:
                if is_reader and e.args == (-1, 'Unexpected EOF'):
                    return ""
                errnum = e.args[0]
                if is_reader and errnum in socket_errors_to_ignore:
                    return ""
                # Re-raise as a plain socket error for the layers above.
                raise socket.error(errnum)
            except SSL.Error, e:
                if is_reader and e.args == (-1, 'Unexpected EOF'):
                    return ""
                thirdarg = None
                try:
                    thirdarg = e.args[0][0][2]
                except IndexError:
                    pass
                if thirdarg == 'http request':
                    # The client is talking HTTP to an HTTPS server.
                    raise NoSSLError()
                raise FatalSSLAlert(*e.args)
            except:
                # NOTE(review): this bare re-raise is a no-op and could be
                # removed.
                raise
            # Only reached after a retried Want{Read,Write}Error above.
            if time.time() - start > self.ssl_timeout:
                raise socket.timeout("timed out")
    def recv(self, *args, **kwargs):
        buf = []
        r = super(SSL_fileobject, self).recv
        while True:
            data = self._safe_call(True, r, *args, **kwargs)
            buf.append(data)
            # Drain bytes OpenSSL has already decrypted so they are not
            # stranded in its internal buffer.
            p = self._sock.pending()
            if not p:
                return "".join(buf)
    def sendall(self, *args, **kwargs):
        return self._safe_call(False, super(SSL_fileobject, self).sendall, *args, **kwargs)
    def send(self, *args, **kwargs):
        return self._safe_call(False, super(SSL_fileobject, self).send, *args, **kwargs)
class HTTPConnection(object):
    """An HTTP connection (active socket).

    socket: the raw socket object (usually TCP) for this connection.
    wsgi_app: the WSGI application for this server/connection.
    environ: a WSGI environ template. This will be copied for each request.
    rfile: a fileobject for reading from the socket.
    send: a function for writing (+ flush) to the socket.
    """
    # Buffer size for the read-side fileobject (-1 = default buffering).
    rbufsize = -1
    RequestHandlerClass = HTTPRequest
    # Class-level environ template; copied and updated per connection.
    environ = {"wsgi.version": (1, 0),
               "wsgi.url_scheme": "http",
               "wsgi.multithread": True,
               "wsgi.multiprocess": False,
               "wsgi.run_once": False,
               "wsgi.errors": sys.stderr,
               }
    def __init__(self, sock, wsgi_app, environ):
        self.socket = sock
        self.wsgi_app = wsgi_app
        # Copy the class environ into self.
        self.environ = self.environ.copy()
        self.environ.update(environ)
        if SSL and isinstance(sock, SSL.ConnectionType):
            timeout = sock.gettimeout()
            self.rfile = SSL_fileobject(sock, "rb", self.rbufsize)
            self.rfile.ssl_timeout = timeout
            self.wfile = SSL_fileobject(sock, "wb", -1)
            self.wfile.ssl_timeout = timeout
        else:
            self.rfile = CP_fileobject(sock, "rb", self.rbufsize)
            self.wfile = CP_fileobject(sock, "wb", -1)
        # Wrap wsgi.input but not HTTPConnection.rfile itself.
        # We're also not setting maxlen yet; we'll do that separately
        # for headers and body for each iteration of self.communicate
        # (if maxlen is 0 the wrapper doesn't check length).
        self.environ["wsgi.input"] = SizeCheckWrapper(self.rfile, 0)
    def communicate(self):
        """Read each request and respond appropriately."""
        try:
            while True:
                # (re)set req to None so that if something goes wrong in
                # the RequestHandlerClass constructor, the error doesn't
                # get written to the previous request.
                req = None
                req = self.RequestHandlerClass(self.wfile, self.environ,
                                               self.wsgi_app)
                # This order of operations should guarantee correct pipelining.
                req.parse_request()
                if not req.ready:
                    return
                req.respond()
                if req.close_connection:
                    return
        except socket.error, e:
            errnum = e.args[0]
            # 'timed out' is the string pushed by socket timeouts (see the
            # module-level socket_errors_to_ignore setup).
            if errnum == 'timed out':
                if req and not req.sent_headers:
                    req.simple_response("408 Request Timeout")
            elif errnum not in socket_errors_to_ignore:
                if req and not req.sent_headers:
                    req.simple_response("500 Internal Server Error",
                                        format_exc())
            return
        except (KeyboardInterrupt, SystemExit):
            raise
        except FatalSSLAlert, e:
            # Close the connection.
            return
        except NoSSLError:
            # Unwrap our wfile
            # NOTE(review): if this is raised before the first request was
            # constructed, req is still None and the next line would raise
            # AttributeError -- confirm whether that path is reachable.
            req.wfile = CP_fileobject(self.socket, "wb", -1)
            if req and not req.sent_headers:
                req.simple_response("400 Bad Request",
                    "The client sent a plain HTTP request, but "
                    "this server only speaks HTTPS on this port.")
        except Exception, e:
            if req and not req.sent_headers:
                req.simple_response("500 Internal Server Error", format_exc())
    def close(self):
        """Close the socket underlying this connection."""
        self.rfile.close()
        # Python's socket module does NOT call close on the kernel socket
        # when you call socket.close(). We do so manually here because we
        # want this server to send a FIN TCP segment immediately. Note this
        # must be called *before* calling socket.close(), because the latter
        # drops its reference to the kernel socket.
        self.socket._sock.close()
        self.socket.close()
def format_exc(limit=None):
    """Return the active exception formatted as a traceback string.

    Like traceback.print_exc() but returning the text instead of
    printing it.  (Backport for Python 2.3.)
    """
    exc_type, exc_value, exc_tb = sys.exc_info()
    try:
        lines = traceback.format_exception(exc_type, exc_value, exc_tb, limit)
        return ''.join(lines)
    finally:
        # Break the reference cycle through the traceback's frames.
        exc_type = exc_value = exc_tb = None
# Sentinel placed on the request queue to tell a WorkerThread to exit.
_SHUTDOWNREQUEST = None
class WorkerThread(threading.Thread):
    """Thread which continuously polls a Queue for Connection objects.

    server: the HTTP Server which spawned this thread, and which owns the
        Queue and is placing active connections into it.
    ready: a simple flag for the calling server to know when this thread
        has begun polling the Queue.

    Due to the timing issues of polling a Queue, a WorkerThread does not
    check its own 'ready' flag after it has started. To stop the thread,
    it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
    (one for each running WorkerThread).
    """
    # Connection currently being serviced; None while idle.  Read by
    # ThreadPool._get_idle and ThreadPool.stop.
    conn = None
    def __init__(self, server):
        self.ready = False
        self.server = server
        threading.Thread.__init__(self)
    def run(self):
        try:
            self.ready = True
            while True:
                conn = self.server.requests.get()
                # The sentinel tells this worker to exit.
                if conn is _SHUTDOWNREQUEST:
                    return
                self.conn = conn
                try:
                    conn.communicate()
                finally:
                    # Always close the socket, even if communicate() raised.
                    conn.close()
                    self.conn = None
        except (KeyboardInterrupt, SystemExit), exc:
            # Hand the interrupt to the server's accept loop.
            self.server.interrupt = exc
class ThreadPool(object):
    """A Request Queue for the CherryPyWSGIServer which pools threads.

    ThreadPool objects must provide min, get(), put(obj), start()
    and stop(timeout) attributes.
    """
    def __init__(self, server, min=10, max=-1):
        self.server = server
        # min: threads started up front; max: hard cap (-1 = unlimited).
        self.min = min
        self.max = max
        self._threads = []
        self._queue = Queue.Queue()
        # Workers block directly on Queue.get; expose it as self.get.
        self.get = self._queue.get
    def start(self):
        """Start the pool of threads."""
        for i in xrange(self.min):
            self._threads.append(WorkerThread(self.server))
        for worker in self._threads:
            worker.setName("CP WSGIServer " + worker.getName())
            worker.start()
        for worker in self._threads:
            # Busy-wait until every worker is actually polling the queue.
            while not worker.ready:
                time.sleep(.1)
    def _get_idle(self):
        """Number of worker threads which are idle. Read-only."""
        return len([t for t in self._threads if t.conn is None])
    idle = property(_get_idle, doc=_get_idle.__doc__)
    def put(self, obj):
        self._queue.put(obj)
        if obj is _SHUTDOWNREQUEST:
            return
    def grow(self, amount):
        """Spawn new worker threads (not above self.max)."""
        for i in xrange(amount):
            # A max <= 0 means "no limit".
            if self.max > 0 and len(self._threads) >= self.max:
                break
            worker = WorkerThread(self.server)
            worker.setName("CP WSGIServer " + worker.getName())
            self._threads.append(worker)
            worker.start()
    def shrink(self, amount):
        """Kill off worker threads (not below self.min)."""
        # Grow/shrink the pool if necessary.
        # Remove any dead threads from our list
        # NOTE(review): removing from self._threads while iterating it can
        # skip entries; iterate a copy if this is ever reworked.
        for t in self._threads:
            if not t.isAlive():
                self._threads.remove(t)
                amount -= 1
        if amount > 0:
            for i in xrange(min(amount, len(self._threads) - self.min)):
                # Put a number of shutdown requests on the queue equal
                # to 'amount'. Once each of those is processed by a worker,
                # that worker will terminate and be culled from our list
                # in self.put.
                # NOTE(review): put() above contains no culling logic; dead
                # workers are in fact only removed on the next shrink() call.
                self._queue.put(_SHUTDOWNREQUEST)
    def stop(self, timeout=5):
        # Must shut down threads here so the code that calls
        # this method can know when all threads are stopped.
        for worker in self._threads:
            self._queue.put(_SHUTDOWNREQUEST)
        # Don't join currentThread (when stop is called inside a request).
        current = threading.currentThread()
        while self._threads:
            worker = self._threads.pop()
            if worker is not current and worker.isAlive():
                try:
                    if timeout is None or timeout < 0:
                        worker.join()
                    else:
                        worker.join(timeout)
                        if worker.isAlive():
                            # We exhausted the timeout.
                            # Forcibly shut down the socket.
                            c = worker.conn
                            if c and not c.rfile.closed:
                                if SSL and isinstance(c.socket, SSL.ConnectionType):
                                    # pyOpenSSL.socket.shutdown takes no args
                                    c.socket.shutdown()
                                else:
                                    c.socket.shutdown(socket.SHUT_RD)
                            worker.join()
                except (AssertionError,
                        # Ignore repeated Ctrl-C.
                        # See http://www.cherrypy.org/ticket/691.
                        KeyboardInterrupt), exc1:
                    pass
class SSLConnection:
    """A thread-safe wrapper for an SSL.Connection.

    pyOpenSSL connections are not thread-safe, so every proxied call is
    serialized through a single RLock.

    *args: the arguments to create the wrapped SSL.Connection(*args).
    """
    def __init__(self, *args):
        self._ssl_conn = SSL.Connection(*args)
        self._lock = threading.RLock()
    # Generate a lock-guarded delegating method for each member of the
    # socket/SSL API, via the Python 2 'exec' statement.  The two %s
    # placeholders are both the method name; the template's flush-left
    # indentation is the generated function's own body indentation.
    for f in ('get_context', 'pending', 'send', 'write', 'recv', 'read',
              'renegotiate', 'bind', 'listen', 'connect', 'accept',
              'setblocking', 'fileno', 'shutdown', 'close', 'get_cipher_list',
              'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
              'makefile', 'get_app_data', 'set_app_data', 'state_string',
              'sock_shutdown', 'get_peer_certificate', 'want_read',
              'want_write', 'set_connect_state', 'set_accept_state',
              'connect_ex', 'sendall', 'settimeout'):
        exec """def %s(self, *args):
    self._lock.acquire()
    try:
        return self._ssl_conn.%s(*args)
    finally:
        self._lock.release()
""" % (f, f)
class CherryPyWSGIServer(object):
    """An HTTP server for WSGI.

    bind_addr: The interface on which to listen for connections.
        For TCP sockets, a (host, port) tuple. Host values may be any IPv4
        or IPv6 address, or any valid hostname. The string 'localhost' is a
        synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
        The string '0.0.0.0' is a special IPv4 entry meaning "any active
        interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
        IPv6. The empty string or None are not allowed.

        For UNIX sockets, supply the filename as a string.
    wsgi_app: the WSGI 'application callable'; multiple WSGI applications
        may be passed as (path_prefix, app) pairs.
    numthreads: the number of worker threads to create (default 10).
    server_name: the string to set for WSGI's SERVER_NAME environ entry.
        Defaults to socket.gethostname().
    max: the maximum number of queued requests (defaults to -1 = no limit).
    request_queue_size: the 'backlog' argument to socket.listen();
        specifies the maximum number of queued connections (default 5).
    timeout: the timeout in seconds for accepted connections (default 10).

    nodelay: if True (the default since 3.1), sets the TCP_NODELAY socket
        option.

    protocol: the version string to write in the Status-Line of all
        HTTP responses. For example, "HTTP/1.1" (the default). This
        also limits the supported features used in the response.


    SSL/HTTPS
    ---------
    The OpenSSL module must be importable for SSL functionality.
    You can obtain it from http://pyopenssl.sourceforge.net/

    ssl_certificate: the filename of the server SSL certificate.
    ssl_privatekey: the filename of the server's private key file.

    If either of these is None (both are None by default), this server
    will not use SSL. If both are given and are valid, they will be read
    on server start and used in the SSL context for the listening socket.
    """

    protocol = "HTTP/1.1"
    _bind_addr = "127.0.0.1"
    version = "CherryPy/3.1.0"
    ready = False
    _interrupt = None

    nodelay = True

    ConnectionClass = HTTPConnection
    # NOTE(review): class-level mutable dict -- shared by all instances
    # unless an instance rebinds it (populate_ssl_environ mutates it).
    environ = {}

    # Paths to certificate and private key files
    ssl_certificate = None
    ssl_private_key = None

    def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
                 max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5):
        """Build the worker pool and record server configuration.

        The listening socket is not created here; see start()/bind().
        """
        self.requests = ThreadPool(self, min=numthreads or 1, max=max)

        if callable(wsgi_app):
            # We've been handed a single wsgi_app, in CP-2.1 style.
            # Assume it's mounted at "".
            self.wsgi_app = wsgi_app
        else:
            # We've been handed a list of (path_prefix, wsgi_app) tuples,
            # so that the server can call different wsgi_apps, and also
            # correctly set SCRIPT_NAME.
            warnings.warn("The ability to pass multiple apps is deprecated "
                          "and will be removed in 3.2. You should explicitly "
                          "include a WSGIPathInfoDispatcher instead.",
                          DeprecationWarning)
            self.wsgi_app = WSGIPathInfoDispatcher(wsgi_app)

        self.bind_addr = bind_addr
        if not server_name:
            server_name = socket.gethostname()
        self.server_name = server_name
        self.request_queue_size = request_queue_size

        self.timeout = timeout
        self.shutdown_timeout = shutdown_timeout

    def _get_numthreads(self):
        # numthreads is an alias for the pool's minimum size.
        return self.requests.min
    def _set_numthreads(self, value):
        self.requests.min = value
    numthreads = property(_get_numthreads, _set_numthreads)

    def __str__(self):
        return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
                              self.bind_addr)

    def _get_bind_addr(self):
        return self._bind_addr
    def _set_bind_addr(self, value):
        if isinstance(value, tuple) and value[0] in ('', None):
            # Despite the socket module docs, using '' does not
            # allow AI_PASSIVE to work. Passing None instead
            # returns '0.0.0.0' like we want. In other words:
            #     host    AI_PASSIVE     result
            #      ''         Y         192.168.x.y
            #      ''         N         192.168.x.y
            #     None        Y         0.0.0.0
            #     None        N         127.0.0.1
            # But since you can get the same effect with an explicit
            # '0.0.0.0', we deny both the empty string and None as values.
            raise ValueError("Host values of '' or None are not allowed. "
                             "Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
                             "to listen on all active interfaces.")
        self._bind_addr = value
    bind_addr = property(_get_bind_addr, _set_bind_addr,
        doc="""The interface on which to listen for connections.

        For TCP sockets, a (host, port) tuple. Host values may be any IPv4
        or IPv6 address, or any valid hostname. The string 'localhost' is a
        synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
        The string '0.0.0.0' is a special IPv4 entry meaning "any active
        interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
        IPv6. The empty string or None are not allowed.

        For UNIX sockets, supply the filename as a string.""")

    def start(self):
        """Run the server forever."""
        # We don't have to trap KeyboardInterrupt or SystemExit here,
        # because cherrypy.server already does so, calling self.stop() for us.
        # If you're using this server with another framework, you should
        # trap those exceptions in whatever code block calls start().
        self._interrupt = None

        # Select the appropriate socket
        if isinstance(self.bind_addr, basestring):
            # AF_UNIX socket

            # So we can reuse the socket...
            try: os.unlink(self.bind_addr)
            except: pass

            # So everyone can access the socket...
            try: os.chmod(self.bind_addr, 0777)
            except: pass

            info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
        else:
            # AF_INET or AF_INET6 socket
            # Get the correct address family for our host (allows IPv6 addresses)
            host, port = self.bind_addr
            try:
                info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                          socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
            except socket.gaierror:
                # Probably a DNS issue. Assume IPv4.
                info = [(socket.AF_INET, socket.SOCK_STREAM, 0, "", self.bind_addr)]

        # Try each candidate address until one binds; remember the last
        # error so it can be re-raised if none succeed.
        self.socket = None
        msg = "No socket could be created"
        for res in info:
            af, socktype, proto, canonname, sa = res
            try:
                self.bind(af, socktype, proto)
            except socket.error, msg:
                if self.socket:
                    self.socket.close()
                self.socket = None
                continue
            break
        if not self.socket:
            raise socket.error, msg

        # Timeout so KeyboardInterrupt can be caught on Win32
        self.socket.settimeout(1)
        self.socket.listen(self.request_queue_size)

        # Create worker threads
        self.requests.start()

        self.ready = True
        while self.ready:
            self.tick()
            if self.interrupt:
                while self.interrupt is True:
                    # Wait for self.stop() to complete. See _set_interrupt.
                    time.sleep(0.1)
                if self.interrupt:
                    raise self.interrupt

    def bind(self, family, type, proto=0):
        """Create (or recreate) the actual socket object."""
        self.socket = socket.socket(family, type, proto)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if self.nodelay:
            self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        if self.ssl_certificate and self.ssl_private_key:
            if SSL is None:
                raise ImportError("You must install pyOpenSSL to use HTTPS.")

            # See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
            ctx = SSL.Context(SSL.SSLv23_METHOD)
            ctx.use_privatekey_file(self.ssl_private_key)
            ctx.use_certificate_file(self.ssl_certificate)
            self.socket = SSLConnection(ctx, self.socket)
            self.populate_ssl_environ()
        self.socket.bind(self.bind_addr)

    def tick(self):
        """Accept a new connection and put it on the Queue."""
        try:
            s, addr = self.socket.accept()
            if not self.ready:
                # Server was stopped between accept() returning and now;
                # drop the connection on the floor.
                return
            if hasattr(s, 'settimeout'):
                s.settimeout(self.timeout)

            environ = self.environ.copy()
            # SERVER_SOFTWARE is common for IIS. It's also helpful for
            # us to pass a default value for the "Server" response header.
            environ["SERVER_SOFTWARE"] = "%s WSGI Server" % self.version
            # set a non-standard environ entry so the WSGI app can know what
            # the *real* server protocol is (and what features to support).
            # See http://www.faqs.org/rfcs/rfc2145.html.
            environ["ACTUAL_SERVER_PROTOCOL"] = self.protocol
            environ["SERVER_NAME"] = self.server_name

            if isinstance(self.bind_addr, basestring):
                # AF_UNIX. This isn't really allowed by WSGI, which doesn't
                # address unix domain sockets. But it's better than nothing.
                environ["SERVER_PORT"] = ""
            else:
                environ["SERVER_PORT"] = str(self.bind_addr[1])

            # optional values
            # Until we do DNS lookups, omit REMOTE_HOST
            environ["REMOTE_ADDR"] = addr[0]
            environ["REMOTE_PORT"] = str(addr[1])

            conn = self.ConnectionClass(s, self.wsgi_app, environ)
            self.requests.put(conn)
        except socket.timeout:
            # The only reason for the timeout in start() is so we can
            # notice keyboard interrupts on Win32, which don't interrupt
            # accept() by default
            return
        except socket.error, x:
            if x.args[0] in socket_error_eintr:
                # I *think* this is right. EINTR should occur when a signal
                # is received during the accept() call; all docs say retry
                # the call, and I *think* I'm reading it right that Python
                # will then go ahead and poll for and handle the signal
                # elsewhere. See http://www.cherrypy.org/ticket/707.
                return
            if x.args[0] in socket_errors_nonblocking:
                # Just try again. See http://www.cherrypy.org/ticket/479.
                return
            if x.args[0] in socket_errors_to_ignore:
                # Our socket was closed.
                # See http://www.cherrypy.org/ticket/686.
                return
            raise

    def _get_interrupt(self):
        return self._interrupt
    def _set_interrupt(self, interrupt):
        # Set True first so start() spins in its "wait for stop()" loop
        # until stop() has completed, then expose the real exception.
        self._interrupt = True
        self.stop()
        self._interrupt = interrupt
    interrupt = property(_get_interrupt, _set_interrupt,
                         doc="Set this to an Exception instance to "
                             "interrupt the server.")

    def stop(self):
        """Gracefully shutdown a server that is serving forever."""
        self.ready = False

        sock = getattr(self, "socket", None)
        if sock:
            if not isinstance(self.bind_addr, basestring):
                # Touch our own socket to make accept() return immediately.
                try:
                    host, port = sock.getsockname()[:2]
                except socket.error, x:
                    if x.args[1] != "Bad file descriptor":
                        raise
                else:
                    # Note that we're explicitly NOT using AI_PASSIVE,
                    # here, because we want an actual IP to touch.
                    # localhost won't work if we've bound to a public IP,
                    # but it will if we bound to '0.0.0.0' (INADDR_ANY).
                    for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                                  socket.SOCK_STREAM):
                        af, socktype, proto, canonname, sa = res
                        s = None
                        try:
                            s = socket.socket(af, socktype, proto)
                            # See http://groups.google.com/group/cherrypy-users/
                            #     browse_frm/thread/bbfe5eb39c904fe0
                            s.settimeout(1.0)
                            s.connect((host, port))
                            s.close()
                        except socket.error:
                            if s:
                                s.close()
            if hasattr(sock, "close"):
                sock.close()
            self.socket = None

        self.requests.stop(self.shutdown_timeout)

    def populate_ssl_environ(self):
        """Create WSGI environ entries to be merged into each request."""
        cert = open(self.ssl_certificate, 'rb').read()
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
        ssl_environ = {
            "wsgi.url_scheme": "https",
            "HTTPS": "on",
            # pyOpenSSL doesn't provide access to any of these AFAICT
##            'SSL_PROTOCOL': 'SSLv2',
##            SSL_CIPHER 	string 	The cipher specification name
##            SSL_VERSION_INTERFACE 	string 	The mod_ssl program version
##            SSL_VERSION_LIBRARY 	string 	The OpenSSL program version
            }

        # Server certificate attributes
        ssl_environ.update({
            'SSL_SERVER_M_VERSION': cert.get_version(),
            'SSL_SERVER_M_SERIAL': cert.get_serial_number(),
##            'SSL_SERVER_V_START': Validity of server's certificate (start time),
##            'SSL_SERVER_V_END': Validity of server's certificate (end time),
            })

        for prefix, dn in [("I", cert.get_issuer()),
                           ("S", cert.get_subject())]:
            # X509Name objects don't seem to have a way to get the
            # complete DN string. Use str() and slice it instead,
            # because str(dn) == "<X509Name object '/C=US/ST=...'>"
            dnstr = str(dn)[18:-2]

            wsgikey = 'SSL_SERVER_%s_DN' % prefix
            ssl_environ[wsgikey] = dnstr

            # The DN should be of the form: /k1=v1/k2=v2, but we must allow
            # for any value to contain slashes itself (in a URL).
            while dnstr:
                pos = dnstr.rfind("=")
                dnstr, value = dnstr[:pos], dnstr[pos + 1:]
                pos = dnstr.rfind("/")
                dnstr, key = dnstr[:pos], dnstr[pos + 1:]
                if key and value:
                    wsgikey = 'SSL_SERVER_%s_DN_%s' % (prefix, key)
                    ssl_environ[wsgikey] = value

        self.environ.update(ssl_environ)
| {
"repo_name": "fmcingvale/cherrypy_gae",
"path": "wsgiserver/__init__.py",
"copies": "1",
"size": "61017",
"license": "bsd-3-clause",
"hash": 3918985307104238000,
"line_mean": 38.1134615385,
"line_max": 91,
"alpha_frac": 0.5375714965,
"autogenerated": false,
"ratio": 4.432120287644367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01872838018090376,
"num_lines": 1560
} |
"""A high-speed, production ready, thread pooled, generic WSGI server.
Simplest example on how to use this module directly
(without using CherryPy's application machinery):
from cherrypy import wsgiserver
def my_crazy_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
return ['Hello world!\n']
server = wsgiserver.CherryPyWSGIServer(
('0.0.0.0', 8070), my_crazy_app,
server_name='www.cherrypy.example')
The CherryPy WSGI server can serve as many WSGI applications
as you want in one instance by using a WSGIPathInfoDispatcher:
d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
Want SSL support? Just set these attributes:
server.ssl_certificate = <filename>
server.ssl_private_key = <filename>
if __name__ == '__main__':
try:
server.start()
except KeyboardInterrupt:
server.stop()
This won't call the CherryPy engine (application side) at all, only the
WSGI server, which is independent from the rest of CherryPy. Don't
let the name "CherryPyWSGIServer" throw you; the name merely reflects
its origin, not its coupling.
For those of you wanting to understand internals of this module, here's the
basic call flow. The server's listening thread runs a very tight loop,
sticking incoming connections onto a Queue:
server = CherryPyWSGIServer(...)
server.start()
while True:
tick()
# This blocks until a request comes in:
child = socket.accept()
conn = HTTPConnection(child, ...)
server.requests.put(conn)
Worker threads are kept in a pool and poll the Queue, popping off and then
handling each connection in turn. Each connection can consist of an arbitrary
number of requests and their responses, so we run a nested loop:
while True:
conn = server.requests.get()
conn.communicate()
-> while True:
req = HTTPRequest(...)
req.parse_request()
-> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
req.rfile.readline()
req.read_headers()
req.respond()
-> response = wsgi_app(...)
try:
for chunk in response:
if chunk:
req.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
if req.close_connection:
return
"""
import base64
import os
import Queue
import re
quoted_slash = re.compile("(?i)%2F")
import rfc822
import socket
try:
import cStringIO as StringIO
except ImportError:
import StringIO
_fileobject_uses_str_type = isinstance(socket._fileobject(None)._rbuf, basestring)
import sys
import threading
import time
import traceback
from urllib import unquote
from urlparse import urlparse
import warnings
try:
from OpenSSL import SSL
from OpenSSL import crypto
except ImportError:
SSL = None
import errno
def plat_specific_errors(*errnames):
    """Return error numbers for all errors in errnames on this platform.

    The 'errno' module contains different global constants depending on
    the specific platform (OS). This function will return the list of
    numeric values for a given list of potential names.
    """
    available = dir(errno)
    matched = []
    for name in errnames:
        # Skip names that this platform's errno module doesn't define.
        if name in available:
            matched.append(getattr(errno, name))
    # Deduplicate via dict keys (same trick, different spelling).
    return dict.fromkeys(matched).keys()
# errno values meaning accept()/recv() was interrupted by a signal;
# the call should simply be retried.
socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")

# errno values that indicate the peer went away or our socket was closed;
# these are swallowed rather than propagated.
socket_errors_to_ignore = plat_specific_errors(
    "EPIPE",
    "EBADF", "WSAEBADF",
    "ENOTSOCK", "WSAENOTSOCK",
    "ETIMEDOUT", "WSAETIMEDOUT",
    "ECONNREFUSED", "WSAECONNREFUSED",
    "ECONNRESET", "WSAECONNRESET",
    "ECONNABORTED", "WSAECONNABORTED",
    "ENETRESET", "WSAENETRESET",
    "EHOSTDOWN", "EHOSTUNREACH",
    )
# The string is appended alongside the numeric codes because some socket
# errors carry "timed out" as args[0] instead of an errno; callers test
# x.args[0] against this list.
socket_errors_to_ignore.append("timed out")

# errno values meaning the operation would block on a non-blocking socket.
socket_errors_nonblocking = plat_specific_errors(
    'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')

# Header field names (upper-cased) whose repeated occurrences may be
# merged into a single comma-separated value per RFC 2616 sec 4.2.
comma_separated_headers = ['ACCEPT', 'ACCEPT-CHARSET', 'ACCEPT-ENCODING',
    'ACCEPT-LANGUAGE', 'ACCEPT-RANGES', 'ALLOW', 'CACHE-CONTROL',
    'CONNECTION', 'CONTENT-ENCODING', 'CONTENT-LANGUAGE', 'EXPECT',
    'IF-MATCH', 'IF-NONE-MATCH', 'PRAGMA', 'PROXY-AUTHENTICATE', 'TE',
    'TRAILER', 'TRANSFER-ENCODING', 'UPGRADE', 'VARY', 'VIA', 'WARNING',
    'WWW-AUTHENTICATE']
class WSGIPathInfoDispatcher(object):
    """A WSGI dispatcher for dispatch based on the PATH_INFO.

    apps: a dict or list of (path_prefix, app) pairs.
    """

    def __init__(self, apps):
        """Normalize and sort the (path_prefix, app) pairs for dispatch."""
        # Accept either a mapping or an already-built sequence of pairs.
        try:
            pairs = apps.items()
        except AttributeError:
            pairs = apps

        # Reverse-lexicographic order puts the most specific (longest)
        # mount point ahead of its shorter prefixes.
        pairs.sort()
        pairs.reverse()

        # Each prefix must start, but not end, with a slash; the root
        # mount "/" therefore becomes "".
        self.apps = []
        for prefix, app in pairs:
            self.apps.append((prefix.rstrip("/"), app))

    def __call__(self, environ, start_response):
        """Dispatch to the first app whose mount prefix matches PATH_INFO."""
        path = environ["PATH_INFO"] or "/"
        for prefix, app in self.apps:
            # self.apps is sorted most-specific-first (see __init__).
            if path == prefix or path.startswith(prefix + "/"):
                new_environ = environ.copy()
                new_environ["SCRIPT_NAME"] = new_environ["SCRIPT_NAME"] + prefix
                new_environ["PATH_INFO"] = path[len(prefix):]
                return app(new_environ, start_response)

        # No mount point matched: empty 404 response.
        start_response('404 Not Found', [('Content-Type', 'text/plain'),
                                         ('Content-Length', '0')])
        return ['']
class MaxSizeExceeded(Exception):
    """Raised by SizeCheckWrapper when a read exceeds its byte limit."""
    pass
class SizeCheckWrapper(object):
    """Wraps a file-like object, raising MaxSizeExceeded if too large."""

    def __init__(self, rfile, maxlen):
        """Wrap rfile; maxlen is the byte budget (0/None = unlimited)."""
        self.rfile = rfile
        self.maxlen = maxlen
        self.bytes_read = 0

    def _check_length(self):
        # A falsy maxlen disables the limit entirely.
        if self.maxlen and self.bytes_read > self.maxlen:
            raise MaxSizeExceeded()

    def read(self, size=None):
        chunk = self.rfile.read(size)
        self.bytes_read += len(chunk)
        self._check_length()
        return chunk

    def readline(self, size=None):
        if size is not None:
            line = self.rfile.readline(size)
            self.bytes_read += len(line)
            self._check_length()
            return line

        # No size given: pull the line in 256-byte chunks so a huge
        # single line still trips the limit incrementally.
        pieces = []
        while True:
            chunk = self.rfile.readline(256)
            self.bytes_read += len(chunk)
            self._check_length()
            pieces.append(chunk)
            # See http://www.cherrypy.org/ticket/421
            if len(chunk) < 256 or chunk[-1:] == "\n":
                return ''.join(pieces)

    def readlines(self, sizehint=0):
        # Shamelessly stolen from StringIO
        lines = []
        total = 0
        while True:
            line = self.readline()
            if not line:
                break
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
        return lines

    def close(self):
        self.rfile.close()

    def __iter__(self):
        return self

    def next(self):
        chunk = self.rfile.next()
        self.bytes_read += len(chunk)
        self._check_length()
        return chunk
class HTTPRequest(object):
"""An HTTP Request (and response).
A single HTTP connection may consist of multiple request/response pairs.
send: the 'send' method from the connection's socket object.
wsgi_app: the WSGI application to call.
environ: a partial WSGI environ (server and connection entries).
The caller MUST set the following entries:
* All wsgi.* entries, including .input
* SERVER_NAME and SERVER_PORT
* Any SSL_* entries
* Any custom entries like REMOTE_ADDR and REMOTE_PORT
* SERVER_SOFTWARE: the value to write in the "Server" response header.
* ACTUAL_SERVER_PROTOCOL: the value to write in the Status-Line of
the response. From RFC 2145: "An HTTP server SHOULD send a
response version equal to the highest version for which the
server is at least conditionally compliant, and whose major
version is less than or equal to the one received in the
request. An HTTP server MUST NOT send a version for which
it is not at least conditionally compliant."
outheaders: a list of header tuples to write in the response.
ready: when True, the request has been parsed and is ready to begin
generating the response. When False, signals the calling Connection
that the response should not be generated and the connection should
close.
close_connection: signals the calling Connection that the request
should close. This does not imply an error! The client and/or
server may each request that the connection be closed.
chunked_write: if True, output will be encoded with the "chunked"
transfer-coding. This value is set automatically inside
send_headers.
"""
max_request_header_size = 0
max_request_body_size = 0
def __init__(self, wfile, environ, wsgi_app):
self.rfile = environ['wsgi.input']
self.wfile = wfile
self.environ = environ.copy()
self.wsgi_app = wsgi_app
self.ready = False
self.started_response = False
self.status = ""
self.outheaders = []
self.sent_headers = False
self.close_connection = False
self.chunked_write = False
def parse_request(self):
"""Parse the next HTTP request start-line and message-headers."""
self.rfile.maxlen = self.max_request_header_size
self.rfile.bytes_read = 0
try:
self._parse_request()
except MaxSizeExceeded:
self.simple_response("413 Request Entity Too Large")
return
def _parse_request(self):
# HTTP/1.1 connections are persistent by default. If a client
# requests a page, then idles (leaves the connection open),
# then rfile.readline() will raise socket.error("timed out").
# Note that it does this based on the value given to settimeout(),
# and doesn't need the client to request or acknowledge the close
# (although your TCP stack might suffer for it: cf Apache's history
# with FIN_WAIT_2).
request_line = self.rfile.readline()
if not request_line:
# Force self.ready = False so the connection will close.
self.ready = False
return
if request_line == "\r\n":
# RFC 2616 sec 4.1: "...if the server is reading the protocol
# stream at the beginning of a message and receives a CRLF
# first, it should ignore the CRLF."
# But only ignore one leading line! else we enable a DoS.
request_line = self.rfile.readline()
if not request_line:
self.ready = False
return
environ = self.environ
try:
method, path, req_protocol = request_line.strip().split(" ", 2)
except ValueError:
self.simple_response(400, "Malformed Request-Line")
return
environ["REQUEST_METHOD"] = method
# path may be an abs_path (including "http://host.domain.tld");
scheme, location, path, params, qs, frag = urlparse(path)
if frag:
self.simple_response("400 Bad Request",
"Illegal #fragment in Request-URI.")
return
if scheme:
environ["wsgi.url_scheme"] = scheme
if params:
path = path + ";" + params
environ["SCRIPT_NAME"] = ""
# Unquote the path+params (e.g. "/this%20path" -> "this path").
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
#
# But note that "...a URI must be separated into its components
# before the escaped characters within those components can be
# safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
atoms = [unquote(x) for x in quoted_slash.split(path)]
path = "%2F".join(atoms)
environ["PATH_INFO"] = path
# Note that, like wsgiref and most other WSGI servers,
# we unquote the path but not the query string.
environ["QUERY_STRING"] = qs
# Compare request and server HTTP protocol versions, in case our
# server does not support the requested protocol. Limit our output
# to min(req, server). We want the following output:
# request server actual written supported response
# protocol protocol response protocol feature set
# a 1.0 1.0 1.0 1.0
# b 1.0 1.1 1.1 1.0
# c 1.1 1.0 1.0 1.0
# d 1.1 1.1 1.1 1.1
# Notice that, in (b), the response will be "HTTP/1.1" even though
# the client only understands 1.0. RFC 2616 10.5.6 says we should
# only return 505 if the _major_ version is different.
rp = int(req_protocol[5]), int(req_protocol[7])
server_protocol = environ["ACTUAL_SERVER_PROTOCOL"]
sp = int(server_protocol[5]), int(server_protocol[7])
if sp[0] != rp[0]:
self.simple_response("505 HTTP Version Not Supported")
return
# Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
environ["SERVER_PROTOCOL"] = req_protocol
self.response_protocol = "HTTP/%s.%s" % min(rp, sp)
# If the Request-URI was an absoluteURI, use its location atom.
if location:
environ["SERVER_NAME"] = location
# then all the http headers
try:
self.read_headers()
except ValueError, ex:
self.simple_response("400 Bad Request", repr(ex.args))
return
mrbs = self.max_request_body_size
if mrbs and int(environ.get("CONTENT_LENGTH", 0)) > mrbs:
self.simple_response("413 Request Entity Too Large")
return
# Persistent connection support
if self.response_protocol == "HTTP/1.1":
# Both server and client are HTTP/1.1
if environ.get("HTTP_CONNECTION", "") == "close":
self.close_connection = True
else:
# Either the server or client (or both) are HTTP/1.0
if environ.get("HTTP_CONNECTION", "") != "Keep-Alive":
self.close_connection = True
# Transfer-Encoding support
te = None
if self.response_protocol == "HTTP/1.1":
te = environ.get("HTTP_TRANSFER_ENCODING")
if te:
te = [x.strip().lower() for x in te.split(",") if x.strip()]
self.chunked_read = False
if te:
for enc in te:
if enc == "chunked":
self.chunked_read = True
else:
# Note that, even if we see "chunked", we must reject
# if there is an extension we don't recognize.
self.simple_response("501 Unimplemented")
self.close_connection = True
return
# From PEP 333:
# "Servers and gateways that implement HTTP 1.1 must provide
# transparent support for HTTP 1.1's "expect/continue" mechanism.
# This may be done in any of several ways:
# 1. Respond to requests containing an Expect: 100-continue request
# with an immediate "100 Continue" response, and proceed normally.
# 2. Proceed with the request normally, but provide the application
# with a wsgi.input stream that will send the "100 Continue"
# response if/when the application first attempts to read from
# the input stream. The read request must then remain blocked
# until the client responds.
# 3. Wait until the client decides that the server does not support
# expect/continue, and sends the request body on its own.
# (This is suboptimal, and is not recommended.)
#
# We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
# but it seems like it would be a big slowdown for such a rare case.
if environ.get("HTTP_EXPECT", "") == "100-continue":
self.simple_response(100)
self.ready = True
def read_headers(self):
"""Read header lines from the incoming stream."""
environ = self.environ
while True:
line = self.rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError("Illegal end of headers.")
if line == '\r\n':
# Normal end of headers
break
if line[0] in ' \t':
# It's a continuation line.
v = line.strip()
else:
k, v = line.split(":", 1)
k, v = k.strip().upper(), v.strip()
envname = "HTTP_" + k.replace("-", "_")
if k in comma_separated_headers:
existing = environ.get(envname)
if existing:
v = ", ".join((existing, v))
environ[envname] = v
ct = environ.pop("HTTP_CONTENT_TYPE", None)
if ct is not None:
environ["CONTENT_TYPE"] = ct
cl = environ.pop("HTTP_CONTENT_LENGTH", None)
if cl is not None:
environ["CONTENT_LENGTH"] = cl
def decode_chunked(self):
"""Decode the 'chunked' transfer coding."""
cl = 0
data = StringIO.StringIO()
while True:
line = self.rfile.readline().strip().split(";", 1)
chunk_size = int(line.pop(0), 16)
if chunk_size <= 0:
break
## if line: chunk_extension = line[0]
cl += chunk_size
data.write(self.rfile.read(chunk_size))
crlf = self.rfile.read(2)
if crlf != "\r\n":
self.simple_response("400 Bad Request",
"Bad chunked transfer coding "
"(expected '\\r\\n', got %r)" % crlf)
return
# Grab any trailer headers
self.read_headers()
data.seek(0)
self.environ["wsgi.input"] = data
self.environ["CONTENT_LENGTH"] = str(cl) or ""
return True
def respond(self):
"""Call the appropriate WSGI app and write its iterable output."""
# Set rfile.maxlen to ensure we don't read past Content-Length.
# This will also be used to read the entire request body if errors
# are raised before the app can read the body.
if self.chunked_read:
# If chunked, Content-Length will be 0.
self.rfile.maxlen = self.max_request_body_size
else:
cl = int(self.environ.get("CONTENT_LENGTH", 0))
if self.max_request_body_size:
self.rfile.maxlen = min(cl, self.max_request_body_size)
else:
self.rfile.maxlen = cl
self.rfile.bytes_read = 0
try:
self._respond()
except MaxSizeExceeded:
if not self.sent_headers:
self.simple_response("413 Request Entity Too Large")
return
def _respond(self):
if self.chunked_read:
if not self.decode_chunked():
self.close_connection = True
return
response = self.wsgi_app(self.environ, self.start_response)
try:
for chunk in response:
# "The start_response callable must not actually transmit
# the response headers. Instead, it must store them for the
# server or gateway to transmit only after the first
# iteration of the application return value that yields
# a NON-EMPTY string, or upon the application's first
# invocation of the write() callable." (PEP 333)
if chunk:
self.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
if (self.ready and not self.sent_headers):
self.sent_headers = True
self.send_headers()
if self.chunked_write:
self.wfile.sendall("0\r\n\r\n")
def simple_response(self, status, msg=""):
"""Write a simple response back to the client."""
status = str(status)
buf = ["%s %s\r\n" % (self.environ['ACTUAL_SERVER_PROTOCOL'], status),
"Content-Length: %s\r\n" % len(msg),
"Content-Type: text/plain\r\n"]
if status[:3] == "413" and self.response_protocol == 'HTTP/1.1':
# Request Entity Too Large
self.close_connection = True
buf.append("Connection: close\r\n")
buf.append("\r\n")
if msg:
buf.append(msg)
try:
self.wfile.sendall("".join(buf))
except socket.error, x:
if x.args[0] not in socket_errors_to_ignore:
raise
def start_response(self, status, headers, exc_info = None):
"""WSGI callable to begin the HTTP response."""
# "The application may call start_response more than once,
# if and only if the exc_info argument is provided."
if self.started_response and not exc_info:
raise AssertionError("WSGI start_response called a second "
"time with no exc_info.")
# "if exc_info is provided, and the HTTP headers have already been
# sent, start_response must raise an error, and should raise the
# exc_info tuple."
if self.sent_headers:
try:
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None
self.started_response = True
self.status = status
self.outheaders.extend(headers)
return self.write
def write(self, chunk):
"""WSGI callable to write unbuffered data to the client.
This method is also used internally by start_response (to write
data from the iterable returned by the WSGI application).
"""
if not self.started_response:
raise AssertionError("WSGI write called before start_response.")
if not self.sent_headers:
self.sent_headers = True
self.send_headers()
if self.chunked_write and chunk:
buf = [hex(len(chunk))[2:], "\r\n", chunk, "\r\n"]
self.wfile.sendall("".join(buf))
else:
self.wfile.sendall(chunk)
def send_headers(self):
"""Assert, process, and send the HTTP response message-headers."""
hkeys = [key.lower() for key, value in self.outheaders]
status = int(self.status[:3])
if status == 413:
# Request Entity Too Large. Close conn to avoid garbage.
self.close_connection = True
elif "content-length" not in hkeys:
# "All 1xx (informational), 204 (no content),
# and 304 (not modified) responses MUST NOT
# include a message-body." So no point chunking.
if status < 200 or status in (204, 205, 304):
pass
else:
if (self.response_protocol == 'HTTP/1.1'
and self.environ["REQUEST_METHOD"] != 'HEAD'):
# Use the chunked transfer-coding
self.chunked_write = True
self.outheaders.append(("Transfer-Encoding", "chunked"))
else:
# Closing the conn is the only way to determine len.
self.close_connection = True
if "connection" not in hkeys:
if self.response_protocol == 'HTTP/1.1':
# Both server and client are HTTP/1.1 or better
if self.close_connection:
self.outheaders.append(("Connection", "close"))
else:
# Server and/or client are HTTP/1.0
if not self.close_connection:
self.outheaders.append(("Connection", "Keep-Alive"))
if (not self.close_connection) and (not self.chunked_read):
# Read any remaining request body data on the socket.
# "If an origin server receives a request that does not include an
# Expect request-header field with the "100-continue" expectation,
# the request includes a request body, and the server responds
# with a final status code before reading the entire request body
# from the transport connection, then the server SHOULD NOT close
# the transport connection until it has read the entire request,
# or until the client closes the connection. Otherwise, the client
# might not reliably receive the response message. However, this
# requirement is not be construed as preventing a server from
# defending itself against denial-of-service attacks, or from
# badly broken client implementations."
size = self.rfile.maxlen - self.rfile.bytes_read
if size > 0:
self.rfile.read(size)
if "date" not in hkeys:
self.outheaders.append(("Date", rfc822.formatdate()))
if "server" not in hkeys:
self.outheaders.append(("Server", self.environ['SERVER_SOFTWARE']))
buf = [self.environ['ACTUAL_SERVER_PROTOCOL'], " ", self.status, "\r\n"]
try:
buf += [k + ": " + v + "\r\n" for k, v in self.outheaders]
except TypeError:
if not isinstance(k, str):
raise TypeError("WSGI response header key %r is not a string.")
if not isinstance(v, str):
raise TypeError("WSGI response header value %r is not a string.")
else:
raise
buf.append("\r\n")
self.wfile.sendall("".join(buf))
class NoSSLError(Exception):
    """Raised when a client speaks plain HTTP to an HTTPS socket."""
class FatalSSLAlert(Exception):
    """Raised when the SSL implementation signals a fatal alert."""
if not _fileobject_uses_str_type:
    # On this Python version socket._fileobject buffers with StringIO
    # objects, so this subclass keeps self._rbuf as a StringIO too.
    class CP_fileobject(socket._fileobject):
        """Faux file object attached to a socket object."""

        def sendall(self, data):
            """Sendall for non-blocking sockets."""
            while data:
                try:
                    bytes_sent = self.send(data)
                    # send() may accept only part of the data; retry with
                    # the unsent remainder until everything is written.
                    data = data[bytes_sent:]
                except socket.error, e:
                    if e.args[0] not in socket_errors_nonblocking:
                        raise

        def send(self, data):
            # Single raw send on the underlying socket; returns bytes sent.
            return self._sock.send(data)

        def flush(self):
            # Drain the write buffer through sendall().
            if self._wbuf:
                buffer = "".join(self._wbuf)
                self._wbuf = []
                self.sendall(buffer)

        def recv(self, size):
            # Raw recv that retries on EAGAIN/EWOULDBLOCK and EINTR.
            while True:
                try:
                    return self._sock.recv(size)
                except socket.error, e:
                    if (e.args[0] not in socket_errors_nonblocking
                        and e.args[0] not in socket_error_eintr):
                        raise

        def read(self, size=-1):
            """Read up to `size` bytes (all remaining data if size < 0)."""
            # Use max, disallow tiny reads in a loop as they are very inefficient.
            # We never leave read() with any leftover data from a new recv() call
            # in our internal buffer.
            rbufsize = max(self._rbufsize, self.default_bufsize)
            # Our use of StringIO rather than lists of string objects returned by
            # recv() minimizes memory usage and fragmentation that occurs when
            # rbufsize is large compared to the typical return value of recv().
            buf = self._rbuf
            buf.seek(0, 2)  # seek end
            if size < 0:
                # Read until EOF
                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    data = self.recv(rbufsize)
                    if not data:
                        break
                    buf.write(data)
                return buf.getvalue()
            else:
                # Read until size bytes or EOF seen, whichever comes first
                buf_len = buf.tell()
                if buf_len >= size:
                    # Already have size bytes in our buffer?  Extract and return.
                    buf.seek(0)
                    rv = buf.read(size)
                    self._rbuf = StringIO.StringIO()
                    # Whatever was left past `size` stays buffered for next read.
                    self._rbuf.write(buf.read())
                    return rv

                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    left = size - buf_len
                    # recv() will malloc the amount of memory given as its
                    # parameter even though it often returns much less data
                    # than that.  The returned data string is short lived
                    # as we copy it into a StringIO and free it.  This avoids
                    # fragmentation issues on many platforms.
                    data = self.recv(left)
                    if not data:
                        break
                    n = len(data)
                    if n == size and not buf_len:
                        # Shortcut.  Avoid buffer data copies when:
                        # - We have no data in our buffer.
                        # AND
                        # - Our call to recv returned exactly the
                        #   number of bytes we were asked to read.
                        return data
                    if n == left:
                        buf.write(data)
                        del data  # explicit free
                        break
                    assert n <= left, "recv(%d) returned %d bytes" % (left, n)
                    buf.write(data)
                    buf_len += n
                    del data  # explicit free
                    #assert buf_len == buf.tell()
                return buf.getvalue()

        def readline(self, size=-1):
            """Read one line, up to `size` bytes if size >= 0."""
            buf = self._rbuf
            buf.seek(0, 2)  # seek end
            if buf.tell() > 0:
                # check if we already have it in our buffer
                buf.seek(0)
                bline = buf.readline(size)
                if bline.endswith('\n') or len(bline) == size:
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return bline
                del bline
            if size < 0:
                # Read until \n or EOF, whichever comes first
                if self._rbufsize <= 1:
                    # Speed up unbuffered case
                    buf.seek(0)
                    buffers = [buf.read()]
                    self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                    data = None
                    recv = self.recv
                    # Byte-at-a-time scan for the newline.
                    while data != "\n":
                        data = recv(1)
                        if not data:
                            break
                        buffers.append(data)
                    return "".join(buffers)

                buf.seek(0, 2)  # seek end
                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    nl = data.find('\n')
                    if nl >= 0:
                        nl += 1
                        # Keep through the newline; stash the rest for later.
                        buf.write(data[:nl])
                        self._rbuf.write(data[nl:])
                        del data
                        break
                    buf.write(data)
                return buf.getvalue()
            else:
                # Read until size bytes or \n or EOF seen, whichever comes first
                buf.seek(0, 2)  # seek end
                buf_len = buf.tell()
                if buf_len >= size:
                    buf.seek(0)
                    rv = buf.read(size)
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return rv
                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    left = size - buf_len
                    # did we just receive a newline?
                    nl = data.find('\n', 0, left)
                    if nl >= 0:
                        nl += 1
                        # save the excess data to _rbuf
                        self._rbuf.write(data[nl:])
                        if buf_len:
                            buf.write(data[:nl])
                            break
                        else:
                            # Shortcut.  Avoid data copy through buf when returning
                            # a substring of our first recv().
                            return data[:nl]
                    n = len(data)
                    if n == size and not buf_len:
                        # Shortcut.  Avoid data copy through buf when
                        # returning exactly all of our first recv().
                        return data
                    if n >= left:
                        buf.write(data[:left])
                        self._rbuf.write(data[left:])
                        break
                    buf.write(data)
                    buf_len += n
                    #assert buf_len == buf.tell()
                return buf.getvalue()
else:
    # On this Python version socket._fileobject buffers with plain str
    # objects, so this subclass keeps self._rbuf as a str too.
    class CP_fileobject(socket._fileobject):
        """Faux file object attached to a socket object."""

        def sendall(self, data):
            """Sendall for non-blocking sockets."""
            while data:
                try:
                    bytes_sent = self.send(data)
                    # Retry with whatever the partial send left over.
                    data = data[bytes_sent:]
                except socket.error, e:
                    if e.args[0] not in socket_errors_nonblocking:
                        raise

        def send(self, data):
            # Single raw send on the underlying socket; returns bytes sent.
            return self._sock.send(data)

        def flush(self):
            # Drain the write buffer through sendall().
            if self._wbuf:
                buffer = "".join(self._wbuf)
                self._wbuf = []
                self.sendall(buffer)

        def recv(self, size):
            # Raw recv that retries on EAGAIN/EWOULDBLOCK and EINTR.
            while True:
                try:
                    return self._sock.recv(size)
                except socket.error, e:
                    if (e.args[0] not in socket_errors_nonblocking
                        and e.args[0] not in socket_error_eintr):
                        raise

        def read(self, size=-1):
            """Read up to `size` bytes (all remaining data if size < 0)."""
            if size < 0:
                # Read until EOF
                buffers = [self._rbuf]
                self._rbuf = ""
                if self._rbufsize <= 1:
                    recv_size = self.default_bufsize
                else:
                    recv_size = self._rbufsize

                while True:
                    data = self.recv(recv_size)
                    if not data:
                        break
                    buffers.append(data)

                return "".join(buffers)
            else:
                # Read until size bytes or EOF seen, whichever comes first
                data = self._rbuf
                buf_len = len(data)
                if buf_len >= size:
                    # Enough already buffered: split and return.
                    self._rbuf = data[size:]
                    return data[:size]

                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    left = size - buf_len
                    recv_size = max(self._rbufsize, left)
                    data = self.recv(recv_size)
                    if not data:
                        break
                    buffers.append(data)
                    n = len(data)
                    if n >= left:
                        # Got at least what was asked for; buffer the excess.
                        self._rbuf = data[left:]
                        buffers[-1] = data[:left]
                        break
                    buf_len += n
                return "".join(buffers)

        def readline(self, size=-1):
            """Read one line, up to `size` bytes if size >= 0."""
            data = self._rbuf
            if size < 0:
                # Read until \n or EOF, whichever comes first
                if self._rbufsize <= 1:
                    # Speed up unbuffered case
                    assert data == ""
                    buffers = []
                    # Byte-at-a-time scan for the newline.
                    while data != "\n":
                        data = self.recv(1)
                        if not data:
                            break
                        buffers.append(data)
                    return "".join(buffers)

                nl = data.find('\n')
                if nl >= 0:
                    nl += 1
                    # Line is entirely in the buffer already.
                    self._rbuf = data[nl:]
                    return data[:nl]

                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    buffers.append(data)
                    nl = data.find('\n')
                    if nl >= 0:
                        nl += 1
                        # Keep through the newline; stash the rest for later.
                        self._rbuf = data[nl:]
                        buffers[-1] = data[:nl]
                        break
                return "".join(buffers)
            else:
                # Read until size bytes or \n or EOF seen, whichever comes first
                nl = data.find('\n', 0, size)
                if nl >= 0:
                    nl += 1
                    self._rbuf = data[nl:]
                    return data[:nl]

                buf_len = len(data)
                if buf_len >= size:
                    self._rbuf = data[size:]
                    return data[:size]

                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    buffers.append(data)
                    left = size - buf_len
                    # did we just receive a newline?
                    nl = data.find('\n', 0, left)
                    if nl >= 0:
                        nl += 1
                        # save the excess data to _rbuf
                        self._rbuf = data[nl:]
                        buffers[-1] = data[:nl]
                        break
                    n = len(data)
                    if n >= left:
                        self._rbuf = data[left:]
                        buffers[-1] = data[:left]
                        break
                    buf_len += n
                return "".join(buffers)
class SSL_fileobject(CP_fileobject):
    """SSL file object attached to a socket object."""

    # Seconds to keep retrying WantRead/WantWrite before timing out.
    ssl_timeout = 3
    # Sleep interval (seconds) between SSL retry attempts.
    ssl_retry = .01

    def _safe_call(self, is_reader, call, *args, **kwargs):
        """Wrap the given call with SSL error-trapping.

        is_reader: if False EOF errors will be raised. If True, EOF errors
            will return "" (to emulate normal sockets).
        """
        start = time.time()
        while True:
            try:
                return call(*args, **kwargs)
            except SSL.WantReadError:
                # Sleep and try again. This is dangerous, because it means
                # the rest of the stack has no way of differentiating
                # between a "new handshake" error and "client dropped".
                # Note this isn't an endless loop: there's a timeout below.
                time.sleep(self.ssl_retry)
            except SSL.WantWriteError:
                time.sleep(self.ssl_retry)
            except SSL.SysCallError, e:
                if is_reader and e.args == (-1, 'Unexpected EOF'):
                    return ""

                errnum = e.args[0]
                if is_reader and errnum in socket_errors_to_ignore:
                    return ""
                # Re-raise as a plain socket error so upstream handlers
                # (which only know socket.error) can deal with it.
                raise socket.error(errnum)
            except SSL.Error, e:
                if is_reader and e.args == (-1, 'Unexpected EOF'):
                    return ""

                thirdarg = None
                try:
                    # pyOpenSSL packs the OpenSSL error reason at [0][0][2].
                    thirdarg = e.args[0][0][2]
                except IndexError:
                    pass

                if thirdarg == 'http request':
                    # The client is talking HTTP to an HTTPS server.
                    raise NoSSLError()
                raise FatalSSLAlert(*e.args)
            except:
                raise

            if time.time() - start > self.ssl_timeout:
                raise socket.timeout("timed out")

    def recv(self, *args, **kwargs):
        # Keep reading until OpenSSL reports no more pending decrypted
        # bytes, so a single recv() returns a full decrypted chunk.
        buf = []
        r = super(SSL_fileobject, self).recv
        while True:
            data = self._safe_call(True, r, *args, **kwargs)
            buf.append(data)
            p = self._sock.pending()
            if not p:
                return "".join(buf)

    def sendall(self, *args, **kwargs):
        return self._safe_call(False, super(SSL_fileobject, self).sendall, *args, **kwargs)

    def send(self, *args, **kwargs):
        return self._safe_call(False, super(SSL_fileobject, self).send, *args, **kwargs)
class HTTPConnection(object):
    """An HTTP connection (active socket).

    socket: the raw socket object (usually TCP) for this connection.
    wsgi_app: the WSGI application for this server/connection.
    environ: a WSGI environ template. This will be copied for each request.
    rfile: a fileobject for reading from the socket.
    send: a function for writing (+ flush) to the socket.
    """

    rbufsize = -1
    RequestHandlerClass = HTTPRequest
    environ = {"wsgi.version": (1, 0),
               "wsgi.url_scheme": "http",
               "wsgi.multithread": True,
               "wsgi.multiprocess": False,
               "wsgi.run_once": False,
               "wsgi.errors": sys.stderr,
               }

    def __init__(self, sock, wsgi_app, environ):
        self.socket = sock
        self.wsgi_app = wsgi_app

        # Copy the class environ into self.
        self.environ = self.environ.copy()
        self.environ.update(environ)

        if SSL and isinstance(sock, SSL.ConnectionType):
            timeout = sock.gettimeout()
            self.rfile = SSL_fileobject(sock, "rb", self.rbufsize)
            self.rfile.ssl_timeout = timeout
            self.wfile = SSL_fileobject(sock, "wb", -1)
            self.wfile.ssl_timeout = timeout
        else:
            self.rfile = CP_fileobject(sock, "rb", self.rbufsize)
            self.wfile = CP_fileobject(sock, "wb", -1)

        # Wrap wsgi.input but not HTTPConnection.rfile itself.
        # We're also not setting maxlen yet; we'll do that separately
        # for headers and body for each iteration of self.communicate
        # (if maxlen is 0 the wrapper doesn't check length).
        self.environ["wsgi.input"] = SizeCheckWrapper(self.rfile, 0)

    def communicate(self):
        """Read each request and respond appropriately."""
        try:
            while True:
                # (re)set req to None so that if something goes wrong in
                # the RequestHandlerClass constructor, the error doesn't
                # get written to the previous request.
                req = None
                req = self.RequestHandlerClass(self.wfile, self.environ,
                                               self.wsgi_app)

                # This order of operations should guarantee correct pipelining.
                req.parse_request()
                if not req.ready:
                    return

                req.respond()
                if req.close_connection:
                    return

        except socket.error, e:
            errnum = e.args[0]
            if errnum == 'timed out':
                if req and not req.sent_headers:
                    req.simple_response("408 Request Timeout")
            elif errnum not in socket_errors_to_ignore:
                if req and not req.sent_headers:
                    req.simple_response("500 Internal Server Error",
                                        format_exc())
            return
        except (KeyboardInterrupt, SystemExit):
            raise
        except FatalSSLAlert, e:
            # Close the connection.
            return
        except NoSSLError:
            if req and not req.sent_headers:
                # Unwrap our wfile so the error goes out as plain HTTP.
                req.wfile = CP_fileobject(self.socket._sock, "wb", -1)
                req.simple_response("400 Bad Request",
                                    "The client sent a plain HTTP request, but "
                                    "this server only speaks HTTPS on this port.")
                self.linger = True
        except Exception, e:
            if req and not req.sent_headers:
                req.simple_response("500 Internal Server Error", format_exc())

    # Class-level default; close() skips the hard socket shutdown when True.
    linger = False

    def close(self):
        """Close the socket underlying this connection."""
        self.rfile.close()

        if not self.linger:
            # Python's socket module does NOT call close on the kernel socket
            # when you call socket.close(). We do so manually here because we
            # want this server to send a FIN TCP segment immediately. Note this
            # must be called *before* calling socket.close(), because the latter
            # drops its reference to the kernel socket.
            self.socket._sock.close()
            self.socket.close()
        else:
            # On the other hand, sometimes we want to hang around for a bit
            # to make sure the client has a chance to read our entire
            # response. Skipping the close() calls here delays the FIN
            # packet until the socket object is garbage-collected later.
            # Someday, perhaps, we'll do the full lingering_close that
            # Apache does, but not today.
            pass
def format_exc(limit=None):
    """Like print_exc() but return a string. Backport for Python 2.3."""
    exc_type, exc_value, exc_tb = sys.exc_info()
    try:
        # Join the formatted traceback lines into a single string.
        lines = traceback.format_exception(exc_type, exc_value, exc_tb, limit)
        return ''.join(lines)
    finally:
        # Break the traceback reference cycle so frames can be collected.
        exc_type = exc_value = exc_tb = None
# Sentinel placed on the request Queue to tell a WorkerThread to exit.
_SHUTDOWNREQUEST = None
class WorkerThread(threading.Thread):
    """Thread which continuously polls a Queue for Connection objects.

    server: the HTTP Server which spawned this thread, and which owns the
        Queue and is placing active connections into it.
    ready: a simple flag for the calling server to know when this thread
        has begun polling the Queue.

    Due to the timing issues of polling a Queue, a WorkerThread does not
    check its own 'ready' flag after it has started. To stop the thread,
    it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
    (one for each running WorkerThread).
    """

    # The connection currently being serviced; None while idle.
    conn = None

    def __init__(self, server):
        self.ready = False
        self.server = server
        threading.Thread.__init__(self)

    def run(self):
        try:
            self.ready = True
            while True:
                conn = self.server.requests.get()
                if conn is _SHUTDOWNREQUEST:
                    return

                self.conn = conn
                try:
                    conn.communicate()
                finally:
                    # Always release the socket, and mark ourselves idle
                    # again so ThreadPool.idle stays accurate.
                    conn.close()
                    self.conn = None
        except (KeyboardInterrupt, SystemExit), exc:
            # Hand the interrupt to the server's main loop to re-raise.
            self.server.interrupt = exc
class ThreadPool(object):
    """A Request Queue for the CherryPyWSGIServer which pools threads.

    ThreadPool objects must provide min, get(), put(obj), start()
    and stop(timeout) attributes.
    """

    def __init__(self, server, min=10, max=-1):
        self.server = server
        self.min = min
        self.max = max
        self._threads = []
        self._queue = Queue.Queue()
        self.get = self._queue.get

    def start(self):
        """Start the pool of threads."""
        for i in xrange(self.min):
            self._threads.append(WorkerThread(self.server))
        for worker in self._threads:
            worker.setName("CP WSGIServer " + worker.getName())
            worker.start()
        for worker in self._threads:
            # Block until every worker is actually polling the queue.
            while not worker.ready:
                time.sleep(.1)

    def _get_idle(self):
        """Number of worker threads which are idle. Read-only."""
        return len([t for t in self._threads if t.conn is None])
    idle = property(_get_idle, doc=_get_idle.__doc__)

    def put(self, obj):
        self._queue.put(obj)
        if obj is _SHUTDOWNREQUEST:
            return

    def grow(self, amount):
        """Spawn new worker threads (not above self.max)."""
        for i in xrange(amount):
            if self.max > 0 and len(self._threads) >= self.max:
                break
            worker = WorkerThread(self.server)
            worker.setName("CP WSGIServer " + worker.getName())
            self._threads.append(worker)
            worker.start()

    def shrink(self, amount):
        """Kill off worker threads (not below self.min)."""
        # Grow/shrink the pool if necessary.
        # Remove any dead threads from our list
        # NOTE(review): this mutates self._threads while iterating it, which
        # can skip the element after each removal — confirm upstream intent.
        for t in self._threads:
            if not t.isAlive():
                self._threads.remove(t)
                amount -= 1

        if amount > 0:
            for i in xrange(min(amount, len(self._threads) - self.min)):
                # Put a number of shutdown requests on the queue equal
                # to 'amount'. Once each of those is processed by a worker,
                # that worker will terminate and be culled from our list
                # in self.put.
                self._queue.put(_SHUTDOWNREQUEST)

    def stop(self, timeout=5):
        # Must shut down threads here so the code that calls
        # this method can know when all threads are stopped.
        for worker in self._threads:
            self._queue.put(_SHUTDOWNREQUEST)

        # Don't join currentThread (when stop is called inside a request).
        current = threading.currentThread()
        while self._threads:
            worker = self._threads.pop()
            if worker is not current and worker.isAlive():
                try:
                    if timeout is None or timeout < 0:
                        worker.join()
                    else:
                        worker.join(timeout)
                        if worker.isAlive():
                            # We exhausted the timeout.
                            # Forcibly shut down the socket.
                            c = worker.conn
                            if c and not c.rfile.closed:
                                if SSL and isinstance(c.socket, SSL.ConnectionType):
                                    # pyOpenSSL.socket.shutdown takes no args
                                    c.socket.shutdown()
                                else:
                                    c.socket.shutdown(socket.SHUT_RD)
                            worker.join()
                except (AssertionError,
                        # Ignore repeated Ctrl-C.
                        # See http://www.cherrypy.org/ticket/691.
                        KeyboardInterrupt), exc1:
                    pass
class SSLConnection:
    """A thread-safe wrapper for an SSL.Connection.

    *args: the arguments to create the wrapped SSL.Connection(*args).
    """

    def __init__(self, *args):
        self._ssl_conn = SSL.Connection(*args)
        self._lock = threading.RLock()

    # Generate a lock-guarded delegating method for every SSL.Connection
    # method we proxy, so concurrent threads serialize on self._lock.
    # (Runs at class-definition time; exec is the Py2 way to stamp these out.)
    for f in ('get_context', 'pending', 'send', 'write', 'recv', 'read',
              'renegotiate', 'bind', 'listen', 'connect', 'accept',
              'setblocking', 'fileno', 'shutdown', 'close', 'get_cipher_list',
              'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
              'makefile', 'get_app_data', 'set_app_data', 'state_string',
              'sock_shutdown', 'get_peer_certificate', 'want_read',
              'want_write', 'set_connect_state', 'set_accept_state',
              'connect_ex', 'sendall', 'settimeout'):
        exec """def %s(self, *args):
    self._lock.acquire()
    try:
        return self._ssl_conn.%s(*args)
    finally:
        self._lock.release()
""" % (f, f)
# Pick a platform-appropriate implementation of prevent_socket_inheritance:
# POSIX (fcntl) first, then Windows (ctypes), then a no-op fallback.
try:
    import fcntl
except ImportError:
    try:
        from ctypes import windll, WinError
    except ImportError:
        def prevent_socket_inheritance(sock):
            """Dummy function, since neither fcntl nor ctypes are available."""
            pass
    else:
        def prevent_socket_inheritance(sock):
            """Mark the given socket fd as non-inheritable (Windows)."""
            if not windll.kernel32.SetHandleInformation(sock.fileno(), 1, 0):
                raise WinError()
else:
    def prevent_socket_inheritance(sock):
        """Mark the given socket fd as non-inheritable (POSIX)."""
        fd = sock.fileno()
        old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
class CherryPyWSGIServer(object):
    """An HTTP server for WSGI.

    bind_addr: The interface on which to listen for connections.
        For TCP sockets, a (host, port) tuple. Host values may be any IPv4
        or IPv6 address, or any valid hostname. The string 'localhost' is a
        synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
        The string '0.0.0.0' is a special IPv4 entry meaning "any active
        interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
        IPv6. The empty string or None are not allowed.

        For UNIX sockets, supply the filename as a string.
    wsgi_app: the WSGI 'application callable'; multiple WSGI applications
        may be passed as (path_prefix, app) pairs.
    numthreads: the number of worker threads to create (default 10).
    server_name: the string to set for WSGI's SERVER_NAME environ entry.
        Defaults to socket.gethostname().
    max: the maximum number of queued requests (defaults to -1 = no limit).
    request_queue_size: the 'backlog' argument to socket.listen();
        specifies the maximum number of queued connections (default 5).
    timeout: the timeout in seconds for accepted connections (default 10).

    nodelay: if True (the default since 3.1), sets the TCP_NODELAY socket
        option.

    protocol: the version string to write in the Status-Line of all
        HTTP responses. For example, "HTTP/1.1" (the default). This
        also limits the supported features used in the response.

    SSL/HTTPS
    ---------
    The OpenSSL module must be importable for SSL functionality.
    You can obtain it from http://pyopenssl.sourceforge.net/

    ssl_certificate: the filename of the server SSL certificate.
    ssl_privatekey: the filename of the server's private key file.

    If either of these is None (both are None by default), this server
    will not use SSL. If both are given and are valid, they will be read
    on server start and used in the SSL context for the listening socket.
    """

    protocol = "HTTP/1.1"
    _bind_addr = "127.0.0.1"
    version = "CherryPy/3.1.2"
    ready = False
    _interrupt = None

    nodelay = True

    ConnectionClass = HTTPConnection
    environ = {}

    # Paths to certificate and private key files
    ssl_certificate = None
    ssl_private_key = None

    def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
                 max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5):
        self.requests = ThreadPool(self, min=numthreads or 1, max=max)

        if callable(wsgi_app):
            # We've been handed a single wsgi_app, in CP-2.1 style.
            # Assume it's mounted at "".
            self.wsgi_app = wsgi_app
        else:
            # We've been handed a list of (path_prefix, wsgi_app) tuples,
            # so that the server can call different wsgi_apps, and also
            # correctly set SCRIPT_NAME.
            warnings.warn("The ability to pass multiple apps is deprecated "
                          "and will be removed in 3.2. You should explicitly "
                          "include a WSGIPathInfoDispatcher instead.",
                          DeprecationWarning)
            self.wsgi_app = WSGIPathInfoDispatcher(wsgi_app)

        self.bind_addr = bind_addr
        if not server_name:
            server_name = socket.gethostname()
        self.server_name = server_name
        self.request_queue_size = request_queue_size

        self.timeout = timeout
        self.shutdown_timeout = shutdown_timeout

    def _get_numthreads(self):
        return self.requests.min
    def _set_numthreads(self, value):
        self.requests.min = value
    numthreads = property(_get_numthreads, _set_numthreads)

    def __str__(self):
        return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
                              self.bind_addr)

    def _get_bind_addr(self):
        return self._bind_addr
    def _set_bind_addr(self, value):
        if isinstance(value, tuple) and value[0] in ('', None):
            # Despite the socket module docs, using '' does not
            # allow AI_PASSIVE to work. Passing None instead
            # returns '0.0.0.0' like we want. In other words:
            #     host    AI_PASSIVE     result
            #      ''         Y         192.168.x.y
            #      ''         N         192.168.x.y
            #     None        Y         0.0.0.0
            #     None        N         127.0.0.1
            # But since you can get the same effect with an explicit
            # '0.0.0.0', we deny both the empty string and None as values.
            raise ValueError("Host values of '' or None are not allowed. "
                             "Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
                             "to listen on all active interfaces.")
        self._bind_addr = value
    bind_addr = property(_get_bind_addr, _set_bind_addr,
        doc="""The interface on which to listen for connections.

        For TCP sockets, a (host, port) tuple. Host values may be any IPv4
        or IPv6 address, or any valid hostname. The string 'localhost' is a
        synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
        The string '0.0.0.0' is a special IPv4 entry meaning "any active
        interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
        IPv6. The empty string or None are not allowed.

        For UNIX sockets, supply the filename as a string.""")

    def start(self):
        """Run the server forever."""
        # We don't have to trap KeyboardInterrupt or SystemExit here,
        # because cherrpy.server already does so, calling self.stop() for us.
        # If you're using this server with another framework, you should
        # trap those exceptions in whatever code block calls start().
        self._interrupt = None

        # Select the appropriate socket
        if isinstance(self.bind_addr, basestring):
            # AF_UNIX socket

            # So we can reuse the socket...
            try: os.unlink(self.bind_addr)
            except: pass

            # So everyone can access the socket...
            try: os.chmod(self.bind_addr, 0777)
            except: pass

            info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
        else:
            # AF_INET or AF_INET6 socket
            # Get the correct address family for our host (allows IPv6 addresses)
            host, port = self.bind_addr
            try:
                info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                          socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
            except socket.gaierror:
                # Probably a DNS issue. Assume IPv4.
                info = [(socket.AF_INET, socket.SOCK_STREAM, 0, "", self.bind_addr)]

        self.socket = None
        msg = "No socket could be created"
        for res in info:
            af, socktype, proto, canonname, sa = res
            try:
                self.bind(af, socktype, proto)
            except socket.error, msg:
                if self.socket:
                    self.socket.close()
                self.socket = None
                continue
            break
        if not self.socket:
            raise socket.error, msg

        # Timeout so KeyboardInterrupt can be caught on Win32
        self.socket.settimeout(1)
        self.socket.listen(self.request_queue_size)

        # Create worker threads
        self.requests.start()

        self.ready = True
        while self.ready:
            self.tick()
            if self.interrupt:
                while self.interrupt is True:
                    # Wait for self.stop() to complete. See _set_interrupt.
                    time.sleep(0.1)
                if self.interrupt:
                    raise self.interrupt

    def bind(self, family, type, proto=0):
        """Create (or recreate) the actual socket object."""
        self.socket = socket.socket(family, type, proto)
        prevent_socket_inheritance(self.socket)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if self.nodelay:
            self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        if self.ssl_certificate and self.ssl_private_key:
            if SSL is None:
                raise ImportError("You must install pyOpenSSL to use HTTPS.")

            # See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
            ctx = SSL.Context(SSL.SSLv23_METHOD)
            ctx.use_privatekey_file(self.ssl_private_key)
            ctx.use_certificate_file(self.ssl_certificate)
            self.socket = SSLConnection(ctx, self.socket)
            self.populate_ssl_environ()

        # If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
        # activate dual-stack. See http://www.cherrypy.org/ticket/871.
        if (not isinstance(self.bind_addr, basestring)
            and self.bind_addr[0] == '::' and family == socket.AF_INET6):
            try:
                self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
            except (AttributeError, socket.error):
                # Apparently, the socket option is not available in
                # this machine's TCP stack
                pass

        self.socket.bind(self.bind_addr)

    def tick(self):
        """Accept a new connection and put it on the Queue."""
        try:
            s, addr = self.socket.accept()
            prevent_socket_inheritance(s)
            if not self.ready:
                return
            if hasattr(s, 'settimeout'):
                s.settimeout(self.timeout)

            environ = self.environ.copy()
            # SERVER_SOFTWARE is common for IIS. It's also helpful for
            # us to pass a default value for the "Server" response header.
            if environ.get("SERVER_SOFTWARE") is None:
                environ["SERVER_SOFTWARE"] = "%s WSGI Server" % self.version
            # set a non-standard environ entry so the WSGI app can know what
            # the *real* server protocol is (and what features to support).
            # See http://www.faqs.org/rfcs/rfc2145.html.
            environ["ACTUAL_SERVER_PROTOCOL"] = self.protocol
            environ["SERVER_NAME"] = self.server_name

            if isinstance(self.bind_addr, basestring):
                # AF_UNIX. This isn't really allowed by WSGI, which doesn't
                # address unix domain sockets. But it's better than nothing.
                environ["SERVER_PORT"] = ""
            else:
                environ["SERVER_PORT"] = str(self.bind_addr[1])
                # optional values
                # Until we do DNS lookups, omit REMOTE_HOST
                environ["REMOTE_ADDR"] = addr[0]
                environ["REMOTE_PORT"] = str(addr[1])

            conn = self.ConnectionClass(s, self.wsgi_app, environ)
            self.requests.put(conn)
        except socket.timeout:
            # The only reason for the timeout in start() is so we can
            # notice keyboard interrupts on Win32, which don't interrupt
            # accept() by default
            return
        except socket.error, x:
            if x.args[0] in socket_error_eintr:
                # I *think* this is right. EINTR should occur when a signal
                # is received during the accept() call; all docs say retry
                # the call, and I *think* I'm reading it right that Python
                # will then go ahead and poll for and handle the signal
                # elsewhere. See http://www.cherrypy.org/ticket/707.
                return
            if x.args[0] in socket_errors_nonblocking:
                # Just try again. See http://www.cherrypy.org/ticket/479.
                return
            if x.args[0] in socket_errors_to_ignore:
                # Our socket was closed.
                # See http://www.cherrypy.org/ticket/686.
                return
            raise

    def _get_interrupt(self):
        return self._interrupt
    def _set_interrupt(self, interrupt):
        # Set to True first so start() spins in its inner wait loop until
        # stop() finishes, then store the real exception to be re-raised.
        self._interrupt = True
        self.stop()
        self._interrupt = interrupt
    interrupt = property(_get_interrupt, _set_interrupt,
                         doc="Set this to an Exception instance to "
                             "interrupt the server.")

    def stop(self):
        """Gracefully shutdown a server that is serving forever."""
        self.ready = False

        sock = getattr(self, "socket", None)
        if sock:
            if not isinstance(self.bind_addr, basestring):
                # Touch our own socket to make accept() return immediately.
                try:
                    host, port = sock.getsockname()[:2]
                except socket.error, x:
                    if x.args[0] not in socket_errors_to_ignore:
                        raise
                else:
                    # Note that we're explicitly NOT using AI_PASSIVE,
                    # here, because we want an actual IP to touch.
                    # localhost won't work if we've bound to a public IP,
                    # but it will if we bound to '0.0.0.0' (INADDR_ANY).
                    for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                                  socket.SOCK_STREAM):
                        af, socktype, proto, canonname, sa = res
                        s = None
                        try:
                            s = socket.socket(af, socktype, proto)
                            # See http://groups.google.com/group/cherrypy-users/
                            #        browse_frm/thread/bbfe5eb39c904fe0
                            s.settimeout(1.0)
                            s.connect((host, port))
                            s.close()
                        except socket.error:
                            if s:
                                s.close()
            if hasattr(sock, "close"):
                sock.close()
            self.socket = None

        self.requests.stop(self.shutdown_timeout)

    def populate_ssl_environ(self):
        """Create WSGI environ entries to be merged into each request."""
        cert = open(self.ssl_certificate, 'rb').read()
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)

        ssl_environ = {
            "wsgi.url_scheme": "https",
            "HTTPS": "on",
            # pyOpenSSL doesn't provide access to any of these AFAICT
##            'SSL_PROTOCOL': 'SSLv2',
##            SSL_CIPHER 	string 	The cipher specification name
##            SSL_VERSION_INTERFACE 	string 	The mod_ssl program version
##            SSL_VERSION_LIBRARY 	string 	The OpenSSL program version
            }

        # Server certificate attributes
        ssl_environ.update({
            'SSL_SERVER_M_VERSION': cert.get_version(),
            'SSL_SERVER_M_SERIAL': cert.get_serial_number(),
##            'SSL_SERVER_V_START': Validity of server's certificate (start time),
##            'SSL_SERVER_V_END': Validity of server's certificate (end time),
            })

        for prefix, dn in [("I", cert.get_issuer()),
                           ("S", cert.get_subject())]:
            # X509Name objects don't seem to have a way to get the
            # complete DN string. Use str() and slice it instead,
            # because str(dn) == "<X509Name object '/C=US/ST=...'>"
            dnstr = str(dn)[18:-2]

            wsgikey = 'SSL_SERVER_%s_DN' % prefix
            ssl_environ[wsgikey] = dnstr

            # The DN should be of the form: /k1=v1/k2=v2, but we must allow
            # for any value to contain slashes itself (in a URL).
            while dnstr:
                pos = dnstr.rfind("=")
                dnstr, value = dnstr[:pos], dnstr[pos + 1:]
                pos = dnstr.rfind("/")
                dnstr, key = dnstr[:pos], dnstr[pos + 1:]
                if key and value:
                    wsgikey = 'SSL_SERVER_%s_DN_%s' % (prefix, key)
                    ssl_environ[wsgikey] = value

        self.environ.update(ssl_environ)
| {
"repo_name": "cread/ec2id",
"path": "cherrypy/wsgiserver/__init__.py",
"copies": "3",
"size": "73214",
"license": "apache-2.0",
"hash": -1220438902395194600,
"line_mean": 38.8104793757,
"line_max": 92,
"alpha_frac": 0.5087141803,
"autogenerated": false,
"ratio": 4.623847416950865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6632561597250866,
"avg_score": null,
"num_lines": null
} |
"""A histoprogramming utility to go from some input lines to something plottable.
Licensed under the 3-clause BSD License:
Copyright (c) 2011-2014, Neeraj Kumar (neerajkumar.org)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL NEERAJ KUMAR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os, sys
from nkutils import specialize
def optimalBinSize(vals):
    """Returns "optimal" bin size for a histogram using Scott's Rule:
        b = 3.49 * stdev * N^(-1/3),
    stdev = standard deviation of the N descriptor values.
    From D. Scott - On optimal and data-based histograms.
    """
    from nkutils import getStdDev
    n = len(vals)
    sd = getStdDev(vals)
    # The exponent must be written -1.0/3: under Python 2 the literal
    # -1/3 floor-divides to -1, silently turning this into 3.49*sd/n.
    return 3.49 * sd * (n ** (-1.0 / 3))
def makehist(vals, incr=1, normalize=0):
    """Makes a histogram for the given vals and returns a dict.

    Incr is how much each item counts for.
    If normalize is true, then the sum of returned values is `normalize`.
    """
    ret = {}
    total = 0  # renamed from `sum`, which shadowed the builtin
    for v in vals:
        if v not in ret:
            ret[v] = 0
        ret[v] += incr
        total += incr
    # Guard against empty input, and force float division so that
    # Python 2 integer division cannot zero out every bin.
    if normalize and total:
        factor = float(normalize) / total
        for k in ret:
            ret[k] *= factor
    return ret
def centers2edges(centers):
    """Converts a set of bin centers into edges"""
    uniq = sorted(set(centers))
    # Midpoints between consecutive centers form the interior edges.
    midpoints = [(lo + hi) / 2.0 for lo, hi in zip(uniq, uniq[1:])]
    # Huge sentinel edges guarantee every real value lands in some bin.
    return [-1e99] + midpoints + [1e99]
def histfromcenters(vals, centers, incr=1, normalize=0):
    """Makes a histogram from a set of numbers, with bin centers given.

    incr is how much to increment each bin per item.
    if normalize is true, then output has sum=normalize
    """
    from bisect import bisect
    # The original indexed vals[0] unconditionally and raised IndexError
    # on empty input; an empty histogram is the sensible result.
    if not vals:
        return {}
    edges = centers2edges(centers)
    # Map each value to the center of the bin whose edges bracket it.
    binned = []
    for v in vals:
        i = bisect(edges, v)
        assert i > 0
        binned.append(centers[i - 1])
    # Float increments for float data (or when normalizing), so counts
    # divide cleanly under Python 2 integer division.
    if isinstance(vals[0], float) or normalize:
        incr = float(incr)
    return makehist(binned, incr, normalize)
def histogram(vals, binwidth=1, incr=1, normalize=0):
    """Makes a histogram from a set of values of arbitrary type.

    binwidth determines which values all map to the same value.
    incr is how much to increment each bin per item.
    if normalize is true, then output has sum=normalize
    """
    # The original indexed vals[0] unconditionally and raised IndexError
    # on empty input; return an empty histogram instead.
    if not vals:
        return {}
    try:
        binned = [(v // binwidth) * binwidth for v in vals]
    except TypeError:
        # Non-numeric values: each distinct value is its own bin.
        binned = vals
    if isinstance(vals[0], float) or normalize:
        incr = float(incr)
    return makehist(binned, incr, normalize)
def cumhist(hist):
    """Takes a histogram and makes a cumulative histogram out of it"""
    running = 0
    out = {}
    # Accumulate in ascending key order so each entry holds the running total.
    for key in sorted(hist):
        running += hist[key]
        out[key] = running
    return out
def multhist(hists, asone=1):
    """Takes a set of histograms and combines them.

    If asone is true, then returns one histogram of key->[val1, val2, ...].
    Otherwise, returns one histogram per input
    """
    num = len(hists)
    combined = {}
    for i, h in enumerate(hists):
        for k in h:
            if k not in combined:
                combined[k] = [0] * num
            combined[k][i] = h[k]
    if asone:
        return combined
    # otherwise, convert to separate ones.  Iterate with .items() rather
    # than the Python-2-only .iteritems(), which raises AttributeError
    # under Python 3.
    separate = [{} for _ in hists]
    for k, vals in combined.items():
        for i, v in enumerate(vals):
            separate[i][k] = v
    return separate
def collapsebins(hist, bin, func=None):
    """Collapses bins of a histogram into one, based on the given function.

    The function is given all bins (keys) and for every bin that tests
    positive, it will collapse it to the chosen bin.
    This function copies the given histogram, rather than modifying it
    directly.
    """
    if func is None:
        # Bind the default predicate here so `bin` refers to the parameter.
        # The original default `lambda b: b>bin` in the def line resolved
        # `bin` to the *builtin* function at call time (lambda defaults do
        # not close over other parameters), comparing keys to a function.
        func = lambda b: b > bin
    # dict(hist), not dict(**hist): the latter raises TypeError for any
    # non-string key, which histogram bins usually are.
    out = dict(hist)
    doomed = [b for b in out if func(b)]
    for b in doomed:
        # .get() lets the target bin start at 0 if it was absent.
        out[bin] = out.get(bin, 0) + out[b]
    for b in doomed:
        del out[b]
    return out
if __name__ == '__main__':
    # Histogram values read from stdin; any extra argv entries are passed
    # straight to histogram() (binwidth, incr, normalize).
    lines = [specialize(l.strip()) for l in sys.stdin]
    if not lines: sys.exit()
    args = [specialize(a) for a in sys.argv[1:]]
    hist = histogram(lines, *args)
    for k in sorted(hist):
        # print() call form runs under both Python 2 and 3; the original
        # bare print statement is a syntax error under Python 3.
        print('%s %s' % (k, hist[k]))
| {
"repo_name": "neeraj-kumar/nkpylib",
"path": "nkhist.py",
"copies": "1",
"size": "5481",
"license": "bsd-3-clause",
"hash": 798612625080239600,
"line_mean": 35.0592105263,
"line_max": 81,
"alpha_frac": 0.6591862799,
"autogenerated": false,
"ratio": 3.808895066018068,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9950095324969384,
"avg_score": 0.0035972041897368323,
"num_lines": 152
} |
"""A home for constants."""
# Python modules
from __future__ import division
# 3rd party modules
# Our modules
# import vespa.common.ordered_dict as ordered_dict
from collections import OrderedDict as ordered_dict
# We could also use numpy.pi in place of 3.14..., but it's useful to limit
# this module's dependencies.
# Multiplicative conversion factors between degrees and radians.
DEGREES_TO_RADIANS = 3.1415926535897931 / 180
RADIANS_TO_DEGREES = 180 / 3.1415926535897931
class PaneNames(object):
    """ A container that allows me to represent wxWidgets pane names (which
    are strings) in a way that Python will complain loudly about if I mistype
    one of them (e.g. psuedo instead of pseudo). A pane's name is its unique
    handle. They are not exposed to the user and they are not magic. (i.e.
    the name "baseline" could just as well be "shrubbery".)
    """
    BASELINE = "baseline"
    XYZ_BROWSER = "xyz_browser"
    SUM_SPECTRA = "sum_spectra"
    TIME_SERIES = "time_series"
    KORNAK = "kornak"
    LORENTZ_GAUSS = "lorentz_gauss"
    PSEUDO_2D = "pseudo_2d"
    HEADER = "header"
    SPECTRAL = "spectral"
    SPATIAL = "spatial"
    FILEBAR = "filebar"
    SVD = "svd"
    FITTING = "fitting"
    PROCESSING = "processing"
    # Subset of panes grouped together as "tools" — presumably the
    # dockable tool windows; confirm against the GUI code.
    TOOLS = ( FILEBAR, SPATIAL, BASELINE, XYZ_BROWSER, SUM_SPECTRA,
              TIME_SERIES, KORNAK, LORENTZ_GAUSS, PSEUDO_2D)
class AmplitudeMultiplier(object):
    """ Amplitude multiplier constants """
    # Valid range for the amplitude multiplier control.
    MIN = 0
    MAX = 1e12

class Apodization(object):
    """ Apodization constants """
    # Valid range for the apodization width control.
    MIN_WIDTH = 0
    MAX_WIDTH = 100
    # These constants are arbitrary and may change.
    # However bool(NONE) is guaranteed to be False
    NONE = 0
    GAUSSIAN = 1
    LORENTZIAN = 2
    # Items for the spectral processing options dropdown
    # (constant -> display label).
    choices = ordered_dict( ( (NONE , "None"),
                              (GAUSSIAN , "Gaussian"),
                              (LORENTZIAN , "Lorentzian"),
                            ) )

class AreaFrom(object):
    """ Contains constants for area calculation options. """
    # The constant values are arbitrary and may change
    PLOT_1 = 1
    PLOT_2 = 2
    PLOT_3 = 3
class BaselineFilterMethod(object):
    """ Baseline Filter Method """
    # bool(NONE) is False, matching the other option classes here.
    NONE = 0
    LOWESS = 1
    BSPLINE = 2
    # Items for the Voigt fitting tool radio buttons
    # (constant -> display label).
    choices = ordered_dict( ( (NONE , "None"),
                              (LOWESS , "Lowess"),
                              (BSPLINE , "B-Spline"),
                            ) )

class BaselineFilterRange(object):
    """ Baseline Filter Range """
    WHOLE_SPECTRUM = 0
    BETWEEN_CURSORS = 1
    # Items for the Voigt fitting tool radio buttons
    choices = ordered_dict( ( (WHOLE_SPECTRUM , "Whole Spectrum"),
                              (BETWEEN_CURSORS , "Between Cursors"),
                            ) )
class DcOffset(object):
    """ DC offset constants """
    # Valid range for the DC offset control.
    MIN = -1e5
    MAX = 1e5

class EddyCurrentCorrection(object):
    """ Eddy current correction constants """
    # These constants are arbitrary and may change.
    # However bool(NONE) is guaranteed to be False
    NONE = 0
    SIMPLE = 1
    # Valid range for the noise filter control.
    MIN_NOISE_FILTER = 0.1
    MAX_NOISE_FILTER = 100.0
    # Items for the spectral processing options dropdown
    choices = ordered_dict( ( (NONE , "Off"),
                              (SIMPLE , "Simple"),
                            ) )
class FittingLineshapeModel(object):
    """ Lineshape model """
    VOIGT = 0
    LORENTZ = 1
    GAUSS = 2
    # Items for the Voigt fitting tool radio buttons
    # (constant -> display label).
    choices = ordered_dict( ( (VOIGT , "Voigt"),
                              (LORENTZ , "Lorentz"),
                              (GAUSS , "Gauss"),
                            ) )

class FittingBaselineAlgorithm(object):
    """ Baseline Algorithm """
    # bool(NONE) is False, matching the other option classes here.
    NONE = 0
    VARIABLE_KNOT_SPLINE = 1
    FIXED_KNOT_SPLINE = 2
    WAVELET_FILTER = 3
    # Items for the Voigt fitting tool radio buttons
    choices = ordered_dict( ( (NONE , "None"),
                              (VARIABLE_KNOT_SPLINE , "Variable Knot Spline"),
                              (FIXED_KNOT_SPLINE , "Fixed Knot Spline"),
                              (WAVELET_FILTER , "Wavelet Filter"),
                            ) )

class FittingMacromoleculePeak(object):
    """ Macromolecular Peak Method """
    GROUPED_PEAKS = 0
    INDIVIDUAL_PEAKS = 1
    # Items for the Voigt fitting tool radio buttons
    choices = ordered_dict( ( (GROUPED_PEAKS , "Grouped Peaks"),
                              (INDIVIDUAL_PEAKS , "Individual Peaks"),
                            ) )

class FittingMacromoleculeLinewidths(object):
    """ Macromolecular Linewidths Method """
    LUMPED = 0
    INDEPENDENT = 1
    # Items for the Voigt fitting tool radio buttons
    choices = ordered_dict( ( (LUMPED , "Lumped"),
                              (INDEPENDENT , "Independent"),
                            ) )

class FittingOptimizeAlgorithm(object):
    """ Optimization Algorithm """
    NONE = 0
    CONSTRAINED_LEVENBERG_MARQUARDT = 1
    # Items for the Voigt fitting tool radio buttons
    choices = ordered_dict( ( (NONE , "None"),
                              (CONSTRAINED_LEVENBERG_MARQUARDT , "ccfit"),
                            ) )

class FittingOptimizeWeights(object):
    """ Optimization weighting scheme constants.

    (The original docstring said "Macromolecular Linewidths Method",
    apparently copied from the class above.)
    """
    EVEN_WEIGHTING = 0
    LOCAL_WEIGHTING = 1
    # Items for the Voigt fitting tool radio buttons
    choices = ordered_dict( ( (EVEN_WEIGHTING , "Even Weighting"),
                              (LOCAL_WEIGHTING , "Local Weighting"),
                            ) )
# The classes below each define the MIN/MAX range for one numeric
# control in the fitting GUI.

class FittingAmplitudeMultiplier(object):
    """ Metabolites amplitude multiplier constants """
    MIN = 0.001
    MAX = 1000.0

class FittingBaselineBsplineOrder(object):
    """ Baseline B-Spline order constants """
    MIN = 1.0
    MAX = 5.0

class FittingBaselineLowessWindowSize(object):
    """ Baseline metabolites region Lowess window size (Hz) constants """
    MIN = 0.00001
    MAX = 5000.0

class FittingBaselineUnderestimation(object):
    """ Baseline first pass underestimation constants """
    MIN = -50.0
    MAX = 100.0

class FittingLineWidth(object):
    """ Metabolites line width constants """
    MIN = 0.001
    MAX = 1000.0

class FittingMacroMoleculeLines(object):
    """ Metabolites Lorentz-Gauss macro molecule model lines constants """
    MIN = 1
    MAX = 50

class FittingMacroMoleculeAdjustment(object):
    """ Metabolites Lorentz-Gauss macro molecule adjustment (spinner) constants """
    MIN = 0
    MAX = 100

class FittingOptimizationAmplitude(object):
    """ Fitting Lorentz-Gauss optimization metabolite amplitude constants """
    MIN = 1
    MAX = 10000

class FittingOptimizationAreaWeight(object):
    """ Fitting Lorentz-Gauss optimization area weight constants """
    MIN = 0.0001
    MAX = 1000000.0
# More MIN/MAX ranges for numeric controls in the fitting GUI.

class FittingOptimizationAlgorithmIterations(object):
    """ Fitting Lorentz-Gauss optimization algorithm max iterations constants """
    MIN = 1
    MAX = 10000

class FittingOptimizationConfidenceAlpha(object):
    """ Fitting Lorentz-Gauss optimization algorithm confidence alpha constants """
    MIN = 0.05
    MAX = 0.9999

class FittingOptimizationFrequency(object):
    """ Fitting Lorentz-Gauss optimization metabolite frequency constants """
    MIN = 1
    MAX = 10000

class FittingOptimizationGlobalIterations(object):
    """ Fitting Lorentz-Gauss optimization global iterations constants """
    MIN = 1
    MAX = 1000

class FittingOptimizationLocalMultiplier(object):
    """ Fitting Lorentz-Gauss optimization "LW Local Mult" (???) constants """
    MIN = 1.0
    MAX = 100.0

class FittingOptimizationPhase1(object):
    """ Fitting Lorentz-Gauss optimization metabolite phase 1 constants """
    MIN = 1
    MAX = 5000

class FittingOptimizationStopTolerance(object):
    """ Fitting Lorentz-Gauss optimization algorithm stop tolerance constants """
    MIN = 0.000000001
    MAX = 100000.0

class FittingOptimizationTaTb(object):
    """ Fitting Lorentz-Gauss optimization Ta=Tb constants """
    MIN = 0.001
    MAX = 200.0

class FittingPeakPpm(object):
    """ Metabolites peak PPM constants """
    MIN = -5000.0
    MAX = 5000.0

class FittingPeakSearchRange(object):
    """ Metabolites peak search range constants """
    MIN = 0.001
    MAX = 10.0
class FrequencyShift(object):
    """ Frequency shift constants """
    MIN = -1e4
    MAX = 1e4

class Phase_1(object):
    """ First order phase constants """
    MIN = -1e4
    MAX = 1e4
    # Range for the phase-1 pivot control.
    MIN_PIVOT = -1000
    MAX_PIVOT = 1000

class Plot3Function(object):
    """ Contains constants for plot 3 function options. """
    # The constant values are arbitrary and may change.
    # However, bool(NONE) is guaranteed to be False
    NONE = 0
    RESIDUAL_1_MINUS_2 = 1
    RESIDUAL_2_MINUS_1 = 2
    TIME_SERIES_SUM_1 = 3
    TIME_SERIES_SUM_2 = 4
class SpatialFilter(object):
    """ Spatial filter constants """
    # These constants are arbitrary and may change.
    # However bool(NONE) is guaranteed to be False
    NONE = 0
    HAMMING = 1
    EXPONENTIAL = 2
    GAUSSIAN = 3
    # Items for the spatial processing options dropdown
    # (constant -> display label).
    choices = ordered_dict( ( (NONE , "None"),
                              (HAMMING , "Hamming"),
                              (EXPONENTIAL , "Exponential"),
                              (GAUSSIAN , "Gaussian"),
                            ) )

class SpatialTranspose(object):
    """ Spatial transposition constants """
    # These constants are arbitrary and may change.
    # However bool(NONE) is guaranteed to be False
    NONE = 0
    TRANSPOSE_XY = 1 # x <-> y
    TRANSPOSE_XZ = 2 # x <-> z
    TRANSPOSE_YZ = 3 # y <-> z
    TRANSPOSE_XYZ = 4 # x->y->z->x
    # Items for the spatial processing options
    choices = ordered_dict( ( (NONE , "None"),
                              (TRANSPOSE_XY , "Transpose_XY"),
                              (TRANSPOSE_XZ , "Transpose_XZ"),
                              (TRANSPOSE_YZ , "Transpose_YZ"),
                              (TRANSPOSE_XYZ , "Transpose_XYZ"),
                            ) )
class WaterExtrapolation(object):
    """ Water extrapolation constants """
    # These constants are arbitrary and may change.
    # However bool(NONE) is guaranteed to be False
    NONE = 0
    LINEAR = 1
    AR_MODEL = 2
    # Valid range for the extrapolation-points control.
    MIN_POINTS = 1
    MAX_POINTS = 1000
    # Items for the spectral processing options dropdown
    choices = ordered_dict( ( (NONE , "None"),
                              (LINEAR , "Linear"),
                              (AR_MODEL , "AR Model"),
                            ) )

class WaterFilter(object):
    """ Water filter constants """
    # These constants are arbitrary and may change.
    # However bool(NONE) is guaranteed to be False
    NONE = 0
    HAMMING = 1
    FIR = 2
    HLSVD = 3
    # Items for the spectral processing options dropdown
    choices = ordered_dict( ( (NONE , "None"),
                              (HAMMING , "Hamming"),
                              (FIR , "FIR"),
                              (HLSVD , "HLSVD"),
                            ) )
    # FIR (Finite Impulse Response) constants
    MIN_FIR_RIPPLE = 0
    MAX_FIR_RIPPLE = 500
    MIN_FIR_HALF_WIDTH = 0
    MAX_FIR_HALF_WIDTH = 500
    # HLSVD constants
    MIN_HLSVD_DATA_POINTS = 1
    MAX_HLSVD_DATA_POINTS = 2048
    MIN_HLSVD_SINGULAR_VALUES = 1
    MAX_HLSVD_SINGULAR_VALUES = 50
    MIN_HLSVD_MATRIX_POINTS = 1
    MAX_HLSVD_MATRIX_POINTS = 1024
    MIN_HLSVD_ITERATIONS = 1
    MAX_HLSVD_ITERATIONS = 2000
class XYZImageType(object):
    """ XYZ Image Type constants """
    # Integral Image along 0:X-Y/1:Y-Z/2:X-Z direction
    MAGNITUDE = 0
    REAL = 1
    IMAGINARY = 2
    MAGNITUDE_PLUS_MASK = 3
    B0_MAP = 4
    # Items for the XYZ browser options (constant -> display label).
    choices = ordered_dict( ( (MAGNITUDE , "Magnitude"),
                              (REAL , "Real"),
                              (IMAGINARY , "Imaginary"),
                              (MAGNITUDE_PLUS_MASK , "Magnitude plus Mask"),
                              (B0_MAP , "B0 Map"),
                            ) )

class XYZOrientation(object):
    """ XYZ Orientation constants """
    # Integral Image along 0:X-Y/1:Y-Z/2:X-Z direction
    XY_ORIENTATION = 0
    YZ_ORIENTATION = 1
    XZ_ORIENTATION = 2
    # Items for the XYZ browser options
    choices = ordered_dict( ( (XY_ORIENTATION , "XY_Orientation"),
                              (YZ_ORIENTATION , "YZ_Orientation"),
                              (XZ_ORIENTATION , "XZ_Orientation"),
                            ) )
class ZeroFillMultiplier(object):
    """ Zero fill multiplier constants. The zero fill is not only limited
    to a specific range, but it must also be an integral power of 2.
    """
    _EXPONENT = 5
    MIN = 1
    MAX = 2 ** _EXPONENT
    # Items for the spatial processing options: the powers of two from
    # 2**0 through 2**_EXPONENT, each labeled with its decimal string.
    choices = ordered_dict(
        (2 ** power, str(2 ** power)) for power in range(_EXPONENT + 1)
    )
| {
"repo_name": "beOn/hcpre",
"path": "hcpre/duke_siemens/constants.py",
"copies": "1",
"size": "13790",
"license": "bsd-3-clause",
"hash": -3535862691005569500,
"line_mean": 30.3409090909,
"line_max": 88,
"alpha_frac": 0.5553299492,
"autogenerated": false,
"ratio": 3.8041379310344827,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48594678802344826,
"avg_score": null,
"num_lines": null
} |
"""A home-grown job-queueing system based on Postgres
To avoid having to run another system, we use a PostgreSQL table as our
queue. If our pipeline for each dataset looks like `A -> B -> C -> D`,
then we store the timestamps of the last time we ran `A`, `B`, `C`, and
`D` for each dataset. A worker for step `B` in the pipeline repeatedly:
1. `BEGIN` a transaction
2. Find a single dataset where `timestamp(A) > timestamp(B)`. For
concurrency reasons, we take out a row-level lock via `FOR UPDATE`
and specify `SKIP LOCKED`, so that multiple workers will find
different datasets.
3. Do the processing step (presumably outside of PostgreSQL). Any
updates to any PostgreSQL tables should be wrapped in the same
transaction from 1.
4. Update `timestamp(B)` and COMMIT the transaction.
These transactions and timestamps serve as synchronization points -- if
there are dependencies between different processing steps, then they
should be split into separate processing steps to avoid stale data.
The benefits of this arrangement is that we get transactional
consistency and other ACID guarantees for free while avoiding bringing
a separate dependency like Redis. The downside is that this is a fairly
complicated and home-grown solution and isn't very scalable.
Luckily, the number of datasets to process is small (~2500, which grows
by a couple hundred a year) and this is a batch-mode job queue, not a
low-latency online queue, so scalability shouldn't be a problem (having
a couple dozen long-lived transactions should be easy for Postgres).
"""
import contextlib
from typing import Iterator, Optional, Tuple
import sqlalchemy as sa
from .base import Base
from .metadata import AssetType, Dataset
# One row per dataset; dataset_id doubles as the primary key.
# updated_at is refreshed from the metadata table (update_from_metadata);
# scraped_at/processed_at record when each pipeline step last succeeded;
# retries counts consecutive failures (workers skip rows with >= 3).
queue = sa.Table(
    "queue", Base.metadata,
    sa.Column("dataset_id", sa.CHAR(9), sa.ForeignKey(Dataset.__table__.c.id),
              primary_key=True),
    sa.Column("updated_at", sa.TIMESTAMP(timezone=False), nullable=False),
    sa.Column("scraped_at", sa.TIMESTAMP(timezone=False), nullable=True),
    sa.Column("processed_at", sa.TIMESTAMP(timezone=False), nullable=True),
    sa.Column("retries", sa.SMALLINT, nullable=False, default=0),
) # yapf: disable

# TODO(alan): Use NewType for dataset_id
# A (connection, dataset_id) pair; both elements are None when no
# eligible row exists (see _next_row).
Pair = Tuple[Optional[sa.engine.base.Connection], Optional[str]]
def update_from_metadata(conn: sa.engine.base.Connection) -> None:
    """Enqueue (or refresh) parentless datasets/maps from the metadata table.

    Upserts one queue row per qualifying dataset, setting ``updated_at``
    and resetting ``retries`` to zero so workers will pick it up again.
    """
    dataset = Dataset.__table__
    # NOTE(review): {dataset.c.asset_type} and {dataset.c.parents} render as
    # qualified "table.column" names, while the other interpolations use
    # .name for bare column names — confirm this mix is intentional.
    query = sa.text(f"""
    INSERT INTO {queue.name}
        ({queue.c.dataset_id.name}, {queue.c.updated_at.name},
         {queue.c.retries.name})
    SELECT {dataset.c.id.name}, LEAST({dataset.c.updated_at.name}, NOW()), 0
    FROM {dataset.name}
    WHERE {dataset.c.asset_type} IN ('{AssetType.DATASET.value}',
                                     '{AssetType.MAP.value}')
      AND array_length({dataset.c.parents}, 1) IS NULL
    ON CONFLICT ({queue.c.dataset_id.name}) DO UPDATE
    SET {queue.c.updated_at.name} = excluded.{dataset.c.updated_at.name},
        {queue.c.retries.name} = 0
    """)
    conn.execute(query)
def _next_row(conn: sa.engine.base.Connection, query,
              success) -> Iterator[Pair]:
    """Lock the next eligible queue row and yield it to the caller.

    Yields ``(conn, dataset_id)`` inside an open transaction, or
    ``(None, None)`` when no row qualifies.  When the caller's body
    completes, the ``success`` update is applied and the transaction
    committed; when it raises, ``retries`` is incremented instead and the
    exception propagates.
    """
    # Give up on a row after 3 failures; least-retried rows first.
    # FOR UPDATE SKIP LOCKED lets concurrent workers grab different rows.
    query = (query
             .where(queue.c.retries < 3)
             .order_by(sa.asc(queue.c.retries))
             .limit(1)
             .with_for_update(skip_locked=True)) # yapf: disable
    # Penalty update applied when the caller's processing step fails.
    fail = queue.update().values(retries=queue.c.retries + 1)
    trans = conn.begin()
    row = conn.execute(query).fetchone()
    if row is None:
        yield None, None
        return
    dataset_id = row.dataset_id
    try:
        try:
            yield conn, row.dataset_id
            conn.execute(success.where(queue.c.dataset_id == dataset_id))
            trans.commit()
        except Exception:
            # Record the failure within the same transaction, then re-raise.
            conn.execute(fail.where(queue.c.dataset_id == dataset_id))
            trans.commit()
            raise
    except sa.exc.InternalError as e:
        # The transaction itself is unusable (aborted by Postgres): roll
        # back to the outermost transaction, record the failure outside
        # it, and close the connection.  _actual_parent is a private
        # SQLAlchemy attribute — presumably stable for the pinned
        # version; confirm on upgrades.
        assert "current transaction is aborted" in str(e)
        root = trans
        while root._actual_parent is not None:
            root = root._actual_parent
        root.rollback()
        conn.execute(fail.where(queue.c.dataset_id == dataset_id))
        conn.close()
        raise
@contextlib.contextmanager
def next_row_to_scrape(conn: sa.engine.Connection) -> Iterator[Pair]:
    """Yield (conn, dataset_id) for the next dataset whose metadata is
    newer than its last scrape (or that was never scraped)."""
    needs_scrape = sa.or_(
        queue.c.updated_at >= queue.c.scraped_at,
        queue.c.scraped_at.is_(None),
    )
    query = sa.select([queue.c.dataset_id]).where(sa.and_(needs_scrape))
    success = queue.update().values(scraped_at=sa.func.now(), retries=0)
    yield from _next_row(conn, query, success)
@contextlib.contextmanager
def next_row_to_process(conn: sa.engine.Connection) -> Iterator[Pair]:
    """Yield (conn, dataset_id) for the next dataset that was scraped but
    not yet (re)processed."""
    needs_process = sa.and_(
        queue.c.scraped_at.isnot(None),
        sa.or_(
            queue.c.scraped_at >= queue.c.processed_at,
            queue.c.processed_at.is_(None),
        ),
    )
    query = sa.select([queue.c.dataset_id]).where(needs_process)
    success = queue.update().values(processed_at=sa.func.now(), retries=0)
    yield from _next_row(conn, query, success)
| {
"repo_name": "adi-foundry/nycodex",
"path": "nycodex/db/queue.py",
"copies": "1",
"size": "5228",
"license": "apache-2.0",
"hash": 6131081676243189000,
"line_mean": 37.7259259259,
"line_max": 78,
"alpha_frac": 0.6440321347,
"autogenerated": false,
"ratio": 3.705173635719348,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9849205770419348,
"avg_score": 0,
"num_lines": 135
} |
# a homophonic cipher
# needs to have a homophoinic letter substitution table
import sys; sys.path.append(".."); sys.path.append("homophonic")
from tkinter import Tk
from tkinter.filedialog import askopenfilename
from homophonic_table_creator import TableCreator
from letter_distribution import LangData
class TableGetter():
    """Interactively locate (or create) a homophonic substitution table."""

    # check if the user has a homophonic substitution table and act accordingly
    def does_user_have_the_table(self):
        options = ["no", "yes"]
        print("Do you have a homophonic substitution table?")
        for number, option in enumerate(options):
            print("{}: {}".format(number, option))
        # Record the user's answer ("yes" or "no") for get_or_make_table.
        self.has_table = options[int(input("Enter a number please: "))]

    # if user has table, use it, else make it
    def get_or_make_table(self):
        # Create the table first when the user does not have one, then
        # fall through to selecting the (now existing) file.
        while self.has_table != "yes":
            print("You now select your language word list")
            langdata = LangData()
            langdata.main()
            TableCreator(langdata.letter_distribution).excecute()
            self.has_table = "yes"
        print("Select your homophonic substitution table")
        Tk().withdraw()  # remove excess windows
        self.key_file = askopenfilename()  # opens a dialog
if __name__ == "__main__":
    pass  # no standalone behavior; this module is imported by the ciphers
| {
"repo_name": "himrasmussen/PyCipher",
"path": "homophonic/table_getter.py",
"copies": "1",
"size": "1523",
"license": "mit",
"hash": -929651335152941800,
"line_mean": 36.1463414634,
"line_max": 83,
"alpha_frac": 0.6198292843,
"autogenerated": false,
"ratio": 4.094086021505376,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5213915305805376,
"avg_score": null,
"num_lines": null
} |
# a homophonic cipher
# needs to have a homophoinic letter substitution table
import sys; sys.path.append(".."); sys.path.append("homophonic")
import copy
from tkinter import Tk
from tkinter.filedialog import askopenfilename
from cryptobase import CryptoBase
from homophonic_table_creator import TableCreator
from letter_distribution import LangData
class HomophonicCipher(CryptoBase):
    """Homophonic substitution cipher.

    Each plaintext letter maps to a pool of numbers; encryption draws
    from the pool so letter frequencies are flattened.
    """

    def __init__(self, key_file, *args, **kwargs):
        self.key_file = key_file
        super(HomophonicCipher, self).__init__(*args, **kwargs)

    # import the homophonic letter substitution table
    def import_substitution_table(self):
        """Load self.key from self.key_file.

        Each line is "<letter> <number> <number> ...".
        """
        with open(self.key_file) as f:
            self.key = {}
            table_data_lines = f.read().splitlines()
            # seperate the letter from the numbers on each line
            for line in table_data_lines:
                cur_letter = line[0]  # extract letter
                numbers = line[2:].split()  # extract numbers
                self.key[cur_letter] = numbers
            # make a backup key for use later
            self.key_backup = copy.deepcopy(self.key)

    # encrypt the message
    def encrypt(self):
        print(self.msg)
        # encrypt every character in the message; characters outside the
        # alphabet are silently dropped.
        for char in self.msg:
            if char in self.alphabet:
                # (The original appended `self.key[char].pop() if char in
                # self.alphabet else char`; the condition was already
                # guaranteed true by the enclosing `if`.)
                self.new_msg += self.key[char].pop()
                # if all the numbers for the letter have been used,
                # refill from a *copy* of the backup.  Assigning the
                # backup list itself would alias it: later pops would
                # drain the backup, and the next refill would install an
                # empty list, making .pop() raise IndexError.
                if not self.key[char]:
                    self.key[char] = list(self.key_backup[char])
                self.new_msg += " "

    # decrypt the message
    def decrypt(self):
        # Build a number -> letter reverse map once instead of scanning
        # the whole table per token (answers the original "# optimer?").
        # Assumes numbers are unique across letters — TODO confirm
        # against the table creator.
        reverse = {}
        for letter, numbers in self.key.items():
            for number in numbers:
                reverse[number] = letter
        for number in self.msg.split():
            if number in reverse:
                self.new_msg += reverse[number]

    # excecute the operation
    def excecute(self, mode):
        if mode == "encrypt":
            self.encrypt()
        elif mode == "decrypt":
            self.decrypt()
        print(self.new_msg)
        self.done()
if __name__ == "__main__":
    # Round-trip demo: encrypt a message, then decrypt the result.
    # NOTE(review): __init__ here requires a positional key_file argument
    # that is never passed, and does_user_have_the_table /
    # get_or_make_table are not defined in this module's class —
    # presumably provided by CryptoBase or a mixin; confirm this demo
    # actually runs.
    cipher = HomophonicCipher(key="foo", msg="bla bla bla", mode="encrypt")
    cipher.does_user_have_the_table()
    cipher.get_or_make_table()
    cipher.import_substitution_table()
    cipher.excecute(cipher.mode)
    cipher = HomophonicCipher(key="foo", msg=cipher.new_msg, mode="decrypt")
    cipher.does_user_have_the_table()
    cipher.get_or_make_table()
    cipher.import_substitution_table()
    cipher.excecute(cipher.mode)
| {
"repo_name": "himrasmussen/PyCipher",
"path": "homophonic/homophonic_cipher.py",
"copies": "1",
"size": "2625",
"license": "mit",
"hash": -8112990081034443000,
"line_mean": 33.0909090909,
"line_max": 96,
"alpha_frac": 0.5961904762,
"autogenerated": false,
"ratio": 3.977272727272727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5073463203472727,
"avg_score": null,
"num_lines": null
} |
# a homophonic cipher
# needs to have a homophoinic letter substitution table
import copy
from tkinter import Tk
from tkinter.filedialog import askopenfilename
from cryptobase import CryptoBase
from homophonic_table_creator import TableCreator
from letter_distribution import LangData
class HomophonicCipher(CryptoBase):
    """Homophonic substitution cipher.

    Each plaintext letter maps to a pool of numbers; encryption draws
    from the pool so letter frequencies are flattened.  Also handles
    interactively locating or creating the substitution table.
    """

    def __init__(self, *args, **kwargs):
        self.has_table = None
        self.key_backup = {}
        super(HomophonicCipher, self).__init__(*args, **kwargs)

    # check if the user has a homophonic substitution table and act accordingly
    def does_user_have_the_table(self):
        choices = ["no", "yes"]
        print("Do you have a homophonic substitution table?")
        for idx, choice in enumerate(choices):
            print("{}: {}".format(idx, choice))
        self.has_table = choices[int(input("Enter a number please: "))] # yes or no

    # if user has table, use it, else make it
    def get_or_make_table(self):
        # if the user has the table, ask for the absolute path to it
        if self.has_table == "yes":
            # create a file explorer windows for user to select the key file
            print("Select your homophonic substitution table")
            Tk().withdraw()  # remove excess windows
            self.key_file = askopenfilename()  # opens a dialog
        else:
            print("You now select your language word list")
            langdata = LangData()
            langdata.main()
            TableCreator(langdata.letter_distribution).excecute()
            self.has_table = "yes"
            self.get_or_make_table()

    # import the homophonic letter substitution table
    def import_substitution_table(self):
        """Load self.key from self.key_file.

        Each line is "<letter> <number> <number> ...".
        """
        with open(self.key_file) as f:
            self.key = {}
            table_data_lines = f.read().splitlines()
            # seperate the letter from the numbers on each line
            for line in table_data_lines:
                cur_letter = line[0]  # extract letter
                numbers = line[2:].split()  # extract numbers
                self.key[cur_letter] = numbers
            # make a backup key for use later
            self.key_backup = copy.deepcopy(self.key)

    # encrypt the message
    def encrypt(self):
        print(self.msg)
        # encrypt every character in the message; characters outside the
        # alphabet are silently dropped.
        for char in self.msg:
            if char in self.alphabet:
                # (The original ternary's `if char in self.alphabet` was
                # already guaranteed true by the enclosing `if`.)
                self.new_msg += self.key[char].pop()
                # if all the numbers for the letter have been used,
                # refill from a *copy* of the backup.  Assigning the
                # backup list itself would alias it, so a later refill
                # would install an already-emptied list and .pop() would
                # raise IndexError.
                if not self.key[char]:
                    self.key[char] = list(self.key_backup[char])
                self.new_msg += " "

    # decrypt the message
    def decrypt(self):
        # Build a number -> letter reverse map once instead of scanning
        # the whole table per token (answers the original "# optimer?").
        # Assumes numbers are unique across letters — TODO confirm
        # against the table creator.
        reverse = {}
        for letter, numbers in self.key.items():
            for number in numbers:
                reverse[number] = letter
        for number in self.msg.split():
            if number in reverse:
                self.new_msg += reverse[number]

    # excecute the operation
    def excecute(self, mode):
        if mode == "encrypt":
            self.encrypt()
        elif mode == "decrypt":
            self.decrypt()
        print(self.new_msg)
        self.done()
if __name__ == "__main__":
    # Round-trip demo: encrypt a message, then decrypt the result using
    # the same table.  Both runs prompt interactively for the table.
    cipher = HomophonicCipher(key="foo", msg="bla bla bla", mode="encrypt")
    cipher.does_user_have_the_table()
    cipher.get_or_make_table()
    cipher.import_substitution_table()
    cipher.excecute(cipher.mode)
    cipher = HomophonicCipher(key="foo", msg=cipher.new_msg, mode="decrypt")
    cipher.does_user_have_the_table()
    cipher.get_or_make_table()
    cipher.import_substitution_table()
    cipher.excecute(cipher.mode)
| {
"repo_name": "himrasmussen/PyCipher",
"path": "homophonic_cipher.py",
"copies": "1",
"size": "3719",
"license": "mit",
"hash": -4135545183103933000,
"line_mean": 35.1067961165,
"line_max": 96,
"alpha_frac": 0.5885990858,
"autogenerated": false,
"ratio": 4.095814977973569,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5184414063773568,
"avg_score": null,
"num_lines": null
} |
"""A hook for modifying parameter values read from the WMT client."""
from wmt.utils.hook import yaml_dump
def apply_boundary_condition(env, side='left'):
    """Build the boundary-condition setting for one side of the domain.

    Returns the boundary type name, or for Dirichlet a list of
    [name, stage, x_momentum, y_momentum].
    """
    boundary = env['_boundary_condition_' + side].encode('utf-8')
    if boundary != 'Dirichlet':
        return boundary
    prefix = '_bc_' + side + '_dirichlet_'
    bc = [boundary]
    for component in ('stage', 'x_momentum', 'y_momentum'):
        bc.append(float(env[prefix + component]))
    return bc
def execute(env):
    """Perform pre-stage tasks for running a component.

    Parameters
    ----------
    env : dict
        A dict of component parameter values from WMT.
    """
    conditions = {
        side: apply_boundary_condition(env, side)
        for side in ('left', 'right', 'top', 'bottom')
    }
    env['dict_of_boundary_conditions'] = str(conditions)
    # For debugging
    yaml_dump('_env.yaml', env)
| {
"repo_name": "csdms/wmt-metadata",
"path": "metadata/AnugaSedRectangular/hooks/pre-stage.py",
"copies": "1",
"size": "1039",
"license": "mit",
"hash": -9102546280493783000,
"line_mean": 27.8611111111,
"line_max": 71,
"alpha_frac": 0.6130895091,
"autogenerated": false,
"ratio": 3.8198529411764706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.993294245027647,
"avg_score": 0,
"num_lines": 36
} |
"""A hook for modifying parameter values read from the WMT client."""
from wmt.utils.hook import yaml_dump
# Maximum number of outline boundary segments the WMT client exposes
# (parameter names run _boundary_condition_side1 .. side8).
max_sides = 8
def apply_boundary_condition(env, side='side1'):
    """Build the boundary-condition setting for one outline segment.

    Returns the boundary type name, or for Dirichlet a list of
    [name, stage, x_momentum, y_momentum].
    """
    boundary = env['_boundary_condition_' + side].encode('utf-8')
    if boundary != 'Dirichlet':
        return boundary
    prefix = '_bc_' + side + '_dirichlet_'
    bc = [boundary]
    for component in ('stage', 'x_momentum', 'y_momentum'):
        bc.append(float(env[prefix + component]))
    return bc
def execute(env):
    """Perform pre-stage tasks for running a component.

    Parameters
    ----------
    env : dict
        A dict of component parameter values from WMT.
    """
    # Collect the segments the user switched on.
    active = []
    for i in range(max_sides):
        label = 'side' + str(i + 1)
        if env['_boundary_condition_' + label] != 'Off':
            active.append(label)

    # Tag each active segment with its index.
    tags = {}
    for idx, label in enumerate(active):
        tags[label] = [idx]
    env['dict_of_boundary_tags'] = str(tags)

    conditions = {}
    for label in active:
        conditions[label] = apply_boundary_condition(env, label)
    env['dict_of_boundary_conditions'] = str(conditions)

    yaml_dump('_env.yaml', env)
| {
"repo_name": "csdms/wmt-metadata",
"path": "metadata/AnugaSedOutline/hooks/pre-stage.py",
"copies": "1",
"size": "1295",
"license": "mit",
"hash": -460593016373203700,
"line_mean": 25.9791666667,
"line_max": 71,
"alpha_frac": 0.5961389961,
"autogenerated": false,
"ratio": 3.647887323943662,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4744026320043662,
"avg_score": null,
"num_lines": null
} |
"""A hook for modifying parameter values read from the WMT client."""
import os
import shutil
from wmt.utils.hook import yaml_dump, find_simulation_input_file
from pbs_server.file import (IlambConfigFile,
get_region_labels_txt,
get_region_labels_ncdf)
# Region labels offered as checkboxes in the WMT client (keys
# '_region_<name>' in env) — presumably the GFED region set used by
# ILAMB; confirm against the ILAMB docs.
gfed_region_names = ['global', 'bona', 'tena', 'ceam', 'nhsa', 'shsa',
                     'euro', 'mide', 'nhaf', 'shaf', 'boas', 'ceas',
                     'seas', 'eqas', 'aust']
def load_custom_regions(regions_file):
    """Get a list of custom region labels from a file.

    Tries the netCDF reader first, falling back to the plain-text reader.

    Parameters
    ----------
    regions_file : str
        The path to an ILAMB custom regions file.

    Returns
    -------
    list
        A list of custom region names.
    """
    # The original had an `else: return []` clause that was unreachable
    # (the try body always returns), and a bare `except:`; narrow the
    # except and drop the dead branch.
    try:
        return get_region_labels_ncdf(regions_file)
    except Exception:
        return get_region_labels_txt(regions_file)
def execute(env):
    """Perform pre-stage tasks for running a component.

    Parameters
    ----------
    env : dict
        A dict of component parameter values from WMT.
    """
    # Variables selected in the client.
    var_list = []
    for k, v in env.items():
        if k.startswith('_variable') and v != 'Off':
            var_list.append(str(v))
    has_relationships = (env['_relationships'] == 'Yes') and \
                        (len(var_list) > 1)
    f = IlambConfigFile(var_list,
                        relationships=has_relationships,
                        title=env['simulation_name'])
    f.setup()
    f.write()

    # Models switched on in the client.
    model_list = []
    prefix = '_model_'
    for k, v in env.items():
        if k.startswith(prefix) and v == 'On':
            # Slice off the prefix.  The original used
            # k.lstrip('_model_'), which strips the character *set*
            # {_, m, o, d, e, l} and mangles names that start with any
            # of those letters (e.g. '_model_modelE' -> 'E').
            model_list.append(k[len(prefix):].encode('utf-8'))
    env['models'] = model_list

    # Built-in regions switched on in the client.
    region_list = []
    for r in gfed_region_names:
        if env['_region_' + r] == 'On':
            region_list.append(r)
    env['regions'] = region_list

    # Optional user-supplied custom regions file.
    if env['_define_regions_file'] != 'Off':
        env['define_regions'] = env['_define_regions_file']
        shutil.copy(
            find_simulation_input_file(env['define_regions']),
            os.curdir)
        custom_regions_list = load_custom_regions(env['define_regions'])
        env['regions'].extend(custom_regions_list)

    # For debugging.
    env['_sources_file'] = f.sources_file
    env['_var_list'] = var_list
    env['_has_relationships'] = has_relationships
    yaml_dump('_env.yaml', env)
| {
"repo_name": "csdms/wmt-metadata",
"path": "metadata/ILAMB/hooks/pre-stage.py",
"copies": "1",
"size": "2408",
"license": "mit",
"hash": -4470623128888226300,
"line_mean": 27.3294117647,
"line_max": 72,
"alpha_frac": 0.5527408638,
"autogenerated": false,
"ratio": 3.7333333333333334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9781172236349019,
"avg_score": 0.000980392156862745,
"num_lines": 85
} |
"""A hook for modifying parameter values read from the WMT client."""
import os
import shutil
from wmt.utils.hook import find_simulation_input_file, yaml_dump
from topoflow_utils.hook import assign_parameters
file_list = []
def execute(env):
    """Perform pre-stage tasks for running a component.

    Parameters
    ----------
    env : dict
        A dict of component parameter values from WMT.
    """
    # Derive the model end date from the start date plus the run duration.
    env['model_end_date'] = long(env['model_start_date']) \
        + long(env['_run_duration'])

    env.update({
        'input_var_source': 'WMT',
        'output_filename': 'FrostnumberGeo_output.nc',
        # Todo: Remove these hooks when methods are implemented.
        'degree_days_method': 'MinJanMaxJul',  # will become a choice
        'n_precipitation_grid_fields': 0,
        'n_soilproperties_grid_fields': 0,
        'calc_surface_frostnumber': False,
        'calc_stefan_frostnumber': False,
        # XXX: This is my choice for implementing in WMT.
        'n_temperature_grid_fields': 1,
    })

    # Record input-file parameters, then stage each file into the run dir.
    assign_parameters(env, file_list)
    for input_name in file_list:
        shutil.copy(find_simulation_input_file(env[input_name]), os.curdir)

    env['_file_list'] = file_list
    yaml_dump('_env.yaml', env)
| {
"repo_name": "csdms/wmt-metadata",
"path": "metadata/FrostNumberGeoModel/hooks/pre-stage.py",
"copies": "1",
"size": "1249",
"license": "mit",
"hash": -311074938657756540,
"line_mean": 26.7555555556,
"line_max": 70,
"alpha_frac": 0.645316253,
"autogenerated": false,
"ratio": 3.538243626062323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4683559879062323,
"avg_score": null,
"num_lines": null
} |
"""A hook for refreshing a component's metadata."""
import os
import subprocess
import json
from wmt.utils.ssh import get_host_info, open_connection_to_host
from wmt.config import site
import pbs_server.models as models
import pbs_server.variables as variables
hostname = 'siwenna.colorado.edu'
models_dir = '/home/csdms/ilamb/MODELS-by-project/PBS'
data_dir = '/home/csdms/ilamb/DATA-by-project/PBS'
# Note that I modified info.json to add login credentials.
def get_pbs_listing(pbs_dir):
    """
    Get a listing of model outputs or benchmark data uploaded through the PBS.

    Parameters
    ----------
    pbs_dir : str
        The path to the directory of uploaded PBS files.

    Returns
    -------
    list
        A list of uploaded model output files.
    """
    info = get_host_info(hostname)
    ssh = open_connection_to_host(info['name'],
                                  info['username'],
                                  password=info['password'])
    try:
        # Remote `ls` of the upload directory; one file name per line.
        _, stdout, _ = ssh.exec_command('ls {}'.format(pbs_dir))
        # Strip trailing newlines from each entry.
        return [line.rstrip() for line in stdout.readlines()]
    finally:
        # BUG FIX: close the connection even if the remote command raises;
        # previously an exception leaked the SSH session.
        ssh.close()
def update_models(params):
    """Updates ILAMB metadata for model outputs uploaded through the PBS.

    Parameters
    ----------
    params : list
        The WMT parameters for the ILAMB component.
    """
    models_files = get_pbs_listing(models_dir)
    # Extract model names from the file listing. A set removes duplicates
    # with O(1) lookups instead of the previous O(n) list-membership test,
    # and sorted() yields the same sorted, deduplicated result.
    model_list = sorted({models.get_name(pbs_file) for pbs_file in models_files})
    models.update_parameters(params, model_list)
def update_variables(params):
    """Updates ILAMB metadata for benchmark data uploaded through the PBS.

    Parameters
    ----------
    params : list
        The WMT parameters for the ILAMB component.
    """
    data_files = get_pbs_listing(data_dir)
    # Map each variable name to the *first* file that provides it.
    # setdefault replaces the `not in variable_dict.keys()` anti-pattern
    # with a single lookup while preserving first-wins semantics.
    variable_dict = {}
    for pbs_file in data_files:
        variable_dict.setdefault(variables.get_name(pbs_file), pbs_file)
    variables.update_parameters(params, variable_dict.keys())
    # Create or update the .cfg.tmpl file for each variable.
    for var_name, file_name in variable_dict.items():
        variables.update_template(var_name, file_name)
def execute(name):
    """Hook called by components/refresh API.

    Parameters
    ----------
    name : str
        The name of the component (here, 'ILAMB').
    """
    parameters_file = os.path.join(site['db'], 'components', name,
                                   'db', 'parameters.json')
    with open(parameters_file, 'r') as fp:
        params = json.load(fp)

    update_models(params)
    update_variables(params)

    # Note that I had to give `a+w` permissions to the parameters file.
    with open(parameters_file, 'w') as fp:
        json.dump(params, fp, indent=4)

    # Touch the wsgi script so the WMT client reads the changes.
    # I had to change the permissions to `a+w` on the wsgi script.
    # And also add site['bin'].
    script_path = os.path.join(site['bin'], 'wmt_wsgi_main.py')
    # The return code was previously captured in an unused variable; the
    # result of `touch` is intentionally ignored (best-effort cache bust).
    subprocess.call(['touch', script_path])
| {
"repo_name": "csdms/wmt-metadata",
"path": "metadata/ILAMB/hooks/refresh.py",
"copies": "1",
"size": "3416",
"license": "mit",
"hash": 9112944227350282000,
"line_mean": 27.9491525424,
"line_max": 78,
"alpha_frac": 0.6349531616,
"autogenerated": false,
"ratio": 3.7292576419213974,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9864210803521397,
"avg_score": 0,
"num_lines": 118
} |
"""A hook to add the '--reorder' argument to py.test."""
from pytest_reorder import DEFAULT_ORDER, make_reordering_hook
def pytest_addoption(parser):
    """Add the '--reorder' argument to the py.test invocation."""
    help_text = (
        "A list of regular expressions matching test nodeids and one '*' to specify the order "
        'of unmatched tests. The tests will be reordered according to these specs. If no '
        'arguments are passed, default ordering will be applied (unit tests, unmatched tests, '
        'integration tests, ui tests). \n'
        'E.g. `--reorder "(^|.*/)(test_)?unit" "*" "(^|.*/)(test_)?db" "(^|.*/)(test_)?web"`.'
    )
    reorder_group = parser.getgroup('tests reordering', 'reordering', after='general')
    reorder_group.addoption('--reorder', type=str, nargs='*', help=help_text)
def pytest_collection_modifyitems(session, config, items):
    """Reorder tests if the '--reorder' command line option was added."""
    requested = config.getoption('reorder')
    if requested is None:
        # Test reordering not requested.
        return
    if requested == []:
        ordering = DEFAULT_ORDER
    else:
        # An asterisk means 'unmatched tests'. Replace it with None for `make_reordering_hook`.
        ordering = [None if spec == '*' else spec for spec in requested]
    make_reordering_hook(ordering)(session, config, items)
| {
"repo_name": "not-raspberry/pytest_reorder",
"path": "pytest_reorder/hook.py",
"copies": "1",
"size": "1428",
"license": "mit",
"hash": -5511328188489932000,
"line_mean": 43.625,
"line_max": 99,
"alpha_frac": 0.6316526611,
"autogenerated": false,
"ratio": 3.9338842975206614,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015951247674109515,
"num_lines": 32
} |
# Emit HTML snippets for an image gallery of the form:
# <a href="path/to/myimage1_original.jpg">
#   <img alt="Title 1" src="path/to/myimage1_thumbnail.jpg" />
# </a>

numfiles = 12

# justified gallery (disabled variant)
"""
for i in range(1,numfiles+1):
    print('<a href="img/resized/{}.jpg">'.format(str(i).zfill(4)))
    print('<img alt="Title {}" src="img/resized/{}.jpg" />'.format(i,str(i).zfill(4)))
    print('</a>')
"""

# Active variant: plain <img> tags with a data-highres attribute, e.g.
# <img src="img/resized/0001.jpg" width="1024" data-highres="img/demo/withhearts3-highres.jpg"/>
for index in range(1, numfiles + 1):
    zero_padded = str(index).zfill(4)
    # print('<img src="img/resized/{}.jpg" width="1200" />'.format(zero_padded))
    print('<img src="img/resized/{0}.jpg" width="1200" data-highres="img/resized/{0}.jpg" />'.format(zero_padded))

# lightbox 2 (disabled variant)
"""
for i in range(1,numfiles+1):
    print('<a href="img/resized/{0}.jpg" data-lightbox="lightbox-gallery" data-title="My caption {0}"><img class="img-thumbnail col-lg-2 col-sm-3 col-xs-4" src="img/resized/{0}.jpg" /></a>'.format(str(i).zfill(4)))
"""

# colorbox (disabled variant)
"""
for i in range (1,numfiles+1):
    print('<p>\n\t<a class="group1" href="img/resized/{0}.jpg" title="alt text {0}">\n\t\t<img class="img-thumbnail col-lg-2 col-sm-3 col-xs-4" src="img/resized/{0}.jpg" />\n\t</a>\n</p>'.format(str(i).zfill(4)))
"""

# pikachoose (disabled variant)
"""
for i in range(1,numfiles+1):
    print('<li><a href="#"><img src="img/resized/{0}.jpg"/></a><span>Photo {0}.</span></li>')
"""

# owl (disabled variant)
"""
for i in range(1,numfiles+1):
    print('<div><img src="img/resized/{0}.jpg" /></div>'.format(str(i).zfill(4)))
"""
| {
"repo_name": "harfordt/harfordt.github.io",
"path": "L3_demos/img/resized/gen.py",
"copies": "1",
"size": "1581",
"license": "mit",
"hash": -7938862249148161000,
"line_mean": 23.3230769231,
"line_max": 218,
"alpha_frac": 0.5762175838,
"autogenerated": false,
"ratio": 2.591803278688525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36680208624885247,
"avg_score": null,
"num_lines": null
} |
"""Ahsay API example.
Usage:
example.py <user> <backup_name> [--username=<username> | --password=<password> | --hostname=<hostname>]
example.py (-h | --help)
example.py --version
Options:
-h --help Show this screen.
--version Show version.
--username=<username> API username
--password=<password> API password
--hostname=<hostname> API hostname with scheme (https://)
"""
from docopt import docopt
import os
from ahsay.api import AhsayAPI
from datetime import datetime, timedelta
def get_last_backup_time(api, user, backup_name):
    """
    Gets back the datetime of last time the backup schedule ran successfully

    :param api: The Ahsay API client used for the lookup.
    :param user: The user that did the backup.
    :param backup_name: Name of backup file.
    """
    for backup_set in api.call('ListBackupSets', LoginName=user):
        if backup_set.attrib['Name'] == backup_name:
            details = api.call(
                'GetBackupSet', LoginName=user,
                BackupSetID=backup_set.attrib['ID'])
            # 'LastBackupCompleted' is in milliseconds since the epoch.
            completed_ms = int(details.attrib['LastBackupCompleted'])
            return datetime.fromtimestamp(completed_ms / 1000)
if __name__ == '__main__':
    arguments = docopt(__doc__, version='Ahsay-API-Example 1.0')
    # Command-line credentials win; fall back to the environment.
    api_user = arguments.get('--username', None) or os.environ['AHSAY_API_USER']
    api_pass = arguments.get('--password', None) or os.environ['AHSAY_API_PASS']
    api_host = arguments.get('--hostname', None) or os.environ['AHSAY_HOST']
    api = AhsayAPI(api_user, api_pass, api_host)

    user = arguments['<user>']
    backup_name = arguments['<backup_name>']

    last_backup = get_last_backup_time(api, user, backup_name)
    # The staleness cutoff is one day ago; the old name `two_days_ago`
    # did not match the timedelta(days=1) value.
    cutoff = datetime.now() - timedelta(days=1)
    # BUG FIX: the comparison was inverted -- it printed 'too old' when
    # the last backup was *newer* than the cutoff. A backup is stale when
    # it completed before the cutoff, or when no matching set was found.
    if last_backup is None or last_backup < cutoff:
        print('too old')
| {
"repo_name": "koddsson/ahsay-python-api",
"path": "ahsay/example.py",
"copies": "1",
"size": "1790",
"license": "mit",
"hash": 6595940438205103000,
"line_mean": 33.4230769231,
"line_max": 105,
"alpha_frac": 0.6324022346,
"autogenerated": false,
"ratio": 3.5728542914171655,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9698413991658907,
"avg_score": 0.0013685068716515257,
"num_lines": 52
} |
a = ['https://store.starbucks.com/coffee/whole-bean-and-ground?utm_source=Sbux&utm_m
edium=Mega&utm_campaign=Coffee+Mega+WB&utm_content=HolFY17&cm_mmc=Sbux-_-Mega-_-
Coffee+Mega+WB-_-HolFY17',
'https://store.starbucks.com/coffee/whole-bean-and-gr
ound?utm_source=sbuc&utm_medium=Mega&utm_campaign=Coffee+Mega+Ground&utm_content
=HolFY!&&cm_mmc=sbuc-_-Mega-_-Coffee+Mega+Ground-_-HolFY!&',
'http://store.starb
ucks.com/coffee/starbucks-verismo-pods?utm_source=SBUXcom&utm_medium=CoffeeMega&
utm_campaign=VerismoPods&utm_content=Evergreen&cm_mmc=SBUXcom-_-CoffeeMega-_-Ver
ismoPods-_-Evergreen', 'http://store.starbucks.com/coffee/starbucks-k-cup-pods/?
utm_source=SBUXcom&utm_medium=CoffeeMega&utm_campaign=Kcup+Pods&utm_content=Ever
green&cm_mmc=SBUXcom-_-CoffeeMega-_-Kcup+Pods-_-Evergreen',
'http://store.starbu
cks.com/on/demandware.store/Sites-Starbucks-Site/default/Reserve-Show?utm_source
=web&utm_medium=sbux&utm_campaign=Homepage&utm_content=HolFY17&cm_mmc=web-_-sbux
-_-Homepage-_-HolFY17', 'http://store.starbucks.com/coffee/starbucks-via-instant
-coffee?utm_source=SBUXcom&utm_medium=CoffeeMega&utm_campaign=VIA+Coffee&utm_con
tent=Evergreen&cm_mmc=SBUXcom-_-CoffeeMega-_-VIA+Coffee-_-Evergreen', 'http://st
ore.starbucks.com/coffee/portion-packs/?utm_source=SBUXcom&utm_medium=CoffeeMega
&utm_campaign=Portion+Packs&utm_content=Evergreen&cm_mmc=SBUXcom-_-CoffeeMega-_-
Portion+Packs-_-Evergreen', 'https://store.starbucks.com/starbucks-reserve-roast
ery-subscription/?utm_source=Web&utm_medium=Sbux&utm_campaign=Homepage&utm_conte
nt=HolFY17 &cm_mmc=Web-_-HolFY17', 'http://store.starbucks.com/coffee/?utm_sourc
e=SBUXcom&utm_medium=CoffeeMega&utm_campaign=View+All+Coffees&utm_content=Evergr
een&cm_mmc=SBUXcom-_-CoffeeMega-_-View+All+Coffees-_-Evergreen', 'http://store.s
tarbucks.com/coffee/starbucks-reserve-coffee/?utm_source=SBUXcom&utm_medium=Coff
eeMega&utm_campaign=Reserve&utm_content=Evergreen&cm_mmc=SBUXcom-_-CoffeeMega-_-
Reserve-_-Evergreen', 'http://store.starbucks.com/starbucks-reserve-roastery-sub
scription?utm_source=SBUXcom&utm_medium=CoffeeMega&utm_campaign=Reserve+Subscrip
tions&utm_content=Evergreen&cm_mmc=SBUXcom-_-CoffeeMega-_-Reserve+Subscriptions-
_-Evergreen', '/coffee/espresso/latte-macchiato', '/coffee/espresso/doppio-bever
ages', '/coffee/espresso/americano-beverages', '/coffee/espresso/espresso-macchi
ato-beverages', '/coffee/espresso/flat-white-beverages', '/coffee/espresso/cappu
ccino-beverages', '/coffee/espresso/latte-beverages', '/coffee/espresso/caramel-
macchiato-beverages', '/coffee/espresso/mocha-beverages', '/coffee/how-to-brew/c
offee-press', '/coffee/how-to-brew/pour-over', '/coffee/how-to-brew/iced-pour-ov
er', '/coffee/how-to-brew/coffee-brewer', '/coffee/how-to-brew/verismo-system',
'/responsibility/sourcing/coffee', '/responsibility/community/farmer-support', '
http://athome.starbucks.com/', '/coffee/learn/roast', '/coffee/learn/format', '/
coffee/learn/flavors-in-your-cup', '/coffee/learn/clover', 'http://mystarbucksid
ea.force.com/ideaList?ext=0&lsi=0&category=Coffee+%26+Espresso+Drinks', '/custom
er-service/faqs/coffee', 'http://www.starbucks.com/menu/catalog/product?drink=te
a#view_control=product', 'http://www.starbucks.com/menu/catalog/product?drink=te
a#view_control=product', 'http://www.starbucks.com/menu/catalog/product?drink=te
a#view_control=product', 'https://www.starbucks.com/promo/teavana-craft-iced-tea
s', 'http://www.starbucks.com/promo/nutrition', 'http://www.starbucks.com/menu/c
atalog/product?drink=tea#view_control=product', 'http://www.teavana.com/us/en/te
a/full-leaf-tea-sachets?cm_mmc=Sbux.com-_-TeaNav-_-FullLeafTeaSachets-_-FullLeaf
TeaSachets', 'http://www.teavana.com/us/en/tea/matcha-tea?cm_mmc=Sbux.com-_-TeaN
av-_-Matcha-_-MatchaTea', 'http://www.teavana.com/us/en/tea/iced-tea?cm_mmc=Sbux
.com-_-TeaNav-_-IcedTeaAtHome-_-IcedTea', 'http://www.teavana.com/us/en/tea/tea-
filled-tins?cm_mmc=Sbux.com-_-TeaNav-_-TeaFilledTins-_-TeaFilledTins', 'http://w
ww.teavana.com/us/en/gifts/tea-samplers?cm_mmc=Sbux.com-_-TeaNav-_-TeaSamplers-_
-TeaSamplers', 'http://www.teavana.com/us/en/tea-subscriptions?cm_mmc=Sbux.com-_
-TeaNav-_-TeaSubscriptions-_-TeaSubscriptions', 'http://www.teavana.com/us/en/te
a/wellness-tea?cm_mmc=Sbux.com-_-TeaNav-_-WellnessTea-_-WellnessTea', 'http://ww
w.teavana.com/us/en/tea/all-tea?cm_mmc=Sbux.com-_-TeaNav-_-AllTeas-_-AllTea', 'h
ttp://www.teavana.com/us/en/tea/black-tea?cm_mmc=Sbux.com-_-LooseLeafNav-_-Black
Tea-_-BlackTea', 'http://www.teavana.com/us/en/tea/green-tea?cm_mmc=Sbux.com-_-L
ooseLeafNav-_-GreenTea-_-GreenTea', 'http://www.teavana.com/us/en/tea/white-tea?
cm_mmc=Sbux.com-_-LooseLeafNav-_-WhiteTea-_-WhiteTea', 'http://www.teavana.com/u
s/en/tea/herbal-tea?cm_mmc=Sbux.com-_-LooseLeafNav-_-HerbalTea-_-HerbalTea', 'ht
tp://www.teavana.com/us/en/tea/chai-tea?cm_mmc=Sbux.com-_-LooseLeafNav-_-ChaiTea
-_-ChaiTea', 'http://www.teavana.com/us/en/tea/rooibos-tea?cm_mmc=Sbux.com-_-Loo
seLeafNav-_-RooibosTea-_-RooibosTea', 'http://www.teavana.com/us/en/tea/oolong-t
ea?cm_mmc=Sbux.com-_-LooseLeafNav-_-OolongTea-_-OolongTea', 'http://www.teavana.
com/us/en/tea/all-tea?cm_mmc=Sbux.com-_-LooseLeafNav-_-AllLooseLeafTea-_-AllTea'
, 'http://www.teavana.com/us/en/gifts/starter-kits?cm_mmc=Sbux.com-_-TeawareNav-
_-StarterKits-_-StarterKits', 'http://www.teavana.com/us/en/teaware/teavana-perf
ectea-makers?cm_mmc=Sbux.com-_-TeawareNav-_-PerfectTeaMaker-_-PerfectTeaMaker',
'http://www.teavana.com/us/en/tea/tea-rock-sugar?cm_mmc=Sbux.com-_-TeawareNav-_-
RockSugar-_-RockSugar', 'http://www.teavana.com/us/en/teaware/tumbler-cups?cm_mm
c=Sbux.com-_-TeawareNav-_-TumblerCups-_-TumblerCups', 'http://www.teavana.com/us
/en/teaware/breville-tea-makers?cm_mmc=Sbux.com-_-TeawareNav-_-BrevilleTeaMaker-
_-BrevilleTeaMaker', 'http://www.teavana.com/us/en/teaware/tea-infusers?cm_mmc=S
bux.com-_-TeawareNav-_-TeaInfusers-_-TeaInfusers', 'http://www.teavana.com/us/en
/teaware/tea-makers?cm_mmc=Sbux.com-_-TeawareNav-_-TeaMakers-_-TeaMakers', 'http
://www.teavana.com/us/en/teaware/all-teaware?cm_mmc=Sbux.com-_-TeawareNav-_-AllT
eaware-_-AllTeaware', 'http://www.teavana.com/us/en/gifts/tea-samplers?cm_mmc=Sb
ux.com-_-GiftsNav-_-TeaSamplers-_-TeaSamplers', 'http://www.teavana.com/us/en/te
a-subscriptions?cm_mmc=Sbux.com-_-GiftsNav-_-TeaSubscriptions-_-TeaSubscriptions
', 'http://www.teavana.com/us/en/gifts/tea-gift-sets?cm_mmc=Sbux.com-_-GiftsNav-
_-TeaGiftSets-_-TeaGiftSets', 'http://www.teavana.com/us/en/gift-cards?cm_mmc=Sb
ux.com-_-GiftsNav-_-Egift-_-GiftCards', 'http://www.teavana.com/us/en/gift-cards
?cm_mmc=Sbux.com-_-GiftsNav-_-GiftCard-_-GiftCards', 'http://www.teavana.com/us/
en/gifts/starter-kits?cm_mmc=Sbux.com-_-GiftsNav-_-StarterKits-_-StarterKits', '
http://www.teavana.com/us/en/gifts/gifts-under-25?cm_mmc=Sbux.com-_-GiftsNav-_-G
iftsUnder25-_-GiftsUnder25', 'http://www.teavana.com/us/en/gifts/gifts-under-50?
cm_mmc=Sbux.com-_-GiftsNav-_-GiftsUnder50-_-GiftsUnder50', 'http://www.teavana.c
om/us/en/gifts/gifts-under-75?cm_mmc=Sbux.com-_-GiftsNav-_-GiftsUnder75-_-GiftsU
nder75', 'http://www.teavana.com/us/en/gifts/gifts-under-100?cm_mmc=Sbux.com-_-G
iftsNav-_-GiftsUnder100-_-GiftsUnder100', 'http://www.teavana.com/us/en/gifts?cm
_mmc=Sbux.com-_-GiftsNav-_-AllTeaGifts-_-Gifts', 'http://www.teavana.com/us/en/m
ixology?cm_mmc=Sbux.com-_-LearnMoreNav-_-Mixology-_-Mixology', 'http://www.teava
na.com/us/en/tea/matcha-tea?cm_mmc=Sbux.com-_-LearnMoreNav-_-Matcha-_-MatchaTea'
, 'http://www.teavana.com/us/en/discover?cm_mmc=Sbux.com-_-LearnMoreNav-_-Tea101
-_-Discover', 'http://www.teavana.com/us/en/about-us?cm_mmc=Sbux.com-_-LearnMore
Nav-_-AboutTeavana-_-AboutUs', '/menu/drinks/bottled-drinks', '/promo/brewed-cof
fee', '/promo/iced-coffee', '/promo/evolution-fresh-juice', '/menu/drinks/chocol
ate', '/menu/drinks/espresso', '/menu/drinks/frappuccino-blended-beverages', '/m
enu/drinks/kids-drinks-and-other', '/menu/drinks/smoothies', '/promo/sodas', '/m
enu/drinks/refreshers', '/menu/drinks/tea', '/menu/food/bakery', '/menu/food/pet
ites', '/promo/proteinbox', '/menu/food/hot-breakfast', '/menu/food/sandwiches-p
anini-and-wraps', '/menu/food/yogurt-and-fruit', '/starbucks-snack-collection',
'/promo/mercato', '/menu/nutrition/food', 'http://www.starbucks.com/menu/catalog
/nutrition?drink=all#view_control=nutrition', '/menu/nutrition/20-under-200', '/
menu/nutrition/35-under-350', 'http://mystarbucksidea.force.com/ideaList?ext=0&l
si=0&category=Food', 'http://customerservice.starbucks.com/app/answers/list/sess
ion/L3RpbWUvMTM3MTUxNjMzMi9zaWQvQXUySUtfc2w%3D/c/115', '/coffeehouse/wireless-in
ternet/starbucks-digital-network', '/coffeehouse/mobile-apps/mystarbucks', '/cof
feehouse/mobile-apps/starbucks-android', '/coffeehouse/mobile-apps/starbucks-app
-for-windows', '/coffeehouse/mobile-order', 'https://www.starbucks.com/green-apr
on-delivery/', '/promo/music', 'http://mystarbucksidea.force.com/ideaList?ext=0&
lsi=0&category=Atmosphere+%26+Locations', '/coffeehouse/community/social-media-t
erms-and-conditions', '/coffeehouse/learn-more/my-starbucks-idea', '/customer-se
rvice/faqs/coffeehouse', '/store-locator', '/responsibility/community/refugee-hi
ring', '/responsibility/community/opportunity-youth', '/responsibility/community
/community-stores', '/responsibility/community/starbucks-foundation', '/responsi
bility/community/community-service', '/responsibility/community/veterans', '/res
ponsibility/community/diversity-and-inclusion', '/responsibility/community/farme
r-support', '/seattle', '/responsibility/community/starbucks-red', '/responsibil
ity/community/foodshare', '/responsibility/sourcing/suppliers', '/responsibility
/sourcing/coffee', '/responsibility/sourcing/tea', '/responsibility/sourcing/coc
oa', '/responsibility/sourcing/store-products', '/responsibility/environment/lee
d-certified-stores', '/responsibility/environment/recycling', '/responsibility/e
nvironment/water-and-energy', '/responsibility/environment/climate-change', '/ca
reers/college-plan', '/responsibility/learn-more/conflict-minerals-disclosure',
'/responsibility/learn-more/relationships', '/responsibility/learn-more/policies
', 'http://globalassets.starbucks.com/assets/2994ceff517a44aca17df6f1237c4c13.pd
f', 'http://customerservice.starbucks.com/app/answers/detail/a_id/2286/kw/commun
ity', '/shop/card/egift', '/shop/card/starbucks-card', 'http://explore.starbucks
cardb2b.com/businessgifts?utm_source=SBUX.com&utm_medium=SBUXsiteLink&utm_campai
gn=Header-BuyACard', '/Card#cardBalanceWrapper', '/card/reload/one-time', '/card
/manage/transfer', '/card/manage/history', '/account/card', '/account/create/reg
ister', '/card/rewards', '/account', '/card/rewards/rewards-program-ts-and-cs',
'http://members.starbucks.com/?utm_source=homepagehero&utm_medium=referral&utm_c
ampaign=summer1&utm_content=summerfy16', '/about-us/company-information/online-p
olicies/terms-of-use/card', '/card/egift', 'https://starbucks.cashstar.com/self_
service/register/', '/customer-service/faqs/card', '/account', 'http://mystarbuc
ksidea.force.com/ideaList?ext=0&lsi=0&category=Starbucks+Card', 'http://1912pike
.com/column/farm-to-cup/?utm_source=website&utm_medium=blog_megamenu&utm_campaig
n=farm_to_cup', 'http://1912pike.com/column/how-to/?utm_source=website&utm_mediu
m=blog_megamenu&utm_campaign=how_to', 'http://1912pike.com/column/recipes/?utm_s
ource=website&utm_medium=blog_megamenu&utm_campaign=recipes', 'http://1912pike.c
om/column/taste-drinks/?utm_source=website&utm_medium=blog_megamenu&utm_campaign
=taste_drinks', 'http://1912pike.com/column/people/?utm_source=website&utm_mediu
m=blog_megamenu&utm_campaign=people', 'http://1912pike.com/column/places/?utm_so
urce=website&utm_medium=blog_megamenu&utm_campaign=places', 'https://store.starb
ucks.com/coffee/whole-bean-and-ground?utm_source=Web&utm_medium=Sbux&utm_campaig
n=Homepage&utm_content=HolFY17 &cm_mmc=Web-_-Sbux-_-Homepage-_-HolFY17', 'https:
//store.starbucks.com/coffee/starbucks-verismo-pods?utm_source=Web&utm_medium=Sb
ux&utm_campaign=Homepage&utm_content=HolFY17 &cm_mmc=Web-_-Sbux-_-Homepage-_-Hol
FY17', 'https://store.starbucks.com/coffee/starbucks-k-cup-pods?utm_source=Web&u
tm_medium=Sbux&utm_campaign=Homepage&utm_content=HolFY17 &cm_mmc=Web-_-Sbux-_-Ho
mepage-_-HolFY17', 'https://store.starbucks.com/coffee/starbucks-via-instant-cof
fee?utm_source=Web&utm_medium=Sbux&utm_campaign=Homepage&utm_content=HolFY17 &cm
_mmc=Web-_-Sbux-_-Homepage-_-HolFY17', 'https://store.starbucks.com/coffee/starb
ucks-via-instant-refreshers-beverages?utm_source=Web&utm_medium=Sbux&utm_campaig
n=Homepage&utm_content=HolFY17 &cm_mmc=Web-_-Sbux-_-Homepage-_-HolFY17', 'https:
//store.starbucks.com/coffee/portion-packs?utm_source=Web&utm_medium=Sbux&utm_ca
mpaign=Homepage&utm_content=HolFY17 &cm_mmc=Web-_-Sbux-_-Homepage-_-HolFY17', 'h
ttps://store.starbucks.com/collections/reserve-collection/?utm_source=Web&utm_me
dium=Sbux&utm_campaign=Homepage&utm_content=HolFY17 &cm_mmc=Web-_-Sbux-_-Homepag
e-_-HolFY17', 'https://store.starbucks.com/tea/black-tea?utm_source=Web&utm_medi
um=Sbux&utm_campaign=Homepage&utm_content=HolFY17 &cm_mmc=Web-_-Sbux-_-Homepage-
_-HolFY17', 'https://store.starbucks.com/tea/chai-tea?utm_source=Web&utm_medium=
Sbux&utm_campaign=Homepage&utm_content=HolFY17 &cm_mmc=Web-_-Sbux-_-Homepage-_-H
olFY17', 'https://store.starbucks.com/tea/green-tea?utm_source=Web&utm_medium=Sb
ux&utm_campaign=Homepage&utm_content=HolFY17 &cm_mmc=Web-_-Sbux-_-Homepage-_-Hol
FY17', 'https://store.starbucks.com/tea/herbal-tea?utm_source=Web&utm_medium=Sbu
x&utm_campaign=Homepage&utm_content=HolFY17 &cm_mmc=Web-_-Sbux-_-Homepage-_-HolF
Y17', 'https://store.starbucks.com/tea/white-tea?utm_source=Web&utm_medium=Sbux&
utm_campaign=Homepage&utm_content=HolFY17 &cm_mmc=Web-_-Sbux-_-Homepage-_-HolFY1
7', 'https://store.starbucks.com/tea/iced-tea?utm_source=Web&utm_medium=Sbux&utm
_campaign=Homepage&utm_content=HolFY17 &cm_mmc=Web-_-Sbux-_-Homepage-_-HolFY17',
'https://store.starbucks.com/tea/teavana?utm_source=Web&utm_medium=Sbux&utm_cam
paign=Homepage&utm_content=HolFY17 &cm_mmc=Web-_-Sbux-_-Homepage-_-HolFY17', 'ht
tps://store.starbucks.com/drinkware/cups-and-mugs?utm_source=Web&utm_medium=Sbux
&utm_campaign=Homepage&utm_content=HolFY17 &cm_mmc=Web-_-Sbux-_-Homepage-_-HolFY
17', 'https://store.starbucks.com/drinkware/tumblers-and-travel-mugs?utm_source=
Web&utm_medium=Sbux&utm_campaign=Homepage&utm_content=HolFY17 &cm_mmc=Web-_-Sbux
-_-Homepage-_-HolFY17', 'https://store.starbucks.com/drinkware/cold-cups?utm_sou
rce=Web&utm_medium=Sbux&utm_campaign=Homepage&utm_content=HolFY17 &cm_mmc=Web-_-
Sbux-_-Homepage-_-HolFY17', 'https://store.starbucks.com/collections/starbucks-l
ocal-collection?utm_source=Web&utm_medium=Sbux&utm_campaign=Homepage&utm_content
=HolFY17 &cm_mmc=Web-_-Sbux-_-Homepage-_-HolFY17', 'https://store.starbucks.com/
equipment/coffee-presses?utm_source=Web&utm_medium=Sbux&utm_campaign=Homepage&ut
m_content=HolFY17 &cm_mmc=Web-_-Sbux-_-Homepage-_-HolFY17', 'https://store.starb
ucks.com/equipment/coffee-makers?utm_source=Web&utm_medium=Sbux&utm_campaign=Hom
epage&utm_content=HolFY17 &cm_mmc=Web-_-Sbux-_-Homepage-_-HolFY17', 'https://sto
re.starbucks.com/equipment/espresso-makers', 'https://store.starbucks.com/equipm
ent/grinders?utm_source=Web&utm_medium=Sbux&utm_campaign=Homepage&utm_content=Ho
lFY17 &cm_mmc=Web-_-Sbux-_-Homepage-_-HolFY17', 'https://store.starbucks.com/equ
ipment/teapots-and-tea-kettles?utm_source=Web&utm_medium=Sbux&utm_campaign=Homep
age&utm_content=HolFY17 &cm_mmc=Web-_-Sbux-_-Homepage-_-HolFY17', 'http://store.
starbucks.com/verismo/?utm_source=Web&utm_medium=Sbux&utm_campaign=Homepage&utm_
content=HolFY17 &cm_mmc=Web-_-Sbux-_-Homepage-_-HolFY17', 'https://store.starbuc
ks.com/collections?utm_source=Web&utm_medium=Sbux&utm_campaign=Homepage&utm_cont
ent=HolFY17 &cm_mmc=Web-_-HolFY17', 'https://store.starbucks.com/collections/res
erve-collection/?utm_source=Web&utm_medium=Sbux&utm_campaign=Homepage&utm_conten
t=HolFY17 &cm_mmc=Web-_-Sbux-_-Homepage-_-HolFY17', '/shop/card/egift', '/shop/c
ard/buy', '/shop/card/starbucks-card', 'https://www.starbuckscardb2b.com/', 'htt
p://mystarbucksidea.force.com/ideaList?ext=0&lsi=0&category=Merchandise+%26+Musi
c', '/customer-service/faqs/shop']
# Keep only the entries that contain 'http' (absolute URLs); relative
# paths such as '/menu/...' are dropped. A comprehension replaces the
# manual append loop.
b = [each for each in a if 'http' in each]
print(b) | {
"repo_name": "Zhaominxin/MyProject",
"path": "starbucks/select_urls.py",
"copies": "1",
"size": "16527",
"license": "mit",
"hash": -4151703646051846000,
"line_mean": 75.9669811321,
"line_max": 84,
"alpha_frac": 0.7505294367,
"autogenerated": false,
"ratio": 2.1803430079155675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3430872444615568,
"avg_score": null,
"num_lines": null
} |
"""A hug directive that enables easy interaction and storage of profiling information and code path explanations."""
from copy import deepcopy
from datetime import datetime
import hug
from hug_explainable.context_manager import explainable
from inspect import getframeinfo, stack
@hug.directive()
class Explainable(object):
    """Provides a mechanism for explaining and profiling code paths"""
    __slots__ = ('explanation', )

    def __init__(self, enabled=True, **kwargs):
        # `explanation` is a list of entry dicts when enabled, None when not.
        self.explanation = [] if enabled else None

    def enable(self):
        """Enables storing the recording of explanations of individual code blocks"""
        self.explanation = [] if self.explanation is None else self.explanation

    def disable(self):
        """Disables the recording of explanations of individual code blocks"""
        self.explanation = None

    @property
    def enabled(self):
        """Returns True if the explanations are enabled"""
        return bool(self)

    @enabled.setter
    def enabled(self, enable):
        """Switches between enabled and not based on a boolean flag"""
        if enable:
            self.enable()
        else:
            self.disable()

    def insert_into(self, dictionary):
        """Inserts the explanation if there is one, otherwise performs a noop"""
        if self:
            dictionary['explanation'] = self.explanation

    def explained(self, dictionary):
        """Returns an explained version of the dictionary if explainations are enabled,
        otherwise the dictionary unchanged
        """
        self.insert_into(dictionary)
        return dictionary

    def __getitem__(self, index):
        """Looks up an entry by list position (int) or by key within an entry."""
        if isinstance(index, int):  # idiom fix: isinstance over type() ==
            return self.explanation.__getitem__(index)
        for item in self:
            # NOTE(review): this skips entries whose value for `index` is
            # falsy (0, '', None), not only missing keys -- presumably
            # intentional; confirm before changing to `index in item`.
            if item.get(index, None):
                return item
        raise KeyError('Explanation not found')

    def __setitem__(self, explanation, value):
        """Records an explanation entry annotated with caller location and timestamps."""
        if isinstance(explanation, int):
            # NOTE(review): for an int key this *reads* the entry instead
            # of setting it, mirroring the original code -- looks suspect
            # but is preserved as-is.
            return self.explanation.__getitem__(explanation)
        if self:
            now = datetime.now()
            # stack()[1] is the caller's frame; record where the entry was made.
            caller = getframeinfo(stack()[1][0])
            self.explanation.append({'action': explanation, 'value': deepcopy(value), 'line': caller.lineno,
                                     'file': caller.filename,
                                     'time': '{}:{}:{}'.format(now.hour, now.minute, now.second),
                                     'datetime': now.isoformat(), 'date': now.date().isoformat(), 'took': 0.0})

    def __bool__(self):
        # Idiom fix: identity comparison with None (was `!= None`).
        return self.explanation is not None

    def __call__(self, explanation, value):
        """Returns the explainable context manager bound to this explanation list."""
        return explainable(explanation, value, self.explanation)

    def __native_types__(self):
        return self.explanation

    def __iter__(self):
        return self.explanation.__iter__() if self else ().__iter__()

    def __contains__(self, item):
        return self.explanation.__contains__(item) if self else False

    def __len__(self):
        return self.explanation.__len__() if self else 0

    def __delitem__(self, item):
        return self.explanation.__delitem__(item)

    def __reversed__(self):
        # BUG FIX: tuples have no __reversed__ method, so the disabled
        # branch previously raised AttributeError; return an empty
        # iterator instead.
        return reversed(self.explanation) if self else iter(())

    def append(self, value):
        return self.explanation.append(value)

    def pop(self, index, *kargs, **kwargs):
        # NOTE(review): list.pop takes no keyword arguments; the extra
        # *kargs/**kwargs would raise TypeError if ever supplied.
        return self.explanation.pop(index, *kargs, **kwargs)
@hug.directive()
def explain(default=None, response=None, **kwargs):
    """Returns the current explanation object if one is present within the response context"""
    context = response.context if response else {}
    return context.get('explanation', Explainable(False))
| {
"repo_name": "timothycrosley/hug_explainable",
"path": "hug_explainable/directive.py",
"copies": "1",
"size": "3668",
"license": "mit",
"hash": 2786187956554770400,
"line_mean": 33.6037735849,
"line_max": 116,
"alpha_frac": 0.6166848419,
"autogenerated": false,
"ratio": 4.613836477987421,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010556164780502059,
"num_lines": 106
} |
"""A hug middleware that automatically adds explanations to the request."""
import json
import hug
from hug_explainable.directive import Explainable
DOCUMENT = """
<html>
<head>
<meta charset="UTF-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Explanation</title>
<style>
/* General Blueprint Style */
@import url(http://fonts.googleapis.com/css?family=Lato:300,400,700);
@font-face {
font-family: 'bpicons';
src:url('../fonts/bpicons/bpicons.eot');
src:url('../fonts/bpicons/bpicons.eot?#iefix') format('embedded-opentype'),
url('../fonts/bpicons/bpicons.woff') format('woff'),
url('../fonts/bpicons/bpicons.ttf') format('truetype'),
url('../fonts/bpicons/bpicons.svg#bpicons') format('svg');
font-weight: normal;
font-style: normal;
} /* Made with http://icomoon.io/ */
*, *:after, *:before { -webkit-box-sizing: border-box; -moz-box-sizing: border-box; box-sizing: border-box; }
body, html { font-size: 100%; padding: 0; margin: 0;}
/* Clearfix hack by Nicolas Gallagher: http://nicolasgallagher.com/micro-clearfix-hack/ */
.clearfix:before, .clearfix:after { content: " "; display: table; }
.clearfix:after { clear: both; }
body {
font-family: 'Lato', Calibri, Arial, sans-serif;
color: #47a3da;
}
a {
color: #f0f0f0;
text-decoration: none;
}
a:hover {
color: #000;
}
.main,
.container > header {
width: 90%;
max-width: 69em;
margin: 0 auto;
padding: 0 1.875em 3.125em 1.875em;
}
.container > header {
padding: 2.875em 1.875em 1.875em;
}
.container > header h1 {
font-size: 2.125em;
line-height: 1.3;
margin: 0 0 0.6em 0;
float: left;
font-weight: 400;
}
.container > header > span {
display: block;
position: relative;
z-index: 9999;
font-weight: 700;
text-transform: uppercase;
letter-spacing: 0.5em;
padding: 0 0 0.6em 0.1em;
}
.container > header > span span:after {
width: 30px;
height: 30px;
left: -12px;
font-size: 50%;
top: -8px;
font-size: 75%;
position: relative;
}
.container > header > span span:hover:before {
content: attr(data-content);
text-transform: none;
text-indent: 0;
letter-spacing: 0;
font-weight: 300;
font-size: 110%;
padding: 0.8em 1em;
line-height: 1.2;
text-align: left;
left: auto;
margin-left: 4px;
position: absolute;
color: #fff;
background: #47a3da;
}
.container > header nav {
float: right;
text-align: center;
}
.container > header nav a {
display: inline-block;
position: relative;
text-align: left;
width: 2.5em;
height: 2.5em;
background: #fff;
border-radius: 50%;
margin: 0 0.1em;
border: 4px solid #47a3da;
}
.container > header nav a > span {
display: none;
}
.container > header nav a:hover:before {
content: attr(data-info);
color: #47a3da;
position: absolute;
width: 600%;
top: 120%;
text-align: right;
right: 0;
pointer-events: none;
}
.container > header nav a:hover {
background: #47a3da;
}
.bp-icon:after {
font-family: 'bpicons';
speak: none;
font-style: normal;
font-weight: normal;
font-variant: normal;
text-transform: none;
text-align: center;
color: #47a3da;
-webkit-font-smoothing: antialiased;
}
.container > header nav .bp-icon:after {
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
line-height: 2;
text-indent: 0;
}
.container > header nav a:hover:after {
color: #fff;
}
.bp-icon-next:after {
content: "\e000";
}
.bp-icon-drop:after {
content: "\e001";
}
.bp-icon-archive:after {
content: "\e002";
}
.bp-icon-about:after {
content: "\e003";
}
.bp-icon-prev:after {
content: "\e004";
}
@media screen and (max-width: 55em) {
.container > header h1,
.container > header nav {
float: none;
}
.container > header > span,
.container > header h1 {
text-align: center;
}
.container > header nav {
margin: 0 auto;
}
.container > header > span {
text-indent: 30px;
}
}
@font-face {
font-family: 'ecoico';
src:url('../fonts/timelineicons/ecoico.eot');
src:url('../fonts/timelineicons/ecoico.eot?#iefix') format('embedded-opentype'),
url('../fonts/timelineicons/ecoico.woff') format('woff'),
url('../fonts/timelineicons/ecoico.ttf') format('truetype'),
url('../fonts/timelineicons/ecoico.svg#ecoico') format('svg');
font-weight: normal;
font-style: normal;
} /* Made with http://icomoon.io/ */
.cbp_tmtimeline {
margin: 30px 0 0 0;
padding: 0;
list-style: none;
position: relative;
}
/* The line */
.cbp_tmtimeline:before {
content: '';
position: absolute;
top: 0;
bottom: 0;
width: 10px;
background: #afdcf8;
left: 20%;
margin-left: -10px;
}
.cbp_tmtimeline > li {
position: relative;
}
/* The date/time */
.cbp_tmtimeline > li .cbp_tmtime {
display: block;
width: 25%;
padding-right: 100px;
position: absolute;
}
.cbp_tmtimeline > li .cbp_tmtime span {
display: block;
text-align: right;
}
.cbp_tmtimeline > li .cbp_tmtime span:first-child {
font-size: 0.9em;
color: #bdd0db;
}
.cbp_tmtimeline > li .cbp_tmtime span:last-child {
font-size: 2.9em;
color: #3594cb;
}
.cbp_tmtimeline > li:nth-child(odd) .cbp_tmtime span:last-child {
color: #6cbfee;
}
/* Right content */
.cbp_tmtimeline > li .cbp_tmlabel {
margin: 0 0 15px 25%;
background: #3594cb;
color: #fff;
padding: 2em;
font-size: 1.2em;
font-weight: 300;
line-height: 1.4;
position: relative;
border-radius: 5px;
}
.cbp_tmtimeline > li:nth-child(odd) .cbp_tmlabel {
background: #6cbfee;
}
.cbp_tmtimeline > li .cbp_tmlabel h2 {
margin-top: 0px;
padding: 0 0 10px 0;
border-bottom: 1px solid rgba(255,255,255,0.4);
}
/* The triangle */
.cbp_tmtimeline > li .cbp_tmlabel:after {
right: 100%;
border: solid transparent;
content: " ";
height: 0;
width: 0;
position: absolute;
pointer-events: none;
border-right-color: #3594cb;
border-width: 10px;
top: 10px;
}
.cbp_tmtimeline > li:nth-child(odd) .cbp_tmlabel:after {
border-right-color: #6cbfee;
}
/* The icons */
.cbp_tmtimeline > li .cbp_tmicon {
width: 40px;
height: 40px;
font-family: 'ecoico';
speak: none;
font-style: normal;
font-weight: normal;
font-variant: normal;
text-transform: none;
font-size: 1.4em;
line-height: 40px;
-webkit-font-smoothing: antialiased;
position: absolute;
color: #fff;
background: #46a4da;
border-radius: 50%;
box-shadow: 0 0 0 8px #afdcf8;
text-align: center;
left: 20%;
top: 0;
margin: 0 0 0 -25px;
}
.cbp_tmicon-phone:before {
content: "\e000";
}
.cbp_tmicon-screen:before {
content: "\e001";
}
.cbp_tmicon-mail:before {
content: "\e002";
}
.cbp_tmicon-earth:before {
content: "\e003";
}
/* Example Media Queries */
@media screen and (max-width: 65.375em) {
.cbp_tmtimeline > li .cbp_tmtime span:last-child {
font-size: 1.5em;
}
}
@media screen and (max-width: 47.2em) {
.cbp_tmtimeline:before {
display: none;
}
.cbp_tmtimeline > li .cbp_tmtime {
width: 100%;
position: relative;
padding: 0 0 20px 0;
}
.cbp_tmtimeline > li .cbp_tmtime span {
text-align: left;
}
.cbp_tmtimeline > li .cbp_tmlabel {
margin: 0 0 30px 0;
padding: 1em;
font-weight: 400;
font-size: 95%;
}
.cbp_tmtimeline > li .cbp_tmlabel:after {
right: auto;
left: 20px;
border-right-color: transparent;
border-bottom-color: #3594cb;
top: -20px;
}
.cbp_tmtimeline > li:nth-child(odd) .cbp_tmlabel:after {
border-right-color: transparent;
border-bottom-color: #6cbfee;
}
.cbp_tmtimeline > li .cbp_tmicon {
position: relative;
float: right;
left: auto;
margin: -55px 5px 0 0px;
}
}
</style>
</head>
<body>
<div class="container">
<div class="main">
<ul class="cbp_tmtimeline">
{content}
</ul>
</div>
</div>
</body>
</html>
"""
EXPLANATION = """
<li>
<time class="cbp_tmtime" datetime="{datetime}"><span>{date}</span> <span>{time}</span></time>
<div class="cbp_tmicon cbp_tmicon-screen"></div>
<div class="cbp_tmlabel" style="{extra_css}">
<h2>{description}</h2>
<pre>{data}</pre>
<a href="{code_url}">
{file}: {line}
</a>
<p>Took: {took}</p>
{extra_html}
</div>
</li>
"""
def init(api, option='explain', code_urls=None):
    """Install explanation middleware on the given hug *api*.

    :param api: hug API object to attach the middleware to.
    :param option: name of the query parameter that enables explanations.
        Bug fix: this argument was previously declared but ignored - the
        parameter name was hard-coded to ``'explain'``.
    :param code_urls: optional mapping of path-fragment -> base URL, used to
        build source-code links for entries in the HTML timeline output.
    """

    @hug.request_middleware(api=api)
    def add_explain(request, response):
        # Explanations are on when the configured query parameter is truthy
        # or the request path carries a '.explain.' marker.
        should_explain = hug.types.smart_boolean(request.get_param(option) or '.explain.' in request.path)
        response.context['explanation'] = Explainable(should_explain)

    @hug.response_middleware(api=api)
    def output_explain_if_requested(request, response, resource):
        if '.explain.' not in request.path:
            return
        if '.json' in request.path:
            # Raw JSON dump of the recorded explanation steps.
            response.content_type = 'application/json; charset=utf-8'
            response.body = hug.output_format.json(response.context['explanation'].explanation)
        elif '.html' in request.path:
            # Render each recorded step as one entry of the HTML timeline.
            explanations = []
            response.content_type = 'text/html; charset=utf-8'
            for explanation in response.context['explanation'].explanation:
                if not explanation.get('action', ''):
                    continue
                # Link back to the source line when a code_urls fragment
                # matches the recorded file path.
                code_url = ''
                for match, url in (code_urls or {}).items():
                    if 'file' in explanation and match in explanation['file']:
                        code_url = "{}{}#L{}".format(url, explanation['file'].split(match)[-1],
                                                     explanation.get('line', 0))
                        break
                extra_html = ''
                if 'failed_with' in explanation:
                    extra_html = '<b>FAILED: {}</b>'.format(explanation['failed_with'])
                    if 'failed_after' in explanation:
                        explanation['took'] = explanation['failed_after']
                explanations.append(EXPLANATION.format(
                    data=api.http.output_format(explanation.get('value', ''), indent=4).decode('utf8'),
                    description=explanation.get('action', ''),
                    date=explanation.get('date', ''),
                    time=explanation.get('time', ''),
                    datetime=explanation.get('datetime', ''),
                    took=explanation.get('took', 0.0),
                    file=explanation.get('file', ''),
                    line=explanation.get('line', ''),
                    extra_css='background-color: #a70000;' if 'failed_with' in explanation else '',
                    code_url=code_url,
                    extra_html=extra_html))
            response.body = hug.output_format.html(DOCUMENT.replace('{content}', '\n'.join(explanations)))
| {
"repo_name": "timothycrosley/hug_explainable",
"path": "hug_explainable/middleware.py",
"copies": "1",
"size": "16001",
"license": "mit",
"hash": -1989437529620294400,
"line_mean": 32.9004237288,
"line_max": 121,
"alpha_frac": 0.4116617711,
"autogenerated": false,
"ratio": 4.4213871235147835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5333048894614784,
"avg_score": null,
"num_lines": null
} |
"""A human-run test to see if toolbars have correct widget traversal.
Currently only tests FirstOrderBar.
"""
# TODO: add other toolbars as well.
# Purpose: intent is an integration test for toolbars and widgets, to test the
# widget._find_next_entry method (and, at some future time, ._on_return,
# ._on_tab, ._refresh as well.) When a similar, automated test was attempted,
# 'assert current_entry is not next_entry' was always false, and the initial
# entry widget was returned. Hypothesis is that _find_next_entry requires
# .tk.focusNext() and that this in turn requires the mainloop.
import tkinter as tk
from nmrmint.GUI.toolbars import FirstOrderBar
def dummy_callback(*args, **kwargs):
    """Echo callback: stands in for a real toolbar callback by printing
    whatever it was called with (positional tuple, then keyword dict)."""
    for received in (args, kwargs):
        print(received)
# noinspection PyProtectedMember
def focus_next_entry():
    """Advance keyboard focus to the toolbar's next entry widget.

    Reads and rebinds the module-level ``current_entry`` global, using the
    toolbar's private ``_find_next_entry`` traversal helper.
    """
    global current_entry
    next_entry = current_entry.master._find_next_entry(current_entry)
    next_entry.focus()
    print('current: ', current_entry.widgetName, current_entry)
    print('next: ', next_entry.widgetName, next_entry)
    # Sanity checks: traversal must actually advance, and only ever land on
    # Entry/Spinbox widgets.
    assert current_entry is not next_entry
    assert isinstance(current_entry, (tk.Entry, tk.Spinbox))
    current_entry = next_entry
if __name__ == '__main__':
    # Build a minimal window holding just a FirstOrderBar plus two helper
    # buttons for manually exercising widget focus traversal.
    root = tk.Tk()
    root.title('test toolbars')
    # Note: immediately packing testbar broke things
    testbar = FirstOrderBar(root, callback=dummy_callback)  # .pack(side=tk.TOP)
    print(type(testbar))
    # noinspection PyProtectedMember
    first_widget = testbar._fields['# of nuclei']
    first_entry = first_widget.entry
    current_entry = first_entry
    # 'Reset Focus' jumps back to the first entry; 'Next Focus' steps the
    # traversal forward via focus_next_entry().
    focusbutton = tk.Button(testbar,
                            name='focus_button',
                            text='Reset Focus',
                            command=lambda: first_entry.focus())
    focusnextbutton = tk.Button(testbar,
                                name='focus_next_button',
                                text='Next Focus',
                                command=lambda: focus_next_entry())
    focusbutton.pack(side=tk.LEFT)
    focusnextbutton.pack(side=tk.LEFT)
    testbar.pack(side=tk.TOP)
    # workaround fix for Tk problems and mac mouse/trackpad:
    while True:
        try:
            root.mainloop()
            break
        except UnicodeDecodeError:
            pass
| {
"repo_name": "sametz/nmrmint",
"path": "tests/human/toolbar_focus.py",
"copies": "1",
"size": "2276",
"license": "mit",
"hash": -2180323120616833800,
"line_mean": 31.9855072464,
"line_max": 80,
"alpha_frac": 0.64543058,
"autogenerated": false,
"ratio": 3.8972602739726026,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001789228842368939,
"num_lines": 69
} |
# AI 2018
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
# Importing the libraries
import os
import numpy as np
import gym
from gym import wrappers
import pybullet_envs
import time
import multiprocessing as mp
from multiprocessing import Process, Pipe
import argparse
# Setting the Hyper Parameters
class Hp():
  """Hyper-parameters for Augmented Random Search (ARS).

  Defaults below; env_name, seed and nb_steps are overridden from the CLI
  in the __main__ block.
  """

  def __init__(self):
    self.nb_steps = 10000          # number of policy-update iterations
    self.episode_length = 1000     # max env steps per rollout
    self.learning_rate = 0.02      # ARS step-size numerator
    self.nb_directions = 16        # random perturbation directions sampled
    self.nb_best_directions = 8    # top directions kept for the update
    assert self.nb_best_directions <= self.nb_directions
    self.noise = 0.03              # perturbation scale applied to deltas
    self.seed = 1
    self.env_name = 'HalfCheetahBulletEnv-v0'
# Multiprocess Exploring the policy on one specific direction and over one episode
_RESET = 1
_CLOSE = 2
_EXPLORE = 3
def ExploreWorker(rank, childPipe, envname, args):
  """Worker-process loop: owns one gym env and serves _RESET / _EXPLORE /
  _CLOSE messages received over *childPipe* (parent side lives in train())."""
  env = gym.make(envname)
  nb_inputs = env.observation_space.shape[0]
  # Each worker keeps its own running Normalizer; the one sent in the
  # _EXPLORE payload is deliberately ignored (see commented line below).
  normalizer = Normalizer(nb_inputs)
  observation_n = env.reset()
  n = 0
  while True:
    n += 1
    try:
      # Only block for short times to have keyboard exceptions be raised.
      if not childPipe.poll(0.001):
        continue
      message, payload = childPipe.recv()
    except (EOFError, KeyboardInterrupt):
      break
    if message == _RESET:
      observation_n = env.reset()
      childPipe.send(["reset ok"])
      continue
    if message == _EXPLORE:
      #normalizer = payload[0] #use our local normalizer
      policy = payload[1]
      hp = payload[2]
      direction = payload[3]
      delta = payload[4]
      # Roll out one (perturbed) episode - same logic as explore() below,
      # with per-step rewards clipped into [-1, 1].
      state = env.reset()
      done = False
      num_plays = 0.
      sum_rewards = 0
      while not done and num_plays < hp.episode_length:
        normalizer.observe(state)
        state = normalizer.normalize(state)
        action = policy.evaluate(state, delta, direction, hp)
        state, reward, done, _ = env.step(action)
        reward = max(min(reward, 1), -1)
        sum_rewards += reward
        num_plays += 1
      childPipe.send([sum_rewards])
      continue
    if message == _CLOSE:
      childPipe.send(["close ok"])
      break
  childPipe.close()
# Normalizing the states
class Normalizer():
  """Online per-feature mean/variance tracker (Welford-style updates)."""

  def __init__(self, nb_inputs):
    # One running statistic per observation dimension.
    self.n = np.zeros(nb_inputs)
    self.mean = np.zeros(nb_inputs)
    self.mean_diff = np.zeros(nb_inputs)
    self.var = np.zeros(nb_inputs)

  def observe(self, x):
    """Fold one observation vector into the running statistics."""
    self.n += 1.
    previous_mean = self.mean.copy()
    self.mean = self.mean + (x - self.mean) / self.n
    self.mean_diff = self.mean_diff + (x - previous_mean) * (x - self.mean)
    # Clip so the std used for normalization never collapses toward zero.
    self.var = (self.mean_diff / self.n).clip(min=1e-2)

  def normalize(self, inputs):
    """Return *inputs* standardized by the running mean and std."""
    return (inputs - self.mean) / np.sqrt(self.var)
# Building the AI
class Policy():
  """Linear policy (a single theta matrix) for Augmented Random Search."""

  def __init__(self, input_size, output_size, env_name, args):
    # Resume from a saved policy when args.policy points at a readable .npy
    # file; otherwise start from zeros.  Narrowed from a bare `except:` so
    # KeyboardInterrupt / SystemExit are no longer silently swallowed here.
    try:
      self.theta = np.load(args.policy)
    except Exception:
      self.theta = np.zeros((output_size, input_size))
    self.env_name = env_name
    print("Starting policy theta=", self.theta)

  def evaluate(self, input, delta, direction, hp):
    """Return the action (clipped to [-1, 1]) for *input*.

    direction None -> unperturbed theta; "positive" -> theta + noise*delta;
    anything else -> theta - noise*delta.
    """
    if direction is None:
      return np.clip(self.theta.dot(input), -1.0, 1.0)
    elif direction == "positive":
      return np.clip((self.theta + hp.noise * delta).dot(input), -1.0, 1.0)
    else:
      return np.clip((self.theta - hp.noise * delta).dot(input), -1.0, 1.0)

  def sample_deltas(self):
    # One Gaussian perturbation matrix per search direction.
    # NOTE(review): relies on the module-level global `hp`, not a parameter.
    return [np.random.randn(*self.theta.shape) for _ in range(hp.nb_directions)]

  def update(self, rollouts, sigma_r, args):
    """Apply one ARS update step, then checkpoint theta under args.logdir."""
    step = np.zeros(self.theta.shape)
    for r_pos, r_neg, d in rollouts:
      step += (r_pos - r_neg) * d
    # NOTE(review): also uses the module-level global `hp` for the step size.
    self.theta += hp.learning_rate / (hp.nb_best_directions * sigma_r) * step
    timestr = time.strftime("%Y%m%d-%H%M%S")
    np.save(args.logdir + "/policy_" + self.env_name + "_" + timestr + ".npy", self.theta)
# Exploring the policy on one specific direction and over one episode
def explore(env, normalizer, policy, direction, delta, hp):
  """Run one (optionally perturbed) episode; return its clipped total reward."""
  state = env.reset()
  total_reward = 0
  steps_taken = 0.
  done = False
  while not done and steps_taken < hp.episode_length:
    normalizer.observe(state)
    normalized_state = normalizer.normalize(state)
    action = policy.evaluate(normalized_state, delta, direction, hp)
    state, reward, done, _ = env.step(action)
    # Clip each per-step reward into [-1, 1] before accumulating.
    total_reward += max(min(reward, 1), -1)
    steps_taken += 1
  return total_reward
# Training the AI
def train(env, policy, normalizer, hp, parentPipes, args):
  """Main ARS training loop.

  When *parentPipes* is provided, rollouts are farmed out to the
  ExploreWorker processes; otherwise they run serially in-process via
  explore().
  """
  for step in range(hp.nb_steps):
    # Initializing the perturbations deltas and the positive/negative rewards
    deltas = policy.sample_deltas()
    positive_rewards = [0] * hp.nb_directions
    negative_rewards = [0] * hp.nb_directions
    if parentPipes:
      # Parallel rollouts: one worker per direction, all positives first,
      # then all negatives (send everything, then collect the replies).
      for k in range(hp.nb_directions):
        parentPipe = parentPipes[k]
        parentPipe.send([_EXPLORE, [normalizer, policy, hp, "positive", deltas[k]]])
      for k in range(hp.nb_directions):
        positive_rewards[k] = parentPipes[k].recv()[0]
      for k in range(hp.nb_directions):
        parentPipe = parentPipes[k]
        parentPipe.send([_EXPLORE, [normalizer, policy, hp, "negative", deltas[k]]])
      for k in range(hp.nb_directions):
        negative_rewards[k] = parentPipes[k].recv()[0]
    else:
      # Getting the positive rewards in the positive directions
      for k in range(hp.nb_directions):
        positive_rewards[k] = explore(env, normalizer, policy, "positive", deltas[k], hp)
      # Getting the negative rewards in the negative/opposite directions
      for k in range(hp.nb_directions):
        negative_rewards[k] = explore(env, normalizer, policy, "negative", deltas[k], hp)
    # Gathering all the positive/negative rewards to compute the standard deviation of these rewards
    all_rewards = np.array(positive_rewards + negative_rewards)
    sigma_r = all_rewards.std()
    # Sorting the rollouts by the max(r_pos, r_neg) and selecting the best directions
    scores = {
        k: max(r_pos, r_neg)
        for k, (r_pos, r_neg) in enumerate(zip(positive_rewards, negative_rewards))
    }
    order = sorted(scores.keys(), key=lambda x: -scores[x])[:hp.nb_best_directions]
    rollouts = [(positive_rewards[k], negative_rewards[k], deltas[k]) for k in order]
    # Updating our policy
    policy.update(rollouts, sigma_r, args)
    # Printing the final reward of the policy after the update
    reward_evaluation = explore(env, normalizer, policy, None, None, hp)
    print('Step:', step, 'Reward:', reward_evaluation)
# Running the main code
def mkdir(base, name):
  """Return the path base/name, creating the directory if needed.

  Fix: os.makedirs(..., exist_ok=True) replaces the racy
  os.path.exists()-then-makedirs() check (two concurrent callers could both
  pass the exists() test and one would crash).
  """
  path = os.path.join(base, name)
  os.makedirs(path, exist_ok=True)
  return path
if __name__ == "__main__":
  mp.freeze_support()
  # Command-line interface; env/seed/steps override the Hp defaults below.
  parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument(
      '--env', help='Gym environment name', type=str, default='HalfCheetahBulletEnv-v0')
  parser.add_argument('--seed', help='RNG seed', type=int, default=1)
  parser.add_argument('--render', help='OpenGL Visualizer', type=int, default=0)
  parser.add_argument('--movie', help='rgb_array gym movie', type=int, default=0)
  parser.add_argument('--steps', help='Number of steps', type=int, default=10000)
  parser.add_argument('--policy', help='Starting policy file (npy)', type=str, default='')
  parser.add_argument(
      '--logdir', help='Directory root to log policy files (npy)', type=str, default='.')
  parser.add_argument('--mp', help='Enable multiprocessing', type=int, default=1)
  args = parser.parse_args()
  hp = Hp()
  hp.env_name = args.env
  hp.seed = args.seed
  hp.nb_steps = args.steps
  print("seed = ", hp.seed)
  np.random.seed(hp.seed)
  parentPipes = None
  if args.mp:
    # Spawn one ExploreWorker process (with its own pipe) per direction.
    num_processes = hp.nb_directions
    processes = []
    childPipes = []
    parentPipes = []
    for pr in range(num_processes):
      parentPipe, childPipe = Pipe()
      parentPipes.append(parentPipe)
      childPipes.append(childPipe)
    for rank in range(num_processes):
      p = mp.Process(target=ExploreWorker, args=(rank, childPipes[rank], hp.env_name, args))
      p.start()
      processes.append(p)
  work_dir = mkdir('exp', 'brs')
  monitor_dir = mkdir(work_dir, 'monitor')
  env = gym.make(hp.env_name)
  if args.render:
    env.render(mode="human")
  if args.movie:
    env = wrappers.Monitor(env, monitor_dir, force=True)
  nb_inputs = env.observation_space.shape[0]
  nb_outputs = env.action_space.shape[0]
  policy = Policy(nb_inputs, nb_outputs, hp.env_name, args)
  normalizer = Normalizer(nb_inputs)
  print("start training")
  train(env, policy, normalizer, hp, parentPipes, args)
  if args.mp:
    # Shut the workers down cleanly before exiting.
    for parentPipe in parentPipes:
      parentPipe.send([_CLOSE, "pay2"])
    for p in processes:
      p.join()
| {
"repo_name": "MTASZTAKI/ApertusVR",
"path": "plugins/physics/bulletPhysics/3rdParty/bullet3/examples/pybullet/gym/pybullet_envs/ARS/ars.py",
"copies": "4",
"size": "8825",
"license": "mit",
"hash": -3985892365939780600,
"line_mean": 29.6423611111,
"line_max": 100,
"alpha_frac": 0.6554107649,
"autogenerated": false,
"ratio": 3.3251695553880936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.015513209895316907,
"num_lines": 288
} |
"""ai and foreign keys genomic set member
Revision ID: da3c59138805
Revises: 6f26f7c49be7
Create Date: 2020-02-19 15:04:32.203546
"""
from alembic import op
import sqlalchemy as sa
import model.utils
from sqlalchemy.dialects import mysql
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = 'da3c59138805'
down_revision = '6f26f7c49be7'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (e.g. upgrade_rdr)."""
    globals()['upgrade_{0}'.format(engine_name)]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function (e.g. downgrade_rdr)."""
    globals()['downgrade_{0}'.format(engine_name)]()
def upgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Add the ai_an column (presumably an American Indian / Alaska Native
    # flag - confirm) and relax participant_id to NULLable so genomic set
    # members can exist without a linked participant.
    op.add_column('genomic_set_member', sa.Column('ai_an', sa.String(length=2), nullable=True))
    op.alter_column('genomic_set_member', 'participant_id',
               existing_type=mysql.INTEGER(display_width=11),
               nullable=True)
    # Only delete the participant_id foreign key if it exists (stable won't have it)
    constraint_sql = """
        SELECT count(*)
        FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE
        WHERE REFERENCED_TABLE_SCHEMA = 'rdr'
         AND TABLE_NAME = 'genomic_set_member'
         AND CONSTRAINT_NAME = 'genomic_set_member_ibfk_3'
         AND REFERENCED_COLUMN_NAME = 'participant_id'
    """
    connection = op.get_bind()
    pid_key_exists = connection.execute(constraint_sql).fetchone()[0]
    if pid_key_exists == 1:
        op.drop_constraint('genomic_set_member_ibfk_3', 'genomic_set_member', type_='foreignkey')
    # ### end Alembic commands ###
# ### end Alembic commands ###
def downgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade_rdr: recreate the participant FK, make the column
    # NOT NULL again and drop ai_an.  Note this recreates the FK even if
    # upgrade_rdr found none to drop.
    op.create_foreign_key('genomic_set_member_ibfk_3', 'genomic_set_member', 'participant', ['participant_id'], ['participant_id'])
    op.alter_column('genomic_set_member', 'participant_id',
               existing_type=mysql.INTEGER(display_width=11),
               nullable=False)
    op.drop_column('genomic_set_member', 'ai_an')
    # ### end Alembic commands ###
# ### end Alembic commands ###
def upgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No schema changes for the metrics database in this revision.
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No schema changes for the metrics database in this revision.
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/da3c59138805_ai_and_foreign_keys_genomic_set_member.py",
"copies": "1",
"size": "3052",
"license": "bsd-3-clause",
"hash": -8390815264279897000,
"line_mean": 37.15,
"line_max": 131,
"alpha_frac": 0.7031454784,
"autogenerated": false,
"ratio": 3.5863689776733256,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9710740462618012,
"avg_score": 0.01575479869106274,
"num_lines": 80
} |
# AI Class. This is my first ever take on AI.
# It gives commands by setting an attribute,
# which is then accesed by a getCommand() method
# in the game instance's handleInput() method.
# We'll work with heuristics:
# 1. Aggregate height (minimize)
# 2. Complete lines (maximize)
# 3. Holes (minimize)
# 4. Bumpiness (minimize)
import curses
import debug
class AI:
    """Heuristic tetris AI.

    Scores candidate placements with the four heuristics listed in the
    file header: aggregate height, complete lines, holes and bumpiness.
    Commands are exposed via self.command / getCommand() for the game's
    input handler to consume.
    """
    def __init__(self, game):
        # The AI has access to its game object, allowing it to directly call methods in order to move and rotate the block, etc.
        self.game = game
        self.window = game.windowObject
        self.command = None
    def setBlock(self, block):
        # Set the block object.
        # We don't actually do anything with this, except get the coordinates of the block,
        # which are required for calculations.
        self.blockObj = block
    def setNextBlock(self, block):
        # Set the next block
        self.nextblock = block
    def getCommand(self):
        # Returns the command that the AI wants to perform.
        # This is actually a curses.KEY_* value.
        return self.command
    def computeHeuristics(self):
        # Encapsulation function for computing the various heuristics.
        # Currently the values are only logged; nothing is stored or returned.
        grid = self.window.grid
        self.game.dropblock(False) # False indicates that the changes are temporary.
        # Actual checks
        lines = self.checklines(grid)
        holes = self.checkholes(grid)
        height = self.checkheight(grid)
        bumpiness = self.checkbumpiness(grid)
        # NOTE: Remember to undrop the block!
        self.game.undrop()
        if lines:
            debug.debug("AI detected completed lines: %s" % lines)
        if holes:
            debug.debug("AI detected new holes: %s" % holes)
        if bumpiness:
            debug.debug("AI detected bumpiness level: %s" % bumpiness)
        debug.debug("Height of grid: %s" % height)
    def checklines(self, grid):
        # Checks how many lines will be completed on the grid
        # (a row is complete when it holds one x entry per playable column).
        linerange = self.window.endx - (self.window.startx + 1)
        lines = 0
        for y in grid:
            if len(grid[y]) == linerange:
                lines += 1
        return lines
    def checkholes(self, grid):
        # We need to go through all the grid's positions.
        # If a given y,x position is occupied, we check whether (y+1, x) is also occupied.
        # If it is, that means we have a hole.
        holes = 0
        for y in grid:
            xes = grid[y]
            for xtuple in xes:
                # This HAS to be an occupied position, as our grid is composed of:
                # y: [(x, c), (x, c), (x, c), (x, c), ...] positions.
                # Therefore if we can loop on it, it exists in the grid, and this y,x position is taken.
                if y+1 in grid:
                    x = xtuple[0]
                    exes = self.window.extractxes(grid, y+1) # "Extracted xes"
                    if x not in exes:
                        # Hole detected.
                        holes += 1
        # We divide by two because each "x unit" of a block is actually made up of 2 x positions.
        # So the I block actually takes up 8 x spaces when it's vertical, not 4.
        return int(holes / 2)
    def formxlist(self, grid):
        # Forms the "x list" so we can calculate height and bumpiness
        # (one sub-list of y values per column, skipping every other x
        # position because each column is two x positions wide).
        xlist = []
        temp = []
        add = True
        for y in grid:
            if add:
                # Since each column is made up of two x positions and one y position, we don't need to add both.
                # We skip over one of them each time.
                xes = self.window.extractxes(grid, y)
                for x in xes:
                    temp.append(y)
                xlist.append(temp)
                temp = []
                add = False
            else:
                add = True
        return xlist
    def checkheight(self, grid):
        # We check the height of the grid.
        # In order to account for holes, we start the total sum with checkholes()
        # NOTE(review): `total` is computed but never added to the returned
        # sum, despite the comment above - looks like a bug to confirm.
        total = self.checkholes(grid)
        xlist = self.formxlist(grid)
        # Now we have a list.
        return sum(len(x) for x in xlist)
    def checkbumpiness(self, grid):
        # Here we compute the absolute value of between all two adjacent columns.
        # We check holes as well in order to account for them.
        # NOTE(review): enumerate() runs over the *sliced* list (xlist[::2])
        # while i+1 indexes the full xlist, and the difference is not passed
        # through abs() despite the comment - both look like bugs to confirm.
        xlist = self.formxlist(grid)
        total = self.checkholes(grid)
        for i, x in enumerate(xlist[::2]):
            try:
                length = len(x) - len(xlist[i + 1])
                total += length
            except IndexError:
                total += len(xlist[i])
        return total
| {
"repo_name": "SpiritualForest/tetris",
"path": "src/ai.py",
"copies": "1",
"size": "4695",
"license": "mit",
"hash": 1226782768736429800,
"line_mean": 36.8629032258,
"line_max": 128,
"alpha_frac": 0.5676251331,
"autogenerated": false,
"ratio": 4.009393680614859,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5077018813714859,
"avg_score": null,
"num_lines": null
} |
# AI has randomly generated stats with a total of 20 Skill points. each jitsu (it gets 4) is ranked from highest to lowest, with a class asigned to it.
# if statements determine what jitsu TYPE should be used, and the stats of jitsus are balanced from highest to lowest. AI randomly chooses 1
# each jitsu has a number attached to it, use 1, 2 and so on. Randomly selected in
# this section is turn 1, prints turn one stats, weapon used, and other things into log batch file or txt file for observation
# the ninjas take turns, each turn being logged, and the subsequent NOT global variables for their max health ect.
# each turn is logged into file and winner is saved into file, with all stats.
# while statement procedurely adds to number of ninjas with a while ... +1 to the number of ninjas. Each ninja has a higher number, 01, 02, 03 and so on.
# NOTE(review): the original block did not parse: it assigned to a call
# expression (`Ninja_(i) = AI_Ninjas(object)`), used the class before its
# definition, and had a stray `s` after `def __str__(self):`.  Rewritten
# minimally into valid Python; `Level` and `jitsus_for_` are still undefined
# in this file - TODO confirm where they are meant to come from.
import random


class AI_Ninjas(object):
    """An AI ninja with randomly generated base stats scaled by `Level`."""

    def __init__(self, number=2):
        # Stat ranges grow with the (module-level) Level value.
        self.BaseStrength = random.randint(1 + Level, 6 * Level)
        self.BaseAgility = random.randint(1 + Level, 6 * Level)
        self.BasePerception = random.randint(1 + Level, 6 * Level)
        self.BaseChakra = random.randint(15 * Level, 30 * Level)
        self.BaseHp = random.randint(15 * Level, 30 * Level)
        self.jitsus = jitsus_for_(number)

    def __str__(self):
        # The original printed here; __str__ must return a string.
        return self.name


if __name__ == '__main__':
    # Procedurally create numbered ninjas (ninja 2, 3, ...) as described in
    # the header comments.  NOTE(review): the original `while i > 1` loop
    # with `i += 1` never terminated; bounded here pending clarification.
    ninjas = {i: AI_Ninjas(i) for i in range(2, 4)}
| {
"repo_name": "TheMuffinMan5930/NarutoBattle",
"path": "Rudimentary AI.py",
"copies": "1",
"size": "1400",
"license": "mit",
"hash": -3459622945905392600,
"line_mean": 59.8695652174,
"line_max": 153,
"alpha_frac": 0.6857142857,
"autogenerated": false,
"ratio": 3.4739454094292803,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.465965969512928,
"avg_score": null,
"num_lines": null
} |
""" AIML Loader that can load zipped AIML2.0 XML files with an AIML1.0 parser in python 3
TODO:
fix doctests
>>> from nlpia.loaders import get_data
>> alice_path = get_data('alice')
>> bot = create_brain(alice_path) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
Loading ...
>> len(bot._brain._root.keys())
3445
>> bot._brain._root['HI']
{'EVERYBODY': {3: {1: {4: {1: {2: ['template', {}, ...
>> bot.respond("Hi how are you?")
'Hi there!. I am fine, thank you.'
>> bot.respond("hi how are you?")
"Hi there!. I'm doing fine thanks how are you?"
>> bot.respond("hi how are you?")
'Hi there!. I am doing very well. How are you ?'
>> bot.respond("hi how are you?")
'Hi there!. My logic and cognitive functions are normal.'
>> bot.respond("how are you?")
'My logic and cognitive functions are normal.'
>> bot.respond("how are you?")
'I am functioning within normal parameters.'
>> bot.respond("how are you?")
'My logic and cognitive functions are normal.'
>> bot.respond("how are you?")
'I am functioning within normal parameters.'
>> bot.respond("how are you?")
'I am doing very well. How are you ?'
"""
import os
import zipfile
from traceback import format_exc
from nlpia.constants import logging
log = logging.getLogger(__name__)
try:
    from aiml_bot import Bot
    from aiml_bot.aiml_parser import AimlParserError
except Exception:
    # Fallback stubs so this module still imports when aiml_bot is missing
    # or broken.  Narrowed from a bare `except:` so KeyboardInterrupt /
    # SystemExit are no longer swallowed during import.
    class Bot:
        pass

    class AimlParserError:
        pass

    log.error('Unable to import aiml_bot.aiml_parser and aiml_bot.Bot, so nlpia will not be able to parse AIML files.')
from nlpia.constants import logging
from nlpia.constants import BIGDATA_PATH
from nlpia.futil import find_data_path
logger = logging.getLogger(__name__)
def concatenate_aiml(path='aiml-en-us-foundation-alice.v1-9.zip', outfile='aiml-en-us-foundation-alice.v1-9.aiml'):
    """Strip trailing </aiml> tag and concatenate all valid AIML files found in the ZIP."""
    # NOTE(review): despite the docstring, this function currently only
    # *validates* each .aiml member of the zip (checks that some line is a
    # closing </aiml> tag) and prints offenders; `outfile` is never written.
    # TODO confirm intended behavior before relying on it.
    path = find_data_path(path) or path
    zf = zipfile.ZipFile(path)
    for name in zf.namelist():
        if not name.lower().endswith('.aiml'):
            continue
        with zf.open(name) as fin:
            # Sentinel value that can never equal an (index, line) tuple.
            happyending = '#!*@!!BAD'
            for i, line in enumerate(fin):
                try:
                    line = line.decode('utf-8').strip()
                except UnicodeDecodeError:
                    # Fall back to Latin-1 for non-UTF-8 legacy files.
                    line = line.decode('ISO-8859-1').strip()
                if line.lower().startswith('</aiml>') or line.lower().endswith('</aiml>'):
                    happyending = (i, line)
                    break
                else:
                    pass
            if happyending != (i, line):
                print('Invalid AIML format: {}\nLast line (line number {}) was: {}\nexpected "</aiml>"'.format(
                    name, i, line))
def extract_aiml(path='aiml-en-us-foundation-alice.v1-9'):
    """ Extract an aiml.zip file if it hasn't been already and return a list of aiml file paths """
    path = find_data_path(path) or path
    # Already-extracted directory: just list its contents with full paths.
    if os.path.isdir(path):
        return [os.path.join(path, entry) for entry in os.listdir(path)]
    # Otherwise treat it as a ZIP archive and extract everything except
    # Mercurial bookkeeping files into the big-data directory.
    archive = zipfile.ZipFile(path)
    return [archive.extract(member, path=BIGDATA_PATH)
            for member in archive.namelist() if '.hg/' not in member]
def create_brain(path='aiml-en-us-foundation-alice.v1-9.zip'):
    """ Create an aiml_bot.Bot brain from an AIML zip file or directory of AIML files """
    path = find_data_path(path) or path
    bot = Bot()
    # Remember how many templates the bot ships with so we can report
    # only the ones added by this call.
    baseline = bot._brain.template_count
    paths = extract_aiml(path=path)
    for aiml_path in paths:
        if not aiml_path.lower().endswith('.aiml'):
            continue
        try:
            bot.learn(aiml_path)
        except AimlParserError:
            # Log the traceback but keep going: one bad file should not
            # abort the whole brain build.
            log.error(format_exc())
            log.warning('AIML Parse Error: {}'.format(aiml_path))
    loaded = bot._brain.template_count - baseline
    log.info('Loaded {} trigger-response pairs.\n'.format(loaded))
    print('Loaded {} trigger-response pairs from {} AIML files.'.format(bot._brain.template_count, len(paths)))
    return bot
| {
"repo_name": "totalgood/nlpia",
"path": "src/nlpia/clean_alice.py",
"copies": "1",
"size": "4128",
"license": "mit",
"hash": 8243229993944606000,
"line_mean": 33.4,
"line_max": 119,
"alpha_frac": 0.617248062,
"autogenerated": false,
"ratio": 3.480607082630691,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9585700935814212,
"avg_score": 0.002430841763295963,
"num_lines": 120
} |
# Aim: Mostly for phenix users and those don't like using Miniconda
# 1. wget url_to_tar_file.tar
# 2. tar -xf url_to_tar_file.tar
# 3. source amber17/ambersh
# 4. Just it
""" Usage example: python pack_non_conda.py ambertools-17.0.1-py27_1.tar.bz2
Note: You can use file pattern
This script will unpack that bz2 file, then do some editing, then pack it to ./non-conda-install folder.
This should be done after doing conda-build
"""
import os
import subprocess
from glob import glob
import argparse
# local file, in the same folder as this script
from edit_package import editing_conda_package
import update_shebang
def main():
    """Parse command-line options and repackage the given conda tarball
    into a plain (non-conda) install tree via pack_non_conda_package()."""
    parser = argparse.ArgumentParser()
    # Fixed typo in user-visible help text ('targer' -> 'target').
    parser.add_argument('tarfile', nargs='?', help='target file')
    parser.add_argument(
        "--output-dir",
        type=str,
        default='./non-conda-install',
        dest="output_dir",
        help="output directory")
    parser.add_argument(
        "--date", action="store_true", help="Add date to output tarfile")
    parser.add_argument("-d", "--dry_run", action="store_true", help="dry run")
    opt = parser.parse_args()
    pack_non_conda_package(opt)
def pack_non_conda_package(opt):
    """Unpack opt.tarfile, rewrite the shebang lines of the scripts in
    ./bin/, and repack the result under opt.output_dir."""
    editor = editing_conda_package(
        opt.tarfile,
        output_dir=opt.output_dir,
        add_date=opt.date,
        dry_run=opt.dry_run)
    with editor:
        update_shebang.update_python_env('./bin/')
    # No need to copy here since we already did that in the conda build step?
# Script entry point: repackage the tarball named on the command line.
if __name__ == '__main__':
    main()
| {
"repo_name": "Amber-MD/ambertools-conda-build",
"path": "conda_tools/pack_non_conda.py",
"copies": "1",
"size": "1508",
"license": "mit",
"hash": -6768139214133132000,
"line_mean": 27.4528301887,
"line_max": 104,
"alpha_frac": 0.6558355438,
"autogenerated": false,
"ratio": 3.4350797266514808,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9590016797854894,
"avg_score": 0.00017969451931716085,
"num_lines": 53
} |
# Aim of this file is to create a few classes and methods to test
# my ideas (Adrian)
# Main documentation for this file is the Wiki:
# https://github.com/aernesto/dots-reversal-ideal-obs/wiki/Python-classes-and
# -methods
# Comment practice: In this script, we write comments before the targeted
# instruction
import numpy as np
# Overarching class
class Experiment(object):
    """Experiment-wide settings and the trial-generation loop.

    Holds the environment state set, stimulus noise, trial duration,
    hazard rate and trial count, and drives ExpTrial/Stimulus/ObsTrial
    construction in launch().
    """
    def __init__(self, setof_stim_noise, setof_trial_dur, setof_h, tot_trial,
                 outputs='perf_acc_last_cp', states=None,
                 exp_prior=None):
        # Use None sentinels instead of mutable np.array defaults: a shared
        # default array would be aliased across every Experiment instance.
        if states is None:
            states = np.array([-1, 1])
        if exp_prior is None:
            exp_prior = np.array([.5, .5])
        self.states = states
        self.setof_stim_noise = setof_stim_noise
        self.setof_trial_dur = setof_trial_dur  # for now an integer in msec.
        self.tot_trial = tot_trial
        self.outputs = outputs
        self.setof_h = setof_h
        self.results = []
        self.exp_prior = exp_prior  # TODO: check entries >=0 and sum to 1
        # Corresponds to 25 frames/sec (for stimulus presentation)
        self.exp_dt = 40

    # function that switches the environment state that is given as argument
    def switch(self, H):
        """Return the other element of self.states; ValueError if H is not a state."""
        try:
            # might be more elegant to use elseif syntax below
            if H in self.states:
                if H == self.states[0]:
                    return self.states[1]
                else:
                    return self.states[0]
            else:
                raise ValueError("Error in argument H: must be an element of "
                                 "Experiment.states")
        except AttributeError as err:
            print(err.args)

    def launch(self, observer):
        """Run tot_trial trials: sample an initial state from exp_prior,
        build the trial, stimulus and observer-trial, then infer."""
        for trial_idx in range(self.tot_trial):
            h = 1
            duration = self.setof_trial_dur
            stim_noise = self.setof_stim_noise
            trial_number = trial_idx
            if np.random.uniform() < self.exp_prior[0]:
                init_state = self.states[0]
            else:
                init_state = self.states[1]
            curr_exp_trial = ExpTrial(self, h, duration, stim_noise,
                                      trial_number, init_state)
            curr_stim = Stimulus(curr_exp_trial)
            curr_obs_trial = ObsTrial(observer, curr_exp_trial, curr_stim)
            curr_obs_trial.infer()
            # curr_exp_trial.save()
            # curr_obs_trial.save()
        self.save()

    def save(self):
        # Placeholder: persistence not implemented yet.
        print('temporary string')  # temporary

    def parallel_launch(self):
        # Placeholder: parallel execution not implemented yet.
        return 0  # temporary
# Corresponds to single trial constants
class ExpTrial(object):
    """Constants and generated change points for a single trial."""
    def __init__(self, expt, h, duration, stim_noise, trial_number,
                 init_state):
        self.expt = expt
        self.true_h = h
        self.duration = duration
        self.stim_noise = stim_noise
        self.trial_number = trial_number
        self.init_state = init_state
        self.cp_times = self.gen_cp(self.duration, self.true_h)
        self.end_state = self.compute_endstate(self.cp_times.size)
        self.tot_trial = self.expt.tot_trial

    def compute_endstate(self, ncp):
        """Final environment state given ncp change points.

        The last state equals the initial state iff the number of change
        points is even.
        """
        if ncp % 2 == 0:
            return self.init_state
        else:
            return self.expt.switch(self.init_state)

    # def save(self):
    #     print('stimulus is:')
    #     print(self.stim)

    def lh(self, H):
        """Sample a stimulus value from N(H, stim_noise) for state H.

        This is the likelihood used to generate stimulus values given the
        true environment state H.
        """
        # try clause might be redundant (because switch method does it)
        try:
            if H in self.expt.states:
                return np.random.normal(H, self.stim_noise)
            else:
                raise ValueError("Error in argument H: must be an element of "
                                 "Experiment.states")
        except AttributeError as err:
            print(err.args)

    def gen_cp(self, duration, true_h):
        """Generate a Poisson train of change points over `duration` msec
        with rate `true_h` Hz, using the Gillespie algorithm.

        Returns an (n, 1) float array of change-point times in seconds
        (possibly empty, shape (0, 1)).
        """
        # TODO: Generate a warning if >1 ch-pt occur in Experiment.exp_dt window
        # convert duration into seconds.
        secdur = duration / 1000.0
        # Accumulate event times in a growable list. The previous version
        # pre-allocated a fixed array sized at 10x the expected event count,
        # which could be overrun (IndexError) by an unlucky Poisson draw.
        event_times = []
        total_time = 0.0
        while total_time < secdur:
            total_time += np.random.exponential(1. / true_h)
            event_times.append(total_time)
        # The loop always records one event at or beyond secdur; drop it
        # when it lies strictly beyond the trial end (matches the original
        # trimming, which kept an event landing exactly on secdur).
        if event_times and event_times[-1] > secdur:
            event_times.pop()
        return np.array(event_times, dtype=float).reshape(-1, 1)
class Stimulus(object):
    """Per-trial stimulus sequence, generated bin-by-bin from the trial's
    change-point times and its likelihood function."""
    def __init__(self, exp_trial):
        self.exp_trial = exp_trial
        self.trial_number = self.exp_trial.trial_number
        # Generate the full stimulus vector up front.
        self.stim = self.gen_stim()
    def gen_stim(self):
        """Build the stimulus array, one sample per exp_dt-sized time bin.

        NOTE(review): several suspect spots flagged inline below — confirm
        intent before relying on this for longer trials (the bundled test
        code uses duration=5ms with binsize=40ms, so nbins==0 and the loop
        body never runs).
        """
        binsize = self.exp_trial.expt.exp_dt # in msec
        # number of bins, i.e. number of stimulus values to compute
        # NOTE(review): under Python 3 this is float division and
        # np.zeros((nbins, 1)) would raise; Python 2 floor-divides.
        nbins = (self.exp_trial.duration - 1) / binsize
        # stimulus vector to be filled by upcoming while loop
        stimulus = np.zeros((nbins, 1))
        # loop variables
        bin_nb = 1
        last_envt = self.exp_trial.init_state
        cp_idx = 0
        while bin_nb < nbins:
            stim_idx = bin_nb - 1 # index of array entry to fill in
            # check environment state in current bin
            curr_time = (bin_nb - 1) * binsize # in msec
            # NOTE(review): gen_cp returns cp_times in *seconds* while
            # curr_time is in msec, and cp_idx advances on every bin instead
            # of only when a change point is consumed — so this indexes past
            # the end of cp_times once bins outnumber change points. Confirm
            # and fix upstream before using with nbins > 0.
            if curr_time < self.exp_trial.cp_times[cp_idx]:
                new_envt = last_envt
            else:
                new_envt = self.exp_trial.expt.switch(last_envt)
            # compute likelihood to generate stimulus value
            stimulus[stim_idx] = self.exp_trial.lh(new_envt)
            # update variables for next iteration
            last_envt = new_envt
            cp_idx += 1
            bin_nb += 1
        return stimulus
# Level 2
class IdealObs(object):
    """Ideal observer: holds the observer's time step, priors, and a back
    reference to the Experiment whose stimulus noise it inherits."""
    def __init__(self, dt, expt, prior_states=np.array([.5, .5]),
                 prior_h=np.array([1, 1])):
        try:
            # The observer's step must tile the trial duration exactly;
            # otherwise report the problem (self.dt is then never set).
            if expt.setof_trial_dur % dt:
                raise AttributeError("Error in arguments: the observer's time"
                                     "step size 'dt' does not divide "
                                     "the trial durations 'setof_trial_dur'")
            self.dt = dt  # in msec
        except AttributeError as err:
            print(err.args)
        self.prior_h = prior_h
        self.prior_states = prior_states
        self.expt = expt  # reference to Experiment object
        self.obs_noise = self.expt.setof_stim_noise
self.obs_noise = self.expt.setof_stim_noise
class ObsTrial(object):
    """Observer-side record of one trial: observations, log-likelihood
    ratios and the resulting decision."""
    def __init__(self, observer, exp_trial, stimulus):
        self.observer = observer
        self.exp_trial = exp_trial
        self.stimulus = stimulus
        self.trial_number = exp_trial.trial_number
        self.obs_noise = exp_trial.stim_noise
        # Inference output, filled in by infer().
        self.llr = []
        self.decision = 0
        self.obs = self.gen_obs()

    def gen_obs(self):
        """The observer sees the stimulus values directly (no extra noise)."""
        return self.stimulus.stim

    def infer(self):
        """Placeholder inference: unit LLRs and a fixed decision of 1."""
        # TODO: import MATLAB code
        self.llr = np.ones(self.exp_trial.duration)
        self.decision = 1
    # def save(self):
    #     print('observations are:')
    #     print(self.observations)
# Test code
# Smoke test: one 5 ms trial with unit stimulus noise and hazard rate 1 Hz.
# (With duration=5 and exp_dt=40, the stimulus loop computes zero bins.)
Expt = Experiment(setof_stim_noise=1, setof_trial_dur=5, setof_h=1,
                  tot_trial=1)
Observer = IdealObs(dt=1, expt=Expt)
Expt.launch(Observer)
| {
"repo_name": "aernesto/dots-reversal-ideal-obs",
"path": "class_tests_Adrian.py",
"copies": "1",
"size": "8308",
"license": "mit",
"hash": 3725155238078859000,
"line_mean": 32.3654618474,
"line_max": 80,
"alpha_frac": 0.5607847857,
"autogenerated": false,
"ratio": 3.8022883295194507,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48630731152194506,
"avg_score": null,
"num_lines": null
} |
# aiMove.py
# various movement AI scripts for mobs
import random
import Globals
class Timer: # this was copied directly from World.py, and should be exactly the same at all times.
    """
    A timer is an object that, when spawned, counts down to zero, then runs
    its action function and removes itself from whichever global timer list
    (Globals.TIMERS or Globals.MoveTIMERS) currently holds it.
    """
    def __init__(self, TIMERS, time, actionFunction, actionArgs = [], attachedTo = None, respawns = False):
        self.TIMERS = TIMERS
        self.time = time
        self.actionFunction = actionFunction
        self.actionArgs = actionArgs
        self.attachedTo = attachedTo
        self.respawns = respawns
        # Countdown state: remaining time and number of ticks seen so far.
        self.currentTime = float(time)
        self.count = 0
        # Register with the list the caller supplied.
        self.TIMERS.append(self)

    def tick(self, deltaTime):
        """tick should be called once each game loop"""
        self.currentTime -= deltaTime
        self.count += 1
        if self.currentTime > 0:
            return
        # Expired: drop out of whichever global list holds this timer...
        if self in Globals.TIMERS:
            Globals.TIMERS.remove(self)
        elif self in Globals.MoveTIMERS:
            Globals.MoveTIMERS.remove(self)
        # ...then fire the action, with its arguments if any were given.
        if self.actionArgs != []:
            self.actionFunction(self.actionArgs)
        else:
            self.actionFunction()
#---------------------------------------------------------
class movementAI:
    """Movement AI strategies for mobs, driven by a respawning Timer.

    NOTE(review): introvertRandom / extrovertRandom / basicRandom share a
    large duplicated "pick a random exit and move" body with small, subtle
    differences (odds computation, membership check before remove, the
    extrovert-only else branch) — consolidate carefully, not mechanically.
    NOTE(review): the Timer below is created with actionFunction=None and
    actionArgs=None; if it ever expires before resetTimer() re-arms it,
    Timer.tick() would call None(None). Confirm the intended wiring.
    """
    def __init__(self, mob, time):
        self.mob = mob
        self.time = time
        self.Timer = Timer(Globals.MoveTIMERS, self.time, None, None, self, True)
    def selector(self, oddsList): # pick a random selection from an odds list and return it.
        # an odds list is a list containing any number of smaller lists, each with the format of [<choice>,<odds value>]
        totalOdds = 0
        for sel in oddsList:
            totalOdds += sel[1]
        oddSum = 0
        selection = random.randint(0, totalOdds)
        for sel in oddsList:
            oddSum += sel[1]
            if oddSum >= selection:
                break
        # print sel
        return sel
    def introvertRandom(self, args): # randomly chooses an exit, and takes it. Much greater chances of moving when mobs of the same type are in the room.
        odds = 1 # Essentially, these mobs flee other mobs of their own type. They want to find their own room.
        # NOTE(review): unlike extrovertRandom, this counts *all* mobs in the
        # room, not just ones with the same name.
        for mob in self.mob.currentRoom.mobs:
            odds +=1
        #check if mob should move
        oddsList = [[True, odds], [False, 2]] #basic 50/50 odds to move with one other mob present, with the odds increasing as the number of mobs in the room goes up
        winner = self.selector(oddsList)
        #if mob should move, select a random exit and move there
        # if self.mob.currentRoom != None:
        # 	print "	moveCheck - " + str(self.mob.name) +"["+ str(self.mob.currentRoom.region) + ":" + str(self.mob.currentRoom.name) +"]" + " " + str(winner)
        # else:
        # 	print "	moveCheck - " + str(self.mob.name) + " " + str(winner)
        # NOTE(review): winner[0] is a bool, so the == 'True' string
        # comparison is always False; only the == True arm ever matches.
        if winner[0] == 'True' or winner[0] == True:
            newRoom = None
            if self.mob.currentRoom != None:
                randRange = len(self.mob.currentRoom.exits) - 1
                selection = random.randint(0, randRange)
                exitList = []
                for exit in self.mob.currentRoom.exits:
                    exitList.append(exit)
                # print exitList
                selectedExit = exitList[selection]
                # A room "exiting" to itself means no move this round.
                if self.mob.currentRoom.name == selectedExit:
                    selectedExit = None
                for room in Globals.regionListDict[self.mob.currentRoom.region]:
                    if Globals.regionListDict[self.mob.currentRoom.region][room].name == selectedExit:
                        newRoom = Globals.regionListDict[self.mob.currentRoom.region][room]
                if newRoom != None:
                    # Announce departure, relocate the mob, announce arrival.
                    for player in self.mob.currentRoom.players:
                        player.client.send_cc("^y%s left.^~\n" %self.mob.name.capitalize())
                    oldMobRoom = self.mob.currentRoom
                    self.mob.currentRoom.mobs.remove(self.mob)
                    self.mob.currentRoom = newRoom
                    newRoom.mobs.append(self.mob)
                    print "Mm " + str(self.mob) + " " + str(self.mob.name) + ": from [" + str(oldMobRoom.region) +":"+ str(oldMobRoom.name) + "] to [" + str(self.mob.currentRoom.region) +":"+ str(self.mob.currentRoom.name) + "]"
                    for player in self.mob.currentRoom.players:
                        player.client.send_cc("^yA %s has entered.^~\n" %self.mob.name)
        self.resetTimer()
    def extrovertRandom(self, args): # randomly chooses an exit, and takes it. Much greater chances of moving when there are no other mobs of the same type in the room.
        odds = 0 # this AI tends to cause mobs to 'clump up' in a room, with them being less prone to leaving a room the more mobs of the same type that arrive.
        for mob in self.mob.currentRoom.mobs:
            if mob.name == self.mob.name:
                odds +=1
        #check if mob should move
        oddsList = [[True, 2], [False, odds]] #basic 50/50 odds to move when one other mob is present, with the odds decreasing as the number of mobs in the room of the same type goes up
        winner = self.selector(oddsList)
        #if mob should move, select a random exit and move there
        # if self.mob.currentRoom != None:
        # 	print "	moveCheck - " + str(self.mob.name) +"["+ str(self.mob.currentRoom.region) + ":" + str(self.mob.currentRoom.name) +"]" + " " + str(winner)
        # else:
        # 	print "	moveCheck - " + str(self.mob.name) + " " + str(winner)
        if winner[0] == 'True' or winner[0] == True:
            newRoom = None
            if self.mob.currentRoom != None:
                randRange = len(self.mob.currentRoom.exits) - 1
                selection = random.randint(0, randRange)
                exitList = []
                for exit in self.mob.currentRoom.exits:
                    exitList.append(exit)
                # print exitList
                selectedExit = exitList[selection]
                if self.mob.currentRoom.name == selectedExit:
                    selectedExit = None
                for room in Globals.regionListDict[self.mob.currentRoom.region]:
                    if Globals.regionListDict[self.mob.currentRoom.region][room].name == selectedExit:
                        newRoom = Globals.regionListDict[self.mob.currentRoom.region][room]
                if newRoom != None:
                    for player in self.mob.currentRoom.players:
                        player.client.send_cc("^y%s left.^~\n" %self.mob.name.capitalize())
                    oldMobRoom = self.mob.currentRoom
                    # Guarded remove (introvertRandom removes unconditionally).
                    if self.mob in self.mob.currentRoom.mobs:
                        self.mob.currentRoom.mobs.remove(self.mob)
                    self.mob.currentRoom = newRoom
                    newRoom.mobs.append(self.mob)
                    print "Mm " + str(self.mob) + " " + str(self.mob.name) + ": from [" + str(oldMobRoom.region) +":"+ str(oldMobRoom.name) + "] to [" + str(self.mob.currentRoom.region) +":"+ str(self.mob.currentRoom.name) + "]"
                    for player in self.mob.currentRoom.players:
                        player.client.send_cc("^yA %s has entered.^~\n" %self.mob.name)
                else:
                    # No destination found: drop the timer; resetTimer()
                    # below re-appends it, so this is effectively a re-arm.
                    if self.Timer in Globals.MoveTIMERS:
                        Globals.MoveTIMERS.remove(self.Timer)
        self.resetTimer()
    def doNotMove(self, args): # don't ever move from the room the mob spawned in
        self.resetTimer()
    def basicRandom(self, args): # randomly choose an exit, and take it. Unaffected by number of mobs in the room, always 50/50 chance of moving
        # NOTE(review): the comment says 50/50 but the odds list is 4:1.
        oddsList = [[True, 4], [False, 1]]
        #print self.mob.currentRoom
        winner = self.selector(oddsList)
        #if mob should move, select a random exit and move there
        # print self.mob
        # print Globals.mobsFromFile
        # print self.mob.currentRoom
        # if self.mob.currentRoom != None:
        # 	print "	moveCheck - " + str(self.mob.name) +"["+ str(self.mob.currentRoom.region) + ":" + str(self.mob.currentRoom.name) +"]" + " " + str(winner)
        # else:
        # 	print "	moveCheck - " + str(self.mob.name) + " " + str(winner)
        if winner[0] == 'True' or winner[0] == True:
            # print "winner"
            # print self.mob.currentRoom
            if self.mob.currentRoom != None:
                newRoom = None
                # print "has room"
                randRange = len(self.mob.currentRoom.exits) - 1
                selection = random.randint(0, randRange)
                exitList = []
                for exit in self.mob.currentRoom.exits:
                    exitList.append(exit)
                # print exitList
                selectedExit = exitList[selection]
                if self.mob.currentRoom.name == selectedExit:
                    selectedExit = None
                for room in Globals.regionListDict[self.mob.currentRoom.region]:
                    if Globals.regionListDict[self.mob.currentRoom.region][room].name == selectedExit:
                        newRoom = Globals.regionListDict[self.mob.currentRoom.region][room]
                if newRoom != None:
                    for player in self.mob.currentRoom.players:
                        player.client.send_cc("^y%s left.^~\n" %self.mob.name.capitalize())
                    oldMobRoom = self.mob.currentRoom
                    self.mob.currentRoom.mobs.remove(self.mob)
                    self.mob.currentRoom = newRoom
                    newRoom.mobs.append(self.mob)
                    # print oldMobRoom
                    # print self.mob.currentRoom
                    print "Mm " + str(self.mob) + " " + str(self.mob.name) + ": from [" + str(oldMobRoom.region) +":"+ str(oldMobRoom.name) + "] to [" + str(self.mob.currentRoom.region) +":"+ str(self.mob.currentRoom.name) + "]"
                    for player in self.mob.currentRoom.players:
                        player.client.send_cc("^yA %s has entered.^~\n" %self.mob.name)
        self.resetTimer()
    def resetTimer(self):
        # Re-arm the countdown and put the timer back on the global list.
        self.Timer.currentTime = self.time
        # print "resetTimer:" + str(self.Timer) + " " + str(self.Timer.currentTime) + " " + str(self.mob)
        # print Globals.mobsFromFile
        # found = False
        # for mob in Globals.mobsFromFile:
        # 	if mob == self.mob:
        # 		found = True
        # 		print "found " + str(self.mob) + " " + str(mob)
        # if found == False:
        Globals.MoveTIMERS.append(self.Timer)
        # print "self.mob appended"
# print Globals.MoveTIMERS | {
"repo_name": "buckets1337/MotherMUD",
"path": "aiMove.py",
"copies": "1",
"size": "9771",
"license": "apache-2.0",
"hash": -4014692986062936000,
"line_mean": 36.2977099237,
"line_max": 213,
"alpha_frac": 0.6536690206,
"autogenerated": false,
"ratio": 2.969006381039198,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41226754016391975,
"avg_score": null,
"num_lines": null
} |
#a Imports
from gjslib.math import vectors, matrix
#a c_set_of_lines
class c_set_of_lines(object):
    """A bundle of 3D lines plus a weighted estimate of their common point.

    Lines are (point, unit_direction) pairs. generate_meeting_points()
    finds the closest approach of every pair and averages the approach
    points, weighting pairs by how closely they meet.
    """
    def __init__(self):
        self.lines = []

    def add_line(self, pt, drn):
        """Append a line through pt with direction drn (normalized here)."""
        drn = list(drn)
        drn = vectors.vector_normalize(drn)
        self.lines.append( (pt,drn) )

    def generate_meeting_points(self, too_close=0.0001):
        """Compute pairwise closest-approach data and a weighted position.

        Fills self.line_meetings with (c0, c1, dist, goodness) tuples,
        self.weighted_points with (weight, c0, c1), and self.posn with the
        weighted average position (None when fewer than two lines).
        """
        self.line_meetings = []
        self.weighted_points = []
        self.posn = None
        # Visit each unordered pair exactly once: j < i (the previous
        # version looped over all j and filtered with `if i > j`).
        for i in range(len(self.lines)):
            (p0,d0) = self.lines[i]
            for j in range(i):
                (p1,d1) = self.lines[j]
                meet = vectors.closest_meeting_of_two_lines(p0,d0,p1,d1,too_close)
                self.line_meetings.append(meet)
        if len(self.line_meetings)==0:
            return
        posn = None
        total_weight = 0
        for (c0,c1,dist,goodness) in self.line_meetings:
            # Lower 'goodness' (closer approach) gets a larger weight.
            weight = 1/(5.0+goodness)
            if posn is None:
                posn = (0.0,0.0,0.0)
            # Accumulate the midpoint of each closest-approach segment.
            posn = vectors.vector_add(posn, c0, scale=0.5*weight)
            posn = vectors.vector_add(posn, c1, scale=0.5*weight)
            total_weight += weight
            #print c0,c1,weight,total_weight,posn
            self.weighted_points.append((weight,c0,c1))
        #print posn, total_weight
        if posn is not None:
            # Normalize by the total weight to get the final estimate.
            self.posn = vectors.vector_add((0.0,0.0,0.0), posn, scale=1/total_weight)
#a Top level
if __name__=="__main__":
c = c_set_of_lines()
c.add_line( (0.0,0.0,0.0), (0.0,0.0,1.0) )
c.add_line( (0.0,0.1,1.1), (1.0,0.0,1.0) )
c.generate_meeting_points()
print c.line_meetings
| {
"repo_name": "embisi-github/gjslib",
"path": "python/gjslib/math/line_sets.py",
"copies": "1",
"size": "1834",
"license": "apache-2.0",
"hash": -776752342993064100,
"line_mean": 31.75,
"line_max": 86,
"alpha_frac": 0.5070883315,
"autogenerated": false,
"ratio": 3.0164473684210527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40235356999210525,
"avg_score": null,
"num_lines": null
} |
#a Imports
from gjslib.math.line_sets import c_set_of_lines
from gjslib.math.quaternion import c_quaternion
import image_projection
#a c_point_mapping
class c_point_mapping(object):
    """Mapping between named 3D model points and their 2D pixel locations
    across a set of images.

    Supports loading/saving the mapping file (Images / Projections /
    Points / References / Mapping sections), per-image projections, and
    triangulating approximate 3D positions from the per-image rays.
    Python 2 code (uses `print >>f` syntax).
    """
    #f __init__
    def __init__(self, images_dir=""):
        # images_dir is prefixed onto every image filename read from a data file.
        self.reset()
        self.images_dir = images_dir
        # Hand-tuned guessed model-space locations for reference points.
        # The commented-out lines and error figures below record the manual
        # calibration history; later assignments deliberately overwrite
        # earlier ones for the same key.
        self.object_guess_locations = {}
        self.object_guess_locations["clk.center"] = ( 0.0, -0.32, 8.4)
        self.object_guess_locations["lspike.t"] = ( -3.3, 0.0, 10.9)
        self.object_guess_locations["rspike.t"] = ( 3.3, 0.0, 10.9)
        self.object_guess_locations["cp"] = ( 0.0, -2.0, 4.2)
        #self.object_guess_locations["cp"] = ( 0.0, -1.5, 3.8)
        self.object_guess_locations["calc.t.bl"] = ( 10.2, 18.993, 2.01)
        self.object_guess_locations["calc.b.fr"] = ( 24.0, 0.00, 0.01)
        self.object_guess_locations["clips.b.fr"] = ( 0.0, 0.05, 0.02)
        self.object_guess_locations["clips.t.fr"] = ( 0.02, 0.08, 7.60)
        self.object_guess_locations["clips.b.fr"] = ( 0.0, 0.05, 0.1)
        self.object_guess_locations["clips.t.fr"] = ( 0.02, 0.08, 8.00)
        # 8.10 has error 0.0011090
        # 8.01 has error 0.0001089
        # 8.00 has error 0.0001028
        # 7.95 has error 0.0001499
        # 7.90 has error 0.0002159
        # 7.80 has error 0.0003883
        # 7.70 has error 0.0006423
        # 7.60 has error 0.0009146
        self.object_guess_locations["clips.t.fr"] = ( -0.01, 0.08, 8.00)
        # x=0.02 has error 0.0001028
        # x=0.00 has error 0.00009813
        # x=-0.01 has error 0.00009819
        self.object_guess_locations["clips.t.fr"] = ( 0.00, 0.11, 8.00)
        # y=0.08 has error 0.00009813
        # y=0.09 has error 0.00009220
        # y=0.10 has error 0.00008804
        # y=0.11 has error 0.00008661
        # y=0.13 has error 0.00009180
        # window is 0.656 of a tile
        # top of window is 1.33 tiles above ground
        self.object_guess_locations["flsq1.0"] = ( 0.0, 0.0, 0.0)
        #self.object_guess_locations["flsq3.3"] = ( 0.0, -3.0, 0.0)
        self.object_guess_locations["flsq4.2"] = ( 3.0, -3.0, 0.0)
        self.object_guess_locations["flsq5.3"] = ( 0.0, -5.0, 0.0)
        #self.object_guess_locations["flsq2.0"] = ( 2.0, 0.0, 0.0)
        #self.object_guess_locations["drsq1.1"] = ( 0.15, 1.5, 1.33)
        #self.object_guess_locations["drsq1.1"] = ( 0.15, 1.5, 1.30)
        #self.object_guess_locations["drsq1.1"] = ( 0.15, 1.5, 1.31)
        #self.object_guess_locations["drsq1.1"] = ( 0.13, 1.5, 1.31)
        #self.object_guess_locations["drsq1.1"] = ( 0.13, 1.51, 1.31)
        self.object_guess_locations["drsq3.0"] = ( 0.13, 1.51, 3.50)
        self.object_guess_locations["drsq3.0"] = ( 0.13, 1.51, 3.45)
        self.object_guess_locations["drsq3.0"] = ( 0.13, 1.50, 3.515)
        self.object_guess_locations["drsq3.0"] = ( 0.13, 1.50, 3.48)
        pass
    #f reset
    def reset(self):
        """Clear all images, points, mappings and derived positions."""
        self.image_mappings = {}
        self.descriptions = {}
        self.reference_positions = {}
        self.positions = {}
        self.images = {}
        # NOTE(review): 'resoluion' is a typo, but the name is used
        # throughout this class — renaming would touch every reference.
        self.images_for_point_resoluion = []
        pass
    #f get_images
    def get_images(self):
        """Return the list of image names (py2 keys() returns a list)."""
        return self.images.keys()
    #f get_image_data
    def get_image_data(self, image_name):
        """Return the per-image dict (filename, projection, size, use_for_points)."""
        return self.images[image_name]
    #f load_data_add_image
    def load_data_add_image(self, data):
        """Handle one 'Images' data row: name, filename, width, height[, use_for_points]."""
        image_name = data[0]
        image_filename = self.images_dir+data[1]
        # +0.0 coerces the sizes to floats.
        image_size = (int(data[2])+0.0,int(data[3])+0.0)
        use_for_points = True
        if len(data)<5 or not int(data[4]):
            use_for_points = False
            pass
        self.add_image(image=data[0], filename=image_filename, size=image_size, use_for_points=use_for_points)
        pass
    #f load_data_add_point
    def load_data_add_point(self, data):
        """Handle one 'Points' data row: name, description."""
        self.add_named_point(data[0],data[1])
        pass
    #f load_data_add_reference
    def load_data_add_reference(self, data):
        """Handle one 'References' data row.

        NOTE(review): only the point name (data[0]) is used; any reference
        coordinates in the row are currently ignored.
        """
        self.add_named_point(data[0])
        pass
    #f load_data_add_mapping
    def load_data_add_mapping(self, data):
        """Handle one 'Mapping' data row: image, point, x, y (pixels)."""
        image = data[0]
        point = data[1]
        xy = (float(data[2]),float(data[3]))
        self.add_named_point(data[1])
        self.add_image_location(point, image, xy)
        pass
    #f load_data_set_projection
    def load_data_set_projection(self, data):
        """Handle one 'Projections' data row: image name then projection strings."""
        image = data[0]
        self.images[image]["projection"].load_projection_strings(data[1:])
        pass
    #f load_data
    def load_data(self, data_filename):
        """Load a mapping file.

        The file is comma-separated rows grouped into sections introduced by
        '--<Section>:' separator lines (Images, Points, References,
        Projections, Mapping). Blank lines and '#' comments are skipped.

        NOTE(review): the separator test reads l[1], so a stripped line of
        length 1 would raise IndexError; separators are detected by the
        *second* character being '-'.
        """
        data_load_callbacks = {}
        data_load_callbacks["Images"] = (self.load_data_add_image,4)
        data_load_callbacks["Points"] = (self.load_data_add_point,2)
        data_load_callbacks["References"] = (self.load_data_add_reference,4)
        data_load_callbacks["Projections"] = (self.load_data_set_projection,9)
        data_load_callbacks["Mapping"] = (self.load_data_add_mapping,4)
        f = open(data_filename,"r")
        if not f:
            raise Exception("Failed to read point mapping file '%s'"%data_filename)
        data_stage = "Images"
        for l in f:
            l = l.strip()
            if len(l)==0: continue
            if l[0]=='#': continue
            if l[1]=='-':
                # Section separator: '--Name:' -> stage 'Name'.
                if l[2:-1] in data_load_callbacks:
                    data_stage = l[2:-1]
                    pass
                else:
                    raise Exception("Bad separator '%s' in mapping file"%l)
                continue
            data = l.split(',')
            for i in range(len(data)):
                data[i] = data[i].strip()
                pass
            (cb,min_args) = data_load_callbacks[data_stage]
            if len(data)<min_args:
                raise Exception("Needed more arguments (at least %d) in line '%s' of mapping file for '%s'"%(min_args,l,data_stage))
            cb(data)
            pass
        f.close()
        return
    #f save_data
    def save_data(self, data_filename):
        """Write the mapping back out in the same sectioned format load_data reads."""
        f = open(data_filename,"w")
        point_names = self.image_mappings.keys()
        point_names.sort()
        image_names = self.images.keys()
        image_names.sort()
        print >>f, "--Images:"
        for name in image_names:
            image = self.images[name]
            print >>f,"%s,%s,%d,%d,%d"%(name,image["filename"],image["size"][0],image["size"][1],image["use_for_points"])
            pass
        print >>f, "\n"
        print >>f, "--Projections:"
        for name in image_names:
            # Only save projections that exist and serialize to something.
            if "projection" not in self.images[name]:
                continue
            proj = self.images[name]["projection"]
            if proj is None:
                continue
            text = proj.save_projection_string()
            if text is None:
                continue
            print >>f, "%s,%s"%(name,text)
            pass
        print >>f, "\n"
        print >>f, "--Points:"
        for name in point_names:
            desc = ""
            if name in self.descriptions:
                desc = self.descriptions[name]
            print >>f, "%s,%s"%(name,desc)
            pass
        print >>f, "\n"
        print >>f, "--References:"
        print >>f, "\n"
        print >>f, "--Mapping:"
        for name in point_names:
            for image in image_names:
                if image in self.image_mappings[name]:
                    xy = self.image_mappings[name][image]
                    print >>f, "%s,%s,%f,%f"%(image,name,xy[0],xy[1])
                    pass
                pass
            pass
        print >>f, "\n"
        f.close()
        pass
    #f add_named_point
    def add_named_point(self,name,description=None):
        """Register a point name (idempotent); optionally set its description."""
        if name not in self.image_mappings:
            self.image_mappings[name] = {}
            pass
        if description is not None:
            self.descriptions[name]=description
            pass
        pass
    #f add_image
    def add_image(self,image, filename=None, size=(1.0,1.0), use_for_points=True):
        """Register an image (idempotent) and build its projection object."""
        if image not in self.images:
            projection = image_projection.c_image_projection(name=image,
                                                             image_filename=filename,
                                                             size=size)
            self.images[image] = {}
            self.images[image]["filename"] = filename
            self.images[image]["projection"] = projection
            self.images[image]["size"] = size
            self.images[image]["use_for_points"] = use_for_points
            if use_for_points:
                self.images_for_point_resoluion.append(image)
                pass
            pass
        pass
    #f add_image_location
    def add_image_location(self,name,image,xy,uniform=False,verbose=False):
        """Record pixel xy for point `name` in `image`; returns an undo op.

        With uniform=True, xy is given in [-1,1] uniform coordinates
        (y up) and is converted to pixels here.
        """
        if uniform:
            if uniform: xy = ((xy[0]+1.0)/2.0, (1.0-xy[1])/2.0)
            size = self.images[image]["size"]
            xy = (xy[0]*size[0],xy[1]*size[1])
            pass
        if verbose:
            v = "Setting point %s in image %s to %s"
            if image in self.image_mappings[name]:
                v = "Moving point %s in image %s to %s"
                pass
            print v%(name,image,str(xy))
            pass
        # Undo op: (name, image, previous_xy_or_None).
        undo_op = (name,image,None)
        if image in self.image_mappings[name]:
            old_xy = self.image_mappings[name][image]
            undo_op = (name,image,(old_xy[0],old_xy[1]))
            pass
        self.image_mappings[name][image] = xy
        return undo_op
    #f delete_image_location
    def delete_image_location(self,name,image,verbose=False):
        """Remove point `name` from `image`; returns an undo op (or None)."""
        if image not in self.image_mappings[name]:
            if verbose:
                print "Requested deleting point %s that is not in image %s"%(name,image)
                pass
            return None
        if verbose:
            print "Deleting point %s in image %s"%(name,image)
            pass
        undo_op = (name,image,self.image_mappings[name][image])
        del(self.image_mappings[name][image])
        return undo_op
    #f undo_add_image_location
    def undo_add_image_location(self, undo_op, verbose=False):
        """Reverse an add_image_location() using its returned undo op."""
        (name, image, xy) = undo_op
        if xy is None:
            self.delete_image_location(name,image,verbose=verbose)
            pass
        else:
            self.add_image_location(name,image,xy,verbose=verbose)
            pass
        pass
    #f undo_delete_image_location
    def undo_delete_image_location(self, undo_op, verbose=False):
        """Reverse a delete_image_location() using its returned undo op."""
        if undo_op is None:
            return
        (name, image, xy) = undo_op
        self.add_image_location(name,image,xy,verbose=verbose)
        pass
    #f set_projection
    def set_projection(self, image, projection):
        self.images[image]["projection"].set_projection(projection)
        pass
    #f get_projection
    def get_projection(self, image):
        return self.images[image]["projection"].get_projection()
    #f use_for_points
    def use_for_points(self, image, value=None, toggle=None):
        """Set or toggle whether `image` participates in point resolution.

        NOTE(review): the `value is 0` / `value is 1` identity tests rely on
        CPython's small-int caching; `== 0` / `== 1` would be safer.
        """
        if toggle is not None:
            value = 1 ^ self.images[image]["use_for_points"]
            pass
        if value is not None:
            self.images[image]["use_for_points"] = value
            if value is 0 and image in self.images_for_point_resoluion:
                self.images_for_point_resoluion.remove(image)
                pass
            if value is 1 and image not in self.images_for_point_resoluion:
                self.images_for_point_resoluion.append(image)
                pass
            pass
        pass
    #f get_mapping_names
    def get_mapping_names(self):
        return self.image_mappings.keys()
    #f get_xy
    def get_xy(self, name, image ):
        """Uniform-coordinate location of point `name` in `image`, or None."""
        if name not in self.image_mappings:
            return None
        if image not in self.image_mappings[name]:
            return None
        return self.uniform_mapping(name,image)
    #f uniform_mapping
    def uniform_mapping(self, name, image):
        """Convert the stored pixel xy to [-1,1] uniform coords (y flipped)."""
        image_map = self.image_mappings[name]
        if image not in image_map:
            return None
        xy = image_map[image]
        scaled_xy = (-1.0+2.0*xy[0]/(self.images[image]["size"][0]+0.0), 1.0-2.0*xy[1]/(self.images[image]["size"][1]+0.0))
        return scaled_xy
    #f find_line_sets
    def find_line_sets(self):
        """Build, for each point, the set of 3D rays cast from every usable
        image's projection, and compute pairwise meeting points."""
        line_sets = {}
        for n in self.image_mappings: # for each mapping of a point
            line_sets[n] = c_set_of_lines()
            for img_name in self.images_for_point_resoluion:
                p = self.images[img_name]["projection"]
                if p is None:
                    continue
                xy = self.uniform_mapping(n,img_name)
                if xy is None:
                    continue
                line = p.model_line_for_image(xy)
                if line is None:
                    continue
                line_sets[n].add_line(line[0],line[1])
                pass
            line_sets[n].generate_meeting_points()
            #print n, line_sets[n].line_meetings
            pass
        self.line_sets = line_sets
        pass
    #f approximate_positions
    def approximate_positions(self):
        """Copy each line set's triangulated position into self.positions."""
        for n in self.line_sets:
            self.positions[n] = self.line_sets[n].posn
            pass
        pass
    #f get_approx_position
    def get_approx_position(self, name ):
        if name not in self.positions:
            return None
        return self.positions[name]
    #f get_xyz
    def get_xyz(self, name, use_references=True ):
        """Best-known 3D position: hand-tuned guess first (if allowed),
        otherwise the triangulated approximation (may be None)."""
        if use_references:
            if name in self.object_guess_locations:
                return self.object_guess_locations[name]
            pass
        return self.get_approx_position(name)
    #f initial_orientation
    def initial_orientation(self, image=None, **kwargs):
        """Guess an initial projection orientation for one or all images."""
        for image_name in self.images.keys():
            if (image is not None) and image!=image_name:
                continue
            proj = self.images[image_name]["projection"]
            projection = proj.guess_initial_orientation(point_mappings=self, **kwargs)
            print "self.images['%s']['projection'] = %s"%(image_name,str(projection))
            pass
    #f optimize_projections
    def optimize_projections(self, image=None, **kwargs):
        """Optimize the projection of one or all images against the mapping."""
        for image_name in self.images.keys():
            if (image is not None) and image!=image_name:
                continue
            proj = self.images[image_name]["projection"]
            projection = proj.optimize_projection(point_mappings=self, **kwargs)
            print "self.images['%s']['projection'] = %s"%(image_name,str(projection))
            pass
        pass
    #f Done
    pass
| {
"repo_name": "embisi-github/gjslib",
"path": "3d/image_point_mapping.py",
"copies": "1",
"size": "14734",
"license": "apache-2.0",
"hash": 7553273644891667000,
"line_mean": 38.2906666667,
"line_max": 132,
"alpha_frac": 0.5376001086,
"autogenerated": false,
"ratio": 3.4733616218764736,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4510961730476473,
"avg_score": null,
"num_lines": null
} |
#a Imports
from overlap import c_overlap_2d
#a Structure element class - a piece of 'background' that has an image and is slightly dynamic
#c c_structure_element
class c_structure_element( object ):
    """
    An element of a structure
    This has a position and a list of images that can be used to represent the element on the screen
    Most structure elements use just one image
    """
    # Subclasses wanting per-frame updates set has_tick True and implement tick_start/tick
    has_tick = False
    # Set True to draw each element's bounding box when blitting (debug aid)
    debug_bboxes = False
    #f __init__
    def __init__( self, structure, position, images, size, **kwargs ):
        """
        structure: owning c_structure, notified via move_element when this element moves
        position:  (x,y) world coordinate of the element's min corner
        images:    list of images; image_number selects which one blit uses
        size:      (width,height) used to derive the bounding box
        """
        self.structure = structure
        self.position = (position[0], position[1])
        self.images = images
        self.image_number = 0
        self.size = size
        # bbox is (x0,y0,x1,y1); max edges exclusive, matching the overlap manager's convention
        self.bbox = (self.position[0], self.position[1], self.position[0]+size[0], self.position[1]+size[1] )
        pass
    #f set_image
    def set_image( self, image_number ):
        """
        Select which of the element's images is drawn.
        Raises without changing state if image_number is out of range.
        """
        # Validate before assigning - previously the invalid number was stored
        # first, leaving the element in a broken state when the exception fired
        if image_number>=len(self.images):
            raise Exception("Attempt to set element image number to be %d which is greater than the image list length %d for the element"%(image_number,len(self.images)))
        self.image_number = image_number
    #f tick_start
    def tick_start( self, **kwargs ):
        """Must be overridden by subclasses that set has_tick True."""
        raise Exception("Element needs a tick_start method if has_tick is True")
    #f tick
    def tick( self, **kwargs ):
        """Must be overridden by subclasses that set has_tick True."""
        raise Exception("Element needs a tick method if has_tick is True")
    #f move
    def move( self, by=None ):
        """
        Move the element by the (dx,dy) vector `by`, updating its bounding box
        and informing the owning structure so the overlap data stays in sync.
        A call with by=None is a no-op.
        """
        old_bbox = self.bbox
        if by is not None:
            self.position = (self.position[0]+by[0],
                             self.position[1]+by[1])
            self.bbox = (self.position[0], self.position[1], self.position[0]+self.size[0], self.position[1]+self.size[1] )
            self.structure.move_element( element=self, old_bbox=old_bbox )
            return
        pass
    #f blit
    def blit( self, surface, view=None ):
        """Draw the element's current image at its position (plus its bbox when debugging)."""
        self.images[self.image_number].blit( surface, self.position )
        if self.debug_bboxes:
            surface.rect( self.bbox )
            pass
        pass
    pass
#c c_structure
class c_structure( object ):
    """
    This class contains the 'structure' of a level of a platform game
    The structure is the set of elements that changes slowly or never; it can include walls, vanishing floors, and moving platforms
    It should not include players and enemies
    The structure is expected to reuse images for its elements; so it maintains a set of images, referred to by name
    """
    #f __init__
    def __init__( self, **kwargs ):
        """Create an empty structure: no elements, no images, a fresh overlap manager."""
        self.elements = {}
        self.images = {}
        self.overlap = c_overlap_2d( bucket_size=32 )
        self.elements_to_tick = []
        pass
    #f add_image
    def add_image( self, image_name, image ):
        """
        Add an image to the set available to the structure elements
        """
        self.images[ image_name ] = image
        pass
    #f add_element
    def add_element( self, element_tag, image_names=(), element_class=c_structure_element, **kwargs ):
        """
        Add an element to the structure

        image_names are looked up in self.images and passed (with kwargs) to
        element_class.  The default is an immutable tuple rather than the
        previous mutable [] (a Python anti-pattern); callers passing lists
        are unaffected.
        """
        images = [self.images[i] for i in image_names]
        element = element_class( structure = self,
                                 images = images,
                                 **kwargs )
        self.elements[ element_tag ] = element
        self.overlap.add_element( element.bbox, element )
        if element.has_tick:
            self.elements_to_tick.append(element)
            pass
        pass
    #f move_element
    def move_element( self, element, old_bbox ):
        """Re-register a moved element with the overlap manager (old bbox out, new bbox in)."""
        self.overlap.remove_element( old_bbox, element )
        self.overlap.add_element( element.bbox, element )
        pass
    #f find_elements_overlapping
    def find_elements_overlapping( self, bbox ):
        """Return the list of structure elements whose bounding boxes overlap `bbox`."""
        return self.overlap.find_overlap( bbox )
    #f tick_start
    def tick_start( self, **kwargs ):
        """Forward tick_start to every element that registered for ticking."""
        for e in self.elements_to_tick:
            e.tick_start( **kwargs )
            pass
        pass
    #f tick
    def tick( self, **kwargs ):
        """Forward tick to every element that registered for ticking."""
        for e in self.elements_to_tick:
            e.tick( **kwargs )
            pass
        pass
    #f blit
    def blit( self, surface ):
        """Draw every element onto `surface` (order unspecified)."""
        for element in self.elements.values():
            element.blit( surface )
            pass
        pass
    pass
| {
"repo_name": "embisi-github/gjslib",
"path": "python/gjslib/math/structure.py",
"copies": "1",
"size": "4317",
"license": "apache-2.0",
"hash": 3071361528450831000,
"line_mean": 34.975,
"line_max": 170,
"alpha_frac": 0.5916145471,
"autogenerated": false,
"ratio": 3.978801843317972,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5070416390417972,
"avg_score": null,
"num_lines": null
} |
#a Imports
import gjslib.graphics.bone as bones
from gjslib.math.bezier import c_point
#a Test stuff
def build_ship():
    """
    Build and return the demo spaceship as a bones.c_bone_group.

    The ship is a hierarchy of bezier/extension bones ("boneN"), a drawable
    skin, two landing legs, and decoration planes (rings, window, door).
    skin_angles/bone_angles parameterize the shape; a random jitter hook is
    present but currently disabled (the random term is multiplied by 0).
    """
    import random
    skin_angles = { "jet":10.0, "bottom":70.0, "mid_b":65.0, "mid_m":85.0, "mid_t":115.0, "top":20.0 }
    bone_angles = { "base":60.0, "mid_b":80.0, "mid_m":80.0, "mid_t":80.0, "fin_t":80.0, "fin_b":40.0 }
    for b in bone_angles:
        bone_angles[b] += random.random()*5*0
    for s in skin_angles:
        # BUG FIX: this previously indexed with `b` (the leftover key from the
        # bone_angles loop), so skin jitter was never applied to `s` and could
        # raise KeyError for bone-only keys
        skin_angles[s] += random.random()*5*0
    ship = bones.c_bone_group("ship")
    ship.add_child( bones.c_bone_var("anchor_point", "vector") )
    ship.add_child( bones.c_bone_var("direction", "vector") )
    ship.add_child( bones.c_bone_var("size", "scalar") )
    ship.set_node( "anchor_point", c_point((0.0,0.0)) )
    ship.set_node( "direction", c_point((0.0,1.0)) )
    ship.set_node( "size", 5.0 )
    # Spine and radial bones
    bone_1 = bones.add_bezier_bone( ship, "bone1", num_pts=3, script = [ ("mult", ("get","size"), ("mult", 0.5, ("get","direction"))),
                                                                         ("dup"),
                                                                         ("set", "bone1.base_0", ("add", ("get","anchor_point"))), ("pop"),
                                                                         ("neg"),
                                                                         ("set", "bone1.base_2", ("add", ("get","anchor_point"))), ("pop"),
                                                                         ("set", "bone1.base_1", ("mult", 0.5, ("add", ("get", "bone1.base_0"), ("get", "bone1.base_2")))),
                                                                         ] )
    bone2 = bones.add_extend_bone( ship, "bone2", "bone1", scale=-0.95, rotation= 0.0, src=0.0 )
    bone3 = bones.add_extend_bone( ship, "bone3", "bone2", scale=0.5, rotation= 0.0, src=1.0 )
    bone4 = bones.add_extend_bone( ship, "bone4", "bone1", scale=0.5, rotation= 0.0, src=1.0 )
    bone5 = bones.add_extend_bone( ship, "bone5", "bone2", scale= 0.45, rotation= bone_angles["mid_t"], src=1.0 )
    bone6 = bones.add_extend_bone( ship, "bone6", "bone2", scale= 0.45, rotation=-bone_angles["mid_t"], src=1.0 )
    bone7 = bones.add_extend_bone( ship, "bone7", "bone1", scale=-0.8, rotation= bone_angles["mid_m"], src=0.0 )
    bone8 = bones.add_extend_bone( ship, "bone8", "bone1", scale=-0.8, rotation=-bone_angles["mid_m"], src=0.0 )
    bone9 = bones.add_extend_bone( ship, "bone9", "bone1", scale= 0.7, rotation=-bone_angles["mid_b"], src=1.0 )
    bone10 =bones.add_extend_bone( ship, "bone10","bone1", scale= 0.7, rotation= bone_angles["mid_b"], src=1.0 )
    bone11= bones.add_extend_bone( ship, "bone11","bone4", scale= 0.5, rotation=-bone_angles["base"], src=1.0 )
    bone12= bones.add_extend_bone( ship, "bone12","bone4", scale= 0.5, rotation= bone_angles["base"], src=1.0 )
    bone13= bones.add_bezier_bone( ship, "bone13", num_pts=4, script= bones.bezier4_script( ( ("bone9",1.0),
                                                                                             ("bone9",1.0,0.5,180-skin_angles["mid_b"]),
                                                                                             ("bone11",1.0,0.5, -skin_angles["bottom"]),
                                                                                             ("bone11",1.0) ) ) )
    bone14= bones.add_extend_bone( ship, "bone14","bone13", scale=-0.6, rotation=bone_angles["fin_t"], src=0.2 )
    bone15= bones.add_extend_bone( ship, "bone15","bone14", scale=-0.8, rotation=bone_angles["fin_b"]-180.0, src=1.0 )
    bone16= bones.add_bezier_bone( ship, "bone16", num_pts=4, script= bones.bezier4_script( ( ("bone10",1.0),
                                                                                             ("bone10",1.0,0.5, -180+skin_angles["mid_b"]),
                                                                                             ("bone12",1.0,0.5, skin_angles["bottom"]),
                                                                                             ("bone12",1.0) ) ) )
    bone17= bones.add_extend_bone( ship, "bone17","bone16", scale=-0.6, rotation=-bone_angles["fin_t"], src=0.2 )
    bone18= bones.add_extend_bone( ship, "bone18","bone17", scale=-0.8, rotation=180.0-bone_angles["fin_b"], src=1.0 )
    # Main hull skin
    ship_plane = ship.add_child(bones.c_bone_plane("ship_plane"))
    ship_skin = ship_plane.add_child(bones.c_bone_drawable("ship_skin",fill=(255,150,120), stroke=(0,0,0) ))
    skin1 = bones.add_bezier_bone( ship_skin, "skin1", num_pts=4, scope=ship, script= bones.bezier4_script( ( ("bone12",1.0),
                                                                                                             ("bone12",1.0,0.4, 180+skin_angles["jet"]),
                                                                                                             ("bone11",1.0,0.4, 180-skin_angles["jet"]),
                                                                                                             ("bone11",1.0) ) ) )
    skin2 = bones.add_bezier_bone( ship_skin, "skin2", num_pts=4, scope=ship, script= bones.bezier4_script( ( ("bone9",1.0),
                                                                                                             ("bone9",1.0,0.5, 180-skin_angles["mid_b"]),
                                                                                                             ("bone11",1.0,0.5, -skin_angles["bottom"]),
                                                                                                             ("bone11",1.0) ) ) )
    skin3 = bones.add_bezier_bone( ship_skin, "skin3", num_pts=4, scope=ship, script= bones.bezier4_script( ( ("bone10",1.0),
                                                                                                             ("bone10",1.0,0.5, -180+skin_angles["mid_b"]),
                                                                                                             ("bone12",1.0,0.5, skin_angles["bottom"]),
                                                                                                             ("bone12",1.0) ) ) )
    skin4 = bones.add_bezier_bone( ship_skin, "skin4", num_pts=4, scope=ship, script= bones.bezier4_script( ( ("bone9",1.0),
                                                                                                             ("bone9",1.0,-0.5, 180-skin_angles["mid_b"]),
                                                                                                             ("bone7",1.0,0.5, skin_angles["mid_m"]),
                                                                                                             ("bone7",1.0) ) ) )
    skin5 = bones.add_bezier_bone( ship_skin, "skin5", num_pts=4, scope=ship, script= bones.bezier4_script( ( ("bone10",1.0),
                                                                                                             ("bone10",1.0,0.5, skin_angles["mid_b"]),
                                                                                                             ("bone8",1.0,-0.5, 180-skin_angles["mid_m"]),
                                                                                                             ("bone8",1.0) ) ) )
    skin6 = bones.add_bezier_bone( ship_skin, "skin6", num_pts=4, scope=ship, script= bones.bezier4_script( ( ("bone7",1.0),
                                                                                                             ("bone7",1.0,-0.5, skin_angles["mid_m"]),
                                                                                                             ("bone5",1.0,0.5, 180-skin_angles["mid_t"]),
                                                                                                             ("bone5",1.0) ) ) )
    skin7 = bones.add_bezier_bone( ship_skin, "skin7", num_pts=4, scope=ship, script= bones.bezier4_script( ( ("bone8",1.0),
                                                                                                             ("bone8",1.0, 0.5, 180-skin_angles["mid_m"]),
                                                                                                             ("bone6",1.0,-0.5, skin_angles["mid_t"]),
                                                                                                             ("bone6",1.0) ) ) )
    skin8 = bones.add_bezier_bone( ship_skin, "skin8", num_pts=4, scope=ship, script= bones.bezier4_script( ( ("bone5",1.0),
                                                                                                             ("bone5",1.0,-0.5, 180-skin_angles["mid_t"]),
                                                                                                             ("bone3",1.0,0.3, 90+skin_angles["top"]),
                                                                                                             ("bone3",1.0) ) ) )
    skin9 = bones.add_bezier_bone( ship_skin, "skin9", num_pts=4, scope=ship, script= bones.bezier4_script( ( ("bone6",1.0),
                                                                                                             ("bone6",1.0,0.5, skin_angles["mid_t"]),
                                                                                                             ("bone3",1.0,0.3, 270-skin_angles["top"]),
                                                                                                             ("bone3",1.0) ) ) )
    # Skin outline references - ordered so the path runs around the hull
    ship_skin.add_child(bones.c_bone_ref("sk1", skin1, opts={"start":0.0,"end":1.0}))
    ship_skin.add_child(bones.c_bone_ref("sk2", skin2, opts={"start":1.0,"end":0.0}))
    ship_skin.add_child(bones.c_bone_ref("sk4", skin4, opts={"start":0.0,"end":1.0}))
    ship_skin.add_child(bones.c_bone_ref("sk6", skin6, opts={"start":0.0,"end":1.0}))
    ship_skin.add_child(bones.c_bone_ref("sk8", skin8, opts={"start":0.0,"end":1.0}))
    ship_skin.add_child(bones.c_bone_ref("sk9", skin9, opts={"start":1.0,"end":0.0}))
    ship_skin.add_child(bones.c_bone_ref("sk7", skin7, opts={"start":1.0,"end":0.0}))
    ship_skin.add_child(bones.c_bone_ref("sk5", skin5, opts={"start":1.0,"end":0.0}))
    ship_skin.add_child(bones.c_bone_ref("sk3", skin3, opts={"start":0.0,"end":1.0}))
    # Landing legs (drawn behind the hull, depth=10)
    leg1_plane = ship.add_child(bones.c_bone_plane("leg1_plane",depth=10))
    leg1_skin = leg1_plane.add_child(bones.c_bone_drawable("leg1_skin",fill=(255,200,200), stroke=(0,0,0) ))
    leg1skin1 = bones.add_bezier_bone( leg1_skin, "leg1skin1", num_pts=4, scope=(ship_skin,ship), script= bones.bezier4_script( ( ("skin2",0.75),
                                                                                                                                  ("bone14",0.50,0.4, 90.0),
                                                                                                                                  ("bone15",0.30,0.4, 90.0),
                                                                                                                                  ("bone15",1.0) ) ) )
    leg1skin2 = bones.add_bezier_bone( leg1_skin, "leg1skin2", num_pts=4, scope=(ship_skin,ship), script= bones.bezier4_script( ( ("skin4",0.45),
                                                                                                                                  ("bone14",0.50,-0.4, 90.0),
                                                                                                                                  ("bone15",0.30,-0.4, 90.0),
                                                                                                                                  ("bone15",1.0) ) ) )
    leg1_skin.add_child(bones.c_bone_ref("ls1", leg1skin1, opts={"start":1.0,"end":0.0}))
    leg1_skin.add_child(bones.c_bone_ref("ls2", leg1skin2, opts={"start":0.0,"end":1.0}))
    leg2_plane = ship.add_child(bones.c_bone_plane("leg2_plane", depth=10))
    leg2_skin = leg2_plane.add_child(bones.c_bone_drawable("leg2_skin",fill=(255,200,200), stroke=(0,0,0) ))
    leg2skin1 = bones.add_bezier_bone( leg2_skin, "leg2skin1", num_pts=4, scope=(ship_skin,ship), script= bones.bezier4_script( ( ("skin3",0.75),
                                                                                                                                  ("bone17",0.50,-0.4, 90.0),
                                                                                                                                  ("bone18",0.30,-0.4, 90.0),
                                                                                                                                  ("bone18",1.0) ) ) )
    leg2skin2 = bones.add_bezier_bone( leg2_skin, "leg2skin2", num_pts=4, scope=(ship_skin,ship), script= bones.bezier4_script( ( ("skin5",0.45),
                                                                                                                                  ("bone17",0.50,0.4, 90.0),
                                                                                                                                  ("bone18",0.30,0.4, 90.0),
                                                                                                                                  ("bone18",1.0) ) ) )
    leg2_skin.add_child(bones.c_bone_ref("ls1", leg2skin1, opts={"start":1.0,"end":0.0}))
    leg2_skin.add_child(bones.c_bone_ref("ls2", leg2skin2, opts={"start":0.0,"end":1.0}))
    # Decoration curves: rings, window, door
    decring1 = bones.add_bezier_bone( ship_skin, "decring1", num_pts=4, scope=(ship_skin,ship), script= bones.bezier4_script( ( ("skin6",0.8),
                                                                                                                               ("bone2",0.9),
                                                                                                                               ("bone2",0.9),
                                                                                                                               ("skin7",0.8) ) ) )
    decring2 = bones.add_bezier_bone( ship_skin, "decring2", num_pts=4, scope=(ship_skin,ship), script= bones.bezier4_script( ( ("skin6",0.0),
                                                                                                                               ("bone2",0.05),
                                                                                                                               ("bone2",0.05),
                                                                                                                               ("skin7",0.0) ) ) )
    decwind0 = bones.add_bezier_bone( ship_skin, "decwind0", num_pts=4, scope=(ship_skin,ship), script= bones.bezier4_script( ( ("decring2",0.2,0.08,90.0),
                                                                                                                               ("decring2",0.3,0.08,90.0),
                                                                                                                               ("decring2",0.5,0.1,90.0),
                                                                                                                               ("decring2",0.6,0.1,90.0) ) ) )
    decwind1 = bones.add_bezier_bone( ship_skin, "decwind1", num_pts=4, scope=(ship_skin,ship), script= bones.bezier4_script( ( ("decwind0",0.0),
                                                                                                                               ("decwind0",0.0,1.17,90.0),
                                                                                                                               ("decwind0",0.0,1.17,90.0),
                                                                                                                               ("decwind0",0.5,1.0,90.0) ) ) )
    decwind2 = bones.add_bezier_bone( ship_skin, "decwind2", num_pts=4, scope=(ship_skin,ship), script= bones.bezier4_script( ( ("decwind0",1.0),
                                                                                                                               ("decwind0",1.0,1.5,90.0),
                                                                                                                               ("decwind0",1.0,1.5,90.0),
                                                                                                                               ("decwind0",0.5,1.0,90.0) ) ) )
    decring3 = bones.add_bezier_bone( ship_skin, "decring3", num_pts=4, scope=(ship_skin,ship), script= bones.bezier4_script( ( ("skin4",0.3),
                                                                                                                               ("bone1",0.9),
                                                                                                                               ("bone1",0.9),
                                                                                                                               ("skin5",0.3) ) ) )
    decdoor0 = bones.add_bezier_bone( ship_skin, "decdoor0", num_pts=4, scope=(ship_skin,ship), script= bones.bezier4_script( ( ("decring3",0.65),
                                                                                                                               ("decring3",0.6,0.4,90),
                                                                                                                               ("decring3",0.95,0.4,90),
                                                                                                                               ("decring3",0.9) ) ) )
    decorations = ship.add_child(bones.c_bone_plane("decorations", depth=-10))
    decring1_d = decorations.add_child(bones.c_bone_drawable("dec_ring1", stroke=(0,0,0) ))
    decring1_d.add_child(bones.c_bone_ref("r1", decring1, opts={"start":0.0,"end":1.0}))
    decring2_d = decorations.add_child(bones.c_bone_drawable("dec_ring2", stroke=(0,0,0) ))
    decring2_d.add_child(bones.c_bone_ref("r2", decring2, opts={"start":0.0,"end":1.0}))
    decring3_d = decorations.add_child(bones.c_bone_drawable("dec_ring3", stroke=(0,0,0) ))
    decring3_d.add_child(bones.c_bone_ref("r3", decring3, opts={"start":0.0,"end":1.0}))
    decwin_d = decorations.add_child(bones.c_bone_drawable("dec_win", fill=(255,255,200),stroke=(0,0,0) ))
    decwin_d.add_child(bones.c_bone_ref("w0", decwind0, opts={"start":1.0,"end":0.0}))
    decwin_d.add_child(bones.c_bone_ref("w1", decwind1, opts={"start":0.0,"end":1.0}))
    decwin_d.add_child(bones.c_bone_ref("w2", decwind2, opts={"start":1.0,"end":0.0}))
    decdoor_d = decorations.add_child(bones.c_bone_drawable("dec_door", stroke=(0,0,0) ))
    decdoor_d.add_child(bones.c_bone_ref("d0", decdoor0, opts={"start":1.0,"end":0.0}))
    return ship
| {
"repo_name": "embisi-github/gjslib",
"path": "tests/sample_ship.py",
"copies": "1",
"size": "17539",
"license": "apache-2.0",
"hash": -1542440475179620000,
"line_mean": 95.9005524862,
"line_max": 165,
"alpha_frac": 0.3796111523,
"autogenerated": false,
"ratio": 3.588175122749591,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9269723347051346,
"avg_score": 0.03961258559964892,
"num_lines": 181
} |
#a Imports
import math
#f vector_sq
def vector_sq( v ):
    """Return the squared length of the 2D vector v (v.v)."""
    x, y = v[0], v[1]
    return x*x + y*y
#f vector_dot_product
def vector_dot_product( v0, v1 ):
    """Return the 2D dot product of v0 and v1 (only the first two components are used)."""
    return sum(a*b for a, b in zip(v0[:2], v1[:2]))
#f find_collision_time_circle_circle
def find_collision_time_circle_circle( p0, v0, r0, p1, v1, r1 ):
    """
    Return the earliest time t in (0,1] at which circle 0 (center p0+t.v0,
    radius r0) and circle 1 (center p1+t.v1, radius r1) touch, or None.

    Working in circle 0's frame with dp=p1-p0, dv=v1-v0, solve
        |dp + t.dv| = r0+r1
    i.e. the quadratic  dv^2.t^2 + 2(dp.dv).t + dp^2-(r0+r1)^2 = 0,
        t = (-dp.dv +- sqrt((dp.dv)^2 - dv^2.(dp^2-(r0+r1)^2))) / dv^2
    A negative discriminant means the circles never touch.
    (Worked example: dp=(3,0), dv=(-1,0), r0=r1=1 gives t = 1 or 5.)
    """
    dp = ( p1[0]-p0[0], p1[1]-p0[1] )
    dv = ( v1[0]-v0[0], v1[1]-v0[1] )
    dv_sq = vector_sq( dv )
    if dv_sq<1e-20:
        # effectively no relative motion - they never close the gap
        return None
    dp_sq = vector_sq( dp )
    touch_sq = (r0+r1)*(r0+r1)
    dp_dot_dv = vector_dot_product( dp, dv )
    discriminant = dp_dot_dv*dp_dot_dv - dv_sq*(dp_sq-touch_sq)
    if discriminant<0:
        return None
    root = math.sqrt(discriminant)
    # try the earlier root first; t here is scaled by dv_sq, so valid range is (0, dv_sq]
    for scaled_t in (-root - dp_dot_dv, root - dp_dot_dv):
        if (scaled_t>0) and (scaled_t<=dv_sq):
            return scaled_t/dv_sq
    return None
#c c_transform_matrix
class c_transform_matrix( object ):
    """A 2x2 matrix (a b; c d), stored row-major in .values, with determinant, vector apply, and in-place scale."""
    def __init__( self, a,b,c,d ):
        self.values = (a,b,c,d)
        pass
    def determinant( self ):
        """Return ad - bc."""
        a, b, c, d = self.values
        return a*d - b*c
    def apply( self, x, y ):
        """Return the matrix applied to the column vector (x,y)."""
        a, b, c, d = self.values
        return (a*x + b*y,
                c*x + d*y)
    def scale( self, s ):
        """Multiply every entry by s, in place."""
        self.values = tuple(v*s for v in self.values)
        pass
    def __repr__( self ):
        return str(self.values)
#f find_collision_time_rectangle_corners_line
def find_collision_time_rectangle_corners_line( p, v, x, y, ls, ld ):
    """
    Find the smallest time (0<t<=1) at which corners p+v.t + -1/1.x + -1/1y hit line -1/1.ls + k.ld
    Collide at p + dx.x + dy.y + t.v = dl.ls + k.ld
    Or k.ld - t.v = p + dx.x + dy.y - dl.ls
    where dx = -1/1, dy=-1/1, dl=-1/1, -1<=k<=1, 0<t<=1
    We can solve this with:
    k.ld - t.v = | ldx -vx | | k |  = | px | + ...
                 | ldy -vy | | t |  = | py |
    (-ldx*vy + ldy*vx)| k |  = | -vy    vx || px | + ...
                      | t |  = | -ldy  ldx || py |
    need determinant = (ldy*vx - ldx*vy)
    and transform = 1/determinant * [ vy -vx | -ldy ldx ]
    and p_t = transform * p, x_t = transform*x, y_t = transform*y, ls_t = transform*ls
    then determinant*t = p_tx + dx.x_tx + dy.y_tx - dl.ls_tx
    and  determinant*k = p_ty + dx.x_ty + dy.y_ty - dl.ls_ty
    If corners are (2,0)+-(0.5,0)+-(0,0.5) + (-2,0).t and line is (0.5,0)+k.(0,0.5)
    expect dx=-1,dy=1,t=0.5 we have (0.5,0.5) and k=1.
    determinant = (0.5*-2) - (0*0) = -1
    transform = [0 2 | 0.5 0]
    Returns the smallest valid t, or None if no corner reaches the line within (0,1].
    """
    print("Testing corners of %s+t.%s +-%s +-%s against line %s + k.%s"%(str(p),str(v),str(x),str(y),str(ls),str(ld)))
    transform = c_transform_matrix( -v[1], v[0], -ld[1], ld[0] )
    determinant = transform.determinant()
    # parallel (or degenerate) motion relative to the line - no crossing
    if (determinant>-1E-9) and (determinant<1E-9): return None
    transform.scale(1/determinant)
    p_t  = transform.apply( p[0], p[1] )
    x_t  = transform.apply( x[0], x[1] )
    y_t  = transform.apply( y[0], y[1] )
    ls_t = transform.apply( ls[0], ls[1] )
    min_t = 2
    # All eight (corner, line-side) combinations.
    # BUG FIX: the original tuple list repeated dl=-1 entries - e.g. (-1,1,-1)
    # appeared twice - so the dl=+1 side of the line was only ever tested for
    # the (-1,-1) corner; collisions with that side could be missed entirely.
    for dx in (-1,1):
        for dy in (-1,1):
            for dl in (-1,1):
                t = (p_t[1] + dx*x_t[1] + dy*y_t[1] - dl*ls_t[1] )
                if (t>0) and (t<=1) and (t<min_t):
                    k = (p_t[0] + dx*x_t[0] + dy*y_t[0] - dl*ls_t[0] )
                    if (-1<=k) and (k<=1):
                        # corner 0 if dx,dy=1,1; 1 if dx,dy=-1,1; 2 if dx,dy=-1,-1, 3 if dx,dy=1,-1
                        # line 0 if dl is 1, 1 if dl is -1
                        min_t = t
                        pass
                    pass
                pass
            pass
        pass
    if min_t>1: return None
    return min_t
#f find_collision_time_rectangle_rectangle
def find_collision_time_rectangle_rectangle( p0, v0, x0, p1, v1, x1 ):
    """
    A rectangle is p0 + -1/1.x0 + -1/1.y0, where y0 = rot(90)*x0
    if x0 = (x0x, x0y) then y0 = (-x0y, x0x)
    Find the smallest time (0<t<=1) at which corners of rect0 hit lines of rect1 or vice versa
    Need to try 4 setups:
    corners of p0+v0+ -1/1.x0 + -1/1.y0 hit lines p1+v1 + -1/1.x1 + k.y1
    corners of p0+v0+ -1/1.x0 + -1/1.y0 hit lines p1+v1 + -1/1.y1 + k.x1
    corners of p1+v1+ -1/1.x1 + -1/1.y1 hit lines p0+v0 + -1/1.x0 + k.y0
    corners of p1+v1+ -1/1.x1 + -1/1.y1 hit lines p0+v0 + -1/1.y0 + k.x0
    Returns the smallest t in (0,1], or None if the rectangles do not meet in that window.
    """
    y0 = (-x0[1], x0[0])
    y1 = (-x1[1], x1[0])
    p10 = ( p1[0]-p0[0], p1[1]-p0[1] )
    v10 = ( v1[0]-v0[0], v1[1]-v0[1] )
    min_t = 2
    # BUG FIX: the last three calls previously discarded their result
    # (no "t =" assignment), so a stale t from the first call was compared
    # three extra times and those three setups never contributed.
    # NOTE(review): setups 3/4 pass the same relative (p10,v10) as setups 1/2;
    # per the docstring they describe rect1's frame, which would use the negated
    # relative position/velocity - confirm intent against callers.
    for (cx, cy, lx, ly) in ( (x1, y1, x0, y0),
                              (x1, y1, y0, x0),
                              (x0, y0, x1, y1),
                              (x0, y0, y1, x1) ):
        t = find_collision_time_rectangle_corners_line( p10, v10, cx, cy, lx, ly )
        if (t is not None) and (t<min_t): min_t = t
        pass
    if (min_t>1): return None
    return min_t
#f find_velocities_after_bounce
def find_velocities_after_bounce( cor, p0, v0, m0, p1, v1, m1 ):
    """
    For two colliding masses at p0/p1 with velocities v0/v1 and masses m0/m1,
    compute the collision response for a coefficient of restitution `cor`.

    Returns (dv0, dv1): the velocity *changes* for each body - the caller adds
    them to v0/v1 to get the post-collision velocities.

    Method: the collision axis is the unit vector along p1-p0.  Resolving the
    relative velocity onto that axis gives the closing speed cv.  Conservation
    of momentum (m0.dcv0 + m1.dcv1 = 0) together with the restitution relation
    (dcv0 - dcv1 = (1+cor).cv) gives, via the inverse of (1 -1; m0 m1):
        (dcv0, dcv1) = 1/(m0+m1) * ( m1, -m0 ) * (1+cor).cv
    which are then mapped back along the collision axis.

    Raises if the two positions coincide (no defined collision axis).
    """
    sep = ((p1[0]-p0[0]), (p1[1]-p0[1]))
    sep_len = math.sqrt(sep[0]*sep[0] + sep[1]*sep[1])
    if sep_len<1E-9:
        raise Exception("Collision of two objects which are at the same spot has no meaning")
    # unit vector along the line of collision
    axis = (sep[0]/sep_len, sep[1]/sep_len)
    # closing speed: relative velocity resolved onto the collision axis
    closing = (v1[0]-v0[0])*axis[0] + (v1[1]-v0[1])*axis[1]
    transform = c_transform_matrix( m1, 1, -m0, 1 )
    transform.scale(1.0/(m1+m0))
    dcv = transform.apply( (1+cor)*closing, 0 )
    return ( (dcv[0]*axis[0], dcv[0]*axis[1]),
             (dcv[1]*axis[0], dcv[1]*axis[1]) )
| {
"repo_name": "embisi-github/gjslib",
"path": "python/gjslib/math/collision.py",
"copies": "1",
"size": "9383",
"license": "apache-2.0",
"hash": -6972021953243486000,
"line_mean": 34.4075471698,
"line_max": 127,
"alpha_frac": 0.5314931259,
"autogenerated": false,
"ratio": 2.2807486631016043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3312241789001604,
"avg_score": null,
"num_lines": null
} |
#a Imports
import pygame.image
#a Simple background
#c c_image
class c_image( object ):
    """
    A pygame image wrapper that keeps the original surface alongside a scaled
    copy, and supports centered (optionally rotated) blitting.
    """
    def __init__( self, **kwargs ):
        """Start with no image loaded; load() must be called before blitting."""
        self.image = None
        self.scaled_image = None
        self.size = (0,0)
        self.scaled_size = (0,0)
        self.last_scale = 1
        pass
    def load( self, image_filename ):
        """Load the image from file and reset the scaled copy to the original; returns self for chaining."""
        self.image = pygame.image.load( image_filename )
        self.size = self.image.get_rect().size
        self.scaled_image = self.image
        self.scaled_size = self.size
        return self
    def scale( self, by=None, width=None, height=None ):
        """
        Rescale from the ORIGINAL image, either by a factor `by` or to an
        explicit width/height (height defaults to width); returns self.
        """
        if by is not None:
            width = by * self.size[0]
            height = by * self.size[1]
            pass
        if height is None: height=width
        # NOTE(review): under Python 2 this is integer division when width and
        # size are ints, so last_scale can truncate to 0 - confirm intent
        self.last_scale = width / self.size[0]
        self.scaled_image = pygame.transform.scale( self.image, (width, height) )
        self.scaled_size = (width, height)
        return self
    def blit( self, surface, position=(0,0) ):
        """
        Put centered at world position
        """
        # NOTE(review): centering uses the ORIGINAL size, not scaled_size, and
        # adds (rather than subtracts) half the height - presumably this suits
        # the caller's y-up coordinate system; verify against users of blit
        surface.blit( self.scaled_image, (position[0]-self.size[0]/2, position[1]+self.size[1]/2) )
        pass
    def rotated_blit( self, surface, position=(0,0), angle=0 ):
        """
        Put centered at world position rotated by angle 'angle'
        We use the scaled image, as that is required.
        The rotated image will be 'scaled_image' size bigger, plus additional room to account for the 'square' bits that fall outside
        So the rotated image / last scale is the world size of the image, and move it by half of that
        """
        rotated_image = pygame.transform.rotate( self.scaled_image, angle )
        rotated_size = rotated_image.get_rect().size
        surface.blit( rotated_image, (position[0]-rotated_size[0]/2/self.last_scale, position[1]+rotated_size[1]/2/self.last_scale) )
        # Dead debug code: draws the rotated image's world-space bounding box
        if False:
            surface.rect( (position[0]-rotated_size[0]/2/self.last_scale,
                           position[1]-rotated_size[1]/2/self.last_scale,
                           position[0]+rotated_size[0]/2/self.last_scale,
                           position[1]+rotated_size[1]/2/self.last_scale,
                           ) )
            pass
| {
"repo_name": "embisi-github/gjslib",
"path": "python/gjslib/graphics/image.py",
"copies": "1",
"size": "2219",
"license": "apache-2.0",
"hash": 1914557989559335700,
"line_mean": 40.8679245283,
"line_max": 133,
"alpha_frac": 0.5808922938,
"autogenerated": false,
"ratio": 3.6617161716171616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47426084654171613,
"avg_score": null,
"num_lines": null
} |
#a Imports
#c Classes
class c_overlap_2d( object ):
    """
    This class manages overlapping objects each with a 2D bounding box
    The class is given a set of objects, each with its bounding box
    A caller can then interrogate the class to find a list of all the objects in the class' set which overlap a given bounding box
    This can be used, for example, with the class containing the walls and floors of a platform game, and the class being
    interrogated to find the wall/floor elements that a player bounding box overlaps with, to determine movement capabilities of the player.
    The objects in the class' set are called 'elements'.
    The basic overlap class splits the 2D space into 'buckets'. Each bucket is a bucket_size * bucket_size region.
    Each bucket contains a list of the elements whose bounding box overlaps with that bucket.
    An element may be in the list of more than one bucket.
    Buckets are from (nx*bucket_size, ny*bucket_size) inclusive to ((n+1)x*bucket_size, (n+1)y*bucket_size) exclusive
    """
    #f __init__
    def __init__( self, bucket_size=32 ):
        """Create an empty overlap structure with square buckets of side bucket_size."""
        self.bucket_size = bucket_size
        self.buckets = {}
        pass
    #f bucket_index
    def bucket_index( self, coord, inclusive=True ):
        """
        Determine the bucket index (X or Y - buckets are square regions) for a coordinate
        if inclusive is False, then round down - effectively 'find bucket index for just less than coord'
        """
        # '//' keeps integer bucket keys on Python 2 AND 3 (plain '/' would
        # produce float keys under Python 3); identical to '/' under Python 2
        if inclusive:
            return (coord//self.bucket_size)
        return (coord+self.bucket_size-1)//self.bucket_size
    #f bucket_bbox
    def bucket_bbox( self, bbox ):
        """
        Given a bounding box that is (inclusive,inclusive,exclusive,exclusive) in world coordinates,
        find the bucket index bounding box that is (inclusive,inclusive,inclusive,inclusive) that covers it
        """
        bucket_bbox = (self.bucket_index( bbox[0], True ),
                       self.bucket_index( bbox[1], True ),
                       self.bucket_index( bbox[2], False ),
                       self.bucket_index( bbox[3], False ) )
        return bucket_bbox
    #f _bucket_keys
    def _bucket_keys( self, bbox ):
        """Yield every (x,y) bucket key covered by a world-coordinate bbox."""
        bb = self.bucket_bbox( bbox )
        # BUG FIX: the original looped over offsets starting at 0 and used the
        # offsets themselves as bucket keys, so every element was filed under
        # origin-anchored buckets regardless of its position (results stayed
        # correct only because find_overlap re-checks bboxes, but the spatial
        # hashing was defeated)
        for x in range( bb[0], bb[2]+1 ):
            for y in range( bb[1], bb[3]+1 ):
                yield (x,y)
                pass
            pass
        pass
    #f add_element
    def add_element( self, bbox, element ):
        """
        Given a bounding box that is (inclusive,inclusive,exclusive,exclusive) coordinates, add the element to all the buckets it overlaps with
        The process is to determine the buckets that the element needs to be part of, and then
        add to the bucket lists for each of those buckets
        """
        for key in self._bucket_keys( bbox ):
            if key not in self.buckets: self.buckets[key]=[]
            self.buckets[key].append( (bbox,element) )
            pass
        pass
    #f remove_element
    def remove_element( self, bbox, element ):
        """
        Given a bounding box that is (inclusive,inclusive,exclusive,exclusive) coordinates, remove the element from all the buckets it overlaps with
        The process is to determine the buckets that the element needs to be part of, and then
        remove the element from the bucket lists for each of those buckets if it is in them
        """
        for key in self._bucket_keys( bbox ):
            if key in self.buckets:
                bucket = self.buckets[key]
                # remove only the first matching entry, as the original did
                for i in range(len(bucket)):
                    if bucket[i][1] == element:
                        bucket.pop(i)
                        break
                    pass
                pass
            pass
        pass
    #f check_bbox_overlap
    def check_bbox_overlap( self, bbox0, bbox1 ):
        """
        Given real-world coordinates, with bounding boxes as (inclusive,inclusive,exclusive,exclusive),
        determine if the bounding boxes overlap
        The bounding boxes do not overlap if one is to the left of the other, or if one is to above the other
        However, if none of these hold, then there must be some overlap
        """
        # Not overlapping if either box is to the right or below the other box
        if bbox0[0] >= bbox1[2]: return False
        if bbox0[1] >= bbox1[3]: return False
        if bbox1[0] >= bbox0[2]: return False
        if bbox1[1] >= bbox0[3]: return False
        return True
    #f find_overlap
    def find_overlap( self, bbox ):
        """
        Find the list of elements that overlap a bounding box in real-world coordinates (inclusive,inclusive,exclusive,exclusive)
        To do this, find the buckets that the bounding box covers.
        Then for each bucket, check every element in the bucket to see if it overlaps the test bbox
        If it does, add the element to the result list - if it is not already in the list
        """
        results = []
        for key in self._bucket_keys( bbox ):
            for (tgt_bbox, tgt_element) in self.buckets.get( key, [] ):
                if self.check_bbox_overlap( bbox, tgt_bbox ):
                    if tgt_element not in results:
                        results.append(tgt_element)
                        pass
                    pass
                pass
            pass
        return results
| {
"repo_name": "embisi-github/gjslib",
"path": "python/gjslib/math/overlap.py",
"copies": "1",
"size": "5803",
"license": "apache-2.0",
"hash": 1017177414628081700,
"line_mean": 44.6929133858,
"line_max": 146,
"alpha_frac": 0.5891780114,
"autogenerated": false,
"ratio": 4.416286149162861,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.023317468559088325,
"num_lines": 127
} |
#Aim - Program to read the logs generated by YCSB and generate the graphs for read latency, update latency and throughput
#Assumption - File Name of logs should be XXXXrunYYYY_ZZZZ.out
#XXXX - DBName
#YYYY - Desired Throughput
#ZZZZ - Threads
#Author - Naman Aggarwal
import os
import re
from os.path import isfile, join
from matplotlib import pyplot as pp
# ---------- EDIT THESE TO CHANGE THE LOG DIRECTORY AND WHERE THE FILES SHOULD BE SAVED ----------------------
directory = "/home/naman/dd/hbasefiles"
readsavefile="/home/naman/dd/hbase-read-vs-throughput"
updatesavefile="/home/naman/dd/hbase-update-vs-throughput"
throughputsavefile="/home/naman/dd/hbase-throughput-vs-throughput"
colors = [ 'bo--', 'go-', 'ro--', 'co-', 'mo--', 'yo-', 'ko--','bo-', 'go--', 'ro-', 'co--', 'mo-', 'yo--', 'ko-' ]
maxth=0
maxrl=0
maxul=0
maxdt=0
def readFiles():
    """Scan the log directory and collect YCSB latency/throughput data.

    Expects log file names of the form XXXXrunYYYY_ZZZZ.out where XXXX is
    the DB name, YYYY the target throughput and ZZZZ the thread count.

    Returns a dict mapping thread count to four parallel lists, sorted by
    target throughput: [targets, update latencies, read latencies,
    achieved throughputs].  Also updates the module-level maxima
    (maxth/maxrl/maxul/maxdt) used for axis scaling.
    """
    global maxth, maxrl, maxul, maxdt
    cwd = directory
    lst = {}
    # Compile once, outside the loop; raw strings avoid escape surprises.
    otregex = re.compile(r"^\[OVERALL.*Throughput.*", re.M)
    ulregex = re.compile(r"^\[UPDATE.*AverageLatency.*", re.M)
    rlregex = re.compile(r"^\[READ.*AverageLatency.*", re.M)
    for f in os.listdir(cwd):
        fpath = join(cwd, f)
        if isfile(fpath) and f[-3:] == "out":
            rindex = f.find("run")
            uindex = f.find("_")
            dtype, dthroughput, dthread = f[:rindex], int(f[rindex+3:uindex]), int(f[uindex+1:-4])
            # Close the file deterministically instead of leaking the handle.
            with open(fpath, "r") as fh:
                strfile = fh.read()
            otline = otregex.search(strfile)
            ulline = ulregex.search(strfile)
            rlline = rlregex.search(strfile)
            # Field 2 of each matched line holds the numeric value.
            throughput, ulatency, rlatency = float(otline.group(0).split(",")[2]), float(ulline.group(0).split(",")[2]), float(rlline.group(0).split(",")[2])
            if dthread not in lst:
                lst[dthread] = {}
            lst[dthread][dthroughput] = [ulatency, rlatency, throughput]
            if throughput > maxth:
                maxth = throughput
            if ulatency > maxul:
                maxul = ulatency
            if rlatency > maxrl:
                maxrl = rlatency
            if dthroughput > maxdt:
                maxdt = dthroughput
    # Re-shape into per-thread parallel lists sorted by target throughput.
    nlst = {}
    for thread in lst:
        nlst[thread] = [[], [], [], []]
        for th in sorted(lst[thread]):
            nlst[thread][0].append(th)
            nlst[thread][1].append(lst[thread][th][0])
            nlst[thread][2].append(lst[thread][th][1])
            nlst[thread][3].append(lst[thread][th][2])
    return nlst
def createReadLatencyGraph(data):
    """Plot average read latency vs achieved throughput, one curve per thread count."""
    pp.figure(1)
    # One colored curve per thread count, in ascending thread order.
    for idx, thread in enumerate(sorted(data.keys())):
        pp.plot(data[thread][3], data[thread][2], colors[idx], label="Threads = "+str(thread))
    pp.grid(axis='both')
    pp.xlabel('Achieved Throughput (operations/second)')
    pp.ylabel('Average Read Latency (milliseconds)')
    pp.axis([0, 1.1 * maxth , 0, 1.1*maxrl ])
    pp.title('HBase Read Latency vs Achieved Throughput at different number of threads for 300000 operations and 25000000 record count')
    pp.legend(loc=2)
    save(readsavefile)
def createUpdateLatencyGraph(data):
    """Plot average update latency vs achieved throughput, one curve per thread count."""
    pp.figure(2)
    # One colored curve per thread count, in ascending thread order.
    for idx, thread in enumerate(sorted(data.keys())):
        pp.plot(data[thread][3], data[thread][1], colors[idx], label="Threads = "+str(thread))
    pp.grid(axis='both')
    pp.xlabel('Overall Achieved Throughput (operations/second)')
    pp.ylabel('Average Update Latency (milliseconds)')
    pp.axis([0, 1.1 * maxth , 0, 1.5*maxul ])
    pp.title('HBase Update Latency vs Achieved Throughput at different number of threads for 300000 operations and 25000000 record count')
    pp.legend(loc=2)
    save(updatesavefile)
def createThroughputGraph(data):
    """Plot achieved throughput vs target throughput, one curve per thread count."""
    pp.figure(3)
    # One colored curve per thread count, in ascending thread order.
    for idx, thread in enumerate(sorted(data.keys())):
        pp.plot(data[thread][0], data[thread][3], colors[idx], label="Threads = "+str(thread))
    pp.grid(axis='both')
    pp.xlabel('Target Throughput (operations/second)')
    pp.ylabel('Overall Achieved Throughput (operations/second)')
    pp.axis([0, 1.1*maxdt , 0, 1.1*maxth ])
    pp.title('HBase Achieved Throughput vs Target Throughput at different number of threads for 300000 operations and 25000000 record count')
    pp.legend(loc=2)
    save(throughputsavefile)
#This function saves the plot in a file
#This is contributed by Siddharth Goel (National University of Singapore)
def save(path, ext='png', close=True, verbose=True):
    """Save the current matplotlib figure to *path*.*ext*, creating directories as needed.

    Contributed by Siddharth Goel (National University of Singapore).
    """
    # Split the target into directory part and base name.
    folder, base = os.path.split(path)
    if folder == '':
        folder = '.'
    filename = "%s.%s" % (base, ext)
    # Create the destination directory on first use.
    if not os.path.exists(folder):
        os.makedirs(folder)
    savepath = os.path.join(folder, filename)
    if verbose:
        print("Saving figure to '%s'..." % savepath)
    pp.gcf().set_size_inches(18.5,10.5)
    # Write the figure to disk.
    pp.savefig(savepath, figsize=(50, 40), dpi=80)
    if close:
        pp.close()
    if verbose:
        print("Done")
def main():
    """Entry point: parse the YCSB logs, then emit all three graphs."""
    data = readFiles()
    createReadLatencyGraph(data)
    createUpdateLatencyGraph(data)
    createThroughputGraph(data)
# Run only when executed as a script, not when imported.
if __name__=="__main__":
    main()
| {
"repo_name": "namaggarwal/ycsb-autograph-generator",
"path": "create_graph.py",
"copies": "1",
"size": "5357",
"license": "mit",
"hash": 9039027597842200000,
"line_mean": 32.48125,
"line_max": 148,
"alpha_frac": 0.6455105469,
"autogenerated": false,
"ratio": 3.1236151603498543,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4269125707249854,
"avg_score": null,
"num_lines": null
} |
# Aims to provide functions for fast periodic daubechies transforms (forward and inverse) in 2D
# https://github.com/amitgroup/amitgroup/tree/master/amitgroup/util/wavelet
import numpy as np
import scipy
import scipy.sparse
SPARSITY_THRESHOLD = 256
def _populate(W, filtr, yoffset):
N = len(filtr)
for i in range(W.shape[1]//2):
for j in range(N):
W[yoffset+i, (-(N-2)//2+2*i+j)%W.shape[1]] += filtr[j]
def _create_W(shape, level, filter_low, filter_high):
    """Build the combined low/high analysis matrix for the given level.

    The top half of the matrix holds the lowpass filter rows, the bottom
    half the highpass rows.  Large matrices are built sparse.
    """
    step = 1 << (level - 1)
    dims = (shape[0] // step, shape[1] // step)
    if np.min(dims) >= SPARSITY_THRESHOLD:
        W = scipy.sparse.lil_matrix(dims)
    else:
        W = np.asmatrix(np.zeros(dims))
    _populate(W, filter_low, 0)
    _populate(W, filter_high, shape[0] // (2 * step))
    # CSR multiplies faster once the matrix is no longer being modified.
    return W.tocsr() if scipy.sparse.issparse(W) else W
def _create_single(shape, level, filtr):
    """Build the single-filter (half-height) analysis matrix for the given level.

    Used for the lowpass-only reduction steps.  Large matrices are built sparse.
    """
    step = 1 << (level - 1)
    dims = (shape[0] // (2 * step), shape[1] // step)
    if np.min(dims) >= SPARSITY_THRESHOLD:
        GH = scipy.sparse.lil_matrix(dims)
    else:
        GH = np.asmatrix(np.zeros(dims))
    _populate(GH, filtr, 0)
    # CSR multiplies faster once the matrix is no longer being modified.
    return GH.tocsr() if scipy.sparse.issparse(GH) else GH
def _qdot(X, A):
return X * A * X.T
_db_kernels = {
'haar': np.array([0.70710678118654757, 0.70710678118654757]),
'db1': np.array([0.70710678118654757, 0.70710678118654757]),
'db2': np.array([0.48296291314469025, 0.83651630373746899, 0.22414386804185735, -0.12940952255092145]), # D4
'db3': np.array([0.33267055295095688, 0.80689150931333875, 0.45987750211933132, -0.13501102001039084, -0.085441273882241486, 0.035226291882100656]),
'db4': np.array([0.23037781330885523, 0.71484657055254153, 0.63088076792959036, -0.027983769416983849, -0.18703481171888114, 0.030841381835986965, 0.032883011666982945, -0.010597401784997278]),
'db5': np.array([0.16010239797412501, 0.60382926979747287, 0.72430852843857441, 0.13842814590110342, -0.24229488706619015, -0.03224486958502952, 0.077571493840065148, -0.0062414902130117052, -0.012580751999015526, 0.0033357252850015492]),
'db6': np.array([0.11154074335008017, 0.49462389039838539, 0.75113390802157753, 0.3152503517092432, -0.22626469396516913, -0.12976686756709563, 0.097501605587079362, 0.027522865530016288, -0.031582039318031156, 0.0005538422009938016, 0.0047772575110106514, -0.0010773010849955799]),
'db7': np.array([0.077852054085062364, 0.39653931948230575, 0.72913209084655506, 0.4697822874053586, -0.14390600392910627, -0.22403618499416572, 0.071309219267050042, 0.080612609151065898, -0.038029936935034633, -0.01657454163101562, 0.012550998556013784, 0.00042957797300470274, -0.0018016407039998328, 0.00035371380000103988]),
'db8': np.array([0.054415842243081609, 0.31287159091446592, 0.67563073629801285, 0.58535468365486909, -0.015829105256023893, -0.28401554296242809, 0.00047248457399797254, 0.12874742662018601, -0.017369301002022108, -0.044088253931064719, 0.013981027917015516, 0.0087460940470156547, -0.0048703529930106603, -0.00039174037299597711, 0.00067544940599855677, -0.00011747678400228192]),
'db9': np.array([0.038077947363167282, 0.24383467463766728, 0.6048231236767786, 0.65728807803663891, 0.13319738582208895, -0.29327378327258685, -0.096840783220879037, 0.14854074933476008, 0.030725681478322865, -0.067632829059523988, 0.00025094711499193845, 0.022361662123515244, -0.004723204757894831, -0.0042815036819047227, 0.0018476468829611268, 0.00023038576399541288, -0.00025196318899817888, 3.9347319995026124e-005]),
'db10': np.array([0.026670057900950818, 0.18817680007762133, 0.52720118893091983, 0.68845903945259213, 0.28117234366042648, -0.24984642432648865, -0.19594627437659665, 0.12736934033574265, 0.093057364603806592, -0.071394147165860775, -0.029457536821945671, 0.033212674058933238, 0.0036065535669883944, -0.010733175482979604, 0.0013953517469940798, 0.0019924052949908499, -0.00068585669500468248, -0.0001164668549943862, 9.3588670001089845e-005, -1.3264203002354869e-005]),
'db11': np.array([0.018694297761470441, 0.14406702115061959, 0.44989976435603013, 0.68568677491617847, 0.41196436894789695, -0.16227524502747828, -0.27423084681792875, 0.066043588196690886, 0.14981201246638268, -0.04647995511667613, -0.066438785695020222, 0.031335090219045313, 0.020840904360180039, -0.015364820906201324, -0.0033408588730145018, 0.0049284176560587777, -0.00030859285881515924, -0.00089302325066623663, 0.00024915252355281426, 5.4439074699366381e-005, -3.4634984186983789e-005, 4.4942742772363519e-006]),
'db12': np.array([0.013112257957229239, 0.10956627282118277, 0.37735513521420411, 0.65719872257929113, 0.51588647842780067, -0.044763885653777619, -0.31617845375277914, -0.023779257256064865, 0.18247860592758275, 0.0053595696743599965, -0.09643212009649671, 0.010849130255828966, 0.041546277495087637, -0.01221864906974642, -0.012840825198299882, 0.0067114990087955486, 0.0022486072409952287, -0.0021795036186277044, 6.5451282125215034e-006, 0.00038865306282092672, -8.8504109208203182e-005, -2.4241545757030318e-005, 1.2776952219379579e-005, -1.5290717580684923e-006]),
'db13': np.array([0.0092021335389622788, 0.082861243872901946, 0.31199632216043488, 0.61105585115878114, 0.58888957043121193, 0.086985726179645007, -0.31497290771138414, -0.12457673075080665, 0.17947607942935084, 0.072948933656788742, -0.10580761818792761, -0.026488406475345658, 0.056139477100276156, 0.0023799722540522269, -0.023831420710327809, 0.0039239414487955773, 0.0072555894016171187, -0.002761911234656831, -0.0013156739118922766, 0.00093232613086724904, 4.9251525126285676e-005, -0.00016512898855650571, 3.0678537579324358e-005, 1.0441930571407941e-005, -4.7004164793608082e-006, 5.2200350984547998e-007]),
'db14': np.array([0.0064611534600864905, 0.062364758849384874, 0.25485026779256437, 0.55430561794077093, 0.63118784910471981, 0.21867068775886594, -0.27168855227867705, -0.21803352999321651, 0.13839521386479153, 0.13998901658445695, -0.086748411568110598, -0.071548955503983505, 0.05523712625925082, 0.026981408307947971, -0.030185351540353976, -0.0056150495303375755, 0.012789493266340071, -0.00074621898926387534, -0.003849638868019787, 0.001061691085606874, 0.00070802115423540481, -0.00038683194731287514, -4.1777245770370672e-005, 6.875504252695734e-005, -1.0337209184568496e-005, -4.3897049017804176e-006, 1.7249946753674012e-006, -1.7871399683109222e-007]),
'db15': np.array([0.0045385373615773762, 0.046743394892750617, 0.20602386398692688, 0.49263177170797529, 0.64581314035721027, 0.33900253545462167, -0.19320413960907623, -0.28888259656686216, 0.065282952848765688, 0.19014671400708816, -0.039666176555733602, -0.11112093603713753, 0.033877143923563204, 0.054780550584559995, -0.025767007328366939, -0.020810050169636805, 0.015083918027862582, 0.0051010003604228726, -0.0064877345603061454, -0.00024175649075894543, 0.0019433239803823459, -0.00037348235413726472, -0.00035956524436229364, 0.00015589648992055726, 2.579269915531323e-005, -2.8133296266037558e-005, 3.3629871817363823e-006, 1.8112704079399406e-006, -6.3168823258794506e-007, 6.1333599133037138e-008]),
'db16': np.array([0.0031892209253436892, 0.034907714323629047, 0.1650642834886438, 0.43031272284545874, 0.63735633208298326, 0.44029025688580486, -0.089751089402363524, -0.32706331052747578, -0.02791820813292813, 0.21119069394696974, 0.027340263752899923, -0.13238830556335474, -0.0062397227521562536, 0.075924236044457791, -0.0075889743686425939, -0.036888397691556774, 0.010297659641009963, 0.013993768859843242, -0.0069900145633907508, -0.0036442796214883506, 0.00312802338120381, 0.00040789698084934395, -0.00094102174935854332, 0.00011424152003843815, 0.00017478724522506327, -6.103596621404321e-005, -1.394566898819319e-005, 1.133660866126152e-005, -1.0435713423102517e-006, -7.3636567854418147e-007, 2.3087840868545578e-007, -2.1093396300980412e-008]),
'db17': np.array([0.0022418070010387899, 0.025985393703623173, 0.13121490330791097, 0.37035072415288578, 0.61099661568502728, 0.5183157640572823, 0.027314970403312946, -0.32832074836418546, -0.12659975221599248, 0.19731058956508457, 0.10113548917744287, -0.12681569177849797, -0.057091419631858077, 0.081105986654080822, 0.022312336178011833, -0.046922438389378908, -0.0032709555358783646, 0.022733676583919053, -0.0030429899813869555, -0.0086029215203478147, 0.0029679966915180638, 0.0023012052421511474, -0.001436845304805, -0.00032813251941022427, 0.00043946542776894542, -2.5610109566546042e-005, -8.2048032024582121e-005, 2.3186813798761639e-005, 6.9906009850812941e-006, -4.5059424772259631e-006, 3.0165496099963414e-007, 2.9577009333187617e-007, -8.4239484460081536e-008, 7.2674929685663697e-009]),
'db18': np.array([0.0015763102184365595, 0.019288531724094969, 0.10358846582214751, 0.31467894133619284, 0.57182680776508177, 0.57180165488712198, 0.14722311196952223, -0.29365404073579809, -0.21648093400458224, 0.14953397556500755, 0.16708131276294505, -0.092331884150304119, -0.10675224665906288, 0.064887216212358198, 0.057051247739058272, -0.04452614190225633, -0.023733210395336858, 0.026670705926689853, 0.0062621679544386608, -0.013051480946517112, 0.00011863003387493042, 0.0049433436054565939, -0.0011187326669886426, -0.0013405962983313922, 0.00062846568296447147, 0.0002135815619103188, -0.00019864855231101547, -1.5359171230213409e-007, 3.7412378807308472e-005, -8.5206025374234635e-006, -3.3326344788769603e-006, 1.7687129836228861e-006, -7.691632689865049e-008, -1.1760987670250871e-007, 3.0688358630370302e-008, -2.5079344549419292e-009]),
'db19': np.array([0.0011086697631864314, 0.01428109845082521, 0.08127811326580564, 0.26438843174202237, 0.52443637746688621, 0.60170454913009164, 0.26089495265212009, -0.22809139421653665, -0.28583863175723145, 0.074652269708066474, 0.21234974330662043, -0.033518541903202262, -0.14278569504021468, 0.027584350624887129, 0.086906755555450702, -0.026501236250778635, -0.045674226277784918, 0.021623767409452484, 0.019375549889114482, -0.013988388678695632, -0.0058669222811121953, 0.0070407473670804953, 0.00076895435922424884, -0.0026875518007344408, 0.00034180865344939543, 0.0007358025205041731, -0.00026067613568119951, -0.00012460079173506306, 8.7112704672504432e-005, 5.1059504870906939e-006, -1.6640176297224622e-005, 3.0109643163099385e-006, 1.5319314766978769e-006, -6.8627556577981102e-007, 1.4470882988040879e-008, 4.6369377758023682e-008, -1.1164020670405678e-008, 8.6668488390344833e-010]),
'db20': np.array([0.00077995361366591117, 0.010549394624937735, 0.063423780459005291, 0.21994211355113222, 0.47269618531033147, 0.61049323893785579, 0.36150229873889705, -0.13921208801128787, -0.32678680043353758, -0.016727088308801888, 0.22829105082013823, 0.039850246458519104, -0.15545875070604531, -0.024716827337521424, 0.10229171917513397, 0.0056322468576854544, -0.061722899624668884, 0.0058746818113949465, 0.032294299530119162, -0.0087893249245557647, -0.013810526137727442, 0.0067216273018096935, 0.0044205423867663502, -0.003581494259744107, -0.00083156217287724745, 0.0013925596193045254, -5.3497598443404532e-005, -0.0003851047486990061, 0.00010153288973669777, 6.7742808283730477e-005, -3.7105861833906152e-005, -4.3761438621821972e-006, 7.2412482876637907e-006, -1.0119940100181473e-006, -6.847079596993149e-007, 2.633924226266962e-007, 2.0143220235374613e-010, -1.8148432482976221e-008, 4.05612705554717e-009, -2.9988364896157532e-010]),
}
def _get_filters(wavelet):
    """Return the (lowpass, highpass) decomposition kernels for *wavelet*.

    The highpass kernel is derived from the lowpass one by the standard
    quadrature-mirror construction: reverse the taps, then negate every
    other tap.

    Raises
    ------
    ValueError
        If *wavelet* is not one of the supported names ('haar', 'db1'..'db20').
    """
    # No `global` statement needed: _db_kernels is only read, never rebound.
    try:
        lowpass = _db_kernels[wavelet]
    except KeyError:
        raise ValueError("Wavelet type not supported: ('{0}')".format(wavelet))
    # Copy before mutating so the module-level kernel table stays intact.
    highpass = lowpass[::-1].copy()
    highpass[1::2] *= -1
    return lowpass, highpass
def _arrange_filter_matrices(shape, wavelet):
    """Precompute the analysis matrices for a square power-of-two *shape*.

    Returns (Wgs, WgsT, Ws, WsT, max_level): Ws[l] applies one combined
    low/high filtering step, Wgs[k] maps a full-size input down to k kept
    coefficient levels, and the *T lists hold the transposes used by the
    inverse transform.
    """
    filter_low, filter_high = _get_filters(wavelet)
    assert len(shape) == 2
    assert shape[0] == shape[1], "Shape must be square (at least for now)"
    levels_list = range(1, int(np.log2(shape[0]))+1)
    max_level = int(np.log2(max(shape)))
    # Setup matrices
    Ws = [_create_W(shape, level, filter_low, filter_high) for level in levels_list]
    Wgs = []
    # Combine all the matrices for the steps where we throw away the coefficients.
    Wg = np.asmatrix(np.eye(shape[0], shape[1]))
    for l in range(0, max_level):
        new_M = Ws[l] * Wg
        # Switch to a sparse representation once the product is large enough.
        if np.min(new_M.shape) >= SPARSITY_THRESHOLD:
            new_M = scipy.sparse.csr_matrix(new_M)
        Wgs.append(new_M)
        # Wg accumulates only the lowpass path down to the next coarser level.
        Wg = _create_single(shape, l+1, filter_low) * Wg
    Wgs.append(Wg)
    # Reverse so Wgs is indexed by the number of levels kept.
    Wgs = Wgs[::-1]
    WsT = [W.T for W in Ws]
    WgsT = [Wg.T for Wg in Wgs]
    return Wgs, WgsT, Ws, WsT, max_level
def daubechies_factory(shape, wavelet='db2'):
    """
    Creates a forward and an inverse discrete wavelet transform function.
    The function is specialized for a specific size and wavelet.
    .. seealso::
        :ref:`wavelet2d`
    Parameters
    ----------
    shape : tuple
        A tuple describing the size of the input, for instance ``(32, 32)``. Values must be powers of two.
    wavelet : str
        Type of wavelet described as a string. Supported values are ``'db1'``, ``'db2'``, ... ``'db20'``. What is called, for instance, `db3` is what is generally called the D6 wavelet (since it uses a kernel of size 6). The string ``'haar'`` is a synonym for ``'db1'``.
    Returns
    -------
    wavedec2 : func(A[, levels])
        Returns a function that takes an argument, `A`, input data that must be of the size specified above. It also takes an optional argument `levels`, where you can specify how many coefficient levels you plan to use. It will return an array with the coefficients of shape ``(2**levels, 2**levels)``.
    waverec2 : func(coefs)
        Returns a function that takes a single argument, `coefs`, the coefficients to use to reconstruct the spatial information.
    Examples
    --------
    >>> import amitgroup as ag
    >>> import amitgroup.util.wavelet
    >>> import matplotlib.pylab as plt
    >>> face = ag.io.load_example('faces')[0]
    To compress a face and then inspect the results, let's first create the transform functions:
    >>> wavedec2, waverec2 = ag.util.wavelet.daubechies_factory(face.shape, 'db8')
    And then deconstruct a face to coefficients and the reconstruct it again. Since we only used 4 coefficient levels, information will be lost.
    >>> new_face = waverec2(wavedec2(face, levels=4))
    >>> ag.plot.images([face, new_face])
    >>> plt.show()
    """
    if isinstance(shape, int): # One dimensional!
        # 1D input: reuse the 2D matrix machinery with a square shape and
        # treat the data as a single column vector.
        Wgs, WgsT, Ws, WsT, max_level = _arrange_filter_matrices((shape, shape), wavelet)
        def wavedec(A, levels=np.inf):
            # Forward 1D transform: project down to `levels`, then refine
            # the retained lowpass block level by level.
            A = A.reshape((len(A), 1))
            levels = min(max_level, levels)
            coefs = Wgs[levels] * A
            for l in range(levels-1, 0, -1):
                N = 1 << l
                coefs[:N] = Ws[max_level-l] * coefs[:N]
            return np.asarray(coefs).flatten()
        def waverec(coefs):
            # Inverse 1D transform; the level count is inferred from the
            # coefficient length, so no `levels` argument is taken.
            levels = int(np.log2(coefs.shape[0]))
            A = coefs.reshape((len(coefs), 1)).copy()
            for l in range(1, levels):
                N = 1 << l
                A[:N] = WsT[max_level-l] * A[:N]
            A = WgsT[levels] * A
            return np.asarray(A).flatten()
        return wavedec, waverec
    elif isinstance(shape, tuple) and len(shape) == 2: # 2 dimensional!
        Wgs, WgsT, Ws, WsT, max_level = _arrange_filter_matrices(shape, wavelet)
        def wavedec2(A, levels=np.inf):
            levels = min(max_level, levels)
            # Apply the combined matrix on both sides: rows and columns.
            coefs = Wgs[levels] * A * WgsT[levels]
            for l in range(levels-1, 0, -1):
                N = 1 << l
                L = max_level-l
                coefs[:N,:N] = Ws[L] * coefs[:N,:N] * WsT[L]
            return np.asarray(coefs)
        def waverec2(coefs, levels=np.inf):
            #print coefs.shape
            # NOTE(review): the `levels` argument is immediately overwritten
            # from the input shape below, so it currently has no effect.
            levels = int(np.log2(coefs.shape[0]))
            #levels = min(max_level, levels)
            A = coefs.copy()
            for l in range(1, levels):
                N = 1 << l
                L = max_level-l
                A[:N,:N] = WsT[L] * A[:N,:N] * Ws[L]
            return np.asarray(WgsT[levels] * A * Wgs[levels])
        return wavedec2, waverec2
    else:
        raise ValueError("Shape must be either integer or tuple of size two")
# CACHED 1-D
################################################################################
_db_wavedec_cache = {}
_db_waverec_cache = {}
def wavedec(A, wavelet='db2', levels=np.inf, length=None):
    """Cached 1D wavelet decomposition (forward transform).

    The transform pair for a given (length, wavelet) is built once via
    :func:`daubechies_factory` and memoized, so the first call for a pair
    is slower than subsequent ones.  `levels` limits how many coefficient
    levels are produced (default: all).
    """
    global _db_wavedec_cache, _db_waverec_cache
    key = (length or len(A), wavelet)
    # Build and memoize both directions on first use of this key.
    if key not in _db_wavedec_cache:
        dec, rec = daubechies_factory(*key)
        _db_wavedec_cache[key] = dec
        _db_waverec_cache[key] = rec
    return _db_wavedec_cache[key](A, levels)
def waverec(coefs, wavelet='db2', length=None):
    """Cached 1D wavelet reconstruction (inverse transform).

    The level count is inferred from the length of *coefs*, so no `levels`
    argument is taken.  The transform pair for (length, wavelet) is built
    once via :func:`daubechies_factory` and memoized.
    """
    global _db_wavedec_cache, _db_waverec_cache
    key = (length or len(coefs), wavelet)
    # Build and memoize both directions on first use of this key.
    if key not in _db_waverec_cache:
        dec, rec = daubechies_factory(*key)
        _db_wavedec_cache[key] = dec
        _db_waverec_cache[key] = rec
    return _db_waverec_cache[key](coefs)
# CACHED 2-D
################################################################################
_db_wavedec2_cache = {}
_db_waverec2_cache = {}
def wavedec2(A, wavelet='db2', levels=np.inf, shape=None):
    """Cached 2D wavelet decomposition (forward transform).

    Shape must be square with power-of-two sides.  The transform pair for
    (shape, wavelet) is built once via :func:`daubechies_factory` and
    memoized, so the first call for a pair is slower than subsequent ones.
    `levels` limits how many coefficient levels are produced (default: all).
    """
    global _db_wavedec2_cache, _db_waverec2_cache
    key = (shape or A.shape, wavelet)
    # Build and memoize both directions on first use of this key.
    if key not in _db_wavedec2_cache:
        dec, rec = daubechies_factory(*key)
        _db_wavedec2_cache[key] = dec
        _db_waverec2_cache[key] = rec
    return _db_wavedec2_cache[key](A, levels)
def waverec2(coefs, wavelet='db2', shape=None):
    """Cached 2D wavelet reconstruction (inverse transform).

    The level count is inferred from the shape of *coefs*, so no `levels`
    argument is taken.  The transform pair for (shape, wavelet) is built
    once via :func:`daubechies_factory` and memoized.
    """
    global _db_wavedec2_cache, _db_waverec2_cache
    key = (shape or coefs.shape, wavelet)
    # Build and memoize both directions on first use of this key.
    if key not in _db_waverec2_cache:
        dec, rec = daubechies_factory(*key)
        _db_wavedec2_cache[key] = dec
        _db_waverec2_cache[key] = rec
    return _db_waverec2_cache[key](coefs)
# HELPER FUNCTIONS
################################################################################
def smart_flatten(coefficients):
    """Flatten 8x8 2D coefficients into contiguous per-level groups.

    Coefficient levels are emitted as contiguous blocks ordered from the
    low-frequency to the high-frequency coefficients.  (1D coefficients
    are already flat and sorted.)
    """
    assert coefficients.shape == (8, 8), "TODO: Has not been generalized, only works with shape (8, 8), not {0}".format(coefficients.shape)
    c = coefficients
    parts = (
        # Scaling + level-1 block in column-major order: c00, c10, c01, c11.
        c[0:2, 0:2].T.flatten(),
        c[2:4, 0:2].flatten(),
        c[0:2, 2:4].flatten(),
        c[2:4, 2:4].flatten(),
        c[4:8, 0:4].flatten(),
        c[0:4, 4:8].flatten(),
        c[4:8, 4:8].flatten(),
    )
    # The original filled a float64 buffer; keep the output dtype identical.
    return np.concatenate(parts).astype(np.float64)
def smart_deflatten(flatcoefs):
    """Inverse function of :func:`smart_flatten`.

    Parameters
    ----------
    flatcoefs : ndarray
        Flat array of coefficients returned by :func:`smart_flatten`.
    """
    N = int(np.sqrt(len(flatcoefs)))
    A = np.arange(N*N, dtype=int).reshape(N, N)
    # NOTE(review): `new2old` is not defined anywhere in this module -
    # confirm where it is meant to come from before relying on this function.
    indices = new2old(A).astype(int)
    new_indices = np.empty(indices.shape, dtype=int)
    # Invert the permutation produced by new2old.
    for i, index in enumerate(indices):
        new_indices[index] = i
    # Use the computed N rather than a hard-coded 8x8, so the inverse
    # matches any square input size the index mapping supports.
    news = flatcoefs[new_indices].reshape(N, N).copy()
    return news
def structured_to_contiguous(structured_coefs):
    """
    Converts a structured list-of-tuples-of-arrays-of-coefficients to a contiguous block.
    The input format follows `PyWavelets <http://www.pybytes.com/pywavelets/>`_.
    Works for both 1D and 2D coefficients.
    Parameters
    ----------
    structured_coefs : list
        List of coefficients; element 0 is the scaling coefficient.
    """
    # NOTE(review): this dimensionality probe inspects element [0][0] - for a
    # plain 2D ndarray scaling block that sub-element is 1D, so 2D inputs are
    # expected to carry a matrix-like scaling block; confirm against callers.
    in2d = structured_coefs[0][0].ndim == 2
    if in2d:
        N = 1 << (len(structured_coefs)-1)
        u = np.zeros((N, N))
        # Extract the single scaling value; np.ravel handles scalars, (1,)
        # and (1,1) shapes alike (float() on an ndim>0 array is deprecated).
        u[0,0] = np.ravel(structured_coefs[0])[0]
        for level, c in enumerate(structured_coefs):
            if level != 0:
                # Place the three detail blocks of this level.
                S = len(c[0])
                u[S:2*S,:S] = c[0]
                u[:S,S:2*S] = c[1]
                u[S:2*S,S:2*S] = c[2]
        return u
    else:
        N = 1 << (len(structured_coefs)-1)
        u = np.zeros(N)
        u[0] = np.ravel(structured_coefs[0])[0]
        for level, c in enumerate(structured_coefs):
            if level != 0:
                S = len(c)
                u[S:2*S] = c
        return u
def contiguous_to_structured(contiguous_coefs, levels=np.inf):
    """
    Convert from continguous array to a structured format (identical to the one used in PyWavelets).
    Works for both 1D and 2D coefficients.
    Parameters
    ----------
    contiguous_coefs : ndarray
        Coefficients as returned by our wavelet functions.
    levels : int, optional
        If you don't want all levels, you can set this value to specify how many you want. Notice that this
        refers to levels of `wavelet` coefficients, which means that the scaling coefficient is not included and
        will always be returned, even if `levels` is set to zero.
    """
    u = contiguous_coefs  # NOTE(review): `u` is never used below; candidate for removal.
    in2d = contiguous_coefs.ndim == 2
    # Number of wavelet levels available in the input.
    N = int(np.log2(len(contiguous_coefs)))
    coefs = []
    if in2d:
        # Scaling block first, then a triplet of detail blocks per level.
        coefs.append( contiguous_coefs[:1,:1] )
        for level in range(min(levels, N)):
            S = 1 << level
            coefs.append( (contiguous_coefs[S:2*S,:S], contiguous_coefs[:S,S:2*S], contiguous_coefs[S:2*S,S:2*S]) )
    else:
        # 1D: scaling coefficient first, then one detail slice per level.
        coefs.append( contiguous_coefs[:1] )
        for level in range(min(levels, N)):
            S = 1 << level
            coefs.append( contiguous_coefs[S:2*S] )
return coefs | {
"repo_name": "aasensio/pyiacsun",
"path": "pyiacsun/sparse/wavelet.py",
"copies": "1",
"size": "25382",
"license": "mit",
"hash": -409426970546257100,
"line_mean": 54.90969163,
"line_max": 957,
"alpha_frac": 0.6892285872,
"autogenerated": false,
"ratio": 2.728073946689596,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3917302533889596,
"avg_score": null,
"num_lines": null
} |
# Aims to provide functions for fast periodic daubechies transforms (forward and inverse) in 2D
import numpy as np
import scipy
import scipy.sparse
SPARSITY_THRESHOLD = 256
def _populate(W, filtr, yoffset):
N = len(filtr)
for i in range(W.shape[1]//2):
for j in range(N):
W[yoffset+i, (-(N-2)//2+2*i+j)%W.shape[1]] += filtr[j]
def _create_W(shape, level, filter_low, filter_high):
    """Build the combined low/high analysis matrix for the given level.

    The top half of the matrix holds the lowpass filter rows, the bottom
    half the highpass rows.  Large matrices are built sparse.
    """
    step = 1 << (level - 1)
    dims = (shape[0] // step, shape[1] // step)
    if np.min(dims) >= SPARSITY_THRESHOLD:
        W = scipy.sparse.lil_matrix(dims)
    else:
        W = np.asmatrix(np.zeros(dims))
    _populate(W, filter_low, 0)
    _populate(W, filter_high, shape[0] // (2 * step))
    # CSR multiplies faster once the matrix is no longer being modified.
    return W.tocsr() if scipy.sparse.issparse(W) else W
def _create_single(shape, level, filtr):
    """Build the single-filter (half-height) analysis matrix for the given level.

    Used for the lowpass-only reduction steps.  Large matrices are built sparse.
    """
    step = 1 << (level - 1)
    dims = (shape[0] // (2 * step), shape[1] // step)
    if np.min(dims) >= SPARSITY_THRESHOLD:
        GH = scipy.sparse.lil_matrix(dims)
    else:
        GH = np.asmatrix(np.zeros(dims))
    _populate(GH, filtr, 0)
    # CSR multiplies faster once the matrix is no longer being modified.
    return GH.tocsr() if scipy.sparse.issparse(GH) else GH
def _qdot(X, A):
return X * A * X.T
_db_kernels = {
'haar': np.array([0.70710678118654757, 0.70710678118654757]),
'db1': np.array([0.70710678118654757, 0.70710678118654757]),
'db2': np.array([0.48296291314469025, 0.83651630373746899, 0.22414386804185735, -0.12940952255092145]), # D4
'db3': np.array([0.33267055295095688, 0.80689150931333875, 0.45987750211933132, -0.13501102001039084, -0.085441273882241486, 0.035226291882100656]),
'db4': np.array([0.23037781330885523, 0.71484657055254153, 0.63088076792959036, -0.027983769416983849, -0.18703481171888114, 0.030841381835986965, 0.032883011666982945, -0.010597401784997278]),
'db5': np.array([0.16010239797412501, 0.60382926979747287, 0.72430852843857441, 0.13842814590110342, -0.24229488706619015, -0.03224486958502952, 0.077571493840065148, -0.0062414902130117052, -0.012580751999015526, 0.0033357252850015492]),
'db6': np.array([0.11154074335008017, 0.49462389039838539, 0.75113390802157753, 0.3152503517092432, -0.22626469396516913, -0.12976686756709563, 0.097501605587079362, 0.027522865530016288, -0.031582039318031156, 0.0005538422009938016, 0.0047772575110106514, -0.0010773010849955799]),
'db7': np.array([0.077852054085062364, 0.39653931948230575, 0.72913209084655506, 0.4697822874053586, -0.14390600392910627, -0.22403618499416572, 0.071309219267050042, 0.080612609151065898, -0.038029936935034633, -0.01657454163101562, 0.012550998556013784, 0.00042957797300470274, -0.0018016407039998328, 0.00035371380000103988]),
'db8': np.array([0.054415842243081609, 0.31287159091446592, 0.67563073629801285, 0.58535468365486909, -0.015829105256023893, -0.28401554296242809, 0.00047248457399797254, 0.12874742662018601, -0.017369301002022108, -0.044088253931064719, 0.013981027917015516, 0.0087460940470156547, -0.0048703529930106603, -0.00039174037299597711, 0.00067544940599855677, -0.00011747678400228192]),
'db9': np.array([0.038077947363167282, 0.24383467463766728, 0.6048231236767786, 0.65728807803663891, 0.13319738582208895, -0.29327378327258685, -0.096840783220879037, 0.14854074933476008, 0.030725681478322865, -0.067632829059523988, 0.00025094711499193845, 0.022361662123515244, -0.004723204757894831, -0.0042815036819047227, 0.0018476468829611268, 0.00023038576399541288, -0.00025196318899817888, 3.9347319995026124e-005]),
'db10': np.array([0.026670057900950818, 0.18817680007762133, 0.52720118893091983, 0.68845903945259213, 0.28117234366042648, -0.24984642432648865, -0.19594627437659665, 0.12736934033574265, 0.093057364603806592, -0.071394147165860775, -0.029457536821945671, 0.033212674058933238, 0.0036065535669883944, -0.010733175482979604, 0.0013953517469940798, 0.0019924052949908499, -0.00068585669500468248, -0.0001164668549943862, 9.3588670001089845e-005, -1.3264203002354869e-005]),
'db11': np.array([0.018694297761470441, 0.14406702115061959, 0.44989976435603013, 0.68568677491617847, 0.41196436894789695, -0.16227524502747828, -0.27423084681792875, 0.066043588196690886, 0.14981201246638268, -0.04647995511667613, -0.066438785695020222, 0.031335090219045313, 0.020840904360180039, -0.015364820906201324, -0.0033408588730145018, 0.0049284176560587777, -0.00030859285881515924, -0.00089302325066623663, 0.00024915252355281426, 5.4439074699366381e-005, -3.4634984186983789e-005, 4.4942742772363519e-006]),
'db12': np.array([0.013112257957229239, 0.10956627282118277, 0.37735513521420411, 0.65719872257929113, 0.51588647842780067, -0.044763885653777619, -0.31617845375277914, -0.023779257256064865, 0.18247860592758275, 0.0053595696743599965, -0.09643212009649671, 0.010849130255828966, 0.041546277495087637, -0.01221864906974642, -0.012840825198299882, 0.0067114990087955486, 0.0022486072409952287, -0.0021795036186277044, 6.5451282125215034e-006, 0.00038865306282092672, -8.8504109208203182e-005, -2.4241545757030318e-005, 1.2776952219379579e-005, -1.5290717580684923e-006]),
'db13': np.array([0.0092021335389622788, 0.082861243872901946, 0.31199632216043488, 0.61105585115878114, 0.58888957043121193, 0.086985726179645007, -0.31497290771138414, -0.12457673075080665, 0.17947607942935084, 0.072948933656788742, -0.10580761818792761, -0.026488406475345658, 0.056139477100276156, 0.0023799722540522269, -0.023831420710327809, 0.0039239414487955773, 0.0072555894016171187, -0.002761911234656831, -0.0013156739118922766, 0.00093232613086724904, 4.9251525126285676e-005, -0.00016512898855650571, 3.0678537579324358e-005, 1.0441930571407941e-005, -4.7004164793608082e-006, 5.2200350984547998e-007]),
'db14': np.array([0.0064611534600864905, 0.062364758849384874, 0.25485026779256437, 0.55430561794077093, 0.63118784910471981, 0.21867068775886594, -0.27168855227867705, -0.21803352999321651, 0.13839521386479153, 0.13998901658445695, -0.086748411568110598, -0.071548955503983505, 0.05523712625925082, 0.026981408307947971, -0.030185351540353976, -0.0056150495303375755, 0.012789493266340071, -0.00074621898926387534, -0.003849638868019787, 0.001061691085606874, 0.00070802115423540481, -0.00038683194731287514, -4.1777245770370672e-005, 6.875504252695734e-005, -1.0337209184568496e-005, -4.3897049017804176e-006, 1.7249946753674012e-006, -1.7871399683109222e-007]),
'db15': np.array([0.0045385373615773762, 0.046743394892750617, 0.20602386398692688, 0.49263177170797529, 0.64581314035721027, 0.33900253545462167, -0.19320413960907623, -0.28888259656686216, 0.065282952848765688, 0.19014671400708816, -0.039666176555733602, -0.11112093603713753, 0.033877143923563204, 0.054780550584559995, -0.025767007328366939, -0.020810050169636805, 0.015083918027862582, 0.0051010003604228726, -0.0064877345603061454, -0.00024175649075894543, 0.0019433239803823459, -0.00037348235413726472, -0.00035956524436229364, 0.00015589648992055726, 2.579269915531323e-005, -2.8133296266037558e-005, 3.3629871817363823e-006, 1.8112704079399406e-006, -6.3168823258794506e-007, 6.1333599133037138e-008]),
'db16': np.array([0.0031892209253436892, 0.034907714323629047, 0.1650642834886438, 0.43031272284545874, 0.63735633208298326, 0.44029025688580486, -0.089751089402363524, -0.32706331052747578, -0.02791820813292813, 0.21119069394696974, 0.027340263752899923, -0.13238830556335474, -0.0062397227521562536, 0.075924236044457791, -0.0075889743686425939, -0.036888397691556774, 0.010297659641009963, 0.013993768859843242, -0.0069900145633907508, -0.0036442796214883506, 0.00312802338120381, 0.00040789698084934395, -0.00094102174935854332, 0.00011424152003843815, 0.00017478724522506327, -6.103596621404321e-005, -1.394566898819319e-005, 1.133660866126152e-005, -1.0435713423102517e-006, -7.3636567854418147e-007, 2.3087840868545578e-007, -2.1093396300980412e-008]),
'db17': np.array([0.0022418070010387899, 0.025985393703623173, 0.13121490330791097, 0.37035072415288578, 0.61099661568502728, 0.5183157640572823, 0.027314970403312946, -0.32832074836418546, -0.12659975221599248, 0.19731058956508457, 0.10113548917744287, -0.12681569177849797, -0.057091419631858077, 0.081105986654080822, 0.022312336178011833, -0.046922438389378908, -0.0032709555358783646, 0.022733676583919053, -0.0030429899813869555, -0.0086029215203478147, 0.0029679966915180638, 0.0023012052421511474, -0.001436845304805, -0.00032813251941022427, 0.00043946542776894542, -2.5610109566546042e-005, -8.2048032024582121e-005, 2.3186813798761639e-005, 6.9906009850812941e-006, -4.5059424772259631e-006, 3.0165496099963414e-007, 2.9577009333187617e-007, -8.4239484460081536e-008, 7.2674929685663697e-009]),
'db18': np.array([0.0015763102184365595, 0.019288531724094969, 0.10358846582214751, 0.31467894133619284, 0.57182680776508177, 0.57180165488712198, 0.14722311196952223, -0.29365404073579809, -0.21648093400458224, 0.14953397556500755, 0.16708131276294505, -0.092331884150304119, -0.10675224665906288, 0.064887216212358198, 0.057051247739058272, -0.04452614190225633, -0.023733210395336858, 0.026670705926689853, 0.0062621679544386608, -0.013051480946517112, 0.00011863003387493042, 0.0049433436054565939, -0.0011187326669886426, -0.0013405962983313922, 0.00062846568296447147, 0.0002135815619103188, -0.00019864855231101547, -1.5359171230213409e-007, 3.7412378807308472e-005, -8.5206025374234635e-006, -3.3326344788769603e-006, 1.7687129836228861e-006, -7.691632689865049e-008, -1.1760987670250871e-007, 3.0688358630370302e-008, -2.5079344549419292e-009]),
'db19': np.array([0.0011086697631864314, 0.01428109845082521, 0.08127811326580564, 0.26438843174202237, 0.52443637746688621, 0.60170454913009164, 0.26089495265212009, -0.22809139421653665, -0.28583863175723145, 0.074652269708066474, 0.21234974330662043, -0.033518541903202262, -0.14278569504021468, 0.027584350624887129, 0.086906755555450702, -0.026501236250778635, -0.045674226277784918, 0.021623767409452484, 0.019375549889114482, -0.013988388678695632, -0.0058669222811121953, 0.0070407473670804953, 0.00076895435922424884, -0.0026875518007344408, 0.00034180865344939543, 0.0007358025205041731, -0.00026067613568119951, -0.00012460079173506306, 8.7112704672504432e-005, 5.1059504870906939e-006, -1.6640176297224622e-005, 3.0109643163099385e-006, 1.5319314766978769e-006, -6.8627556577981102e-007, 1.4470882988040879e-008, 4.6369377758023682e-008, -1.1164020670405678e-008, 8.6668488390344833e-010]),
'db20': np.array([0.00077995361366591117, 0.010549394624937735, 0.063423780459005291, 0.21994211355113222, 0.47269618531033147, 0.61049323893785579, 0.36150229873889705, -0.13921208801128787, -0.32678680043353758, -0.016727088308801888, 0.22829105082013823, 0.039850246458519104, -0.15545875070604531, -0.024716827337521424, 0.10229171917513397, 0.0056322468576854544, -0.061722899624668884, 0.0058746818113949465, 0.032294299530119162, -0.0087893249245557647, -0.013810526137727442, 0.0067216273018096935, 0.0044205423867663502, -0.003581494259744107, -0.00083156217287724745, 0.0013925596193045254, -5.3497598443404532e-005, -0.0003851047486990061, 0.00010153288973669777, 6.7742808283730477e-005, -3.7105861833906152e-005, -4.3761438621821972e-006, 7.2412482876637907e-006, -1.0119940100181473e-006, -6.847079596993149e-007, 2.633924226266962e-007, 2.0143220235374613e-010, -1.8148432482976221e-008, 4.05612705554717e-009, -2.9988364896157532e-010]),
}
def _get_filters(wavelet):
    """Return the ``(lowpass, highpass)`` kernel pair for `wavelet`.

    The highpass filter is derived from the lowpass one as its quadrature
    mirror: reverse the taps and negate every second coefficient.

    Raises
    ------
    ValueError
        If `wavelet` is not a key of ``_db_kernels``.
    """
    if wavelet not in _db_kernels:
        raise ValueError("Wavelet type not supported: ('{0}')".format(wavelet))
    lowpass = _db_kernels[wavelet]
    highpass = lowpass[::-1].copy()
    highpass[1::2] *= -1
    return lowpass, highpass
def _arrange_filter_matrices(shape, wavelet):
    """Build the transform matrices used by :func:`daubechies_factory`.

    Returns ``(Wgs, WgsT, Ws, WsT, max_level)`` where ``Ws`` holds one
    per-level matrix from ``_create_W`` (defined elsewhere in this module),
    ``Wgs`` holds cumulative products used to jump straight to a given
    number of kept coefficient levels (``Wgs[levels] * A`` in the factory),
    ``WsT``/``WgsT`` are the corresponding transposes, and ``max_level`` is
    ``log2`` of the side length.
    """
    filter_low, filter_high = _get_filters(wavelet)
    assert len(shape) == 2
    assert shape[0] == shape[1], "Shape must be square (at least for now)"
    levels_list = range(1, int(np.log2(shape[0]))+1)
    max_level = int(np.log2(max(shape)))
    # Setup matrices: one analysis matrix per decomposition level.
    Ws = [_create_W(shape, level, filter_low, filter_high) for level in levels_list]
    Wgs = []
    # Combine all the matrices for the steps where we throw away the coefficients.
    Wg = np.asmatrix(np.eye(shape[0], shape[1]))
    for l in range(0, max_level):
        new_M = Ws[l] * Wg
        # Large products are stored sparse to save memory; SPARSITY_THRESHOLD
        # is defined elsewhere in this module.
        if np.min(new_M.shape) >= SPARSITY_THRESHOLD:
            new_M = scipy.sparse.csr_matrix(new_M)
        Wgs.append(new_M)
        # _create_single (defined elsewhere) presumably applies only the
        # lowpass branch for one level -- TODO confirm.
        Wg = _create_single(shape, l+1, filter_low) * Wg
    Wgs.append(Wg)
    # Reverse so the list is indexed by the number of levels kept.
    Wgs = Wgs[::-1]
    WsT = [W.T for W in Ws]
    WgsT = [Wg.T for Wg in Wgs]
    return Wgs, WgsT, Ws, WsT, max_level
def daubechies_factory(shape, wavelet='db2'):
    """
    Creates a forward and an inverse discrete wavelet transform function.
    The function is specialized for a specific size and wavelet.
    .. seealso::
        :ref:`wavelet2d`
    Parameters
    ----------
    shape : tuple
        A tuple describing the size of the input, for instance ``(32, 32)``. Values must be powers of two.
    wavelet : str
        Type of wavelet described as a string. Supported values are ``'db1'``, ``'db2'``, ... ``'db20'``. What is called, for instance, `db3` is what is generally called the D6 wavelet (since it uses a kernel of size 6). The string ``'haar'`` is a synonym for ``'db1'``.
    Returns
    -------
    wavedec2 : func(A[, levels])
        Returns a function that takes an argument, `A`, input data that must be of the size specified above. It also takes an optional argument `levels`, where you can specify how many coefficient levels you plan to use. It will return an array with the coefficients of shape ``(2**levels, 2**levels)``.
    waverec2 : func(coefs)
        Returns a function that takes a single argument, `coefs`, the coefficients to use to reconstruct the spatial information.
    Examples
    --------
    >>> import amitgroup as ag
    >>> import amitgroup.util.wavelet
    >>> import matplotlib.pylab as plt
    >>> face = ag.io.load_example('faces')[0]
    To compress a face and then inspect the results, let's first create the transform functions:
    >>> wavedec2, waverec2 = ag.util.wavelet.daubechies_factory(face.shape, 'db8')
    And then deconstruct a face to coefficients and the reconstruct it again. Since we only used 4 coefficient levels, information will be lost.
    >>> new_face = waverec2(wavedec2(face, levels=4))
    >>> ag.plot.images([face, new_face])
    >>> plt.show()
    """
    if isinstance(shape, int): # One dimensional!
        # 1D input is treated as a degenerate square problem; matrices are
        # applied from the left only.
        Wgs, WgsT, Ws, WsT, max_level = _arrange_filter_matrices((shape, shape), wavelet)
        def wavedec(A, levels=np.inf):
            # Work on a column vector so matrix multiplication applies.
            A = A.reshape((len(A), 1))
            levels = min(max_level, levels)
            # Jump directly to the requested resolution ...
            coefs = Wgs[levels] * A
            # ... then refine the remaining low-frequency head level by level.
            for l in range(levels-1, 0, -1):
                N = 1 << l
                coefs[:N] = Ws[max_level-l] * coefs[:N]
            return np.asarray(coefs).flatten()
        def waverec(coefs):
            # Number of levels is implied by the coefficient count.
            levels = int(np.log2(coefs.shape[0]))
            A = coefs.reshape((len(coefs), 1)).copy()
            # Undo the per-level refinements in the opposite order ...
            for l in range(1, levels):
                N = 1 << l
                A[:N] = WsT[max_level-l] * A[:N]
            # ... then expand back to the full resolution in one product.
            A = WgsT[levels] * A
            return np.asarray(A).flatten()
        return wavedec, waverec
    elif isinstance(shape, tuple) and len(shape) == 2: # 2 dimensional!
        Wgs, WgsT, Ws, WsT, max_level = _arrange_filter_matrices(shape, wavelet)
        def wavedec2(A, levels=np.inf):
            levels = min(max_level, levels)
            # 2D separable transform: apply the matrix from both sides.
            coefs = Wgs[levels] * A * WgsT[levels]
            for l in range(levels-1, 0, -1):
                N = 1 << l
                L = max_level-l
                coefs[:N,:N] = Ws[L] * coefs[:N,:N] * WsT[L]
            return np.asarray(coefs)
        def waverec2(coefs, levels=np.inf):
            # `levels` is inferred from the coefficient shape; the parameter
            # is currently ignored (note the immediate reassignment below).
            levels = int(np.log2(coefs.shape[0]))
            A = coefs.copy()
            for l in range(1, levels):
                N = 1 << l
                L = max_level-l
                A[:N,:N] = WsT[L] * A[:N,:N] * Ws[L]
            return np.asarray(WgsT[levels] * A * Wgs[levels])
        return wavedec2, waverec2
    else:
        raise ValueError("Shape must be either integer or tuple of size two")
# CACHED 1-D
################################################################################
# Memoized forward/inverse transform closures from daubechies_factory,
# keyed by (signal length, wavelet name). Populated lazily by wavedec/waverec.
_db_wavedec_cache = {}
_db_waverec_cache = {}
def wavedec(A, wavelet='db2', levels=np.inf, length=None):
    """
    Performs a 1D wavelet decomposition (forward transform).
    .. note::
        The transform pair from :func:`daubechies_factory` is created lazily
        and memoized per (length, wavelet) key, so the first call for a given
        key is slower than subsequent ones (which only pay a dict lookup).
    .. seealso::
        :ref:`wavelet1d`
    Parameters
    ----------
    A : ndarray
        1D input data. Length must be a power of two.
    wavelet : str
        Wavelet type. See :func:`daubechies_factory`.
    levels : int
        How many coefficient levels to compute. Defaults to ``np.inf``, which
        means the maximum possible, making the coefficient array the same
        length as `A`. `levels` is zero-based: 0 is valid and operates only on
        the energy-level coefficient.
    length : int, optional
        Overrides ``len(A)`` as the factory/cache key.
    """
    key = (length or len(A), wavelet)
    dec = _db_wavedec_cache.get(key)
    if dec is None:
        dec, rec = daubechies_factory(*key)
        _db_wavedec_cache[key] = dec
        _db_waverec_cache[key] = rec
    return dec(A, levels)
def waverec(coefs, wavelet='db2', length=None):
    """
    Performs a 1D wavelet reconstruction (inverse transform).
    Unlike :func:`wavedec`, no `levels` argument is needed; it is inferred
    from the shape of `coefs`.
    .. note::
        The transform pair from :func:`daubechies_factory` is created lazily
        and memoized per (length, wavelet) key, so the first call for a given
        key is slower than subsequent ones (which only pay a dict lookup).
    .. seealso::
        :ref:`wavelet1d`
    Parameters
    ----------
    coefs : ndarray
        1D coefficient data. Length must be a power of two.
    wavelet : str
        Wavelet type. See :func:`daubechies_factory`.
    length : int, optional
        Overrides ``len(coefs)`` as the factory/cache key.
    """
    key = (length or len(coefs), wavelet)
    rec = _db_waverec_cache.get(key)
    if rec is None:
        dec, rec = daubechies_factory(*key)
        _db_wavedec_cache[key] = dec
        _db_waverec_cache[key] = rec
    return rec(coefs)
# CACHED 2-D
################################################################################
# Memoized forward/inverse transform closures from daubechies_factory,
# keyed by (array shape, wavelet name). Populated lazily by wavedec2/waverec2.
_db_wavedec2_cache = {}
_db_waverec2_cache = {}
def wavedec2(A, wavelet='db2', levels=np.inf, shape=None):
    """
    Performs a 2D wavelet decomposition (forward transform).
    .. note::
        The transform pair from :func:`daubechies_factory` is created lazily
        and memoized per (shape, wavelet) key, so the first call for a given
        key is slower than subsequent ones (which only pay a dict lookup).
    .. seealso::
        :ref:`wavelet2d`
    Parameters
    ----------
    A : ndarray
        2D input data. Shape must be powers of two and square.
    wavelet : str
        Wavelet type. See :func:`daubechies_factory`.
    levels : int
        How many coefficient levels to compute. Defaults to ``np.inf``, which
        means the maximum possible, making the coefficient array the same size
        as `A`. `levels` is zero-based: 0 is valid and operates only on the
        energy-level coefficient.
    shape : tuple, optional
        Overrides ``A.shape`` as the factory/cache key.
    """
    key = (shape or A.shape, wavelet)
    dec = _db_wavedec2_cache.get(key)
    if dec is None:
        dec, rec = daubechies_factory(*key)
        _db_wavedec2_cache[key] = dec
        _db_waverec2_cache[key] = rec
    return dec(A, levels)
def waverec2(coefs, wavelet='db2', shape=None):
    """
    Performs a 2D wavelet reconstruction (inverse transform).
    Unlike :func:`wavedec2`, no `levels` argument is needed; it is inferred
    from the shape of `coefs`.
    .. note::
        The transform pair from :func:`daubechies_factory` is created lazily
        and memoized per (shape, wavelet) key, so the first call for a given
        key is slower than subsequent ones (which only pay a dict lookup).
    .. seealso::
        :ref:`wavelet2d`
    Parameters
    ----------
    coefs : ndarray
        2D coefficient data. Shape must be powers of two and square.
    wavelet : str
        Wavelet type. See :func:`daubechies_factory`.
    shape : tuple, optional
        Overrides ``coefs.shape`` as the factory/cache key.
    """
    key = (shape or coefs.shape, wavelet)
    rec = _db_waverec2_cache.get(key)
    if rec is None:
        dec, rec = daubechies_factory(*key)
        _db_wavedec2_cache[key] = dec
        _db_waverec2_cache[key] = rec
    return rec(coefs)
# HELPER FUNCTIONS
################################################################################
def smart_flatten(coefficients):
"""
This flattens 2D coefficients in a smart way, so that all coefficients levels are grouped into contiguous blocks, starting from the low-frequency coefficients going to the high-frequency ones.
Notice that 1D coefficients are already flat and sorted.
Parameters
----------
coefficients : ndarray
Wavelet coefficients returned by :func:`wavedec2`.
"""
assert coefficients.shape == (8, 8), "TODO: Has not been generalized, only works with shape (8, 8), not {0}".format(coefficients.shape)
olds = np.zeros(64)
olds[0] = coefficients[0,0]
olds[1] = coefficients[1,0]
olds[2] = coefficients[0,1]
olds[3] = coefficients[1,1]
olds[4:8] = coefficients[2:4,0:2].flatten()
olds[8:12] = coefficients[0:2,2:4].flatten()
olds[12:16] = coefficients[2:4,2:4].flatten()
olds[16:32] = coefficients[4:8,0:4].flatten()
olds[32:48] = coefficients[0:4,4:8].flatten()
olds[48:64] = coefficients[4:8,4:8].flatten()
return olds
def smart_deflatten(flatcoefs):
    """
    Inverse function of :func:`smart_flatten`.

    Parameters
    ----------
    flatcoefs : ndarray
        Flat array of coefficients returned by :func:`smart_flatten`.
    """
    # Side length of the square coefficient array.
    N = int(np.sqrt(len(flatcoefs)))
    A = np.arange(N*N, dtype=int).reshape(N, N)
    # new2old (defined elsewhere in this module) presumably maps the 2D
    # layout to the flat ordering -- TODO confirm; invert that permutation
    # to recover the 2D positions.
    indices = new2old(A).astype(int)
    new_indices = np.empty(indices.shape, dtype=int)
    for i, index in enumerate(indices):
        new_indices[index] = i
    # BUG FIX: reshape to the computed (N, N) instead of the hard-coded
    # (8, 8). Identical for the only currently supported 64-element input,
    # but consistent with N and correct if other sizes become supported.
    return flatcoefs[new_indices].reshape(N, N).copy()
def structured_to_contiguous(structured_coefs):
    """
    Converts a structured list-of-tuples-of-arrays-of-coefficients into one
    contiguous block. The input format follows
    `PyWavelets <http://www.pybytes.com/pywavelets/>`_.
    Works for both 1D and 2D coefficients.
    Parameters
    ----------
    structured_coefs : list
        List of coefficients.
    """
    N = 1 << (len(structured_coefs)-1)
    if structured_coefs[0].ndim == 2:
        u = np.zeros((N, N))
        # Scaling coefficient goes into the top-left corner.
        u[0, 0] = float(structured_coefs[0])
        for level in range(1, len(structured_coefs)):
            horiz, vert, diag = structured_coefs[level]
            S = len(horiz)
            u[S:2*S, :S] = horiz
            u[:S, S:2*S] = vert
            u[S:2*S, S:2*S] = diag
        return u
    u = np.zeros(N)
    u[0] = float(structured_coefs[0])
    for level in range(1, len(structured_coefs)):
        c = structured_coefs[level]
        S = len(c)
        u[S:2*S] = c
    return u
def contiguous_to_structured(contiguous_coefs, levels=np.inf):
    """
    Convert from contiguous array to a structured format (identical to the one used in PyWavelets).
    Works for both 1D and 2D coefficients.
    Parameters
    ----------
    contiguous_coefs : ndarray
        Coefficients as returned by our wavelet functions.
    levels : int, optional
        If you don't want all levels, you can set this value to specify how many you want. Notice that this
        refers to levels of `wavelet` coefficients, which means that the scaling coefficient is not included and
        will always be returned, even if `levels` is set to zero.
    """
    # Removed the unused local alias of contiguous_coefs that the original
    # assigned here.
    in2d = contiguous_coefs.ndim == 2
    N = int(np.log2(len(contiguous_coefs)))
    coefs = []
    if in2d:
        # Scaling coefficient, kept as a (1, 1) array.
        coefs.append(contiguous_coefs[:1, :1])
        for level in range(min(levels, N)):
            S = 1 << level
            # (horizontal, vertical, diagonal) detail blocks for this level.
            coefs.append((contiguous_coefs[S:2*S, :S],
                          contiguous_coefs[:S, S:2*S],
                          contiguous_coefs[S:2*S, S:2*S]))
    else:
        coefs.append(contiguous_coefs[:1])
        for level in range(min(levels, N)):
            S = 1 << level
            coefs.append(contiguous_coefs[S:2*S])
    return coefs
| {
"repo_name": "amitgroup/amitgroup",
"path": "amitgroup/util/wavelet/periodic_daubechies.py",
"copies": "1",
"size": "25304",
"license": "bsd-3-clause",
"hash": -1045014297493018100,
"line_mean": 54.8587196468,
"line_max": 957,
"alpha_frac": 0.6888634208,
"autogenerated": false,
"ratio": 2.728488246711236,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3917351667511236,
"avg_score": null,
"num_lines": null
} |
"""Aimsun dummy server.
This script creates a dummy server mimicking the functionality in the Aimsun
runner script. Used for testing purposes.
"""
from thread import start_new_thread
import socket
import struct
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import flow.utils.aimsun.constants as ac # noqa
# Port the dummy server listens on.
PORT = 9999
# Mutable module-level state served (and then cleared) by threaded_client.
entered_vehicles = [1, 2, 3, 4, 5]
exited_vehicles = [6, 7, 8, 9, 10]
tl_ids = [1, 2, 3, 4, 5]
def send_message(conn, in_format, values):
    """Send a message to the client.
    If the message is a string, it is sent in segments of length 256 (if the
    string is longer than such) and concatenated on the client end. After each
    segment the server waits for any reply from the client, then sends a
    packed int flag: 1 = more segments follow, 0 = done.
    NOTE(review): this module targets Python 2 (see ``from thread import
    start_new_thread`` at the top); raw str values are written to the socket
    here and would need explicit encoding under Python 3 -- confirm before
    porting.
    Parameters
    ----------
    conn : socket.socket
        socket for server connection
    in_format : str
        format of the input structure
    values : tuple of Any
        commands to be encoded and issued to the client
    """
    if in_format == 'str':
        # flag packer for the 1/0 "more data follows" signal
        packer = struct.Struct(format='i')
        values = values[0]
        # when the message is too large, send value in segments and inform the
        # client that additional information will be sent. The value will be
        # concatenated on the other end
        while len(values) > 256:
            # send the next set of data
            conn.send(values[:256])
            values = values[256:]
            # wait for a reply (content ignored; used purely for pacing)
            data = None
            while data is None:
                data = conn.recv(2048)
            # send a not-done signal
            packed_data = packer.pack(*(1,))
            conn.send(packed_data)
        # send the remaining components of the message (which is of length less
        # than or equal to 256)
        conn.send(values)
        # wait for a reply
        data = None
        while data is None:
            data = conn.recv(2048)
        # send a done signal
        packed_data = packer.pack(*(0,))
        conn.send(packed_data)
    else:
        # fixed-format message: pack the whole tuple in one struct and send it
        packer = struct.Struct(format=in_format)
        packed_data = packer.pack(*values)
        conn.send(packed_data)
def retrieve_message(conn, out_format):
    """Retrieve a message from the client.

    Parameters
    ----------
    conn : socket.socket
        socket for server connection
    out_format : str or None
        format of the output structure

    Returns
    -------
    Any
        received message, as the tuple produced by ``struct.unpack``
    """
    unpacker = struct.Struct(format=out_format)
    # Read exactly as many bytes as the format requires and decode them.
    # (The original wrapped this in a no-op ``try/finally: pass``, which had
    # no effect and was removed; exceptions propagate exactly as before.)
    data = conn.recv(unpacker.size)
    return unpacker.unpack(data)
def threaded_client(conn):
    """Create a dummy threaded process.
    For testing purposes: answers a small subset of the Aimsun command ids
    from ``flow.utils.aimsun.constants`` with canned data.
    Parameters
    ----------
    conn : socket.socket
        socket for server connection
    """
    # send feedback that the connection is active
    conn.send(b'Ready.')
    # NOTE(review): `done` is never set to True; the loop runs until recv
    # fails or the thread is killed.
    done = False
    while not done:
        # receive the next message
        data = conn.recv(256)
        if data is not None:
            # if the message is empty, search for the next message
            # NOTE(review): under Python 3 recv returns bytes, so this
            # comparison with '' would never be true; this file targets
            # Python 2 (see `from thread import start_new_thread` above).
            if data == '':
                continue
            # convert to integer (the client sends the command id as text)
            data = int(data)
            if data == ac.VEH_GET_ENTERED_IDS:
                # acknowledge the command, wait for the client's go-ahead,
                # then reply with a ':'-joined id list ('-1' when empty)
                send_message(conn, in_format='i', values=(0,))
                data = None
                while data is None:
                    data = conn.recv(256)
                global entered_vehicles
                if len(entered_vehicles) == 0:
                    output = '-1'
                else:
                    output = ':'.join([str(e) for e in entered_vehicles])
                send_message(conn, in_format='str', values=(output,))
                # ids are reported only once, then cleared
                entered_vehicles = []
            elif data == ac.VEH_GET_EXITED_IDS:
                send_message(conn, in_format='i', values=(0,))
                data = None
                while data is None:
                    data = conn.recv(256)
                global exited_vehicles
                if len(exited_vehicles) == 0:
                    output = '-1'
                else:
                    output = ':'.join([str(e) for e in exited_vehicles])
                send_message(conn, in_format='str', values=(output,))
                exited_vehicles = []
            elif data == ac.VEH_GET_STATIC:
                # acknowledge, read the vehicle id, then send a canned
                # static-info struct
                send_message(conn, in_format='i', values=(0,))
                retrieve_message(conn, 'i')
                output = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
                          16, False, 18, 19, 20, 21, 22, 23, 24, 25, 26)
                send_message(conn,
                             in_format='i i i f f f f f f f f f f i i i ? '
                                       'f f f f f i i i i',
                             values=output)
            elif data == ac.VEH_GET_TRACKING:
                # the client first sends a bitmap of requested fields; its
                # content is ignored by this dummy implementation
                send_message(conn, in_format='i', values=(0,))
                info_bitmap = None
                while info_bitmap is None:
                    info_bitmap = conn.recv(2048)
                output = (4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 17, 18, 19, 20, 21,
                          22, 23, 24, 25, 26, 27)
                send_message(conn,
                             in_format='f f f f f f f f f f f f f i i i i i i '
                                       'i i',
                             values=output)
            elif data == ac.TL_GET_IDS:
                send_message(conn, in_format='i', values=(0,))
                data = None
                while data is None:
                    data = conn.recv(256)
                global tl_ids
                if len(tl_ids) == 0:
                    output = '-1'
                else:
                    output = ':'.join([str(e) for e in tl_ids])
                send_message(conn, in_format='str', values=(output,))
                tl_ids = []
            # in case the message is unknown, return -1001
            else:
                send_message(conn, in_format='i', values=(-1001,))
# Accept loop: one fresh listening socket per accepted connection, each
# connection handled in its own thread.
# NOTE(review): a new socket is created and bound to the same port on every
# iteration while earlier sockets are still alive; a second pass would
# normally fail with EADDRINUSE -- confirm whether more than one connection
# is ever expected from this dummy server.
while True:
    # tcp/ip connection from the aimsun process
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind(('localhost', PORT))
    # connect to the Flow instance
    server_socket.listen(10)
    c, address = server_socket.accept()
    # start the threaded process
    start_new_thread(threaded_client, (c,))
| {
"repo_name": "cathywu/flow",
"path": "tests/dummy_server.py",
"copies": "1",
"size": "6442",
"license": "mit",
"hash": -3998859465154709000,
"line_mean": 30.8910891089,
"line_max": 79,
"alpha_frac": 0.5158335921,
"autogenerated": false,
"ratio": 4.0901587301587305,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.510599232225873,
"avg_score": null,
"num_lines": null
} |
# a_input = input('Please input a number:')
# print('This number is',a_input)
# a_input=int(input('please input a number:'))
# if a_input==1:
# print('This is a good one')
# elif a_input==2:
# print('see u next time')
# else:
# print('good luck')
# myScore = int(input("Please input your score:"))
# if myScore >=90:
# print('A')
# elif 80<myScore<90:
# print('B')
# elif 70<myScore<80:
# print('C')
# else:
# print('D')
#
# a_tuple = (12, 3, 5, 15 , 6)
# another_tuple = 12, 3, 5, 15 , 6
# print(a_tuple)
# a = [1,2,3,4,1,1,-1]
# print(a.index(2))
# print(a[2])
# a = [1,2,3,4,1,3,4,5,6,1,-1]
# print(a.count(1))
# # print(a.count(a[]))
# re=set(a)
# print(re)
# for i in re:
# print(i,'出现次数:',a.count(i))
# a_list = [a.count(i)]
# a={1, 2, 3, 4, 5, 6, -1}
# print(a[0])
# a_list = [1,2,3,4,5,6,7,8]
# d1 = {'apple':1, 'pear':2, 'orange':3}
# d2 = {1:'a', 2:'b', 3:'c'}
# d3 = {1:'a', 'b':2, 'c':3}
#
# print(d1['apple']) # 1
# print(a_list[0]) # 1
#
# del d1['pear'] #删除字典中的元素
# print(d1) # {'orange': 3, 'apple': 1}
#
# d1['b'] = 20 #在字典中插入元素
# print(d1) # {'orange': 3, 'b': 20, 'pear': 2, 'apple': 1}
# def func():
# return 0
#
# d4 = {'apple':[1,2,3], 'pear':{1:3, 3:'a'}, 'orange':func}
# print(d4['pear'][3]) # a
# print(d4['orange'][func()]) # a
# lists = [5,3,2]
# count = len(lists)
# for i in range(0, count):
# for j in range(i + 1, count):
# if lists[i] > lists[j]:
# lists[i], lists[j] = lists[j], lists[i]
# # return lists
# print(lists)
# print(i)
# print(j)
# print(lists)
# Bubble sort: repeatedly swap adjacent out-of-order pairs; after each pass
# the largest remaining element has bubbled into its final position.
lists = [6, 3, 2, 7, 5, 9, 8, 1, 4]
size = len(lists)
for done in range(size - 1):
    # Elements beyond size-1-done are already sorted.
    for pos in range(size - 1 - done):
        if lists[pos] > lists[pos + 1]:
            lists[pos], lists[pos + 1] = lists[pos + 1], lists[pos]
print(lists)
| {
"repo_name": "1065865483/0python_script",
"path": "Python/003.py",
"copies": "1",
"size": "1908",
"license": "mit",
"hash": 6627837142604277000,
"line_mean": 21.6585365854,
"line_max": 61,
"alpha_frac": 0.491926803,
"autogenerated": false,
"ratio": 2.118586088939567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3110512891939567,
"avg_score": null,
"num_lines": null
} |
# a = input("Type a number")
# b = input("type a number")
#
# a =int(a)
# b = int(b)
#
# try:
# print(a/b)
# except ZeroDivisionError:
# print("Do not enter that")
# try:
# a = input("Skriv ett heltal: \n")
# b = input("Skriv ett heltal: \n")
# a = int(a)
# b = int(b)
# print(a/b)
# except(ZeroDivisionError, ValueError):
# print("Invalid input")
# # 4.1
# def squared(x):
# return int(x) ** int(x)
# print(squared(input("Skriv ett tal: ")))
# # 4.2
# def printer(str):
# print(str)
# return
#
# print(printer(input("print this: ")))
# # 4.3
#
# def multiplier(x, y, z, a=1, b=1):
# return x * y * z * a * b
#
# x = 2
# y = 2
# z = 2
# print(multiplier(x,y,z,12))
# 4.4
#
# def divider(int):
# return int/2
#
# def multiplier(int):
# return int*4
# try:
# i = int(input("Skriv ett heltal: "))
# i = divider(i)
# i = multiplier(i)
# print(i)
# except(ValueError):
# print("Faulty input")
# 4.5
def floatMaker(strng):
    """Convert *strng* to a float.

    On invalid input, print a warning and return the original marker string
    instead of raising.
    """
    try:
        return float(strng)
    except(ValueError):
        print("Only nbrs")
        return "Fuck you"
print(floatMaker("2a"))
| {
"repo_name": "Frikeer/LearnPython",
"path": "exc4.py",
"copies": "1",
"size": "1156",
"license": "unlicense",
"hash": -7018978671122824000,
"line_mean": 15.2816901408,
"line_max": 42,
"alpha_frac": 0.5302768166,
"autogenerated": false,
"ratio": 2.4491525423728815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8476495086672412,
"avg_score": 0.0005868544600938967,
"num_lines": 71
} |
# Reads four 8-tooth gear states (one digit string per gear A-D), then n
# rotation commands "gear direction" from stdin, simulates the rotations and
# prints a score computed from each gear's tooth at offset 0.
A = [int(i) for i in input()]
B = [int(i) for i in input()]
C = [int(i) for i in input()]
D = [int(i) for i in input()]
n = int(input())
W = []
T = []
for _ in range(n):
    w, t = map(int, input().split())
    W.append(w)
    T.append(t)
# Current rotation offset of each gear (index into its 8 teeth).
index_list = [0] * 4
a, b, c, d = index_list
# Offset updates for the two rotation parities (alternating directions for
# adjacent gears).
cal_case_1 = [lambda x: x - 1, lambda x: x + 1, lambda x: x - 1, lambda x: x + 1]
cal_case_2 = [lambda x: x + 1, lambda x: x - 1, lambda x: x + 1, lambda x: x - 1]
for w, t in zip(W, T):
    # BUG FIX: the original line was missing its closing bracket (a syntax
    # error), and `a+2%8` parses as `a + (2 % 8)`, which can index out of
    # range once the offsets grow past the array length; `(a+2) % 8` is used
    # instead (identical whenever the unwrapped index was in range, since
    # Python's negative indexing already wrapped the low side).
    bool_list = [A[(a + 2) % 8] != B[(b + 6) % 8],
                 B[(b + 2) % 8] != C[(c + 6) % 8],
                 C[(c + 2) % 8] != D[(d + 6) % 8]]
    # G[j] is a group id per gear: adjacent gears whose comparison above is
    # True share a group -- presumably the gears that turn together when one
    # of them is rotated; TODO confirm against the original puzzle spec.
    G = [0]
    i = 0
    for bo in bool_list:
        if not bo:
            i += 1
        G.append(i)
    # Parity of (gear, direction) selects which alternating update to apply
    # to every gear in the rotated gear's group.
    if w * t in [1, -2, 3, -4]:
        for j, g in enumerate(G):
            if g == G[w - 1]:
                index_list[j] = cal_case_1[j](index_list[j])
    else:
        for j, g in enumerate(G):
            if g == G[w - 1]:
                index_list[j] = cal_case_2[j](index_list[j])
    a, b, c, d = index_list
print(A[a % 8] + B[b % 8] * 2 + C[c % 8] * 4 + D[d % 8] * 8)
| {
"repo_name": "TeamLab/lab_study_group",
"path": "2018/CodingInterview/3주차/index.py",
"copies": "2",
"size": "1032",
"license": "mit",
"hash": 3089146759025189000,
"line_mean": 28.4857142857,
"line_max": 81,
"alpha_frac": 0.4525193798,
"autogenerated": false,
"ratio": 2.2483660130718954,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37008853928718954,
"avg_score": null,
"num_lines": null
} |
# Substitution-cipher decoder (Python 2: uses raw_input; commented-out debug
# lines use the py2 print statement). For each of `a` test cases it reads a
# keyword `arr` and a ciphertext `array`, builds a keyed alphabet from the
# keyword, and writes the decoded text to stdout.
# NOTE(review): the local name `list` shadows the builtin throughout; left
# as found since this is a byte-identical documentation pass.
a=int(raw_input())
for i in range(0,a):
    arr=raw_input()    # cipher keyword
    array=raw_input()  # ciphertext to decode
    newstr=[]
    c=1                # 1 while the keyword has no repeated letters
    ar=[]
    for i in range(0,len(arr)):
        ar.append(arr[i])
    #print ar
    # Mark every repeated occurrence of a keyword letter with 'a' so it can
    # be dropped below ('a' works as a sentinel because keyword letters are
    # uppercase).
    for j in range(0,len(arr)-1):
        for k in range(j+1,len(arr)):
            if(arr[j]==arr[k]):
                c=0
                #ar.remove(arr[j])
                #newstr=ar
                ar[k]='a'
    for i in range(0,len(arr)):
        if(ar[i]!='a'):
            newstr.append(ar[i])
    #print newstr
    # Build `list`: the deduplicated keyword followed by the remaining
    # letters of the alphabet, and keep the keyword itself in `yes`.
    if(c==1):
        #for i in range(0,len(arr)):
        yes=[]
        list=[]
        for j in range(0,len(arr)):
            list.append(arr[j])
        for k in range(ord('A'),ord('Z')+1):
            if(chr(k) not in arr):
                list.append(chr(k))
        #print list
        yes=arr
    else:
        yes=[]
        list=[]
        for j in range(0,len(newstr)):
            list.append(newstr[j])
        for k in range(ord('A'),ord('Z')+1):
            if(chr(k) not in newstr):
                list.append(chr(k))
        #print list
        yes=newstr
    key=[]
    for i in range(0,len(array)):
        key.append(array[i])
    beta=[]
    gamma=[]
    gamma=sorted(yes)   # keyword letters in alphabetical order
    delta=[]
    #print gamma
    #for i in range(0,len(yes)):
    #for j in range(0,len(list)/len(yes)+1):
    # delta[i]: position of the i-th sorted keyword letter inside `list`.
    for i in range(0,len(yes)):
        for j in range(0,len(yes)):
            if(gamma[i]==list[j]):
                delta.append(j)
                break
    #print delta
    # pi: the keyed alphabet read column-wise, stepping len(yes) at a time
    # from each delta offset -- presumably the columnar-transposition order
    # of the cipher table; TODO confirm against the intended cipher.
    pi=[]
    for i in range(0,len(yes)):
        k=0
        while(delta[i]+len(yes)*k <26):
            pi.append(list[delta[i]+len(yes)*k])
            k=k+1
    #print pi
    # pin: same as pi but with a ' ' separator after each column (debug aid;
    # not used below).
    pin=[]
    for i in range(0,len(yes)):
        k=0
        while(delta[i]+len(yes)*k <26):
            pin.append(list[delta[i]+len(yes)*k])
            k=k+1
        pin.append(' ')
    #print pin
    #print key
    # cou: lengths of the space-separated words of the ciphertext.
    count=0
    cou=[]
    for i in range(0,len(array)):
        if(array[i]==' '):
            cou.append(count)
            count=0
        else:
            count=count+1
    #print cou
    import sys
    co=0
    final=[]
    # coun: cumulative word lengths, i.e. positions where spaces go back in.
    coun=[]
    ci=0
    for i in range(0,len(cou)):
        ci=ci+cou[i]
        coun.append(ci)
    #for i in range(0,len(coun)):
    #    print coun[i]
    i=0
    #print len(coun)
    # Decode: each ciphertext letter's position in pi maps back to a plain
    # alphabet letter.
    for k in range(0,len(key)):
        #print coun[i]
        for j in range(0,len(pi)):
            if(key[k]==pi[j]):
                final.append(chr(ord('A')+j))
                #sys.stdout.write(chr(ord('A')+j))
                break
        #if(key[k]==pi[j] and k==coun[i]):
        #print co
        #final.append(' ')
        #final.append(chr(ord('A')+j))
        #break
        #print k,coun[i]
        #if(k==coun[i] and i<len(cou)):
        #    i=i+1
    #print final
    # Re-insert the spaces at the recorded word boundaries.
    for i in range(0,len(coun)):
        final.insert(coun[i]+i,' ')
        #print coun[i]
    for j in range(0,len(final)):
        sys.stdout.write(final[j])
    sys.stdout.write('\n')
    #for i in range(0,len(final)):
    #newfinal=[]
    #for k in range(0,len(final)):
    #    if(k==coun[i]):
    #        i=i+1
    #        print i
    #    else:
    #        newfinal=final[:k] + final[k:]
    #print newfinal
| {
"repo_name": "shreyakupadhyay/pythonbite",
"path": "enigmum/Cryptography.py",
"copies": "1",
"size": "3265",
"license": "mit",
"hash": 436113134447901300,
"line_mean": 24.5078125,
"line_max": 53,
"alpha_frac": 0.4355283308,
"autogenerated": false,
"ratio": 3.1454720616570326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40810003924570326,
"avg_score": null,
"num_lines": null
} |
""" Aioauth-client example. """
import asyncio
from aiohttp import web
import html
from aioauth_client import (
FacebookClient,
GithubClient,
GoogleClient,
OAuth1Client,
TwitterClient,
VKClient,
YandexClient,
)
app = web.Application()
# Registry of supported OAuth providers.  Each key is the provider name used
# in the /oauth/{provider} URL; 'class' is the aioauth-client class and
# 'init' holds the keyword arguments passed to its constructor.
# NOTE(review): these credentials ship with the public example; replace them
# before any real deployment.
clients = {
    'twitter': {
        'class': TwitterClient,
        'init': {
            # Twitter is OAuth1, so it takes consumer key/secret.
            'consumer_key': 'oUXo1M7q1rlsPXm4ER3dWnMt8',
            'consumer_secret': 'YWzEvXZJO9PI6f9w2FtwUJenMvy9SPLrHOvnNkVkc5LdYjKKup',
        },
    },
    'github': {
        'class': GithubClient,
        'init': {
            'client_id': 'b6281b6fe88fa4c313e6',
            'client_secret': '21ff23d9f1cad775daee6a38d230e1ee05b04f7c',
        },
    },
    'google': {
        'class': GoogleClient,
        'init': {
            'client_id': '150775235058-9fmas709maee5nn053knv1heov12sh4n.apps.googleusercontent.com',  # noqa
            'client_secret': 'df3JwpfRf8RIBz-9avNW8Gx7',
            # Extra authorize-URL parameters forwarded by the oauth() view.
            'params': {'scope': 'email profile'},
        },
    },
    'yandex': {
        'class': YandexClient,
        'init': {
            'client_id': 'e19388a76a824b3385f38beec67f98f1',
            'client_secret': '1d2e6fdcc23b45849def6a34b43ac2d8',
        },
    },
    'vk': {
        'class': VKClient,
        'init': {
            'client_id': '5038699',
            'client_secret': 'WgKadvY82wlnleOAyw6T',
            'params': {'scope': 'offline,email'}
        },
    },
    'facebook': {
        'class': FacebookClient,
        'init': {
            'client_id': '384739235070641',
            'client_secret': '8e3374a4e1e91a2bd5b830a46208c15a',
            'params': {'scope': 'email'}
        },
    },
}
@asyncio.coroutine
def index(request):
    """Render the landing page linking to each configured provider."""
    return web.Response(text="""
    <ul>
        <li><a href="/oauth/facebook">Login with Facebook</a></li>
        <li><a href="/oauth/github">Login with Github</a></li>
        <li><a href="/oauth/google">Login with Google</a></li>
        <li><a href="/oauth/twitter">Login with Twitter</a></li>
        <li><a href="/oauth/vk">Login with VK</a></li>
    </ul>
    """, content_type="text/html")
# Simple Github (OAuth2) example (not connected to app)
@asyncio.coroutine
def github(request):
    """Stand-alone Github OAuth2 flow: redirect out, then show the profile."""
    client = GithubClient(
        client_id='b6281b6fe88fa4c313e6',
        client_secret='21ff23d9f1cad775daee6a38d230e1ee05b04f7c',
    )
    # First visit has no ?code= yet: send the user to Github's consent page.
    if 'code' not in request.GET:
        return web.HTTPFound(client.get_authorize_url(scope='user:email'))
    # Redirected back: exchange the authorization code for an access token.
    token = yield from client.get_access_token(request.GET['code'])
    assert token
    # Fetch the `https://api.github.com/user` resource with the new token.
    resp = yield from client.request('GET', 'user')
    body = yield from resp.read()
    return web.Response(body=body, content_type='application/json')
@asyncio.coroutine
def oauth(request):
    """Run the OAuth dance for the provider named in the URL.

    The first request redirects the user to the provider; the provider
    then redirects back here, and the code/token in the query string is
    exchanged for an access token used to fetch the user's profile.
    """
    provider = request.match_info.get('provider')
    if provider not in clients:
        raise web.HTTPNotFound(reason='Unknown provider')

    # Create OAuth1/2 client
    Client = clients[provider]['class']
    client = Client(
        redirect_uri='http://%s%s' % (request.host, request.path), **clients[provider]['init'])

    # Check if is not redirect from provider
    if client.shared_key not in request.GET:
        # For oauth1 we need more work
        if isinstance(client, OAuth1Client):
            token, secret = yield from client.get_request_token()

            # Dirty save a token_secret
            # Dont do it in production
            request.app.secret = secret
            request.app.token = token

        # Redirect client to provider
        params = clients[provider].get('params', {})
        return web.HTTPFound(client.get_authorize_url(**params))

    # For oauth1 we need more work
    if isinstance(client, OAuth1Client):
        # Restore the request token/secret stashed before the redirect.
        client.oauth_token_secret = request.app.secret
        client.oauth_token = request.app.token

    yield from client.get_access_token(request.GET)
    user, info = yield from client.user_info()
    # %-format against the normalized User object's attribute dict;
    # NOTE(review): presumably attributes a provider doesn't supply are
    # None — verify against aioauth_client.User.
    text = (
        "<a href='/'>back</a><br/><br/>"
        "<ul>"
        "<li>ID: %(id)s</li>"
        "<li>Username: %(username)s</li>"
        "<li>First, last name: %(first_name)s, %(last_name)s</li>"
        "<li>Gender: %(gender)s</li>"
        "<li>Email: %(email)s</li>"
        "<li>Link: %(link)s</li>"
        "<li>Picture: %(picture)s</li>"
        "<li>Country, city: %(country)s, %(city)s</li>"
        "</ul>"
    ) % user.__dict__
    text += "<code>%s</code>" % html.escape(repr(info))
    return web.Response(text=text, content_type='text/html')
app.router.add_route('GET', '/', index)
app.router.add_route('GET', '/oauth/{provider}', oauth)

# Start the aiohttp server on localhost:5000 and serve until Ctrl-C.
loop = asyncio.get_event_loop()
f = loop.create_server(app.make_handler(), '127.0.0.1', 5000)
srv = loop.run_until_complete(f)
print('serving on', srv.sockets[0].getsockname())
try:
    loop.run_forever()
except KeyboardInterrupt:
    pass
# pylama:ignore=D
| {
"repo_name": "ei-grad/aioauth-client",
"path": "example/app.py",
"copies": "1",
"size": "4955",
"license": "mit",
"hash": -603581447197160400,
"line_mean": 29.0303030303,
"line_max": 107,
"alpha_frac": 0.5886982846,
"autogenerated": false,
"ratio": 3.2449246889325476,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9332670227114115,
"avg_score": 0.00019054928368653857,
"num_lines": 165
} |
"""Aioauth-client example.
Requirements:
aioauth-client
asgi-tools
uvicorn
Run the example with uvicorn:
$ uvicorn --port 5000 example.app:app
"""
from asgi_tools import App, ResponseRedirect
import html
from pprint import pformat
from .config import CREDENTIALS
from aioauth_client import ClientRegistry, OAuth1Client, GithubClient
app = App(debug=True)
@app.route('/')
async def index(request):
    """Render the landing page linking to each supported provider."""
    return """
    <link rel="stylesheet"
        href="https://stackpath.bootstrapcdn.com/bootstrap/4.5.2/css/bootstrap.min.css" />
    <div class="container">
        <header class="navbar navbar-dark" style="background-color: #7952b3">
            <h2 class="navbar-brand">AIOAuth Client Example</h2>
        </header>
        <ul class="nav flex-column mt-5">
            <li class="nav-item">
                <a class="nav-link" href="/oauth/bitbucket">Login with Bitbucket</a></li>
            <li class="nav-item">
                <a class="nav-link" href="/oauth/facebook">Login with Facebook</a></li>
            <li class="nav-item">
                <a class="nav-link" href="/oauth/github">Login with Github</a></li>
            <li class="nav-item">
                <a class="nav-link" href="/oauth/google">Login with Google</a></li>
            <li class="nav-item">
                <a class="nav-link" href="/oauth/twitter">Login with Twitter</a></li>
        </ul>
    </div>
    """
@app.route('/oauth/{provider}')
async def oauth(request):
    """Run the OAuth dance for the provider named in the URL path."""
    provider = request.path_params.get('provider')
    if provider not in CREDENTIALS:
        return 404, 'Unknown provider %s' % provider

    # Create OAuth1/2 client
    Client = ClientRegistry.clients[provider]
    params = CREDENTIALS[provider]
    client = Client(**params)
    # OAuth1 carries the callback as 'oauth_callback'; OAuth2 as
    # 'redirect_uri'.  Either way it is this same URL, query stripped.
    client.params['oauth_callback' if issubclass(Client, OAuth1Client) else 'redirect_uri'] = \
        str(request.url.with_query(''))

    # Check if is not redirect from provider
    if client.shared_key not in request.url.query:
        # For oauth1 we need more work
        if isinstance(client, OAuth1Client):
            token, secret, _ = await client.get_request_token()

            # Dirty save a token_secret
            # Dont do it in production
            request.app.secret = secret
            request.app.token = token

        # Redirect client to provider
        return ResponseRedirect(client.get_authorize_url(access_type='offline'))

    # For oauth1 we need more work
    if isinstance(client, OAuth1Client):
        client.oauth_token_secret = request.app.secret
        client.oauth_token = request.app.token

    _, meta = await client.get_access_token(request.url.query)
    user, info = await client.user_info()
    # NOTE(review): the closing tag below reads `<div>` not `</div>` —
    # looks like a typo in the page markup; left unchanged here.
    text = f"""
    <link rel="stylesheet"
        href="https://stackpath.bootstrapcdn.com/bootstrap/4.5.2/css/bootstrap.min.css" />
    <div class="container">
        <header class="navbar navbar-dark" style="background-color: #7952b3">
            <h2 class="navbar-brand">AIOAuth Client Example ({ client.name })</h2>
        </header>
        <a class="btn btn-primary mt-4" href='/'>Return back</a>
        <table class="table mt-4">
            <tr><td> ID </td><td> { user.id } </td></tr>
            <tr><td> Username </td><td> { user.username } </td></tr>
            <tr><td>First, last name</td><td>{ user.first_name }, { user.last_name }</td></tr>
            <tr><td>Gender</td><td> { user.gender } </td></tr>
            <tr><td>Email</td><td> { user.email } </td></tr>
            <tr><td>Link</td><td> { user.link } </td></tr>
            <tr><td>Picture</td><td> { user.picture } </td></tr>
            <tr><td>Country, City</td><td> { user.country }, { user.city } </td></tr>
        </table>
        <h3 class="mt-4">Raw data</h3>
        <pre>{ html.escape(pformat(info)) }</pre>
        <pre>{ html.escape(pformat(meta)) }</pre>
    <div>
    """
    return text
# Simple Github (OAuth2) example (not connected to app)
async def github(request):
    """Stand-alone Github OAuth2 flow: redirect out, then return the raw profile."""
    client = GithubClient(
        client_id='b6281b6fe88fa4c313e6',
        client_secret='21ff23d9f1cad775daee6a38d230e1ee05b04f7c',
    )
    # First visit has no ?code= yet: send the user to Github's consent page.
    if 'code' not in request.url.query:
        return ResponseRedirect(client.get_authorize_url(scope='user:email'))
    # Redirected back: exchange the authorization code for an access token.
    token, _ = await client.get_access_token(request.url.query['code'])
    assert token
    # Fetch the `https://api.github.com/user` resource with the new token.
    resp = await client.request('GET', 'user')
    return await resp.read()
# pylama:ignore=D
| {
"repo_name": "klen/aioauth-client",
"path": "example/app.py",
"copies": "1",
"size": "4687",
"license": "mit",
"hash": -8679985485114157000,
"line_mean": 35.0538461538,
"line_max": 98,
"alpha_frac": 0.584168978,
"autogenerated": false,
"ratio": 3.566971080669711,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46511400586697105,
"avg_score": null,
"num_lines": null
} |
"""aiobfd: Asynchronous BFD Daemon"""
import argparse
import socket
import logging
import logging.handlers
import sys
import aiobfd
_LOG_LEVELS = ['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG']


def parse_arguments():
    """Build the CLI parser and return the parsed user arguments."""
    ap = argparse.ArgumentParser(
        description='Maintain a BFD session with a remote system')
    ap.add_argument('local', help='Local IP address or hostname')
    ap.add_argument('remote', help='Remote IP address or hostname')
    # -4/-6 are mutually exclusive; the default lets getaddrinfo decide.
    fam = ap.add_mutually_exclusive_group()
    fam.add_argument('-4', '--ipv4', action='store_const', dest='family',
                     default=socket.AF_UNSPEC, const=socket.AF_INET,
                     help='Force IPv4 connectivity')
    fam.add_argument('-6', '--ipv6', action='store_const', dest='family',
                     default=socket.AF_UNSPEC, const=socket.AF_INET6,
                     help='Force IPv6 connectivity')
    # Timer values are given in milliseconds on the command line.
    ap.add_argument('-r', '--rx-interval', default=1000, type=int,
                    help='Required minimum Rx interval (ms)')
    ap.add_argument('-t', '--tx-interval', default=1000, type=int,
                    help='Desired minimum Tx interval (ms)')
    ap.add_argument('-m', '--detect-mult', default=1, type=int,
                    help='Detection multiplier')
    ap.add_argument('-p', '--passive', action='store_true',
                    help='Take a passive role in session initialization')
    # Logging destinations: stdout, file and/or syslog.
    ap.add_argument('-l', '--log-level', default='WARNING',
                    help='Logging level', choices=_LOG_LEVELS)
    ap.add_argument('-o', '--no-log-to-stdout', action='store_true',
                    help='Disable logging to stdout; will be ignored if no'
                         ' other logging is selected.')
    ap.add_argument('-f', '--log-to-file', action='store_true',
                    help='Enable logging to a file on the filesystem')
    ap.add_argument('-n', '--log-file', default='/var/log/aiobfd.log',
                    help='Path on filesystem to log to, if enabled')
    ap.add_argument('-s', '--log-to-syslog', action='store_true',
                    help='Enable logging to a syslog handler')
    ap.add_argument('-y', '--log-sock', default='/dev/log',
                    help='Syslog socket to log to, if enabled')
    return ap.parse_args()
def main():
    """Run aiobfd: configure logging, then start the BFD control process."""
    args = parse_arguments()
    handlers = []
    # Log to stdout unless the user disabled it.  Per the --no-log-to-stdout
    # help text, the -o flag is ignored when no other log destination is
    # enabled — otherwise -o alone (or no flags at all) would leave logging
    # with no handler and messages would only reach logging's last-resort
    # stderr handler, silently ignoring --log-level.
    if not args.no_log_to_stdout or not (args.log_to_file or args.log_to_syslog):
        handlers.append(logging.StreamHandler(sys.stdout))
    if args.log_to_file:
        handlers.append(logging.handlers.WatchedFileHandler(args.log_file))
    if args.log_to_syslog:
        handlers.append(logging.handlers.SysLogHandler(args.log_sock))
    log_format = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
    logging.basicConfig(handlers=handlers, format=log_format,
                        level=logging.getLevelName(args.log_level))
    # CLI intervals are milliseconds; aiobfd.Control expects microseconds.
    control = aiobfd.Control(args.local, [args.remote], family=args.family,
                             passive=args.passive,
                             rx_interval=args.rx_interval*1000,
                             tx_interval=args.tx_interval*1000,
                             detect_mult=args.detect_mult)
    control.run()


if __name__ == '__main__':
    main()
| {
"repo_name": "netedgeplus/aiobfd",
"path": "aiobfd/__main__.py",
"copies": "1",
"size": "3533",
"license": "mit",
"hash": 8888595770207889000,
"line_mean": 46.1066666667,
"line_max": 79,
"alpha_frac": 0.5776960091,
"autogenerated": false,
"ratio": 4.1033681765389085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5181064185638908,
"avg_score": null,
"num_lines": null
} |
"""aiobfd: BFD Control Packet"""
# pylint: disable=I0011,E0632,R0902
import logging
import bitstring
log = logging.getLogger(__name__) # pylint: disable=I0011,C0103
MIN_PACKET_SIZE = 24
MIN_AUTH_PACKET_SIZE = 26
STATE_ADMIN_DOWN = 0 # AdminDown
STATE_DOWN = 1 # Down
'''
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|Vers | Diag |Sta|P|F|C|A|D|M| Detect Mult | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| My Discriminator |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Your Discriminator |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Desired Min TX Interval |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Required Min RX Interval |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Required Min Echo RX Interval |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
'''
PACKET_FORMAT = (
'uint:3=version,'
'uint:5=diag,'
'uint:2=state,'
'bool=poll,'
'bool=final,'
'bool=control_plane_independent,'
'bool=authentication_present,'
'bool=demand_mode,'
'bool=multipoint,'
'uint:8=detect_mult,'
'uint:8=length,'
'uint:32=my_discr,'
'uint:32=your_discr,'
'uint:32=desired_min_tx_interval,'
'uint:32=required_min_rx_interval,'
'uint:32=required_min_echo_rx_interval'
)
PACKET_DEBUG_MSG = '\n|--------------------------------------------------\n' \
'| Vers: %d Diag: %d State: %d Poll: %d Final: %d\n' \
'| CPI: %d Auth: %d Demand: %d Multi: %d DetectMult: %d\n' \
'| Length: %d MyDisc: %d YourDisc: %d\n' \
'| TxInterval: %d RxInterval: %d EchoRxInterval: %d\n' \
'|--------------------------------------------------'
class Packet:  # pylint: disable=I0011,R0903
    """A BFD Control Packet"""

    def __init__(self, data, source):
        """Decode raw datagram bytes and validate them.

        Raises IOError whenever the packet must be discarded per the
        checks in validate() or because it is too short to unpack.
        """
        self.source = source
        packet = bitstring.BitString(data)
        # BitString.len is in bits; convert to bytes for the size checks.
        packet_length = packet.len / 8

        # Ensure packet is sufficiently long to attempt unpacking it
        if packet_length < MIN_PACKET_SIZE:
            raise IOError('Packet size below mininum correct value.')

        # Unpack every header field in one pass; the target order must
        # match the field order declared in PACKET_FORMAT.
        self.version, self.diag, self.state, self.poll, self.final, \
            self.control_plane_independent, self.authentication_present,\
            self.demand_mode, self.multipoint, self.detect_mult, self.length, \
            self.my_discr, self.your_discr, self.desired_min_tx_interval, \
            self.required_min_rx_interval, self.required_min_echo_rx_interval \
            = packet.unpack(PACKET_FORMAT)

        log.debug(PACKET_DEBUG_MSG, self.version, self.diag, self.state,
                  self.poll, self.final, self.control_plane_independent,
                  self.authentication_present, self.demand_mode,
                  self.multipoint, self.detect_mult, self.length,
                  self.my_discr, self.your_discr, self.desired_min_tx_interval,
                  self.required_min_rx_interval,
                  self.required_min_echo_rx_interval)
        self.validate(packet_length)

    def validate(self, packet_length):
        """Validate received packet contents"""
        # If the version number is not correct (1), the packet MUST be
        # discarded.
        if self.version != 1:
            raise IOError('Unsupported BFD protcol version.')

        # If the Length field is less than the minimum correct value (24 if
        # the A bit is clear, or 26 if the A bit is set), the packet MUST be
        # discarded.
        if self.authentication_present and self.length < MIN_AUTH_PACKET_SIZE:
            raise IOError('Packet size below mininum correct value.')
        elif ((not self.authentication_present)
              and self.length < MIN_PACKET_SIZE):
            raise IOError('Packet size below mininum correct value.')

        # If the Length field is greater than the payload of the encapsulating
        # protocol, the packet MUST be discarded.
        if self.length > packet_length:
            raise IOError('Packet length field larger than received data.')

        # If the Multipoint (M) bit is nonzero, the packet MUST be discarded.
        if self.multipoint:
            raise IOError('Multipoint bit should be 0.')

        # If the My Discriminator field is zero, the packet MUST be discarded.
        if not self.my_discr:
            raise IOError('Discriminator field is zero.')

        # If the Your Discriminator field is zero and the State field is not
        # Down or AdminDown, the packet MUST be discarded.
        if self.state not in [STATE_DOWN, STATE_ADMIN_DOWN] \
                and (not self.your_discr):
            raise IOError('Your Discriminator can\'t be zero in this state.')
| {
"repo_name": "netedgeplus/aiobfd",
"path": "aiobfd/packet.py",
"copies": "1",
"size": "5197",
"license": "mit",
"hash": -593048696572567900,
"line_mean": 41.5983606557,
"line_max": 79,
"alpha_frac": 0.5135655186,
"autogenerated": false,
"ratio": 4.088906372934697,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 122
} |
"""aiobfd: BFD Control process"""
# pylint: disable=I0011,R0913
import asyncio
import logging
import socket
from .transport import Server
from .session import Session
from .packet import Packet
log = logging.getLogger(__name__) # pylint: disable=I0011,C0103
CONTROL_PORT = 3784
class Control:
    """BFD Control: owns the shared UDP server socket and the per-remote
    Session objects, and dispatches received packets to the right session."""

    def __init__(self, local, remotes, family=socket.AF_UNSPEC, passive=False,
                 tx_interval=1000000, rx_interval=1000000, detect_mult=3,
                 loop=None):
        """Create one Session per remote and bind the UDP server.

        Args:
            local: local IP address or hostname to bind to.
            remotes: iterable of remote addresses, one Session each.
            family: socket family; AF_UNSPEC lets the resolver decide.
            passive: take the passive role in session initialization.
            tx_interval, rx_interval: timer values in microseconds.
            detect_mult: detection multiplier.
            loop: asyncio event loop; defaults to the current loop.
        """
        # Resolve the loop default lazily.  Using asyncio.get_event_loop()
        # directly as the parameter default would evaluate it once at import
        # time, permanently binding whatever loop existed then for every
        # caller that omits the argument.
        self.loop = loop if loop is not None else asyncio.get_event_loop()
        self.rx_queue = asyncio.Queue()

        # Initialize client sessions
        self.sessions = list()
        for remote in remotes:
            log.debug('Creating BFD session for remote %s.', remote)
            self.sessions.append(
                Session(local, remote, family=family, passive=passive,
                        tx_interval=tx_interval, rx_interval=rx_interval,
                        detect_mult=detect_mult))

        # Initialize server
        log.debug('Setting up UDP server on %s:%s.', local, CONTROL_PORT)
        task = self.loop.create_datagram_endpoint(
            lambda: Server(self.rx_queue),
            local_addr=(local, CONTROL_PORT),
            family=family)
        self.server, _ = self.loop.run_until_complete(task)
        log.info('Accepting traffic on %s:%s.',
                 self.server.get_extra_info('sockname')[0],
                 self.server.get_extra_info('sockname')[1])

    async def rx_packets(self):
        """Consume the rx queue forever, dispatching each datagram."""
        log.debug('Control process ready to receive packets.')
        while True:
            packet, source = await self.rx_queue.get()
            log.debug('Received a new packet from %s.', source)
            self.process_packet(packet, source)
            self.rx_queue.task_done()

    def process_packet(self, data, source):
        """Parse one datagram and hand it to the session that owns it."""
        try:
            packet = Packet(data, source)
        except IOError as exc:
            log.info('Dropping packet: %s', exc)
            return

        # If the Your Discriminator field is nonzero, it MUST be used to
        # select the session with which this BFD packet is associated.  If
        # no session is found, the packet MUST be discarded.
        if packet.your_discr:
            for session in self.sessions:
                if session.local_discr == packet.your_discr:
                    session.rx_packet(packet)
                    return
        else:
            # If the Your Discriminator field is zero, the session MUST be
            # selected based on some combination of other fields ...
            for session in self.sessions:
                if session.remote == packet.source:
                    session.rx_packet(packet)
                    return

        # If a matching session is not found, a new session MAY be created,
        # or the packet MAY be discarded.  Note: We discard for now.
        log.info('Dropping packet from %s as it doesn\'t match any '
                 'configured remote.', packet.source)

    def run(self):
        """Start packet reception and run the event loop until interrupted."""
        asyncio.ensure_future(self.rx_packets())
        try:
            log.warning('BFD Daemon fully configured.')
            self.loop.run_forever()
        except KeyboardInterrupt:
            def shutdown_exception_handler(loop, context):  # pragma: no cover
                """Do not show `asyncio.CancelledError` exceptions"""
                if "exception" not in context or not \
                        isinstance(context["exception"], asyncio.CancelledError):
                    loop.default_exception_handler(context)
            self.loop.set_exception_handler(shutdown_exception_handler)
            log.info('Keyboard interrupt detected.')
            # Wait for all tasks to be cancelled.
            # NOTE(review): asyncio.Task.all_tasks() and the loop= argument
            # to gather() are removed in Python 3.10+ — confirm the intended
            # minimum Python version before upgrading.
            tasks = asyncio.gather(*asyncio.Task.all_tasks(loop=self.loop),
                                   loop=self.loop, return_exceptions=True)
            tasks.add_done_callback(lambda t: self.loop.stop())
            tasks.cancel()
            # Keep the event loop running until it is either destroyed or all
            # tasks have really terminated
            while not tasks.done() and not self.loop.is_closed():
                self.loop.run_forever()  # pragma: no cover
        finally:
            self.loop.close()
| {
"repo_name": "netedgeplus/aiobfd",
"path": "aiobfd/control.py",
"copies": "1",
"size": "4397",
"license": "mit",
"hash": -7752043058861960000,
"line_mean": 38.9727272727,
"line_max": 79,
"alpha_frac": 0.5856265636,
"autogenerated": false,
"ratio": 4.401401401401402,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5487027965001402,
"avg_score": null,
"num_lines": null
} |
"""aiobfd: BFD IPv4/IPv6 transport"""
import asyncio
import logging
log = logging.getLogger(__name__) # pylint: disable=I0011,C0103
class Client:
    """Protocol for the egress side of a BFD session.

    This socket only ever sends; any datagram that arrives on it is
    unexpected and merely logged.
    """

    def __init__(self):
        # Filled in by connection_made() once the endpoint is ready.
        self.transport = None

    def connection_made(self, transport):
        """Remember the transport so packets can be sent later."""
        self.transport = transport

    @staticmethod
    def datagram_received(data, addr):
        """Log unexpected inbound traffic on the source port."""
        log.info(('Unexpectedly received a packet on a BFD source port '
                  'from %s on port %d'), addr[0], addr[1])

    @staticmethod
    def error_received(exc):
        """Log socket-level errors."""
        log.error('Socket error received: %s', exc)
class Server:
    """Protocol for the ingress side: queues every received datagram."""

    def __init__(self, rx_queue):
        self.transport = None
        # Queue shared with the control process that consumes packets.
        self.rx_queue = rx_queue

    def connection_made(self, transport):
        """Remember the ready transport."""
        self.transport = transport

    def datagram_received(self, data, addr):
        """Hand the raw payload and source address to the rx queue."""
        asyncio.ensure_future(self.rx_queue.put((data, addr[0])))

    @staticmethod
    def error_received(exc):
        """Log socket-level errors."""
        log.error('Socket error received: %s', exc)
| {
"repo_name": "netedgeplus/aiobfd",
"path": "aiobfd/transport.py",
"copies": "1",
"size": "1275",
"license": "mit",
"hash": 638356305853112400,
"line_mean": 25.5625,
"line_max": 72,
"alpha_frac": 0.6094117647,
"autogenerated": false,
"ratio": 4.034810126582278,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5144221891282278,
"avg_score": null,
"num_lines": null
} |
"""aiobfd: BFD Session with an individual remote"""
# pylint: disable=I0011,R0902,R0913
# pylint: disable=I0011,E1101
# socket.IPPROTO_IPV6 missing on Windows
import asyncio
import random
import socket
import time
import logging
import bitstring
from .transport import Client
from .packet import PACKET_FORMAT, PACKET_DEBUG_MSG
log = logging.getLogger(__name__) # pylint: disable=I0011,C0103
# BFD control packets are sourced from this ephemeral port range.
SOURCE_PORT_MIN = 49152
SOURCE_PORT_MAX = 65535
# Destination port for BFD control packets.
CONTROL_PORT = 3784

VERSION = 1

# Diagnostic codes.
DIAG_NONE = 0                       # No Diagnostic
DIAG_CONTROL_DETECTION_EXPIRED = 1  # Control Detection Time Expired
DIAG_ECHO_FAILED = 2                # Echo Function Failed
DIAG_NEIGHBOR_SIGNAL_DOWN = 3       # Neighbor Signaled Session Down
DIAG_FORWARD_PLANE_RESET = 4        # Forwarding Plane Reset
DIAG_PATH_DOWN = 5                  # Path Down
DIAG_CONCAT_PATH_DOWN = 6           # Concatenated Path Down
DIAG_ADMIN_DOWN = 7                 # Administratively Down
DIAG_REV_CONCAT_PATH_DOWN = 8       # Reverse Concatenated Path Down

# Session state values.
STATE_ADMIN_DOWN = 0  # AdminDown
STATE_DOWN = 1        # Down
STATE_INIT = 2        # Init
STATE_UP = 3          # Up

CONTROL_PLANE_INDEPENDENT = False  # Control Plane Independent

# Default timers
DESIRED_MIN_TX_INTERVAL = 1000000  # Minimum initial value

# Keep these fields statically disabled as they're not implemented
AUTH_TYPE = None     # Authentication disabled
DEMAND_MODE = False  # Demand Mode
MULTIPOINT = False   # Multipoint
REQUIRED_MIN_ECHO_RX_INTERVAL = 0  # Do not support echo packet
class Session:
"""BFD session with a remote"""
    def __init__(self, local, remote, family=socket.AF_UNSPEC, passive=False,
                 tx_interval=1000000, rx_interval=1000000, detect_mult=3):
        """Create a BFD session and start transmitting towards `remote`.

        Interval arguments are in microseconds.  A passive session does
        not transmit until it has learned the remote discriminator (see
        async_tx_packets).
        """
        # Argument variables
        self.local = local
        self.remote = remote
        self.family = family
        self.passive = passive
        self.loop = asyncio.get_event_loop()
        self.rx_interval = rx_interval  # User selectable value
        self.tx_interval = tx_interval  # User selectable value

        # As per 6.8.1. State Variables
        self.state = STATE_DOWN
        self.remote_state = STATE_DOWN
        self.local_discr = random.randint(0, 4294967295)  # 32-bit value
        self.remote_discr = 0
        self.local_diag = DIAG_NONE
        self._desired_min_tx_interval = DESIRED_MIN_TX_INTERVAL
        self._required_min_rx_interval = rx_interval
        self._remote_min_rx_interval = 1
        self.demand_mode = DEMAND_MODE
        self.remote_demand_mode = False
        self.detect_mult = detect_mult
        self.auth_type = AUTH_TYPE
        self.rcv_auth_seq = 0
        self.xmit_auth_seq = random.randint(0, 4294967295)  # 32-bit value
        self.auth_seq_known = False

        # State Variables beyond those defined in RFC 5880
        self._async_tx_interval = 1000000
        self._final_async_tx_interval = None  # Used to delay timer changes
        self.last_rx_packet_time = None
        self._async_detect_time = None
        self._final_async_detect_time = None  # Used to delay timer changes
        self.poll_sequence = False
        self._remote_detect_mult = None
        self._remote_min_tx_interval = None
        self._tx_packets = None

        # Create the local client and run it once to grab a port
        log.debug('Setting up UDP client for %s:%s.', remote, CONTROL_PORT)
        src_port = random.randint(SOURCE_PORT_MIN, SOURCE_PORT_MAX)
        fam, _, _, _, addr = socket.getaddrinfo(self.local, src_port)[0]
        sock = socket.socket(family=fam, type=socket.SOCK_DGRAM)
        # Outgoing control packets carry TTL / hop limit 255.
        if fam == socket.AF_INET:
            sock.setsockopt(socket.SOL_IP, socket.IP_TTL, 255)
        elif fam == socket.AF_INET6:
            # Under Windows the IPv6 socket constant is somehow missing
            # https://bugs.python.org/issue29515
            sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_UNICAST_HOPS, 255)
        sock.bind(addr)
        task = self.loop.create_datagram_endpoint(Client, sock=sock)
        self.client, _ = self.loop.run_until_complete(task)
        log.info('Sourcing traffic for %s:%s from %s:%s.',
                 remote, CONTROL_PORT,
                 self.client.get_extra_info('sockname')[0],
                 self.client.get_extra_info('sockname')[1])

        # Schedule the coroutines to transmit packets and detect failures
        self._tx_packets = asyncio.ensure_future(self.async_tx_packets())
        asyncio.ensure_future(self.detect_async_failure())
# The transmit interval MUST be recalculated whenever
# bfd.DesiredMinTxInterval changes, or whenever bfd.RemoteMinRxInterval
# changes, and is equal to the greater of those two values.
# If either bfd.DesiredMinTxInterval is changed or
# bfd.RequiredMinRxInterval is changed, a Poll Sequence MUST be
# initiated (see section 6.5)
    @property
    def desired_min_tx_interval(self):
        """bfd.DesiredMinTxInterval"""
        return self._desired_min_tx_interval

    @desired_min_tx_interval.setter
    def desired_min_tx_interval(self, value):
        # Any actual change to this value starts a Poll Sequence.
        if value == self._desired_min_tx_interval:
            return
        log.info('bfd.DesiredMinTxInterval changed from %d to %d, starting '
                 'Poll Sequence.', self._desired_min_tx_interval, value)

        # If bfd.DesiredMinTxInterval is increased and bfd.SessionState is Up,
        # the actual transmission interval used MUST NOT change until the Poll
        # Sequence described above has terminated.
        tx_interval = max(value, self.remote_min_rx_interval)
        if value > self._desired_min_tx_interval and self.state == STATE_UP:
            # Defer the slower rate; applied once the poll sequence ends.
            self._final_async_tx_interval = tx_interval
            log.info('Delaying increase in Tx Interval from %d to %d ...',
                     self._async_tx_interval, self._final_async_tx_interval)
        else:
            self._async_tx_interval = tx_interval
        self._desired_min_tx_interval = value
        self.poll_sequence = True
    @property
    def required_min_rx_interval(self):
        """bfd.RequiredMinRxInterval"""
        return self._required_min_rx_interval

    @required_min_rx_interval.setter
    def required_min_rx_interval(self, value):
        # Any actual change to this value starts a Poll Sequence.
        if value == self._required_min_rx_interval:
            return
        log.info('bfd.RequiredMinRxInterval changed from %d to %d, starting '
                 'Poll Sequence.', self._required_min_rx_interval, value)
        # Recompute the detection time with the new local Rx requirement.
        detect_time = self.calc_detect_time(self.remote_detect_mult,
                                            value,
                                            self.remote_min_tx_interval)
        if value < self._required_min_rx_interval and self.state == STATE_UP:
            # Defer the shorter detect time until the poll sequence ends.
            self._final_async_detect_time = detect_time
            log.info('Delaying decrease in Detect Time from %d to %d ...',
                     self._async_detect_time, self._final_async_detect_time)
        else:
            self._async_detect_time = detect_time
        self._required_min_rx_interval = value
        self.poll_sequence = True
    @property
    def remote_min_rx_interval(self):
        """Property for remote_min_rx_interval so we can re-calculate
        the async_tx_interval whenever this value changes"""
        return self._remote_min_rx_interval

    @remote_min_rx_interval.setter
    def remote_min_rx_interval(self, value):
        if value == self._remote_min_rx_interval:
            return
        # If the local system reduces its transmit interval due to
        # bfd.RemoteMinRxInterval being reduced (the remote system has
        # advertised a reduced value in Required Min RX Interval), and the
        # remote system is not in Demand mode, the local system MUST honor
        # the new interval immediately.
        # We should cancel the tx_packets coro to do this.
        old_tx_interval = self._async_tx_interval
        # Effective Tx interval: the greater of our desired Tx interval and
        # the remote's advertised Rx floor.
        self._async_tx_interval = max(value, self.desired_min_tx_interval)
        if self._async_tx_interval < old_tx_interval:
            log.info('Remote triggered decrease in the Tx Interval, forcing '
                     'change by restarting the Tx Packets process.')
            self._restart_tx_packets()
        self._remote_min_rx_interval = value
    @property
    def remote_min_tx_interval(self):
        """Property for remote_min_tx_interval so we can re-calculate
        the detect_time whenever this value changes"""
        return self._remote_min_tx_interval

    @remote_min_tx_interval.setter
    def remote_min_tx_interval(self, value):
        if value == self._remote_min_tx_interval:
            return
        # A new remote Tx interval changes our detection time.
        self._async_detect_time = \
            self.calc_detect_time(self.remote_detect_mult,
                                  self.required_min_rx_interval, value)
        self._remote_min_tx_interval = value
    @property
    def remote_detect_mult(self):
        """Property for remote_detect_mult so we can re-calculate
        the detect_time whenever this value changes"""
        return self._remote_detect_mult

    @remote_detect_mult.setter
    def remote_detect_mult(self, value):
        if value == self._remote_detect_mult:
            return
        # A new remote detect multiplier changes our detection time.
        self._async_detect_time = \
            self.calc_detect_time(value, self.required_min_rx_interval,
                                  self.remote_min_tx_interval)
        self._remote_detect_mult = value
@staticmethod
def calc_detect_time(detect_mult, rx_interval, tx_interval):
"""Calculate the BFD Detection Time"""
# In Asynchronous mode, the Detection Time calculated in the local
# system is equal to the value of Detect Mult received from the remote
# system, multiplied by the agreed transmit interval of the remote
# system (the greater of bfd.RequiredMinRxInterval and the last
# received Desired Min TX Interval).
if not (detect_mult and rx_interval and tx_interval):
log.debug('BFD Detection Time calculation not possible with '
'values detect_mult: %s rx_interval: %s tx_interval: %s',
detect_mult, rx_interval, tx_interval)
return None
log.debug('BFD Detection Time calculated using '
'detect_mult: %s rx_interval: %s tx_interval: %s',
detect_mult, rx_interval, tx_interval)
return detect_mult * max(rx_interval, tx_interval)
    def encode_packet(self, final=False):
        """Encode a single BFD Control packet.

        Returns the packed 24-byte header as bytes; `final` sets the F bit.
        """
        # A system MUST NOT set the Demand (D) bit unless bfd.DemandMode is 1,
        # bfd.SessionState is Up, and bfd.RemoteSessionState is Up.
        demand = (self.demand_mode and self.state == STATE_UP and
                  self.remote_state == STATE_UP)

        # A BFD Control packet MUST NOT have both the Poll (P) and Final (F)
        # bits set. We'll give the F bit priority, the P bit will still be set
        # in the next outgoing packet if needed.
        poll = self.poll_sequence if not final else False

        data = {
            'version': VERSION,
            'diag': self.local_diag,
            'state': self.state,
            'poll': poll,
            'final': final,
            'control_plane_independent': CONTROL_PLANE_INDEPENDENT,
            'authentication_present': bool(self.auth_type),
            'demand_mode': demand,
            'multipoint': MULTIPOINT,
            'detect_mult': self.detect_mult,
            'length': 24,  # fixed: no authentication section is emitted
            'my_discr': self.local_discr,
            'your_discr': self.remote_discr,
            'desired_min_tx_interval': self.desired_min_tx_interval,
            'required_min_rx_interval': self.required_min_rx_interval,
            'required_min_echo_rx_interval': REQUIRED_MIN_ECHO_RX_INTERVAL
        }
        log.debug(PACKET_DEBUG_MSG, VERSION, self.local_diag, self.state,
                  poll, final, CONTROL_PLANE_INDEPENDENT, bool(self.auth_type),
                  demand, MULTIPOINT, self.detect_mult, 24, self.local_discr,
                  self.remote_discr, self.desired_min_tx_interval,
                  self.required_min_rx_interval, REQUIRED_MIN_ECHO_RX_INTERVAL)
        return bitstring.pack(PACKET_FORMAT, **data).bytes
def tx_packet(self, final=False):
"""Transmit a single BFD packet to the remote peer"""
log.debug('Transmitting BFD packet to %s:%s',
self.remote, CONTROL_PORT)
self.client.sendto(
self.encode_packet(final), (self.remote, CONTROL_PORT))
async def async_tx_packets(self):
"""Asynchronously transmit control packet"""
try:
while True:
# A system MUST NOT transmit BFD Control packets if
# bfd.RemoteDiscr is zero and the system is taking the Passive
# role. A system MUST NOT periodically transmit BFD Control
# packets if bfd.RemoteMinRxInterval is zero.
# A system MUST NOT periodically transmit BFD Control packets
# if Demand mode is active on the remote system
# (bfd.RemoteDemandMode) is 1, bfd.SessionState is Up, and
# bfd.RemoteSessionState is Up) and a Poll Sequence is not
# being transmitted.
if not((self.remote_discr == 0 and self.passive) or
(self.remote_min_rx_interval == 0) or
(not self.poll_sequence and
(self.remote_demand_mode == 1 and
self.state == STATE_UP and
self.remote_state == STATE_UP))):
self.tx_packet()
# The periodic transmission of BFD Control packets MUST be
# jittered on a per-packet basis by up to 25%
# If bfd.DetectMult is equal to 1, the interval between
# transmitted BFD Control packets MUST be no more than 90% of
# the negotiated transmission interval, and MUST be no less
# than 75% of the negotiated transmission interval.
if self.detect_mult == 1:
interval = \
self._async_tx_interval * random.uniform(0.75, 0.90)
else:
interval = \
self._async_tx_interval * (1 - random.uniform(0, 0.25))
await asyncio.sleep(interval/1000000)
except asyncio.CancelledError: # pragma: no cover
log.info('tx_packets() was cancelled ...')
    def _restart_tx_packets(self):
        """Allow other co-routines to request a restart of tx_packets()
        when needed, i.e. due to a timer change"""
        log.info('Attempting to cancel tx_packets() ...')
        # cancel() only requests cancellation; scheduling a fresh task right
        # away is safe because the old one exits on CancelledError.
        self._tx_packets.cancel()
        log.info('Restarting tx_packets() ...')
        self._tx_packets = asyncio.ensure_future(self.async_tx_packets())
    def rx_packet(self, packet):  # pylint: disable=I0011,R0912,R0915
        """Process a received BFD Control packet.

        Validates the Authentication (A) bit against local configuration,
        copies the remote session values into local state, runs the
        RFC 5880 section 6.8.6 state machine, answers Poll Sequences and
        records the receive time.

        :param packet: decoded BFD Control packet
        :raises IOError: when the packet's A bit contradicts local auth
            configuration
        """
        # If the A bit is set and no authentication is in use (bfd.AuthType
        # is zero), the packet MUST be discarded.
        if packet.authentication_present and not self.auth_type:
            raise IOError('Received packet with authentication while no '
                          'authentication is configured locally.')
        # If the A bit is clear and authentication is in use (bfd.AuthType
        # is nonzero), the packet MUST be discarded.
        if (not packet.authentication_present) and self.auth_type:
            raise IOError('Received packet without authentication while '
                          'authentication is configured locally.')
        # If the A bit is set authenticate the packet under the rules of
        # section 6.7.
        if packet.authentication_present:
            log.critical('Authenticated packet received, not supported!')
            return
        # Set bfd.RemoteDiscr to the value of My Discriminator.
        self.remote_discr = packet.my_discr
        # Set bfd.RemoteState to the value of the State (Sta) field.
        self.remote_state = packet.state
        # Set bfd.RemoteDemandMode to the value of the Demand (D) bit.
        self.remote_demand_mode = packet.demand_mode
        # Set bfd.RemoteMinRxInterval to the value of Required Min RX Interval.
        self.remote_min_rx_interval = packet.required_min_rx_interval
        # Non-RFC defined session state that we track anyway
        self.remote_detect_mult = packet.detect_mult
        self.remote_min_tx_interval = packet.desired_min_tx_interval
        # Implementation of the FSM in section 6.8.6
        if self.state == STATE_ADMIN_DOWN:
            # Administratively down sessions ignore all received packets.
            log.warning('Received packet from %s while in Admin Down state.',
                        self.remote)
            return
        if packet.state == STATE_ADMIN_DOWN:
            if self.state != STATE_DOWN:
                self.local_diag = DIAG_NEIGHBOR_SIGNAL_DOWN
                self.state = STATE_DOWN
                self.desired_min_tx_interval = DESIRED_MIN_TX_INTERVAL
                log.error('BFD remote %s signaled going ADMIN_DOWN.',
                          self.remote)
        else:
            if self.state == STATE_DOWN:
                if packet.state == STATE_DOWN:
                    self.state = STATE_INIT
                    log.error('BFD session with %s going to INIT state.',
                              self.remote)
                elif packet.state == STATE_INIT:
                    self.state = STATE_UP
                    self.desired_min_tx_interval = self.tx_interval
                    log.error('BFD session with %s going to UP state.',
                              self.remote)
            elif self.state == STATE_INIT:
                if packet.state in (STATE_INIT, STATE_UP):
                    self.state = STATE_UP
                    self.desired_min_tx_interval = self.tx_interval
                    log.error('BFD session with %s going to UP state.',
                              self.remote)
            else:
                # Local state is Up; a remote Down signal takes us Down.
                if packet.state == STATE_DOWN:
                    self.local_diag = DIAG_NEIGHBOR_SIGNAL_DOWN
                    self.state = STATE_DOWN
                    log.error('BFD remote %s signaled going DOWN.',
                              self.remote)
        # If a BFD Control packet is received with the Poll (P) bit set to 1,
        # the receiving system MUST transmit a BFD Control packet with the Poll
        # (P) bit clear and the Final (F) bit set as soon as practicable, ...
        if packet.poll:
            log.info('Received packet with Poll (P) bit set from %s, '
                     'sending packet with Final (F) bit set.', self.remote)
            self.tx_packet(final=True)
        # When the system sending the Poll sequence receives a packet with
        # Final, the Poll Sequence is terminated
        if packet.final:
            log.info('Received packet with Final (F) bit set from %s, '
                     'ending Poll Sequence.', self.remote)
            self.poll_sequence = False
            # Timer increases are deferred until the Poll Sequence ends;
            # apply any pending final values now.
            if self._final_async_tx_interval:
                log.info('Increasing Tx Interval from %d to %d now that Poll '
                         'Sequence has ended.', self._async_tx_interval,
                         self._final_async_tx_interval)
                self._async_tx_interval = self._final_async_tx_interval
                self._final_async_tx_interval = None
            if self._final_async_detect_time:
                log.info('Increasing Detect Time from %d to %d now that Poll '
                         'Sequence has ended.', self._async_detect_time,
                         self._final_async_detect_time)
                self._async_detect_time = self._final_async_detect_time
                self._final_async_detect_time = None
        # Set the time a packet was received to right now
        self.last_rx_packet_time = time.time()
        log.debug('Valid packet received from %s, updating last packet time.',
                  self.remote)
async def detect_async_failure(self):
"""Detect if a session has failed in asynchronous mode"""
while True:
if not (self.demand_mode or self._async_detect_time is None):
# If Demand mode is not active, and a period of time equal to
# the Detection Time passes without receiving a BFD Control
# packet from the remote system, and bfd.SessionState is Init
# or Up, the session has gone down -- the local system MUST set
# bfd.SessionState to Down and bfd.LocalDiag to 1.
if self.state in (STATE_INIT, STATE_UP) and \
((time.time() - self.last_rx_packet_time) >
(self._async_detect_time/1000000)):
self.state = STATE_DOWN
self.local_diag = DIAG_CONTROL_DETECTION_EXPIRED
self.desired_min_tx_interval = DESIRED_MIN_TX_INTERVAL
log.critical('Detected BFD remote %s going DOWN!',
self.remote)
log.info('Time since last packet: %d ms; '
'Detect Time: %d ms',
(time.time() - self.last_rx_packet_time) * 1000,
self._async_detect_time/1000)
await asyncio.sleep(1/1000)
| {
"repo_name": "netedgeplus/aiobfd",
"path": "aiobfd/session.py",
"copies": "1",
"size": "21572",
"license": "mit",
"hash": 132790871942410370,
"line_mean": 45.7939262473,
"line_max": 79,
"alpha_frac": 0.5935935472,
"autogenerated": false,
"ratio": 4.124665391969407,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5218258939169408,
"avg_score": null,
"num_lines": null
} |
"""aiodownload
This module contains the core classes.
"""
import aiohttp
import asyncio
import async_timeout
import logging
import os
from .strategy import DownloadStrategy, Lenient
logger = logging.getLogger(__name__)
# Lifecycle status messages assigned to AioDownloadBundle._status_msg as a
# bundle moves through the download pipeline.
STATUS_ATTEMPT = 'Download attempted'
STATUS_CACHE = 'Cache hit'
STATUS_DONE = 'File written'
STATUS_FAIL = 'Download failed'
STATUS_INIT = 'Initialized'


class AioDownloadBundle:
    """State container for a single URL handled by :class:`AioDownload`.

    Tracks the URL plus everything accumulated while the bundle moves
    through the download pipeline (attempt count, target file path and
    current status).

    :param url: URL string (ex. https://www.google.com)
    :type url: str
    :param info: (optional) extra information that can be injected into the bundle
    :type info: dict
    :param params: (optional) params for a POST request
    :type params: dict
    """

    def __init__(self, url, info=None, params=None):
        self.url = url
        self.info = info
        self.params = params
        self.attempts = 0  # incremented by AioDownload each time a request is made
        self.file_path = None  # filled in via DownloadStrategy.url_transform
        self._status_msg = STATUS_INIT  # updated by AioDownload as work progresses

    @property
    def status_msg(self):
        """Human-readable one-line summary of the bundle's current state."""
        return '[URL: {}, File Path: {}, Attempts: {}, Status: {}]'.format(
            self.url, self.file_path, self.attempts, self._status_msg
        )
class AioDownload:
    """The core class responsible for the coordination of requests and downloads

    :param client: (optional) client session, a default is instantiated if not provided
    :type client: :class:`aiohttp.ClientSession`
    :param download_strategy: (optional) download strategy, a default is instantiated if not provided
    :type download_strategy: :class:`aiodownload.DownloadStrategy`
    :param request_strategy: (optional) request strategy, a :class:`Lenient` strategy is instantiated if not provided
    :type request_strategy: :class:`aiodownload.RequestStrategy`
    """

    def __init__(self, client=None, download_strategy=None, request_strategy=None):

        if not client:
            # Get the event loop and initialize a client session if not provided
            self.loop = asyncio.get_event_loop()
            self.client = aiohttp.ClientSession(loop=self.loop)
        else:
            # Or grab the event loop from the client session
            # NOTE(review): relies on the private ``_loop`` attribute of
            # aiohttp.ClientSession -- confirm against the pinned version.
            self.loop = client._loop
            self.client = client

        # Configuration objects managing download and request strategies
        self._download_strategy = download_strategy or DownloadStrategy()  # chunk_size, home, skip_cached
        self._request_strategy = request_strategy or Lenient()  # concurrent, max_attempts, timeout

        # Bounded semaphore guards how many requests can run concurrently
        self._main_semaphore = asyncio.BoundedSemaphore(self._request_strategy.concurrent)

    async def main(self, bundle):
        """Main entry point for task creation with an asyncio event loop.

        The number of concurrent requests is throttled using this async
        method. Depending on the download strategy used, the method will call
        the request_and_download async method or immediately return the bundle
        indicating that the file came from cache as the file existed.

        :param bundle: bundle (generally one that has just been instantiated)
        :type bundle: :class:`aiodownload.AioDownloadBundle`
        :return: bundle with updated properties reflecting its final state
        :rtype bundle: :class:`aiodownload.AioDownloadBundle`
        """
        # ``async with`` is the supported way to hold an asyncio semaphore;
        # the former ``with (await sem)`` form was removed in Python 3.10.
        async with self._main_semaphore:

            bundle.file_path = self._download_strategy.get_file_path(bundle)
            file_exists = os.path.isfile(bundle.file_path)
            if not (file_exists and self._download_strategy.skip_cached):
                # Keep requesting until the bundle reaches a terminal state;
                # an ATTEMPT status triggers a back-off sleep first.
                while bundle._status_msg in (STATUS_ATTEMPT, STATUS_INIT, ):
                    if bundle._status_msg == STATUS_ATTEMPT:
                        logger.info(bundle.status_msg)
                        sleep_time = self._request_strategy.get_sleep_time(bundle)
                        logger.debug('Sleeping {} seconds between requests'.format(sleep_time))
                        await asyncio.sleep(sleep_time)
                    bundle = await self.request_and_download(bundle)
            else:
                bundle._status_msg = STATUS_CACHE

        logger.info(bundle.status_msg)
        return bundle

    async def request_and_download(self, bundle):
        """Make an HTTP request and write it to disk. Use the download and
        request strategies of the instance to implement how this is achieved.

        :param bundle: bundle with its url and file_path set
        :type bundle: :class:`aiodownload.AioDownloadBundle`
        :return: bundle with updated properties reflecting success or failure
        :rtype bundle: :class:`aiodownload.AioDownloadBundle`
        """
        async with async_timeout.timeout(self._request_strategy.timeout):
            try:
                bundle.attempts += 1
                client_method = getattr(self.client, 'post' if bundle.params else 'get')
                # BUG FIX: bundle.params was previously never passed to the
                # request, so POST bundles went out with an empty payload.
                request_kwargs = {'data': bundle.params} if bundle.params else {}
                async with client_method(bundle.url, **request_kwargs) as response:
                    try:
                        self._request_strategy.assert_response(response)
                        await self._download_strategy.on_success(response, bundle)
                        bundle._status_msg = STATUS_DONE
                    except AssertionError:
                        if self._request_strategy.retry(response):
                            if bundle.attempts >= self._request_strategy.max_attempts:
                                await self._download_strategy.on_fail(bundle)
                                bundle._status_msg = STATUS_FAIL
                            else:
                                bundle._status_msg = STATUS_ATTEMPT
                        else:
                            await self._download_strategy.on_fail(bundle)
                            bundle._status_msg = STATUS_FAIL
            except ValueError as err:
                bundle._status_msg = STATUS_FAIL
                logger.warning(' '.join([bundle.status_msg, str(err)]))
        return bundle
| {
"repo_name": "jelloslinger/aiodownload",
"path": "aiodownload/aiodownload.py",
"copies": "1",
"size": "6383",
"license": "mit",
"hash": -1540059012498056400,
"line_mean": 36.1104651163,
"line_max": 117,
"alpha_frac": 0.6318345606,
"autogenerated": false,
"ratio": 4.533380681818182,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5665215242418181,
"avg_score": null,
"num_lines": null
} |
"""aiofirebase package."""
import asyncio
import json
import posixpath
from aiohttp import ClientSession
class StreamCancelled(Exception):
    """Raised when the EventSource stream is cancelled by the server."""
class StreamAuthRevoked(Exception):
    """Raised when the stream ends because its credentials were revoked."""
class FirebaseHTTP:
    """
    HTTP Client for Firebase.

    Args:
        base_url (str): URL to your data.
        auth (string): Auth key.
        loop (class:`asyncio.BaseEventLoop`): Loop.
    """

    def __init__(self, base_url, auth=None, loop=None):
        """Initialise the class."""
        self._loop = loop or asyncio.get_event_loop()
        self._base_url = base_url
        self._auth = auth
        self._session = ClientSession(loop=self._loop)

    async def close(self):
        """Gracefully close the session."""
        await self._session.close()

    async def get(self, *, path=None, params=None):
        """Perform a GET request."""
        return await self._request(method='GET', path=path, params=params)

    async def put(self, *, value, path=None, params=None):
        """Perform a PUT request."""
        return await self._request(method='PUT', value=value, path=path, params=params)

    async def post(self, *, value, path=None, params=None):
        """Perform a POST request."""
        return await self._request(method='POST', value=value, path=path, params=params)

    async def patch(self, *, value, path=None, params=None):
        """Perform a PATCH request."""
        return await self._request(method='PATCH', value=value, path=path, params=params)

    async def delete(self, *, path=None, params=None):
        """Perform a DELETE request."""
        return await self._request(method='DELETE', path=path, params=params)

    async def stream(self, *, callback, path=None):
        """Hook up to the EventSource stream."""
        url = posixpath.join(self._base_url, path) if path else self._base_url
        headers = {'accept': 'text/event-stream'}
        async with self._session.get(url, headers=headers) as resp:
            # BUG FIX: ``resp.content`` is the async line iterator itself;
            # the previous code passed the ``resp.content.read()`` coroutine,
            # which is not async-iterable and raised a TypeError. Iteration
            # ends when the server closes the stream.
            await FirebaseHTTP._iterate_over_stream(resp.content, callback)

    @staticmethod
    async def _iterate_over_stream(iterable, callback):
        """Iterate over the EventSource stream and pass the event and data to
        the callback as and when we receive it."""
        event = None  # BUG FIX: previously unbound if 'data' arrived first
        async for msg in iterable:
            msg_str = msg.decode('utf-8').strip()
            if not msg_str or ':' not in msg_str:
                # Skip blank lines and malformed/keep-alive frames.
                continue
            key, _, value = msg_str.partition(':')
            # BUG FIX: SSE values carry a leading space ("event: cancel"),
            # so the exact comparisons below never matched before.
            value = value.strip()
            if key == 'event' and value == 'cancel':
                raise StreamCancelled('The requested location is no longer allowed due to security/rules changes.')
            elif key == 'event' and value == 'auth_revoked':
                raise StreamAuthRevoked('The auth credentials has expired.')
            elif key == 'event':
                event = value
            elif key == 'data' and event is not None:
                await callback(event=event, data=json.loads(value))

    async def _request(self, *, method, value=None, path=None, params=None):
        """Perform a request to Firebase."""
        url = posixpath.join(self._base_url, path.strip('/')) if path else self._base_url
        url += '.json'
        data = json.dumps(value) if value else None
        async with self._session.request(method, url, data=data, params=params) as resp:
            # raise_for_status() instead of ``assert`` -- asserts are
            # stripped under ``python -O`` and hide HTTP errors.
            resp.raise_for_status()
            return await resp.json()
| {
"repo_name": "billyshambrook/aiofirebase",
"path": "aiofirebase/__init__.py",
"copies": "1",
"size": "3513",
"license": "mit",
"hash": -3317453448974585000,
"line_mean": 36.7741935484,
"line_max": 120,
"alpha_frac": 0.6142897808,
"autogenerated": false,
"ratio": 4.172209026128266,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010287433235811924,
"num_lines": 93
} |
"""aiohttp bassd Socket Mode client
* https://api.slack.com/apis/connections/socket
* https://slack.dev/python-slack-sdk/socket-mode/
* https://pypi.org/project/aiohttp/
"""
import asyncio
import logging
from asyncio import Future
from asyncio import Queue
from logging import Logger
from typing import Union, Optional, List, Callable, Awaitable
import aiohttp
from aiohttp import ClientWebSocketResponse, WSMessage, WSMsgType, ClientConnectionError
from slack_sdk.proxy_env_variable_loader import load_http_proxy_from_env
from slack_sdk.socket_mode.async_client import AsyncBaseSocketModeClient
from slack_sdk.socket_mode.async_listeners import (
AsyncWebSocketMessageListener,
AsyncSocketModeRequestListener,
)
from slack_sdk.socket_mode.request import SocketModeRequest
from slack_sdk.web.async_client import AsyncWebClient
class SocketModeClient(AsyncBaseSocketModeClient):
    """aiohttp-backed Socket Mode client for Slack.

    Maintains a WebSocket session to Slack's Socket Mode endpoint,
    enqueues received messages for processing, and dispatches them to the
    registered listeners. A background monitor task reconnects when
    ``auto_reconnect_enabled`` is True.
    """

    logger: Logger
    web_client: AsyncWebClient
    app_token: str
    wss_uri: Optional[str]
    auto_reconnect_enabled: bool
    message_queue: Queue
    message_listeners: List[
        Union[
            AsyncWebSocketMessageListener,
            Callable[
                ["AsyncBaseSocketModeClient", dict, Optional[str]], Awaitable[None]
            ],
        ]
    ]
    socket_mode_request_listeners: List[
        Union[
            AsyncSocketModeRequestListener,
            Callable[["AsyncBaseSocketModeClient", SocketModeRequest], Awaitable[None]],
        ]
    ]
    message_receiver: Optional[Future]
    message_processor: Future
    proxy: Optional[str]
    ping_interval: float
    current_session: Optional[ClientWebSocketResponse]
    current_session_monitor: Optional[Future]
    auto_reconnect_enabled: bool
    default_auto_reconnect_enabled: bool
    closed: bool
    on_message_listeners: List[Callable[[WSMessage], Awaitable[None]]]
    on_error_listeners: List[Callable[[WSMessage], Awaitable[None]]]
    on_close_listeners: List[Callable[[WSMessage], Awaitable[None]]]

    def __init__(
        self,
        app_token: str,
        logger: Optional[Logger] = None,
        web_client: Optional[AsyncWebClient] = None,
        proxy: Optional[str] = None,
        auto_reconnect_enabled: bool = True,
        ping_interval: float = 10,
        on_message_listeners: Optional[List[Callable[[WSMessage], None]]] = None,
        on_error_listeners: Optional[List[Callable[[WSMessage], None]]] = None,
        on_close_listeners: Optional[List[Callable[[WSMessage], None]]] = None,
    ):
        """Socket Mode client

        Args:
            app_token: App-level token
            logger: Custom logger
            web_client: Web API client
            auto_reconnect_enabled: True if automatic reconnection is enabled (default: True)
            ping_interval: interval for ping-pong with Slack servers (seconds)
            proxy: the HTTP proxy URL
            on_message_listeners: listener functions for on_message
            on_error_listeners: listener functions for on_error
            on_close_listeners: listener functions for on_close
        """
        self.app_token = app_token
        self.logger = logger or logging.getLogger(__name__)
        self.web_client = web_client or AsyncWebClient()
        self.closed = False
        self.proxy = proxy
        # Fall back to proxy settings from the environment when no explicit
        # proxy URL is given.
        if self.proxy is None or len(self.proxy.strip()) == 0:
            env_variable = load_http_proxy_from_env(self.logger)
            if env_variable is not None:
                self.proxy = env_variable
        self.default_auto_reconnect_enabled = auto_reconnect_enabled
        self.auto_reconnect_enabled = self.default_auto_reconnect_enabled
        self.ping_interval = ping_interval
        self.wss_uri = None
        self.message_queue = Queue()
        self.message_listeners = []
        self.socket_mode_request_listeners = []
        self.current_session = None
        self.current_session_monitor = None
        # https://docs.aiohttp.org/en/stable/client_reference.html
        # Unless you are connecting to a large, unknown number of different servers
        # over the lifetime of your application,
        # it is suggested you use a single session for the lifetime of your application
        # to benefit from connection pooling.
        self.aiohttp_client_session = aiohttp.ClientSession()
        self.on_message_listeners = on_message_listeners or []
        self.on_error_listeners = on_error_listeners or []
        self.on_close_listeners = on_close_listeners or []
        self.message_receiver = None
        # Start draining the message queue immediately; receiving starts on
        # the first connect().
        self.message_processor = asyncio.ensure_future(self.process_messages())

    async def monitor_current_session(self) -> None:
        """Periodically verify the session and reconnect if it has closed."""
        while not self.closed:
            await asyncio.sleep(self.ping_interval)
            try:
                if self.auto_reconnect_enabled and (
                    self.current_session is None or self.current_session.closed
                ):
                    self.logger.info(
                        "The session seems to be already closed. Going to reconnect..."
                    )
                    await self.connect_to_new_endpoint()
            except Exception as e:
                self.logger.error(
                    "Failed to check the current session or reconnect to the server "
                    f"(error: {type(e).__name__}, message: {e})"
                )

    async def receive_messages(self) -> None:
        """Read WebSocket frames, enqueue TEXT frames, dispatch listeners."""
        consecutive_error_count = 0
        while not self.closed:
            try:
                message: WSMessage = await self.current_session.receive()
                if self.logger.level <= logging.DEBUG:
                    # BUG FIX: this local was previously named ``type``,
                    # shadowing the builtin for the whole function and
                    # breaking ``type(e).__name__`` in the except handler
                    # below (UnboundLocalError when debug logging is off,
                    # a wrong WSMsgType call when it is on).
                    ws_msg_type = WSMsgType(message.type)
                    message_type = (
                        ws_msg_type.name if ws_msg_type is not None else message.type
                    )
                    message_data = message.data
                    if isinstance(message_data, bytes):
                        message_data = message_data.decode("utf-8")
                    self.logger.debug(
                        f"Received message (type: {message_type}, data: {message_data}, extra: {message.extra})"
                    )
                if message is not None:
                    if message.type == WSMsgType.TEXT:
                        message_data = message.data
                        await self.enqueue_message(message_data)
                        for listener in self.on_message_listeners:
                            await listener(message)
                    elif message.type == WSMsgType.CLOSE:
                        if self.auto_reconnect_enabled:
                            self.logger.info(
                                "Received CLOSE event. Going to reconnect..."
                            )
                            await self.connect_to_new_endpoint()
                        for listener in self.on_close_listeners:
                            await listener(message)
                    elif message.type == WSMsgType.ERROR:
                        for listener in self.on_error_listeners:
                            await listener(message)
                    elif message.type == WSMsgType.CLOSED:
                        await asyncio.sleep(self.ping_interval)
                        continue
                consecutive_error_count = 0
            except Exception as e:
                consecutive_error_count += 1
                self.logger.error(
                    f"Failed to receive or enqueue a message: {type(e).__name__}, {e}"
                )
                # Fixed back-off for connection errors; linearly growing
                # sleep for anything else.
                if isinstance(e, ClientConnectionError):
                    await asyncio.sleep(self.ping_interval)
                else:
                    await asyncio.sleep(consecutive_error_count)

    async def connect(self):
        """Open a new WebSocket session, fetching a fresh URL if needed."""
        old_session = None if self.current_session is None else self.current_session
        if self.wss_uri is None:
            self.wss_uri = await self.issue_new_wss_url()
        self.current_session = await self.aiohttp_client_session.ws_connect(
            self.wss_uri,
            heartbeat=self.ping_interval,
            proxy=self.proxy,
        )
        self.auto_reconnect_enabled = self.default_auto_reconnect_enabled
        self.logger.info("A new session has been established")
        # Lazily start the monitor and receiver tasks on first connect.
        if self.current_session_monitor is None:
            self.current_session_monitor = asyncio.ensure_future(
                self.monitor_current_session()
            )
        if self.message_receiver is None:
            self.message_receiver = asyncio.ensure_future(self.receive_messages())
        if old_session is not None:
            await old_session.close()
            self.logger.info("The old session has been abandoned")

    async def disconnect(self):
        """Close the current WebSocket session, if any."""
        if self.current_session is not None:
            await self.current_session.close()
            self.logger.info("The session has been abandoned")

    async def send_message(self, message: str):
        """Send a raw text frame over the current WebSocket session."""
        if self.logger.level <= logging.DEBUG:
            self.logger.debug(f"Sending a message: {message}")
        await self.current_session.send_str(message)

    async def close(self):
        """Shut down the client: stop all tasks and close all sessions."""
        self.closed = True
        self.auto_reconnect_enabled = False
        await self.disconnect()
        self.message_processor.cancel()
        if self.current_session_monitor is not None:
            self.current_session_monitor.cancel()
        if self.message_receiver is not None:
            self.message_receiver.cancel()
        if self.aiohttp_client_session is not None:
            await self.aiohttp_client_session.close()
| {
"repo_name": "slackhq/python-slackclient",
"path": "slack_sdk/socket_mode/aiohttp/__init__.py",
"copies": "1",
"size": "9532",
"license": "mit",
"hash": -272533492160883330,
"line_mean": 40.0862068966,
"line_max": 112,
"alpha_frac": 0.6027066723,
"autogenerated": false,
"ratio": 4.450046685340803,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5552753357640803,
"avg_score": null,
"num_lines": null
} |
"""Aiohttp cleverbot.io API library."""
import json
import asyncio
import aiohttp
class CleverBot:
    """Minimal asynchronous client for the cleverbot.io API."""

    def __init__(self, user, key, async_init=True, loop=None):
        # Credentials are sent with every request; the server-assigned
        # 'nick' is added once async_init() has registered the session.
        self.body = {
            'user': user,
            'key': key
        }
        self.conversation = []
        if loop is None:
            loop = asyncio.get_event_loop()
        if async_init:
            loop.create_task(self.async_init(loop))

    async def async_init(self, loop):
        """Create the aiohttp session and register with cleverbot.io."""
        self.session = aiohttp.ClientSession(loop=loop)
        async with self.session.post('https://cleverbot.io/1.0/create', data=self.body) as r:
            payload = await r.json()
            self.body['nick'] = payload['nick']

    async def ask(self, text):
        """Send *text* to the bot and return its reply (or a fallback)."""
        self.body['text'] = text
        self.conversation.append(text)
        async with self.session.post('https://cleverbot.io/1.0/ask',
                                     data=self.body) as resp:
            reply = await resp.json()
            if reply['status'] == 'success':
                self.conversation.append(reply['response'])
                return reply['response']
            else:
                return 'Hmm, my brain doesn\'t seem to be working right now.'
| {
"repo_name": "Armored-Dragon/goldmine",
"path": "util/cleverbot.py",
"copies": "1",
"size": "1162",
"license": "mit",
"hash": 6052570529330879000,
"line_mean": 31.2777777778,
"line_max": 93,
"alpha_frac": 0.5473321859,
"autogenerated": false,
"ratio": 3.7483870967741937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4795719282674194,
"avg_score": null,
"num_lines": null
} |
"""aiohttp request argument parsing module.
Example: ::
import asyncio
from aiohttp import web
from webargs import fields
from webargs.aiohttpparser import use_args
hello_args = {
'name': fields.Str(required=True)
}
@asyncio.coroutine
@use_args(hello_args)
def index(request, args):
return web.Response(
body='Hello {}'.format(args['name']).encode('utf-8')
)
app = web.Application()
app.router.add_route('GET', '/', index)
"""
import typing
from aiohttp import web
from aiohttp.web import Request
from aiohttp import web_exceptions
from marshmallow import Schema, ValidationError
from marshmallow.fields import Field
from webargs import core
from webargs.core import json
from webargs.asyncparser import AsyncParser
def is_json_request(req: Request) -> bool:
    """Return True when the request advertises a JSON content type."""
    return core.is_json(req.content_type)
class HTTPUnprocessableEntity(web.HTTPClientError):
    """422 Unprocessable Entity -- aiohttp provides no built-in class."""

    status_code = 422
# Mapping of status codes to exception classes
# Adapted from werkzeug
# Seeded with our own 422 class; the rest is filled in at import time from
# aiohttp.web_exceptions.
exception_map = {422: HTTPUnprocessableEntity}
def _find_exceptions() -> None:
    """Populate ``exception_map`` from ``aiohttp.web_exceptions``.

    Keeps the most specific class per status code: an already-registered
    subclass is never replaced by one of its bases.
    """
    for name in web_exceptions.__all__:
        candidate = getattr(web_exceptions, name)
        try:
            is_http_exception = issubclass(candidate, web_exceptions.HTTPException)
        except TypeError:
            is_http_exception = False
        if not is_http_exception or candidate.status_code is None:
            continue
        registered = exception_map.get(candidate.status_code, None)
        if registered is not None and issubclass(candidate, registered):
            continue
        exception_map[candidate.status_code] = candidate


# Collect all exceptions from aiohttp.web_exceptions
_find_exceptions()
del _find_exceptions
class AIOHTTPParser(AsyncParser):
    """aiohttp request argument parser."""

    # "path" is an alias for aiohttp's match_info location.
    __location_map__ = dict(
        match_info="parse_match_info",
        path="parse_match_info",
        **core.Parser.__location_map__
    )

    def parse_querystring(self, req: Request, name: str, field: Field) -> typing.Any:
        """Pull a querystring value from the request."""
        return core.get_value(req.query, name, field)

    async def parse_form(self, req: Request, name: str, field: Field) -> typing.Any:
        """Pull a form value from the request."""
        # The POST body is read once and memoized so repeated field lookups
        # do not re-read the request stream.
        post_data = self._cache.get("post")
        if post_data is None:
            self._cache["post"] = await req.post()
        return core.get_value(self._cache["post"], name, field)

    async def parse_json(self, req: Request, name: str, field: Field) -> typing.Any:
        """Pull a json value from the request."""
        json_data = self._cache.get("json")
        if json_data is None:
            if not (req.body_exists and is_json_request(req)):
                return core.missing
            try:
                json_data = await req.json(loads=json.loads)
            except json.JSONDecodeError as e:
                if e.doc == "":
                    # An empty body is "missing", not invalid JSON.
                    return core.missing
                else:
                    return self.handle_invalid_json_error(e, req)
            self._cache["json"] = json_data
        return core.get_value(json_data, name, field, allow_many_nested=True)

    def parse_headers(self, req: Request, name: str, field: Field) -> typing.Any:
        """Pull a value from the header data."""
        return core.get_value(req.headers, name, field)

    def parse_cookies(self, req: Request, name: str, field: Field) -> typing.Any:
        """Pull a value from the cookiejar."""
        return core.get_value(req.cookies, name, field)

    def parse_files(self, req: Request, name: str, field: Field) -> None:
        """File uploads are not supported by this parser."""
        raise NotImplementedError(
            "parse_files is not implemented. You may be able to use parse_form for "
            "parsing upload data."
        )

    def parse_match_info(self, req: Request, name: str, field: Field) -> typing.Any:
        """Pull a value from the request's ``match_info``."""
        return core.get_value(req.match_info, name, field)

    def get_request_from_view_args(
        self, view: typing.Callable, args: typing.Iterable, kwargs: typing.Mapping
    ) -> Request:
        """Get request object from a handler function or method. Used internally by
        ``use_args`` and ``use_kwargs``.
        """
        req = None
        for arg in args:
            if isinstance(arg, web.Request):
                req = arg
                break
            elif isinstance(arg, web.View):
                # Class-based views carry the request as an attribute.
                req = arg.request
                break
        assert isinstance(req, web.Request), "Request argument not found for handler"
        return req

    def handle_error(
        self,
        error: ValidationError,
        req: Request,
        schema: Schema,
        error_status_code: typing.Union[int, None] = None,
        error_headers: typing.Union[typing.Mapping[str, str], None] = None,
    ) -> "typing.NoReturn":
        """Handle ValidationErrors and return a JSON response of error messages
        to the client.

        Raises the aiohttp exception class matching ``error_status_code``
        (default: ``DEFAULT_VALIDATION_STATUS``) with the messages as a
        JSON body.
        """
        error_class = exception_map.get(
            error_status_code or self.DEFAULT_VALIDATION_STATUS
        )
        if not error_class:
            raise LookupError("No exception for {0}".format(error_status_code))
        headers = error_headers
        raise error_class(
            body=json.dumps(error.messages).encode("utf-8"),
            headers=headers,
            content_type="application/json",
        )

    def handle_invalid_json_error(
        self, error: json.JSONDecodeError, req: Request, *args, **kwargs
    ) -> "typing.NoReturn":
        """Raise a 400 response with a JSON error body for unparsable JSON."""
        error_class = exception_map[400]
        messages = {"json": ["Invalid JSON body."]}
        raise error_class(
            body=json.dumps(messages).encode("utf-8"), content_type="application/json"
        )
# Module-level singleton parser and the decorator aliases that make up the
# public API of this module.
parser = AIOHTTPParser()
use_args = parser.use_args  # type: typing.Callable
use_kwargs = parser.use_kwargs  # type: typing.Callable
| {
"repo_name": "sloria/webargs",
"path": "src/webargs/aiohttpparser.py",
"copies": "1",
"size": "5966",
"license": "mit",
"hash": 9171119074826655000,
"line_mean": 32.5168539326,
"line_max": 86,
"alpha_frac": 0.6149849145,
"autogenerated": false,
"ratio": 3.9933065595716197,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007311915108693743,
"num_lines": 178
} |
"""Aiohttp test utils."""
import asyncio
from contextlib import contextmanager
import functools
import json as _json
from unittest import mock
from urllib.parse import urlparse, parse_qs
import yarl
from aiohttp.client_exceptions import ClientResponseError
class AiohttpClientMocker:
    """Mock Aiohttp client requests."""

    def __init__(self):
        """Initialize the request mocker."""
        self._mocks = []
        self._cookies = {}
        self.mock_calls = []

    def request(self, method, url, *,
                auth=None,
                status=200,
                text=None,
                data=None,
                content=None,
                json=None,
                params=None,
                headers=None,
                exc=None,
                cookies=None):
        """Mock a request.

        BUG FIX: ``headers`` previously defaulted to a shared mutable
        ``{}`` (the classic mutable-default-argument pitfall); ``None``
        is now translated to a fresh dict per call.
        """
        if json:
            text = _json.dumps(json)
        if text:
            content = text.encode('utf-8')
        if content is None:
            content = b''
        if params:
            url = str(yarl.URL(url).with_query(params))

        self._mocks.append(AiohttpClientMockResponse(
            method, url, status, content, cookies, exc, headers or {}))

    def get(self, *args, **kwargs):
        """Register a mock get request."""
        self.request('get', *args, **kwargs)

    def put(self, *args, **kwargs):
        """Register a mock put request."""
        self.request('put', *args, **kwargs)

    def post(self, *args, **kwargs):
        """Register a mock post request."""
        self.request('post', *args, **kwargs)

    def delete(self, *args, **kwargs):
        """Register a mock delete request."""
        self.request('delete', *args, **kwargs)

    def options(self, *args, **kwargs):
        """Register a mock options request."""
        self.request('options', *args, **kwargs)

    @property
    def call_count(self):
        """Return the number of requests made."""
        return len(self.mock_calls)

    def clear_requests(self):
        """Reset mock calls."""
        self._mocks.clear()
        self._cookies.clear()
        self.mock_calls.clear()

    # ``async def`` replaces ``@asyncio.coroutine``, which was removed in
    # Python 3.11; callers that ``await`` the result are unaffected.
    # pylint: disable=unused-variable
    async def match_request(self, method, url, *, data=None, auth=None,
                            params=None, headers=None, allow_redirects=None,
                            timeout=None, json=None):
        """Match a request against pre-registered requests."""
        data = data or json
        for response in self._mocks:
            if response.match_request(method, url, params):
                self.mock_calls.append((method, url, data, headers))
                if response.exc:
                    raise response.exc
                return response

        # Explicit raise instead of ``assert False`` (stripped under -O).
        raise AssertionError("No mock registered for {} {} {}".format(
            method.upper(), url, params))
class AiohttpClientMockResponse:
    """Mock Aiohttp client response.

    All coroutine methods use ``async def`` instead of the
    ``@asyncio.coroutine`` decorator, which was removed in Python 3.11.
    """

    def __init__(self, method, url, status, response, cookies=None, exc=None,
                 headers=None):
        """Initialize a fake response.

        Args:
            method: HTTP verb this response answers.
            url: URL string, or a compiled regex (object with ``.search``).
            status: HTTP status code to report.
            response: Body bytes, or a list of chunks for streaming reads.
            cookies: Optional dict of cookie name -> value.
            exc: Optional exception to raise instead of answering.
            headers: Optional response headers dict.
        """
        self.method = method
        self._url = url
        # A regex matcher (has .search) is kept as-is; plain strings are
        # pre-parsed for component-wise comparison in match_request().
        self._url_parts = (None if hasattr(url, 'search')
                           else urlparse(url.lower()))
        self.status = status
        self.response = response
        self.exc = exc
        self._headers = headers or {}
        self._cookies = {}
        if cookies:
            for name, data in cookies.items():
                cookie = mock.MagicMock()
                cookie.value = data
                self._cookies[name] = cookie
        if isinstance(response, list):
            # List body: expose a streaming .content whose read() pops one
            # chunk per call until the list is exhausted.
            self.content = mock.MagicMock()

            async def read(*args, **kwargs):
                """Read content stream mock."""
                if self.response:
                    return self.response.pop()
                return None

            self.content.read = read

    def match_request(self, method, url, params=None):
        """Test if response answers request."""
        if method.lower() != self.method.lower():
            return False
        if params:
            url = str(yarl.URL(url).with_query(params))
        # regular expression matching
        if self._url_parts is None:
            return self._url.search(url) is not None
        req = urlparse(url.lower())
        if self._url_parts.scheme and req.scheme != self._url_parts.scheme:
            return False
        if self._url_parts.netloc and req.netloc != self._url_parts.netloc:
            return False
        if (req.path or '/') != (self._url_parts.path or '/'):
            return False
        # Ensure all query components in matcher are present in the request
        request_qs = parse_qs(req.query)
        matcher_qs = parse_qs(self._url_parts.query)
        for key, vals in matcher_qs.items():
            for val in vals:
                try:
                    request_qs.get(key, []).remove(val)
                except ValueError:
                    return False
        return True

    @property
    def headers(self):
        """Return the response headers."""
        return self._headers

    @property
    def cookies(self):
        """Return dict of cookies."""
        return self._cookies

    async def read(self):
        """Return mock response body bytes."""
        return self.response

    async def text(self, encoding='utf-8'):
        """Return mock response as a string."""
        return self.response.decode(encoding)

    async def json(self, encoding='utf-8'):
        """Return mock response as parsed JSON."""
        return _json.loads(self.response.decode(encoding))

    async def release(self):
        """Mock release (no-op)."""

    def raise_for_status(self):
        """Raise error if status is 400 or higher."""
        if self.status >= 400:
            raise ClientResponseError(
                None, None, code=self.status, headers=self.headers)

    def close(self):
        """Mock close (no-op)."""
@contextmanager
def mock_aiohttp_client():
    """Yield an AiohttpClientMocker while aiohttp.ClientSession is patched."""
    mocker = AiohttpClientMocker()
    with mock.patch('aiohttp.ClientSession') as session_cls:
        session = session_cls()
        session.request = mocker.match_request
        for verb in ('get', 'post', 'put', 'options', 'delete'):
            bound = functools.partial(mocker.match_request, verb)
            setattr(session, verb, bound)
        yield mocker
| {
"repo_name": "tboyce1/home-assistant",
"path": "tests/test_util/aiohttp.py",
"copies": "2",
"size": "6578",
"license": "apache-2.0",
"hash": 1085597680468487200,
"line_mean": 29.1743119266,
"line_max": 78,
"alpha_frac": 0.5478868957,
"autogenerated": false,
"ratio": 4.508567511994517,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6056454407694518,
"avg_score": null,
"num_lines": null
} |
"""Aiohttp test utils."""
import asyncio
from contextlib import contextmanager
import functools
import json as _json
from unittest import mock
from urllib.parse import urlparse, parse_qs
class AiohttpClientMocker:
    """Mock Aiohttp client requests.

    Canned responses are registered through the HTTP-verb helpers and
    served by :meth:`match_request`.
    """

    def __init__(self):
        """Initialize the request mocker."""
        self._mocks = []
        self.mock_calls = []

    def request(self, method, url, *,
                auth=None,
                status=200,
                text=None,
                content=None,
                json=None):
        """Mock a request.

        The body may be given as ``json`` (highest priority), ``text``,
        or raw ``content`` bytes; it defaults to an empty body.
        """
        if json:
            text = _json.dumps(json)
        if text:
            content = text.encode('utf-8')
        if content is None:
            content = b''
        self._mocks.append(AiohttpClientMockResponse(
            method, url, status, content))

    def get(self, *args, **kwargs):
        """Register a mock get request."""
        self.request('get', *args, **kwargs)

    def put(self, *args, **kwargs):
        """Register a mock put request."""
        self.request('put', *args, **kwargs)

    def post(self, *args, **kwargs):
        """Register a mock post request."""
        self.request('post', *args, **kwargs)

    def delete(self, *args, **kwargs):
        """Register a mock delete request."""
        self.request('delete', *args, **kwargs)

    def options(self, *args, **kwargs):
        """Register a mock options request."""
        self.request('options', *args, **kwargs)

    @property
    def call_count(self):
        """Number of requests made."""
        return len(self.mock_calls)

    # pylint: disable=unused-variable
    async def match_request(self, method, url, *, auth=None):
        """Match a request against pre-registered requests.

        Uses ``async def`` rather than the ``@asyncio.coroutine``
        decorator, which was removed in Python 3.11.
        """
        for response in self._mocks:
            if response.match_request(method, url):
                self.mock_calls.append((method, url))
                return response
        assert False, "No mock registered for {} {}".format(method.upper(),
                                                            url)
class AiohttpClientMockResponse:
    """Mock Aiohttp client response.

    Coroutine methods use ``async def`` instead of the
    ``@asyncio.coroutine`` decorator, which was removed in Python 3.11.
    """

    def __init__(self, method, url, status, response):
        """Initialize a fake response.

        Args:
            method: HTTP verb this response answers.
            url: URL string, or a compiled regex (object with ``.search``).
            status: HTTP status code to report.
            response: Body bytes.
        """
        self.method = method
        self._url = url
        # A regex matcher (has .search) is kept as-is; plain strings are
        # pre-parsed for component-wise comparison in match_request().
        self._url_parts = (None if hasattr(url, 'search')
                           else urlparse(url.lower()))
        self.status = status
        self.response = response

    def match_request(self, method, url):
        """Test if response answers request."""
        if method.lower() != self.method.lower():
            return False
        # regular expression matching
        if self._url_parts is None:
            return self._url.search(url) is not None
        req = urlparse(url.lower())
        if self._url_parts.scheme and req.scheme != self._url_parts.scheme:
            return False
        if self._url_parts.netloc and req.netloc != self._url_parts.netloc:
            return False
        if (req.path or '/') != (self._url_parts.path or '/'):
            return False
        # Ensure all query components in matcher are present in the request
        request_qs = parse_qs(req.query)
        matcher_qs = parse_qs(self._url_parts.query)
        for key, vals in matcher_qs.items():
            for val in vals:
                try:
                    request_qs.get(key, []).remove(val)
                except ValueError:
                    return False
        return True

    async def read(self):
        """Return mock response body bytes."""
        return self.response

    async def text(self, encoding='utf-8'):
        """Return mock response as a string."""
        return self.response.decode(encoding)

    async def release(self):
        """Mock release (no-op)."""
@contextmanager
def mock_aiohttp_client():
    """Yield an AiohttpClientMocker while aiohttp.ClientSession is patched."""
    mocker = AiohttpClientMocker()
    with mock.patch('aiohttp.ClientSession') as session_cls:
        session = session_cls()
        for verb in ('get', 'post', 'put', 'options', 'delete'):
            bound = functools.partial(mocker.match_request, verb)
            setattr(session, verb, bound)
        yield mocker
| {
"repo_name": "lukas-hetzenecker/home-assistant",
"path": "tests/test_util/aiohttp.py",
"copies": "5",
"size": "4339",
"license": "mit",
"hash": -3777463014769264600,
"line_mean": 29.3426573427,
"line_max": 75,
"alpha_frac": 0.5593454713,
"autogenerated": false,
"ratio": 4.445696721311475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 143
} |
"""Aiohttp test utils."""
import asyncio
from contextlib import contextmanager
import json as _json
import re
from unittest import mock
from urllib.parse import parse_qs
from aiohttp import ClientSession
from aiohttp.client_exceptions import ClientError, ClientResponseError
from aiohttp.streams import StreamReader
from yarl import URL
from homeassistant.const import EVENT_HOMEASSISTANT_CLOSE
# Concrete class of compiled regex patterns; used below to tell regex URL
# matchers apart from plain string/URL matchers.
RETYPE = type(re.compile(""))
def mock_stream(data):
    """Build a StreamReader that yields *data* once and then EOF."""
    fake_protocol = mock.Mock(_reading_paused=False)
    reader = StreamReader(fake_protocol, limit=2 ** 16)
    reader.feed_data(data)
    reader.feed_eof()
    return reader
class AiohttpClientMocker:
    """Mock Aiohttp client requests.

    Canned responses are registered through the HTTP-verb helpers and
    served by :meth:`match_request`, which replaces
    ``ClientSession._request`` (see :meth:`create_session`).
    """

    def __init__(self):
        """Initialize the request mocker."""
        self._mocks = []
        self._cookies = {}
        self.mock_calls = []

    def request(
        self,
        method,
        url,
        *,
        auth=None,
        status=200,
        text=None,
        data=None,
        content=None,
        json=None,
        params=None,
        headers=None,
        exc=None,
        cookies=None,
        side_effect=None,
    ):
        """Mock a request.

        ``url`` may be a string/URL or a compiled regular expression.

        Note: ``headers`` defaults to None (not a shared mutable ``{}``)
        to avoid cross-call state leaking through a mutable default.
        """
        if not isinstance(url, RETYPE):
            url = URL(url)
        if params:
            url = url.with_query(params)
        self._mocks.append(
            AiohttpClientMockResponse(
                method=method,
                url=url,
                status=status,
                response=content,
                json=json,
                text=text,
                cookies=cookies,
                exc=exc,
                headers=headers or {},
                side_effect=side_effect,
            )
        )

    def get(self, *args, **kwargs):
        """Register a mock get request."""
        self.request("get", *args, **kwargs)

    def put(self, *args, **kwargs):
        """Register a mock put request."""
        self.request("put", *args, **kwargs)

    def post(self, *args, **kwargs):
        """Register a mock post request."""
        self.request("post", *args, **kwargs)

    def delete(self, *args, **kwargs):
        """Register a mock delete request."""
        self.request("delete", *args, **kwargs)

    def options(self, *args, **kwargs):
        """Register a mock options request."""
        self.request("options", *args, **kwargs)

    def patch(self, *args, **kwargs):
        """Register a mock patch request."""
        self.request("patch", *args, **kwargs)

    @property
    def call_count(self):
        """Return the number of requests made."""
        return len(self.mock_calls)

    def clear_requests(self):
        """Reset mock calls."""
        self._mocks.clear()
        self._cookies.clear()
        self.mock_calls.clear()

    def create_session(self, loop):
        """Create a ClientSession that is bound to this mocker."""
        session = ClientSession(loop=loop)
        # Setting directly on `session` will raise deprecation warning
        object.__setattr__(session, "_request", self.match_request)
        return session

    async def match_request(
        self,
        method,
        url,
        *,
        data=None,
        auth=None,
        params=None,
        headers=None,
        allow_redirects=None,
        timeout=None,
        json=None,
        cookies=None,
        **kwargs,
    ):
        """Match a request against pre-registered requests.

        Raises the registered exception (if any) and runs the registered
        side effect (if any) before returning the response.
        """
        data = data or json
        url = URL(url)
        if params:
            url = url.with_query(params)
        for response in self._mocks:
            if response.match_request(method, url, params):
                self.mock_calls.append((method, url, data, headers))
                if response.side_effect:
                    response = await response.side_effect(method, url, data)
                if response.exc:
                    raise response.exc
                return response
        assert False, "No mock registered for {} {} {}".format(
            method.upper(), url, params
        )
class AiohttpClientMockResponse:
    """Mock Aiohttp client response."""

    def __init__(
        self,
        method,
        url,
        status=200,
        response=None,
        json=None,
        text=None,
        cookies=None,
        exc=None,
        headers=None,
        side_effect=None,
    ):
        """Initialize a fake response.

        The body may be given as ``json`` (highest priority), ``text``,
        or raw ``response`` bytes; it defaults to an empty body.
        """
        if json is not None:
            text = _json.dumps(json)
        if text is not None:
            response = text.encode("utf-8")
        if response is None:
            response = b""

        self.method = method
        self._url = url
        self.status = status
        self.response = response
        self.exc = exc
        self.side_effect = side_effect
        self._headers = headers or {}
        self._cookies = {}
        if cookies:
            for cookie_name, cookie_value in cookies.items():
                jar_entry = mock.MagicMock()
                jar_entry.value = cookie_value
                self._cookies[cookie_name] = jar_entry

    def match_request(self, method, url, params=None):
        """Test if response answers request."""
        if self.method.lower() != method.lower():
            return False
        # A regex matcher accepts any URL it finds a match in.
        if isinstance(self._url, RETYPE):
            return self._url.search(str(url)) is not None
        for part in ("scheme", "host", "path"):
            if getattr(url, part) != getattr(self._url, part):
                return False
        # Every query component of the matcher must appear in the request.
        remaining = parse_qs(url.query_string)
        for key, wanted_values in parse_qs(self._url.query_string).items():
            for wanted in wanted_values:
                try:
                    remaining.get(key, []).remove(wanted)
                except ValueError:
                    return False
        return True

    @property
    def headers(self):
        """Return the response headers."""
        return self._headers

    @property
    def cookies(self):
        """Return dict of cookies."""
        return self._cookies

    @property
    def url(self):
        """Return yarl of URL."""
        return self._url

    @property
    def content_type(self):
        """Return the content-type header, if set."""
        return self._headers.get("content-type")

    @property
    def content(self):
        """Return the body wrapped in a readable stream."""
        return mock_stream(self.response)

    async def read(self):
        """Return mock response body bytes."""
        return self.response

    async def text(self, encoding="utf-8"):
        """Return mock response as a string."""
        body = await self.read()
        return body.decode(encoding)

    async def json(self, encoding="utf-8", content_type=None):
        """Return mock response as json (content_type is ignored)."""
        return _json.loads(await self.text(encoding))

    def release(self):
        """Mock release."""

    def raise_for_status(self):
        """Raise error if status is 400 or higher."""
        if self.status < 400:
            return
        request_info = mock.Mock(real_url="http://example.com")
        raise ClientResponseError(
            request_info=request_info,
            history=None,
            code=self.status,
            headers=self.headers,
        )

    def close(self):
        """Mock close."""
@contextmanager
def mock_aiohttp_client():
    """Yield an AiohttpClientMocker while HA client-session creation is patched."""
    mocker = AiohttpClientMocker()

    def _session_factory(hass, *args):
        mocked_session = mocker.create_session(hass.loop)

        async def _close_session(event):
            """Close the mocked session on Home Assistant shutdown."""
            await mocked_session.close()

        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, _close_session)
        return mocked_session

    with mock.patch(
        "homeassistant.helpers.aiohttp_client.async_create_clientsession",
        side_effect=_session_factory,
    ):
        yield mocker
class MockLongPollSideEffect:
    """Imitate a long_poll request.

    Intended as a side effect for a GET/PUT/etc. request: queue actual
    responses with ``queue_response``. While the queue is empty, callers
    await until a response arrives (or ``stop`` is called).
    """

    def __init__(self):
        """Initialize the empty queue and its counting semaphore."""
        self.semaphore = asyncio.Semaphore(0)
        self.response_list = []
        self.stopping = False

    async def __call__(self, method, url, data):
        """Return the next queued response, waiting until one is queued."""
        if self.stopping:
            raise ClientError()
        await self.semaphore.acquire()
        response_kwargs = self.response_list.pop(0)
        return AiohttpClientMockResponse(method=method, url=url, **response_kwargs)

    def queue_response(self, **kwargs):
        """Append a response (AiohttpClientMockResponse kwargs) to the queue."""
        self.response_list.append(kwargs)
        self.semaphore.release()

    def stop(self):
        """Stop the current request and future ones.

        This avoids an exception if there is someone waiting when exiting test.
        """
        self.stopping = True
        self.queue_response(exc=ClientError())
| {
"repo_name": "tboyce021/home-assistant",
"path": "tests/test_util/aiohttp.py",
"copies": "4",
"size": "9174",
"license": "apache-2.0",
"hash": -4818059551611379000,
"line_mean": 26.8,
"line_max": 87,
"alpha_frac": 0.5601700458,
"autogenerated": false,
"ratio": 4.3978906999041225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6958060745704123,
"avg_score": null,
"num_lines": null
} |
"""AioHTTP Web Server Setup"""
import logging
import subprocess
import time
import os
import base64
from cryptography import fernet
import aiohttp
import aiohttp_jinja2
import jinja2
import markdown
import markupsafe
from aiohttp_session import setup as setup_session
from aiohttp_session.cookie_storage import EncryptedCookieStorage
from aiohttp_security import (authorized_userid,
setup as setup_security,
SessionIdentityPolicy, AbstractAuthorizationPolicy)
from .config import (BOT_NAME, APP_KEY, APP_ID, GITTER_TOKEN, GITTER_CHANNELS,
APP_CLIENT_ID, APP_CLIENT_SECRET)
from .views import web_routes, navigation_bar
from .chat import GitterListener
from .. import utils
from ..githubhandler import GitHubAppHandler, AiohttpGitHubHandler
from .. import __version__ as VERSION
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
#: Override this to get more verbose logging of web app (and, if launched
#: with web frontend, the worker).
LOGLEVEL = 'INFO'
class AuthorizationPolicy(AbstractAuthorizationPolicy):
    """Authorization policy for web interface.

    Permissions are GitHub based: a permission string is either an
    organization name (``"org"``) or org and team (``"org/team"``).
    """

    def __init__(self, app):
        # Keep the aiohttp app so we can reach the GitHub app handler
        # stored under app['ghappapi'].
        self.app = app

    async def authorized_userid(self, identity: str) -> AiohttpGitHubHandler:
        """Retrieve authorized user id.

        Arguments:
            identity: random string identifying user. We use bearer token.

        Returns:
            Logged in Github API client.
        """
        return await self.app['ghappapi'].get_github_user_api(identity)

    async def permits(self, identity: str, permission: str, context=None) -> bool:
        """Check user permissions.

        Returns:
            True if the **identity** is allowed the **permission**
            in the current **context**.
        """
        # Fail if no identity
        if identity is None:
            return False
        # Permission is "org" or "org/team"; team is empty when absent.
        org, _, team = permission.partition('/')
        # Fail if no permissions requested
        if not org:
            logger.error("Internal error: checking for empty permission not allowed")
            return False
        # Fail if not logged in
        userapi = await self.app['ghappapi'].get_github_user_api(identity)
        if not userapi:
            return False
        # Fail if not in requested org
        if org not in await userapi.get_user_orgs():
            return False
        # Fail if team requested and user not in team
        if team and not await userapi.is_team_member(userapi.username, team):
            return False
        return True
async def jinja_defaults(request):
    """Provide default context values for all aiohttp-jinja2 web views.

    Values are:

    - **user**: The `AiohttpGitHubHandler` for the user if a user is logged in.
    - **version**: The version of the bot running
    - **navigation_bar**: List of 3-tuples (location, ID, natural name) used
      to build the main nav bar.
    - **active_page**: The ID of the currently rendered page, taken from the
      ``name`` field set in the aiohttp router.
    - **title**: The title of the current page, looked up in
      **navigation_bar** via **active_page**.
    """
    active_page = request.match_info.route.name
    nav_entry = next(
        (entry for entry in navigation_bar if entry[1] == active_page), None)
    title = nav_entry[2] if nav_entry is not None else 'Unknown'
    user_api = await authorized_userid(request)
    return {
        'user': user_api,
        'version': VERSION,
        'navigation_bar': navigation_bar,
        'active_page': active_page,
        'title': title,
        'request': request,
    }
# Module-level Markdown converter shared by the Jinja2 ``markdown`` filter.
md2html = markdown.Markdown(extensions=[
    'markdown.extensions.fenced_code',
    'markdown.extensions.tables',
    'markdown.extensions.admonition',
    'markdown.extensions.codehilite',
    'markdown.extensions.sane_lists',
])
def jinja2_filter_markdown(text):
    """Convert Markdown *text* to HTML wrapped as Jinja2-safe markup."""
    html = md2html.reset().convert(text)
    return markupsafe.Markup(html)
@aiohttp.web.middleware
async def handle_errors(request, handler):
    """Render aiohttp HTTP errors with the ``bot_40x.html`` template.

    Redirects (status 302) are re-raised untouched.  If the template
    cannot be rendered (KeyError), the original HTTP error is re-raised
    instead of being masked.
    """
    try:
        return await handler(request)
    except aiohttp.web.HTTPException as exc:
        if exc.status in (302,):
            raise
        try:
            return aiohttp_jinja2.render_template(
                'bot_40x.html', request, {'exc': exc})
        except KeyError:
            # Fall back to the plain HTTP error; the unused exception
            # binding the original code had (``as XYZ``) is removed.
            raise exc
async def start():
    """Initialize App

    This function is the entry point for wrappers that take an app factory
    as argument, notably gunicorn:

    $ gunicorn bioconda_utils.bot:init_app \
       --worker-class aiohttp.worker.GunicornWebWorker \
       --reload

    NOTE(review): the docstring references ``init_app`` while this factory
    is named ``start`` — confirm the intended gunicorn entry point.
    """
    utils.setup_logger('bioconda_utils', LOGLEVEL, prefix="")
    logger.info("Starting bot (version=%s)", VERSION)
    app = aiohttp.web.Application()
    app['name'] = BOT_NAME
    # Set up session storage
    # NOTE(review): the fernet key is regenerated on every start, so
    # session cookies do not survive a restart — confirm intended.
    fernet_key = fernet.Fernet.generate_key()
    secret_key = base64.urlsafe_b64decode(fernet_key)
    session_store = EncryptedCookieStorage(secret_key)
    setup_session(app, session_store)
    # Set up security
    setup_security(app, SessionIdentityPolicy(), AuthorizationPolicy(app))
    # Set up jinja2 rendering
    loader = jinja2.PackageLoader('bioconda_utils', 'templates')
    aiohttp_jinja2.setup(app, loader=loader,
                         context_processors=[jinja_defaults],
                         filters={'markdown': jinja2_filter_markdown})
    # Set up error handlers
    app.middlewares.append(handle_errors)
    # Prepare persistent client session
    app['client_session'] = aiohttp.ClientSession()
    # Create Github client
    app['ghappapi'] = GitHubAppHandler(app['client_session'],
                                       BOT_NAME, APP_KEY, APP_ID,
                                       APP_CLIENT_ID, APP_CLIENT_SECRET)
    # Create Gitter Client (background process)
    app['gitter_listener'] = GitterListener(
        app, GITTER_TOKEN, GITTER_CHANNELS, app['client_session'],
        app['ghappapi'])
    # Add routes collected above
    app.add_routes(web_routes)
    # Set up static files
    utils_path = os.path.dirname(os.path.dirname(__file__))
    app.router.add_static("/css", os.path.join(utils_path, 'templates/css'))

    # Close session - this needs to be at the end of the
    # on shutdown pieces so the client session remains available
    # until everything is done.
    async def close_session(app):
        await app['client_session'].close()
    app.on_shutdown.append(close_session)
    return app
async def start_with_celery():
    """Initialize app and launch internal celery worker

    This isn't simply a flag for `start` because async app factories
    cannot (easily) receive parameters from the gunicorn commandline.
    """
    app = await start()
    # Launch the celery worker as a child process of this web app.
    proc = subprocess.Popen([
        'celery',
        '-A', 'bioconda_utils.bot.worker',
        'worker',
        '-l', LOGLEVEL,
        '--without-heartbeat',
        '-c', '1',
    ])
    app['celery_worker'] = proc

    async def collect_worker(app):
        """Terminate and reap the celery worker on app shutdown."""
        # We don't use celery.broadcast('shutdown') as that seems to trigger
        # an immediate reload. Instead, just send a sigterm.
        proc = app['celery_worker']
        logger.info("Terminating celery worker: sending sigterm")
        proc.terminate()
        wait = 10
        if proc.poll() is None:
            for second in range(wait):
                logger.info("Terminating celery worker: waiting %i/%i", second, wait)
                # NOTE(review): time.sleep blocks the event loop during
                # shutdown — acceptable here since the app is stopping.
                time.sleep(1)
                if proc.poll() is not None:
                    break
            else:
                # for/else: runs only when the wait loop was NOT broken,
                # i.e. the worker never exited — escalate to SIGKILL.
                logger.info("Terminating celery worker: failed. Sending sigkill")
                proc.kill()
        logger.info("Terminating celery worker: collecting process")
        app['celery_worker'].wait()
        logger.info("Terminating celery worker: done")
    app.on_shutdown.append(collect_worker)
    return app
| {
"repo_name": "bioconda/bioconda-utils",
"path": "bioconda_utils/bot/web.py",
"copies": "1",
"size": "7964",
"license": "mit",
"hash": -2179244879578885400,
"line_mean": 31.9090909091,
"line_max": 87,
"alpha_frac": 0.6377448518,
"autogenerated": false,
"ratio": 4.198207696362678,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000949486968490659,
"num_lines": 242
} |
"""Aiomongodel errors and exceptions."""
import re
import pymongo.errors
from aiomongodel.utils import _Empty
class AioMongodelException(Exception):
    """Base AioMongodel Exception class.

    Root of the package's exception hierarchy.
    """
class Error(AioMongodelException):
    """Base AioMongodel Error class.

    Parent of the concrete error types raised by the package.
    """
class ValidationError(Error):
    """Raised on model validation error.

    Template for translation of error messages:

    .. code-block:: python

        translation = {
            "field is required": "",
            "none value is not allowed": "",
            "blank value is not allowed": "",
            "invalid value type": "",
            "value does not match any variant": "",
            "value does not match pattern {constraint}": "",
            "length is less than {constraint}": "",
            "length is greater than {constraint}": "",
            "value is less than {constraint}": "",
            "value is greater than {constraint}": "",
            "value should be greater than {constraint}": "",
            "value should be less than {constraint}": "",
            "list length is less than {constraint}": "",
            "list length is greater than {constraint}": "",
            "value is not a valid email address": "",
        }

    Attributes:
        error: Can contain a simple error string or
            dict of nested validation errors.
        constraint: A constraint value for validation error.
    """

    def __init__(self, error=None, constraint=_Empty):
        """Create validation error.

        Args:
            error: Can be string or dict of {key => ValidationError}
            constraint: A constraint value for the error. If it's not
                empty it is used in error message formatting as
                ``{constraint}``.
        """
        self.error = error
        self.constraint = constraint

    def as_dict(self, translation=None):
        """Extract all errors from ``self.error`` attribute.

        Args:
            translation (dict): A dict of translation for default validation
                error messages.

        Returns:
            If ``self.error`` is a string return as string.
            If ``self.error`` is a dict return
            dict of {key => ValidationError.as_dict()}
        """
        if isinstance(self.error, dict):
            return {key: nested.as_dict(translation)
                    for key, nested in self.error.items()}
        message = self.error
        if translation:
            message = translation.get(self.error, self.error)
        return self._format(message)

    def _format(self, message):
        # Substitute {constraint} only when a constraint was supplied.
        if self.constraint is _Empty:
            return message
        return message.format(constraint=self.constraint)

    def __str__(self):
        if isinstance(self.error, str):
            return self._format(self.error)
        return str(self.error)

    def __repr__(self):
        return 'ValidationError({0})'.format(self)
class StopValidation(AioMongodelException):
    """Raised when validation of the field should be stopped.

    Control-flow signal rather than an error: it derives from the base
    exception, not from ``Error``.
    """
class DocumentNotFoundError(Error):
    """Raised when a document is not found in the database."""
class DuplicateKeyError(Error, pymongo.errors.DuplicateKeyError):
    """Raised on unique key constraint error."""

    # Extracts the offending index name from pymongo's error message.
    index_name_regexp = re.compile(r': ([^ ]+) dup key:')

    def __init__(self, message):
        """Create error.

        Args:
            message (str): String representation of
                ``pymongo.errors.DuplicateKeyError``.
        """
        self.message = message

    @property
    def index_name(self):
        """Name of the unique index which raised error, or None.

        Returns None explicitly when the message does not match,
        instead of the original broad ``except Exception`` around
        ``m.group(1)``.
        """
        m = self.index_name_regexp.search(self.message)
        return m.group(1) if m is not None else None
| {
"repo_name": "ilex/aiomongodel",
"path": "aiomongodel/errors.py",
"copies": "1",
"size": "3819",
"license": "mit",
"hash": -6899302539755537000,
"line_mean": 29.3095238095,
"line_max": 76,
"alpha_frac": 0.5810421576,
"autogenerated": false,
"ratio": 4.645985401459854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5727027559059855,
"avg_score": null,
"num_lines": null
} |
"""Aioreactive module.
Contains the AsyncRx chained obserable that allows method chaining of all operators.
Also contains all operators as plain functions.
To use this module:
Example:
>>> import aioreactive as rx
>>> xs = rx.from_iterable([1, 2, 3])
>>> ...
"""
from __future__ import annotations
from typing import (Any, AsyncIterable, Awaitable, Callable, Iterable, Tuple,
TypeVar, Union)
from expression.core import Option, pipe
from expression.system.disposable import AsyncDisposable
from .observables import (AsyncAnonymousObservable, AsyncIterableObservable,
AsyncObservable)
from .observers import (AsyncAnonymousObserver, AsyncAwaitableObserver,
AsyncIteratorObserver, AsyncNotificationObserver)
from .subject import AsyncSingleSubject, AsyncSubject
from .subscription import run
from .types import AsyncObserver, Filter, Flatten, Projection, Zipper
TSource = TypeVar("TSource")
TResult = TypeVar("TResult")
TOther = TypeVar("TOther")
class AsyncRx(AsyncObservable[TSource]):
"""An AsyncObservable class similar to classic Rx.
This class provides all operators as methods and supports
method chaining.
Example:
>>> AsyncRx.from_iterable([1,2,3]).map(lambda x: x + 2).filter(lambda x: x < 3)
All methods are lazy imported.
"""
def __init__(self, source: AsyncObservable[TSource]) -> None:
self._source = source
async def subscribe_async(self, observer: AsyncObserver[TSource]) -> AsyncDisposable:
"""Subscribe to the async observable.
Uses the given observer to subscribe asynchronously to the async
observable.
Args:
observer: The async observer to subscribe.
Returns:
An async disposable that can be used to dispose the
subscription.
"""
return await self._source.subscribe_async(observer)
def __getitem__(self, key: Union[slice, int]) -> AsyncRx[TSource]:
"""Slices the given source stream using Python slice notation.
The arguments to slice is start, stop and step given within
brackets [] and separated with the ':' character. It is
basically a wrapper around the operators skip(), skip_last(),
take(), take_last() and filter().
This marble diagram helps you remember how slices works with
streams. Positive numbers is relative to the start of the
events, while negative numbers are relative to the end
(on_completed) of the stream.
r---e---a---c---t---i---v---e---|
0 1 2 3 4 5 6 7 8
-8 -7 -6 -5 -4 -3 -2 -1
Example:
>>> result = source[1:10]
>>> result = source[1:-2]
>>> result = source[1:-1:2]
Args:
self: Source to slice
key: A slice object
Returns:
The sliced source stream.
"""
from .filtering import slice as _slice
if isinstance(key, slice):
start, stop, step = key.start, key.stop, key.step
else:
start, stop, step = key, key + 1, 1
return AsyncRx(pipe(self, _slice(start, stop, step)))
@classmethod
def create(cls, source: AsyncObservable[TSource]) -> AsyncRx[TSource]:
"""Create `AsyncChainedObservable`.
Helper method for creating an `AsyncChainedObservable` to the
the generic type rightly inferred by Pylance (__init__ returns None).
"""
return cls(source)
@classmethod
def empty(cls) -> AsyncRx[TSource]:
return AsyncRx(empty())
@classmethod
def from_iterable(cls, iter: Iterable[TSource]) -> AsyncRx[TSource]:
return AsyncRx(from_iterable(iter))
@classmethod
def from_async_iterable(cls, iter: AsyncIterable[TSource]) -> AsyncObservable[TSource]:
"""Convert an async iterable to an async observable stream.
Example:
>>> xs = AsyncRx.from_async_iterable(async_iterable)
Returns:
The source stream whose elements are pulled from the given
(async) iterable sequence.
"""
return AsyncRx(from_async_iterable(iter))
@classmethod
def single(cls, value: TSource) -> AsyncRx[TSource]:
from .create import single
return AsyncRx(single(value))
def as_async_observable(self) -> AsyncObservable[TSource]:
return AsyncAnonymousObservable(self.subscribe_async)
def choose(self, chooser: Callable[[TSource], Option[TSource]]) -> AsyncObservable[TSource]:
"""Choose.
Applies the given function to each element of the stream and returns
the stream comprised of the results for each element where the
function returns Some with some value.
Args:
chooser: A function to transform or filter the stream
by returning `Some(value)` or `Nothing`.
Returns:
The filtered and/or transformed stream.
"""
return AsyncRx(pipe(self, choose(chooser)))
def choose_async(self, chooser: Callable[[TSource], Awaitable[Option[TSource]]]) -> AsyncObservable[TSource]:
"""Choose async.
Applies the given async function to each element of the stream and
returns the stream comprised of the results for each element where
the function returns Some with some value.
Args:
chooser: A function to transform or filter the stream
asynchronously by returning `Some(value)` or `Nothing`.
Returns:
The filtered and transformed stream.
"""
return AsyncRx(pipe(self, choose_async(chooser)))
def combine_latest(self, other: AsyncObservable[TOther]) -> AsyncRx[Tuple[TSource, TOther]]:
from .combine import combine_latest
xs = pipe(self, combine_latest(other))
return AsyncRx.create(xs)
def concat(self, other: AsyncObservable[TSource]) -> AsyncRx[TSource]:
from .combine import concat_seq
return AsyncRx(concat_seq([self, other]))
def debounce(self, seconds: float) -> AsyncRx[TSource]:
"""Debounce observable stream.
Ignores values from an observable sequence which are followed by
another value before the given timeout.
Args:
seconds (float): Number of seconds to debounce.
Returns:
The debounced stream.
"""
from .timeshift import debounce
return AsyncRx(pipe(self, debounce(seconds)))
def delay(self, seconds: float) -> AsyncRx[TSource]:
from .timeshift import delay
return AsyncRx(pipe(self, delay(seconds)))
def distinct_until_changed(self) -> AsyncObservable[TSource]:
from .filtering import distinct_until_changed
return AsyncRx(distinct_until_changed(self))
def filter(self, predicate: Callable[[TSource], bool]) -> AsyncRx[TSource]:
    """Filter stream.

    Keeps only those elements of the observable sequence for which
    *predicate* returns True.

    Args:
        predicate: A function returning `True` to keep an item, or
            `False` to drop it.

    Returns:
        The filtered stream.
    """
    from .filtering import filter as _filter

    filtered = pipe(self, _filter(predicate))
    return AsyncRx(filtered)
def filteri(self, predicate: Callable[[TSource, int], bool]) -> AsyncObservable[TSource]:
"""Filter with index.
Filters the elements of an observable sequence based on a predicate
and incorporating the element's index on each element of the source.
Args:
predicate: Function to test each element.
Returns:
An observable sequence that contains elements from the input
sequence that satisfy the condition.
"""
return AsyncRx(pipe(self, filteri(predicate)))
def filter_async(self, predicate: Callable[[TSource], Awaitable[bool]]) -> AsyncRx[TSource]:
from .filtering import filter_async
return AsyncRx(pipe(self, filter_async(predicate)))
def flat_map(self, selector: Callable[[TSource], AsyncObservable[TResult]]) -> AsyncRx[TResult]:
from .transform import flat_map
return AsyncRx.create(pipe(self, flat_map(selector)))
def flat_map_async(self, selector: Callable[[TSource], Awaitable[AsyncObservable[TResult]]]) -> AsyncRx[TResult]:
from .transform import flat_map_async
return AsyncRx.create(pipe(self, flat_map_async(selector)))
def flat_map_latest_async(
    self, mapper: Callable[[TSource], Awaitable[AsyncObservable[TResult]]]
) -> AsyncRx[TResult]:
    """Flat map latest async.

    Asynchronously transforms each item emitted by the source sequence
    into an observable stream, mirroring only the items emitted by the
    most recently produced inner observable.

    Args:
        mapper: Async function turning each item into a new async
            observable.

    Returns:
        An async observable that only merges values from the latest
        async observable produced by the mapper.
    """
    projected = flat_map_latest_async(mapper)
    return AsyncRx(pipe(self, projected))
def map(self, selector: Callable[[TSource], TResult]) -> AsyncRx[TResult]:
from .transform import map
return AsyncRx(pipe(self, map(selector)))
def merge(self, other: AsyncObservable[TSource]) -> AsyncRx[TSource]:
from .combine import merge_inner
from .create import of_seq
source = of_seq([self, other])
return pipe(source, merge_inner(0), AsyncRx.create)
def skip(self, count: int) -> AsyncObservable[TSource]:
    """Skip items from start of the stream.

    Bypasses a specified number of elements in an observable sequence
    and then returns the remaining elements.

    Args:
        count: Number of items to skip.

    Returns:
        The stream with the first *count* elements bypassed.
    """
    return AsyncRx(pipe(self, skip(count)))
def skip_last(self, count: int) -> AsyncRx[TSource]:
"""Bypasses a specified number of elements at the end of an
observable sequence.
This operator accumulates a queue with a length enough to store
the first `count` elements. As more elements are received,
elements are taken from the front of the queue and produced on
the result sequence. This causes elements to be delayed.
Args:
count: Number of elements to bypass at the end of the
source sequence.
Returns:
An observable sequence containing the source sequence
elements except for the bypassed ones at the end.
"""
return AsyncRx(pipe(self, skip_last(count)))
def starfilter(
self: AsyncObservable[Iterable[Any]], predicate: Callable[..., bool]
) -> AsyncRx[Iterable[Any]]:
"""Filter and spread the arguments to the predicate.
Filters the elements of an observable sequence based on a predicate.
Returns:
An observable sequence that contains elements from the input
sequence that satisfy the condition.
"""
xs = pipe(self, starfilter(predicate))
return AsyncRx.create(xs)
def starmap(self, mapper: Callable[..., TResult]) -> AsyncObservable[TResult]:
"""Map and spread the arguments to the mapper.
Returns:
An observable sequence whose elements are the result of
invoking the mapper function on each element of the source.
"""
return AsyncRx(pipe(self, starmap(mapper)))
def take(self, count: int) -> AsyncObservable[TSource]:
    """Take the first elements from the stream.

    Returns a specified number of contiguous elements from the start of
    an observable sequence.

    Args:
        count: Number of elements to take.

    Returns:
        An observable sequence that contains the specified number of
        elements from the start of the input sequence.
    """
    from .filtering import take

    return AsyncRx(pipe(self, take(count)))
def take_last(self, count: int) -> AsyncObservable[TSource]:
    """Take last elements from stream.

    Returns a specified number of contiguous elements from the end of an
    observable sequence.

    Args:
        count: Number of elements to take.

    Returns:
        An observable sequence that contains the specified number of
        elements from the end of the input sequence.
    """
    from .filtering import take_last

    return AsyncRx(pipe(self, take_last(count)))
def take_until(self, other: AsyncObservable[Any]) -> AsyncRx[TSource]:
    """Take elements until other.

    Returns the values from the source observable sequence until the
    other observable sequence produces a value.

    Args:
        other: The other async observable; its first value terminates
            the result stream.

    Returns:
        An observable sequence mirroring the source until *other* emits.
    """
    from .filtering import take_until

    return AsyncRx(pipe(self, take_until(other)))
def to_async_iterable(self) -> AsyncIterable[TSource]:
from .leave import to_async_iterable
return to_async_iterable(self)
def with_latest_from(self, other: AsyncObservable[TOther]) -> AsyncRx[Tuple[TSource, TOther]]:
from .combine import with_latest_from
return AsyncRx.create(pipe(self, with_latest_from(other)))
def as_async_observable(source: AsyncObservable[TSource]) -> AsyncObservable[TSource]:
return AsyncAnonymousObservable(source.subscribe_async)
def as_chained(source: AsyncObservable[TSource]) -> AsyncRx[TSource]:
return AsyncRx(source)
def choose(chooser: Callable[[TSource], Option[TResult]]) -> Projection[TSource, TResult]:
    """Choose.

    Applies *chooser* to each element of the stream and keeps the
    unwrapped results wherever it returns ``Some(value)``.

    Args:
        chooser: A function returning `Some(value)` to keep (and
            transform) an item, or `Nothing` to drop it.

    Returns:
        The filtered and/or transformed stream.
    """
    from .filtering import choose as _choose

    return _choose(chooser)
def choose_async(chooser: Callable[[TSource], Awaitable[Option[TResult]]]) -> Projection[TSource, TResult]:
"""Choose async.
Applies the given async function to each element of the stream and
returns the stream comprised of the results for each element where
the function returns Some with some value.
Args:
chooser: An async function to transform or filter the stream
by returning `Some(value)` or `Nothing`.
Returns:
The filtered and/or transformed stream.
"""
from .filtering import choose_async
return choose_async(chooser)
def combine_latest(other: AsyncObservable[TOther]) -> Zipper[Any, TOther]:
from .combine import combine_latest
return combine_latest(other)
def debounce(seconds: float) -> Filter:
"""Debounce source stream.
Ignores values from a source stream which are followed by another
value before seconds has elapsed.
Example:
>>> ys = pipe(xs, debounce(5)) # 5 seconds
Args:
seconds: Duration of the throttle period for each value
Returns:
A partially applied debounce function that takes the source
observable to debounce.
"""
from .timeshift import debounce
return debounce(seconds)
def catch(handler: Callable[[Exception], AsyncObservable[TSource]]) -> Projection[TSource, TSource]:
from .transform import catch
return catch(handler)
def concat(other: AsyncObservable[TSource]) -> Projection[TSource, TSource]:
"""Concatenates an observable sequence with another observable
sequence."""
def _concat(source: AsyncObservable[TSource]) -> AsyncObservable[TSource]:
from .combine import concat_seq
return concat_seq([source, other])
return _concat
def concat_seq(sources: Iterable[AsyncObservable[TSource]]) -> AsyncObservable[TSource]:
"""Concatenates an iterable of observable sequences."""
from .combine import concat_seq
return concat_seq(sources)
def defer(factory: Callable[[], AsyncObservable[TSource]]) -> AsyncObservable[TSource]:
"""Returns an observable sequence that invokes the specified factory
function whenever a new observer subscribes."""
from .create import defer
return defer(factory)
def delay(seconds: float) -> Filter:
from .timeshift import delay
return delay(seconds)
def distinct_until_changed(source: AsyncObservable[TSource]) -> AsyncObservable[TSource]:
from .filtering import distinct_until_changed
return distinct_until_changed(source)
def empty() -> "AsyncObservable[Any]":
from .create import empty
return empty()
def filter(predicate: Callable[[TSource], bool]) -> Projection[TSource, TSource]:
"""Filter stream.
Filters the elements of an observable sequence based on a predicate.
Returns an observable sequence that contains elements from the input
sequence that satisfy the condition.
Args:
predicate:
A function to filter the stream by returning `True` to
keep the item, or `False` to filter and remove the item.
Returns:
The filtered stream.
"""
from .filtering import filter as _filter
return _filter(predicate)
def filteri(predicate: Callable[[TSource, int], bool]) -> Projection[TSource, TSource]:
"""Filter with index.
Filters the elements of an observable sequence based on a predicate
and incorporating the element's index on each element of the source.
Args:
predicate: Function to test each element.
Returns:
An observable sequence that contains elements from the input
sequence that satisfy the condition.
"""
from .filtering import filteri
return filteri(predicate)
def filter_async(
predicate: Callable[[TSource], Awaitable[bool]]
) -> Callable[[AsyncObservable[TSource]], AsyncObservable[TSource]]:
from .filtering import filter_async
return filter_async(predicate)
def from_async(worker: Awaitable[TSource]) -> AsyncObservable[TSource]:
from .create import of_async
return of_async(worker)
def from_iterable(iterable: Iterable[TSource]) -> AsyncObservable[TSource]:
"""Convert an iterable to a source stream.
Example:
>>> xs = from_iterable([1,2,3])
Returns:
The source stream whose elements are pulled from the given
(async) iterable sequence.
"""
from .create import of_seq
return of_seq(iterable)
def flat_map(mapper: Callable[[TSource], AsyncObservable[TResult]]) -> Projection[TSource, TResult]:
from .transform import flat_map
return flat_map(mapper)
def flat_mapi(mapper: Callable[[TSource, int], AsyncObservable[TResult]]) -> Projection[TSource, TResult]:
from .transform import flat_mapi
return flat_mapi(mapper)
def flat_map_async(mapper: Callable[[TSource], Awaitable[AsyncObservable[TResult]]]) -> Projection[TSource, TResult]:
    """Flat map async.

    Asynchronously projects each element of an observable sequence into
    an observable sequence and merges the resulting observable sequences
    back into one observable sequence.

    Args:
        mapper: Async function projecting each source element to an
            async observable.

    Returns:
        A partially applied operator that takes a source observable and
        returns the merged observable sequence.
    """
    from .transform import flat_map_async

    return flat_map_async(mapper)
def flat_map_latest_async(
    mapper: Callable[[TSource], Awaitable[AsyncObservable[TResult]]]
) -> Projection[TSource, TResult]:
    """Flat map latest async.

    Asynchronously transforms the items emitted by a source sequence
    into observable streams, and mirrors those items emitted by the
    most-recently transformed observable sequence.

    Args:
        mapper: Async function turning each source item into a new
            async observable.

    Returns:
        A partially applied operator that takes a source observable and
        only merges values from the latest inner observable.
    """
    from .transform import flat_map_latest_async

    return flat_map_latest_async(mapper)
def from_async_iterable(iter: AsyncIterable[TSource]) -> "AsyncObservable[TSource]":
"""Convert an async iterable to an async observable stream.
Example:
>>> xs = rx.from_async_iterable(async_iterable)
Returns:
The source stream whose elements are pulled from the given
(async) iterable sequence.
"""
from .create import of_async_iterable
return AsyncRx(of_async_iterable(iter))
def interval(seconds: float, period: int) -> AsyncObservable[int]:
"""Returns an observable sequence that triggers the increasing
sequence starting with 0 after the given msecs, and the after each
period."""
from .create import interval
return interval(seconds, period)
def map(fn: Callable[[TSource], TResult]) -> Projection[TSource, TResult]:
from .transform import map as _map
return _map(fn)
def map_async(mapper: Callable[[TSource], Awaitable[TResult]]) -> Projection[TSource, TResult]:
    """Map asynchronously.

    Returns an observable sequence whose elements are the result of
    invoking the async mapper function on each element of the
    source."""
    from .transform import map_async

    return map_async(mapper)
def mapi_async(mapper: Callable[[TSource, int], Awaitable[TResult]]) -> Projection[TSource, TResult]:
"""Returns an observable sequence whose elements are the result of
invoking the async mapper function by incorporating the element's
index on each element of the source."""
from .transform import mapi_async
return mapi_async(mapper)
def mapi(mapper: Callable[[TSource, int], TResult]) -> Projection[TSource, TResult]:
"""Returns an observable sequence whose elements are the result of
invoking the mapper function and incorporating the element's index
on each element of the source."""
from .transform import mapi
return mapi(mapper)
def merge(other: AsyncObservable[TSource]) -> Projection[TSource, TSource]:
    """Return an operator that merges its source stream with *other*."""
    from .create import of_seq

    def _merge(source: AsyncObservable[TSource]) -> AsyncObservable[TSource]:
        sources = of_seq([source, other])
        return pipe(sources, merge_inner())

    return _merge
def merge_inner(max_concurrent: int = 0) -> Flatten:
def _merge_inner(source: AsyncObservable[AsyncObservable[TSource]]) -> AsyncObservable[TSource]:
from .combine import merge_inner
return pipe(source, merge_inner(max_concurrent))
return _merge_inner
def merge_seq(sources: Iterable[AsyncObservable[TSource]]) -> AsyncObservable[TSource]:
from .create import of_seq
return pipe(
of_seq(sources),
merge_inner(),
)
def never() -> "AsyncObservable[Any]":
from .create import never
return never()
def of_async(workflow: Awaitable[TSource]) -> AsyncObservable[TSource]:
from .create import of_async
return of_async(workflow)
def retry(retry_count: int) -> Filter:
from .transform import retry
return retry(retry_count)
def subscribe_async(obv: AsyncObserver[TSource]) -> Callable[[AsyncObservable[TSource]], Awaitable[AsyncDisposable]]:
"""A pipeable subscribe async.
Example:
>>> await pipe(xs, filter(predicate), subscribe_async)
"""
from .subscription import subscribe_async
return subscribe_async(obv)
def single(value: TSource) -> AsyncObservable[TSource]:
from .create import single
return single(value)
def skip(count: int) -> Filter:
"""Skip items in the stream.
Bypasses a specified number of elements in an observable sequence
and then returns the remaining elements.
Args:
count (int): Items to skip
Returns:
The result stream with skipped items.
"""
from .filtering import skip
return skip(count)
def skip_last(count: int) -> Filter:
"""Bypasses a specified number of elements at the end of an
observable sequence.
This operator accumulates a queue with a length enough to store
the first `count` elements. As more elements are received,
elements are taken from the front of the queue and produced on
the result sequence. This causes elements to be delayed.
Args:
count: Number of elements to bypass at the end of the
source sequence.
Returns:
An observable sequence containing the source sequence
elements except for the bypassed ones at the end.
"""
from .filtering import skip_last
return skip_last(count)
def starfilter(predicate: Callable[..., bool]) -> Projection[Iterable[Any], Iterable[Any]]:
"""Filter and spread the arguments to the predicate.
Filters the elements of an observable sequence based on a predicate.
Returns:
An observable sequence that contains elements from the input
sequence that satisfy the condition.
"""
from .filtering import starfilter
return starfilter(predicate)
def starmap(mapper: Callable[..., TResult]) -> Projection[Any, TResult]:
"""Map and spread the arguments to the mapper.
Returns an observable sequence whose elements are the result of
invoking the mapper function on each element of the source."""
from .transform import starmap
return starmap(mapper)
def switch_latest() -> Flatten:
from .transform import switch_latest
return switch_latest
def take(count: int) -> Filter:
"""Take the first elements from the stream.
Returns a specified number of contiguous elements from the start of
an observable sequence.
Args:
count Number of elements to take.
Returns:
An observable sequence that contains the specified number of
elements from the start of the input stream.
"""
from .filtering import take
return take(count)
def take_last(count: int) -> Filter:
"""Take last elements from stream.
Returns a specified number of contiguous elements from the end of an
observable sequence.
Args:
count: Number of elements to take.
Returns:
An observable stream that contains the specified number of
elements from the end of the input stream.
"""
from .filtering import take_last
return take_last(count)
def take_until(other: AsyncObservable[Any]) -> Filter:
"""Take elements until other.
Returns the values from the source observable sequence until the
other observable sequence produces a value.
Args:
other: The other async observable
Returns:
Stream[TSource, TSource]: [description]
"""
from .filtering import take_until
return take_until(other)
def timer(due_time: float) -> AsyncObservable[int]:
"""Returns an observable sequence that triggers the value 0
after the given duetime in milliseconds.
"""
from .create import timer
return timer(due_time)
def to_async_iterable(source: AsyncObservable[TSource]) -> AsyncIterable[TSource]:
from .leave import to_async_iterable
return to_async_iterable(source)
def with_latest_from(other: AsyncObservable[TOther]) -> Zipper[Any, TOther]:
from .combine import with_latest_from
return with_latest_from(other)
__all__ = [
"AsyncAnonymousObservable",
"AsyncAnonymousObserver",
"AsyncAwaitableObserver",
"AsyncIteratorObserver",
"AsyncIterableObservable",
"AsyncNotificationObserver",
"AsyncObservable",
"AsyncObserver",
"asyncrx",
"AsyncSingleSubject",
"AsyncSubject",
"catch",
"choose",
"choose_async",
"combine_latest",
"concat",
"concat_seq",
"delay",
"empty",
"filter",
"filteri",
"filter_async",
"from_async",
"from_iterable",
"flat_map",
"flat_mapi",
"flat_map_async",
"flat_mapi_async",
"flat_map_latest_async",
"map",
"map_async",
"merge",
"merge_inner",
"merge_seq",
"never",
"retry",
"run",
"single",
"skip",
"skip_last",
"starfilter",
"starmap",
"Stream",
"switch_latest",
"to_async_iterable",
"take",
"take_last",
]
# flake8: noqa
from ._version import get_versions
__version__ = get_versions()["version"] # type: ignore
del get_versions
| {
"repo_name": "dbrattli/aioreactive",
"path": "aioreactive/__init__.py",
"copies": "1",
"size": "28718",
"license": "mit",
"hash": -6224501212110773000,
"line_mean": 28.2146490336,
"line_max": 117,
"alpha_frac": 0.6651577408,
"autogenerated": false,
"ratio": 4.29075153145077,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003948379452995314,
"num_lines": 983
} |
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import asyncio
import re
import struct
def singleton(cls):
    """Class decorator: replace *cls* with its single instance.

    The instance's ``__call__`` attribute is set so that invoking it
    explicitly (``obj.__call__()``) yields the same instance again.
    """
    obj = cls()
    obj.__call__ = lambda: obj
    return obj
# Sentinel singleton for the OSC "I" (impulse / "bang") type tag,
# which carries no data bytes in the packet.
@singleton
class Impulse:
    pass
# Character class for a single OSC address character (excluding '/' for the
# plain variant).  Raw strings avoid the invalid "\]" escape warning that
# Python 3.12+ raises for non-raw strings; the values are unchanged.
OSC_ADDR_REGEXP = r'[^ #*,/?[\]{}]'
OSC_ADDR_SLASH_REGEXP = r'[^ #*,?[\]{}]'


# translate osc address pattern to regexp for use in message handlers
def translate_pattern(pattern):
    """Translate an OSC address pattern into an anchored regexp string.

    Supported wildcards: '?' (one character), '*' (a run of characters
    within one path part), '[...]'/'[!...]' (character classes with
    ranges), '{a,b}' (alternatives) and '//' (any number of path parts).

    Args:
        pattern: OSC address pattern, e.g. '/synth/*/freq'.

    Returns:
        A '^...$'-anchored regular expression source string.
    """
    result = ''
    i = 0
    while i < len(pattern):
        c = pattern[i]
        if c == '/':
            j = i + 1
            if j < len(pattern) and pattern[j] == '/':
                # '//' matches across any number of address parts.
                # (was: '*\/' — an invalid escape; '/' needs no escaping)
                result += OSC_ADDR_SLASH_REGEXP + '*/'
                i = j
            else:
                result += re.escape(c)
        elif c == '?':
            result += OSC_ADDR_REGEXP
        elif c == '*':
            result += OSC_ADDR_REGEXP + '*'
        elif c == '[':
            j = pattern.index(']', i)
            sub = pattern[i + 1:j]
            result += '['
            if sub.startswith('!'):
                # '[!...]' is OSC's negated class, '^' in regex syntax
                sub = sub[1:]
                result += '^'
            result += '-'.join([re.escape(s) for s in sub.split('-')])
            result += ']'
            i = j
        elif c == '{':
            j = pattern.index('}', i)
            sub = pattern[i + 1:j]
            result += '('
            result += '|'.join([re.escape(s) for s in sub.split(',')])
            result += ')'
            i = j
        else:
            result += re.escape(c)
        i += 1
    return '^' + result + '$'
# read padded string from the beginning of a packet and return (value, tail)
def read_string(packet):
    """Split a NUL-terminated, 4-byte-padded ASCII string off *packet*."""
    end = packet.index(b'\x00')
    # the terminator always occupies at least one byte, so padding is
    # the next multiple of four strictly beyond the string itself
    skip = (end // 4 + 1) * 4
    return str(packet[:end], 'ascii'), packet[skip:]
# read padded blob from the beginning of a packet and return (value, tail)
def read_blob(packet):
    """Split a length-prefixed, 4-byte-padded OSC blob off *packet*.

    Returns:
        (blob_bytes, remaining_packet_bytes)
    """
    (size,) = struct.unpack('>I', packet[:4])
    rest = packet[4:]
    # Round up to the next multiple of 4.  Unlike OSC strings, a blob
    # whose length is already a multiple of 4 gets NO extra padding;
    # the old '(size // 4 + 1) * 4' skipped 4 spurious bytes in that
    # case and corrupted every following argument.
    padded = (size + 3) // 4 * 4
    return rest[:size], rest[padded:]
def parse_message(packet):
    """Parse a raw OSC packet into ``(path, [args])``.

    Supported type tags: 'i' int32, 'f' float32, 'h' int64, 's' string,
    'b' blob, 'T'/'F' booleans, 'N' None and 'I' Impulse.

    Raises:
        NotImplementedError: if the packet is an OSC bundle.
        RuntimeError: on an unrecognized type tag.
    """
    if packet.startswith(b'#bundle'):
        raise NotImplementedError('OSC bundles are not yet supported')
    tail = packet
    path, tail = read_string(tail)
    type_tag, tail = read_string(tail)
    args = []
    # fixed-width numeric tags: tag -> (struct format, byte width).
    # (the old per-tag branches also shadowed the builtin 'len')
    numeric = {'i': ('>i', 4), 'f': ('>f', 4), 'h': ('>q', 8)}
    for t in type_tag[1:]:  # skip the leading ','
        if t in numeric:
            fmt, width = numeric[t]
            value = struct.unpack(fmt, tail[:width])[0]
            tail = tail[width:]
        elif t == 's':
            value, tail = read_string(tail)
        elif t == 'b':
            value, tail = read_blob(tail)
        elif t == 'T':
            value = True
        elif t == 'F':
            value = False
        elif t == 'N':
            value = None
        elif t == 'I':
            value = Impulse
        else:
            raise RuntimeError('Unable to parse type "{}"'.format(t))
        args.append(value)
    return (path, args)
# convert string to padded osc string
def pack_string(s):
    """Encode *s* as a NUL-terminated ASCII string padded to 4 bytes."""
    encoded = (s + '\x00').encode('ascii')
    remainder = len(encoded) % 4
    if remainder:
        encoded += b'\x00' * (4 - remainder)
    return encoded
# convert bytes to padded osc blob
def pack_blob(b):
    """Encode *b* as a big-endian length-prefixed OSC blob padded to 4 bytes."""
    packed = struct.pack('>I', len(b)) + b
    remainder = len(packed) % 4
    if remainder:
        packed += b'\x00' * (4 - remainder)
    return packed
def pack_message(path, *args):
    """Pack an OSC message for *path*, deriving the type tag from *args*.

    Supported argument types: bool ('T'/'F'), int ('i'), float ('f'),
    str ('s'), bytes ('b'), Impulse ('I') and None ('N').

    Raises:
        NotImplementedError: for an unsupported argument type.
    """
    result = b''
    typetag = ','
    for arg in args:
        # bool must be tested before int: bool is an int subclass, and
        # isinstance checks (unlike the old 'type(arg) ==' comparisons)
        # also accept well-behaved subclasses of the supported types.
        if isinstance(arg, bool):
            typetag += 'T' if arg else 'F'
        elif isinstance(arg, int):
            result += struct.pack('>i', arg)
            typetag += 'i'
        elif isinstance(arg, float):
            result += struct.pack('>f', arg)
            typetag += 'f'
        elif isinstance(arg, str):
            result += pack_string(arg)
            typetag += 's'
        elif isinstance(arg, bytes):
            result += pack_blob(arg)
            typetag += 'b'
        elif arg is Impulse:
            typetag += 'I'
        elif arg is None:
            typetag += 'N'
        else:
            raise NotImplementedError('Unable to pack {}'.format(type(arg)))
    # path, typetag and every packed element are each already padded to a
    # multiple of 4 bytes, so the assembled message needs no further padding
    # (the old trailing ljust was dead code).
    return pack_string(path) + pack_string(typetag) + result
class OSCProtocol(asyncio.DatagramProtocol):
    """UDP datagram protocol that sends and dispatches OSC messages.

    Incoming datagrams are parsed and passed to every registered handler
    whose OSC address pattern matches the message path.
    """

    def __init__(self, handlers=None):
        """Create the protocol.

        Args:
            handlers: Optional mapping of OSC address pattern to a
                callable invoked as ``handler(addr, path, *args)``.
        """
        super().__init__()
        self._handlers = []
        if handlers:
            for pattern, handler in handlers.items():
                self.add_handler(pattern, handler)

    def add_handler(self, pattern, handler):
        # compile the address pattern once, at registration time
        pattern_re = re.compile(translate_pattern(pattern))
        self._handlers.append((pattern_re, handler))

    def connection_made(self, transport):
        self.transport = transport

    def datagram_received(self, data, addr):
        path, args = parse_message(data)
        # dispatch the message to every handler whose pattern matches
        for pattern_re, handler in self._handlers:
            if pattern_re.match(path):
                handler(addr, path, *args)

    def send(self, path, *args, addr=None):
        # addr may be None when the transport was created with remote_addr
        return self.transport.sendto(pack_message(path, *args), addr=addr)
async def send(target, path, *args, loop=None):
    """Fire-and-forget: open a UDP endpoint, send one OSC message, close.

    Args:
        target: ``(host, port)`` tuple for the remote endpoint.
        path: OSC address path of the message.
        *args: Message arguments (see pack_message for supported types).
        loop: Event loop to use; defaults to the current event loop.
    """
    loop = loop or asyncio.get_event_loop()
    transport, protocol = await loop.create_datagram_endpoint(OSCProtocol, remote_addr=target)
    protocol.send(path, *args)
    transport.close()
| {
"repo_name": "artfwo/aiosc",
"path": "aiosc.py",
"copies": "1",
"size": "6730",
"license": "mit",
"hash": 9058854128923644000,
"line_mean": 31.9901960784,
"line_max": 94,
"alpha_frac": 0.5569093611,
"autogenerated": false,
"ratio": 3.7851518560179978,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4842061217117998,
"avg_score": null,
"num_lines": null
} |
# AIPlayer.py
# base class for ai players
import itertools
from Players.Player import Player
from Game.Card import Card
class AIPlayer(Player):
"base class for ai players"
@staticmethod
def getOptions(hand, piles, requiredPoints):
options = []
typeOptions = []
# options.append([]) # we need to give the option of doing nothing
inPiles = {}
notInPiles = {}
wilds = {}
# first let's split the cards we have in our hand into two different parts,
# the cards that are in piles on the table, and the cards that aren't
for handPile in hand.getTypes():
# see if this is in piles
if handPile == 'R' or handPile == '2':
wilds[handPile] = hand.getCardCount(handPile)
elif handPile in piles:
inPiles[handPile] = hand.getCardCount(handPile)
else:
notInPiles[handPile] = hand.getCardCount(handPile)
# now, have we already laid down the required amount?
canLay = len(piles) > 0
if canLay == True:
# there are piles already on the table, do we have any we can lay down?
if(len(inPiles) > 0):
# sweet, we have some cards in our hand that are on the table, we can lay them down
# give the option to lay down only one type, or all if more than one
# TODO: give all options (if three cardTypes can be laid down, give options: [1], [2], [3], [1,2], [1,3], [2,3], [1,2,3])
for cardType, count in inPiles.iteritems():
options.append([cardType])
typeOptions.append(cardType)
# any clean stuff?
for cardType,count in notInPiles.iteritems():
if count > 2:
options.append([cardType])
typeOptions.append(cardType)
# now dirty
# if 'R' in notInPiles or '2' in notInPiles:
# # find sets of two and add wilds to them.
# for cardType,count in notInPiles.iteritems():
# if count == 2
# can we sprinkle the wilds in?
for cardType,count in wilds.iteritems():
typeOptions.append(cardType)
if len(typeOptions) >= 2:
for i in range(2, len(typeOptions)+1):
for x in itertools.combinations(typeOptions, i):
option = list(x)
if AIPlayer.isJustWilds(option):
continue
options.append(option)
else:
# we have to have enough points to lay down
# first gather clean options
cleanPoints = 0
cleanOptions = []
for cardType,count in notInPiles.iteritems():
if count > 2 and cardType != 'R' and cardType != '2':
cleanPoints += Card.getPoints(cardType, count)
cleanOptions.append(cardType)
# now gather dirty options
if cleanPoints >= requiredPoints:
canLay = True
# cool, so we can lay down cleanly, let's add these to the options
for cardType in preOptions:
options.append([cardType])
typeOptions.append(cardType)
else:
# still not enough points, do we have any wilds?
if 'R' in notInPiles:
points += Card.getPoints('R', notInPiles['R'])
if '2' in notInPiles:
points += Card.getPoints('2', notInPiles['R'])
if points >= requiredPoints:
canLay = True
# can we lay now?
# beforeLayOptions = []
# # get options and points
# cleanPoints = 0
# cleanOptions = []
# for cardType,count in notInPiles.iteritems():
# if count > 2 and cardType != 'R' and cardType != '2':
# cleanPoints += Card.getPoints(cardType, count)
# cleanOptions.append(cardType)
# if canLay == False:
# if cleanPoints > requiredPoints:
# # okay, so we haven't laid down, but we have enough cleanPoints to lay down
# options.append(cleanOptions)
# canLay = true
# else:
# # okay, we can't lay down cleanly, but do we have enough to lay down dirty?
# if 'R' in notInPiles or '2' in notInPiles:
# # we have wilds... iterate over cleanOptions
# else:
# # there are piles already on the table, we can put what we have into them
# if(len(inPiles) > 0):
# # yes, we have some cards in our hand that are on the table, we can lay them down
# for cardType, count in inPiles.iteritems():
# options.append(cardType)
options.append([]) # we need to give the option of doing nothing
print "number of options", len(options)
print options
for i in range(0,len(options)):
print "option " + `i+1`
for card in options[i]:
print card+": "+`hand.getCardCount(card)`
print ""
# print options
# print ""
@staticmethod
def isJustWilds(option):
    """Return True if *option* holds only wild cards ('R' or '2').

    An empty option counts as all-wild.
    """
    for x in option:
        # was "x != 'R' or x != '2'", which is true for every card and
        # made the function return False for any non-empty option
        if x != 'R' and x != '2':
            return False
    return True
| {
"repo_name": "ryanrborn/hand-and-foot-ai",
"path": "Players/AI/AIPlayer.py",
"copies": "1",
"size": "4404",
"license": "mit",
"hash": 9066730789626149000,
"line_mean": 29.7972027972,
"line_max": 125,
"alpha_frac": 0.6580381471,
"autogenerated": false,
"ratio": 3.010252904989747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4168291052089747,
"avg_score": null,
"num_lines": null
} |
"""aiplay URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import routers
from rest_framework_jwt.views import (
obtain_jwt_token,
refresh_jwt_token,
verify_jwt_token,
)
# No viewsets registered yet; kept for future REST endpoints (see the
# commented-out include at the bottom of urlpatterns).
router = routers.SimpleRouter()

urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # API
    url(r'^account/', include('account.urls', namespace='accounts')),
    url(r'^problem/', include('problem.urls', namespace='problem')),
    url(r'^submission/', include('submission.urls', namespace='submission')),
    url(r'^utils/', include('utils.urls')),
    # third party
    url(r'^docs/', include('rest_framework_docs.urls')),
    # JWT auth endpoints: obtain, refresh and verify tokens
    url(r'^api-token-auth/', obtain_jwt_token),
    url(r'^api-token-refresh/', refresh_jwt_token),
    url(r'^api-token-verify/', verify_jwt_token),
    # url(r'', include(router.urls)),
]
| {
"repo_name": "zerolfx/aiplay-api",
"path": "aiplay/urls.py",
"copies": "1",
"size": "1486",
"license": "mit",
"hash": -3086638415991074000,
"line_mean": 32.7727272727,
"line_max": 79,
"alpha_frac": 0.6769851952,
"autogenerated": false,
"ratio": 3.5213270142180093,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4698312209418009,
"avg_score": null,
"num_lines": null
} |
# ai.py
# when user messages need to be processed using the wit.ai NLP platform, convert user input to narrowly defined outputs for easy processing
import os
from wit import Wit
import json
access_token = os.environ['WIT_AI_TOKEN']
client = Wit(access_token = access_token)
# TEST REQUEST
#message_text = "ELM 15 July"
#resp = client.message(message_text)
#print(resp)
# when the user types something, this method is called to convert message into a narrowly defined output to make things easier to process
# at the moment, it will return the masque and date typed by the user for further processing as these are the entities defined in wit.ai
def wit_response(message_text):
    """Send *message_text* to wit.ai and extract up to two entities.

    Args:
        message_text: Raw user message to run through wit.ai NLP.

    Returns:
        ``(entity, value, entity2, value2)`` — names and values of the
        first two entities found; fields stay None when wit.ai returns
        fewer entities than expected.
    """
    resp = client.message(message_text)
    entity = None
    value = None
    entity2 = None
    value2 = None
    try:
        entity = list(resp['entities'])[0]
        value = resp['entities'][entity][0]['value']
        entity2 = list(resp['entities'])[1]
        value2 = list(resp['entities']['datetime'][0]['values'])[0]['value']
    except (KeyError, IndexError):
        # fewer entities than expected: keep whatever was extracted so
        # far (was a bare 'except: pass', which also hid real bugs)
        pass
    return (entity, value, entity2, value2)
# using the output given by wit.ai, the specific mosque and date will be extracted for further processing
def extract_info(info):
    """Map a (entity, value, entity2, value2) tuple from wit_response to
    a (masjid, date) pair; any missing piece comes back as None."""
    first = str(info[0])
    second = str(info[2])
    if first == "masjid" and second == "datetime":
        return (str(info[1]), str(info[3]))
    if first == "masjid":
        return (str(info[1]), None)
    if first == "datetime":
        return (None, str(info[1]))
    return (None, None)
# EXAMPLE WIT.AI REQUESTS
#info = wit_response("I want the prayer times for ELM on July 29")
#masjid, date = extract_info(info)
#print("MASJID:", masjid)
#print("DATE:", date[:10])
#info = wit_response("I want the prayer times for ELM")
#masjid, date = extract_info(info)
#print("MASJID:", masjid)
#print("DATE:", date)
#info = wit_response("I want the prayer times for 27 March")
#masjid, date = extract_info(info)
#print("MASJID:", masjid)
#print("DATE:", date)
| {
"repo_name": "tauseef-khan/london-prayer-times-bot",
"path": "ai.py",
"copies": "1",
"size": "1986",
"license": "mit",
"hash": -6168821735702935000,
"line_mean": 22.9277108434,
"line_max": 139,
"alpha_frac": 0.6933534743,
"autogenerated": false,
"ratio": 2.9909638554216866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8968632290324693,
"avg_score": 0.04313700787939877,
"num_lines": 83
} |
"""Airbase-ng."""
from .executor import ExecutorHelper
class AirbaseNg(ExecutorHelper):
    """Airbase-ng 1.6 - (C) 2008-2020 Thomas d'Otreppe
    Original work: Martin Beck
    https://www.aircrack-ng.org
    Usage: airbase-ng <options> <replay interface>
    Options:
    -a <bssid> : set Access Point MAC address
    -i <iface> : capture packets from this interface
    -w <key> : use this WEP key to en-/decrypt packets
    -h <MAC> : source mac for MITM mode
    -f <disallow> : disallow specified client MACs (default: allow)
    -W 0|1 : [don't] set WEP flag in beacons 0|1 (default: auto)
    -q : quiet (do not print statistics)
    -v : verbose (print more messages)
    -A : Ad-Hoc Mode (allows other clients to peer)
    -Y <proc> : (in|out|both) external packet processing
    -c <channel> : sets the channel the AP is running on
    -X : hidden ESSID
    -s : force shared key authentication (default: auto)
    -S : set shared key challenge length (default: 128)
    -L : Caffe-Latte WEP attack
    -N : cfrag WEP attack (recommended)
    -x <nbpps> : number of packets per second (default: 100)
    -y : disables responses to broadcast probes
    -0 : set all WPA,WEP,open tags.
    -z <type> : sets WPA1 tags.
    -Z <type> : same as -z, but for WPA2
    -V <type> : fake EAPOL 1=MD5 2=SHA1 3=auto
    -F <prefix> : write all sent and received frames into pcap file
    -P : respond to all probes, even when specifying ESSIDs
    -I <interval> : sets the beacon interval value in ms
    -C <seconds> : enables beaconing of probed ESSID values
    -n <hex> : User specified ANonce when doing the 4-way handshake
    --bssid <MAC> : BSSID to filter/use
    --bssids <file> : read a list of BSSIDs out of that file
    --client <MAC> : MAC of client to filter
    --clients <file> : read a list of MACs out of that file
    --essid <ESSID> : specify a single ESSID (default: default)
    --essids <file> : read a list of ESSIDs out of that file
    --help : Displays this usage screen
    """
    # NOTE(review): the docstring above is the verbatim airbase-ng usage
    # screen; ExecutorHelper presumably parses it to build the accepted
    # option set, so its wording/format must not be edited — confirm against
    # the ExecutorHelper implementation before changing it.
    # Executable name launched by the helper.
    command = 'airbase-ng'
    # airbase-ng needs neither a scratch file nor a scratch directory.
    requires_tempfile = False
    requires_tempdir = False
| {
"repo_name": "XayOn/pyrcrack",
"path": "pyrcrack/airbase.py",
"copies": "1",
"size": "2507",
"license": "isc",
"hash": -551328248622173100,
"line_mean": 47.2115384615,
"line_max": 79,
"alpha_frac": 0.5508575987,
"autogenerated": false,
"ratio": 3.7699248120300752,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48207824107300756,
"avg_score": null,
"num_lines": null
} |
"""airbrake-flask - Airbrake client for Python Flask
airbrake-flask is a fast library that use the amazing requests library to send
error, exception messages to airbrake.io. You can use this library with the
amazing gevent library to send your request asynchronously.
Example Usage with gevent
-------------------------
from flask import Flask, request, got_request_exception
from airbrake.airbrake import AirbrakeErrorHandler
import gevent
import sys
app = Flask(__name__)
ENV = ('ENV' in os.environ and os.environ['ENV']) or 'prod'
def log_exception(error):
handler = AirbrakeErrorHandler(api_key="PUT_YOUR_AIRBRAKE_KEY_HERE",
env_name=ENV, request=request)
gevent.spawn(handler.emit, error, sys.exc_info())
got_request_exception.connect(log_exception, app)
Contribute
----------
This library is hosted on Github and you can contribute there:
http://github.com/kienpham2000/airbrake-flask
"""
# Trove classifiers advertised on PyPI, one entry per line of the literal.
classifiers = """\
Development Status :: 5 - Production/Stable
Environment :: Console
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Operating System :: OS Independent
Programming Language :: Python :: 2
Topic :: Software Development :: Quality Assurance
Topic :: Software Development :: Testing
""".splitlines()
from setuptools import setup
from airbrake import __version__, __app_url__, __app_name__
# The module docstring doubles as packaging metadata: line 0 becomes the
# short description and everything from line 2 onwards the long description.
doc = __doc__.splitlines()
setup(
    name=__app_name__,
    version=__version__,
    packages=['airbrake'],
    zip_safe=False,
    author='Kien Pham, Kane Kim',
    author_email='kien@calolo.com',
    url=__app_url__,
    license='MIT',
    description=doc[0],
    long_description='\n'.join(doc[2:]),
    install_requires=['requests', 'six'],
    # Extra dependencies pulled in only for running the test suite.
    extras_require={
        'test': [
            'nose',
            'coverage',
            'mock',
            'blinker',
            'Flask',
            'gevent'
        ]
    },
    test_suite="tests",
    keywords='error airbrake flask exception',
    classifiers=classifiers
)
| {
"repo_name": "kienpham2000/airbrake-flask",
"path": "setup.py",
"copies": "1",
"size": "1983",
"license": "mit",
"hash": -3182732716280509400,
"line_mean": 26.9295774648,
"line_max": 78,
"alpha_frac": 0.6651538074,
"autogenerated": false,
"ratio": 3.8061420345489445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49712958419489445,
"avg_score": null,
"num_lines": null
} |
# airconics_setup.py Setup file: here you can specify top level system variables
# ==============================================================================
# AirCONICS
# Aircraft CONfiguration through Integrated Cross-disciplinary Scripting
# version 0.2.1
# Andras Sobester, 2015.
# Bug reports to a.sobester@soton.ac.uk or @ASobester please.
# ==============================================================================
import sys
# *** There are three entries to edit here ***
# ONE:
# The string below should contain the path to your installation of AirCONICS
# Example: AirCONICSpath = "C:/Users/as/Documents/airconicsv021/"
# Absolute path to the AirCONICS installation; appended to sys.path below so
# the AirCONICS modules become importable inside Rhino.
AirCONICSpath = "/Users/bruce/github/foil-generator/airconicsv02122/"
# TWO:
# The string below should contain the path to your library of Selig-formatted
# airfoils. If you are using the UIUC library included in this installation,
# this should be the path to the coord_seligFmt folder included.
# Example: SeligPath = "C:/Users/as/Documents/airconicsv021/coord_seligFmt/"
SeligPath = "/Users/bruce/github/foil-generator/airconicsv021/coord_seligFmt/"
# THREE:
# Set this to
# 1 if you are running Rhino for Windows and
# 2 if you are running the MacOS version
RhinoVersion = 2
# ==============================================================================
# NOTE(review): Python 2 print statement — this script targets the (Iron)Python
# interpreter embedded in Rhino, not Python 3.
print "System variables initialised."
sys.path.append(AirCONICSpath)
| {
"repo_name": "teknologika/foil-generator",
"path": "airconicsv021/airconics_setup.py",
"copies": "1",
"size": "1422",
"license": "apache-2.0",
"hash": 6097659105402468000,
"line_mean": 38.6285714286,
"line_max": 80,
"alpha_frac": 0.6153305204,
"autogenerated": false,
"ratio": 3.6555269922879177,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4770857512687917,
"avg_score": null,
"num_lines": null
} |
# AirCONICStools.py ============================================================
# Ancillary methods called by the higher level AirCONICS functions.
# ==============================================================================
# AirCONICS
# Aircraft CONfiguration through Integrated Cross-disciplinary Scripting
# version 0.2.1
# Andras Sobester, 2015.
# Bug reports to a.sobester@soton.ac.uk or @ASobester please.
# ==============================================================================
# Preamble
from __future__ import division
import math, random, cmath, os, bisect, rhinoscriptsyntax as rs
import airconics_setup
def AddTEtoOpenAirfoil(AirfoilCurve):
    """Close an open airfoil curve at the trailing edge.

    If *AirfoilCurve* is open, a straight line is added between its two end
    points and joined with the curve via Rhino's ``_Join`` command; the id of
    the joined curve is returned.  A closed curve is returned unchanged.
    """
    # If the airfoil curve given as an argument is open at the trailing edge, it adds
    # a line between the ends of the curve and joins this with the rest of the curve.
    if rs.IsCurveClosed(AirfoilCurve) == False:
        EP1 = rs.CurveEndPoint(AirfoilCurve)
        # Reverse to reach the opposite end point, then restore orientation.
        rs.ReverseCurve(AirfoilCurve)
        EP2 = rs.CurveEndPoint(AirfoilCurve)
        rs.ReverseCurve(AirfoilCurve)
        Closure = rs.AddLine(EP1,EP2)
        # The join is driven through the command line, so the selection state
        # must be set up explicitly before issuing _Join.
        rs.UnselectAllObjects()
        rs.SelectObject(Closure)
        rs.SelectObject(AirfoilCurve)
        rs.Command("_Join ")
        LO = rs.LastCreatedObjects()
        AirfoilCurve = LO[0]
        rs.UnselectAllObjects()
    return AirfoilCurve
def ObjectsExtents(ObjectIds):
    """Compute the extents of the given objects in the current coordinate
    system.

    Arguments: ObjectIds - object(s) whose bounding box is measured.
    Returns: (Xmin, Ymin, Zmin, Xmax, Ymax, Zmax) taken over the corner
    points of the Rhino bounding box.
    """
    BB = rs.BoundingBox(ObjectIds)
    # Collect each coordinate with comprehensions instead of the original
    # unidiomatic list.append(lst, item) calls and unused enumerate index.
    XVec = [P.X for P in BB]
    YVec = [P.Y for P in BB]
    ZVec = [P.Z for P in BB]
    return (min(XVec), min(YVec), min(ZVec), max(XVec), max(YVec), max(ZVec))
def MirrorObjectXZ(ObjectId):
    """Mirror an object with respect to the XoZ plane.

    Argument: the object to be mirrored.
    Returns: the id of the mirrored copy if successful.
    """
    # Reflection about XoZ is simply a negated y axis (diagonal -1 entry);
    # the final True asks Rhino to keep the original and return a copy.
    reflect_y = [[1, 0, 0, 0],
                 [0, -1, 0, 0],
                 [0, 0, 1, 0],
                 [0, 0, 0, 1]]
    return rs.TransformObject(ObjectId, reflect_y, True)
def ScaleObjectWorld000(ObjectId, Scaling):
    """Scale an object about the World origin (0,0,0).

    Similar to Rhino's ScaleObject, but that command scales with respect to
    the current construction plane; this one always uses the World system.
    Arguments: ObjectId - the object to be scaled
               Scaling  - three-element sequence of x, y, z scale factors
    Returns: the transformed object id.
    """
    world_scale = rs.XformScale(Scaling)
    return rs.TransformObjects(ObjectId, world_scale)
def frange(r1, r2, incr):
    """A float version of the built-in range; yields r1, r1+incr, ... while
    the running value stays <= r2 (end point included, subject to floating
    point accumulation error).
    """
    # NOTE - due to round-off issues this is due to be replaced in the next ver.
    val = r1
    while True:
        if val > r2:
            return
        yield val
        val = val + incr
def pwfrange(start, stop, n):
    """Return n evenly spaced values from start to stop inclusive.

    Each point is computed directly as a weighted average of the end points,
    so the spacing is immune to the round-off accumulation that frange has.
    """
    nm1 = n - 1
    scale = 1.0 / nm1
    # Same arithmetic expression as the accumulating original, evaluated
    # per-index inside a comprehension.
    return [scale * (start*(nm1 - i) + stop*i) for i in range(n)]
def CrossPlatformExtrudeSurface(SurfaceId, CurveId, Capped = True):
    """Extrude SurfaceId along CurveId, working on both Rhino platforms.

    rs.ExtrudeSurface is not implemented in Rhino for OS X, so on that
    platform (RhinoVersion != 1) the extrusion is driven through the command
    line instead.  NOTE(review): the Capped argument is only honoured on the
    Windows path — confirm whether the command-line path caps the solid.
    """
    # rs.ExtrudeSurface not implemented in Rhino for OS X
    if airconics_setup.RhinoVersion==1:
        SolidId = rs.ExtrudeSurface(SurfaceId, CurveId, Capped)
    else:
        # Select the rail curve first so _SelPrev can re-pick it after the
        # selection is cleared and the surface selected.
        rs.SelectObject(CurveId)
        rs.Command("_SelNone")
        rs.SelectObject(SurfaceId)
        rs.Command("_ExtrudeSrfAlongCrv _SelPrev")
        SolidId = rs.LastCreatedObjects()
        rs.Command("_SelNone")
    return SolidId
def blendcorners(polyline_id, radius):
    """Fillet the corners of a polyline (adapted from the McNeel website).

    For each segment, points are pulled in from both ends by *radius*
    (clamped to half the segment length) and mid-points inserted so that a
    degree-5 curve through the new vertices rounds every corner.  The
    original polyline is deleted and the new curve id returned.  Returns
    None when given no polyline, no vertices, or no radius.
    """
    # Fillets the corners of a polyline (from the McNeel website)
    if not polyline_id: return
    vertices = rs.PolylineVertices(polyline_id)
    if not vertices: return
    if radius is None: return
    between = lambda a,b: (a+b)/2.0
    newverts = []
    for i in range(len(vertices)-1):
        a = vertices[i]
        b = vertices[i+1]
        segmentlength = rs.Distance(a, b)
        vec_segment = rs.PointSubtract(b, a)
        vec_segment = rs.VectorUnitize(vec_segment)
        # Clamp the pull-in distance so the blend points cannot cross.
        if radius<(0.5*segmentlength):
            vec_segment = rs.VectorScale(vec_segment, radius)
        else:
            vec_segment = rs.VectorScale(vec_segment, 0.5*segmentlength)
        w1 = rs.PointAdd(a, vec_segment)
        w2 = rs.PointSubtract(b, vec_segment)
        # Six control points per segment: corner, blends, and mid-points.
        newverts.append(a)
        newverts.append(between(a,w1))
        newverts.append(w1)
        newverts.append(between(w1,w2))
        newverts.append(w2)
        newverts.append(between(w2,b))
    newverts.append(vertices[len(vertices)-1])
    CrvId = rs.AddCurve(newverts, 5)
    rs.DeleteObject(polyline_id)
    return CrvId
def coslin(TransitionPoint):
    """Build abscissas with cosine spacing up to TransitionPoint and linear
    spacing from there to 1.

    The cosine region corresponds to a quarter period, which clusters points
    near 0 — a distribution suitable for airfoils defined by points.
    TransitionPoint must be in the range [0,1].  Returns (Abscissa,
    NCosPoints).
    """
    NCosPoints = 8
    NLinPoints = 8
    cos_step = math.pi/(2*(NCosPoints-1.0))
    lin_step = (1-TransitionPoint)/NLinPoints
    # Cosine-clustered portion from 0 to TransitionPoint.
    Abscissa = [TransitionPoint*(1.0-math.cos(ang))
                for ang in frange(0.0, math.pi/2 + 0.001, cos_step)]
    # Evenly spaced portion from TransitionPoint up to 1.
    Abscissa += [stp + TransitionPoint
                 for stp in frange(lin_step, 1.0 + 0.001 - TransitionPoint, lin_step)]
    return Abscissa, NCosPoints
# Cubic equation solver based on Simple Recipes in Python by W. Park (1999) ====
def polar(x, y, deg=0):  # radian if deg=0; degree if deg=1
    """Convert Cartesian (x, y) to polar (radius, angle)."""
    from math import hypot, atan2, pi
    radius = hypot(x, y)
    angle = atan2(y, x)
    if deg:
        # Same evaluation order as the original: scale first, divide last.
        return radius, 180.0 * angle / pi
    return radius, angle
def cbrt(x):
    """Real-valued cube root (math.pow alone rejects negative bases)."""
    from math import pow
    magnitude = pow(abs(x), 1.0/3.0)
    if x >= 0:
        return magnitude
    return -magnitude
def quadratic(a, b, c=None):
    """Solve a quadratic equation.

    Called as ``quadratic(a, b, c)`` it solves a*x**2 + b*x + c = 0; called
    with two arguments it treats them as the already-normalised monic
    coefficients of x**2 + a*x + b = 0.  Returns the two roots, which are
    floats when the discriminant is non-negative and complex otherwise.
    """
    import math, cmath
    # BUG FIX: the original tested "if c:", so a literal zero constant term
    # (e.g. quadratic(1, 2, 0)) skipped normalisation and solved the wrong
    # equation.  Only an absent third argument selects the two-argument form.
    if c is not None:  # (ax^2 + bx + c = 0)
        a, b = b / float(a), c / float(a)
    t = a / 2.0
    r = t**2 - b
    if r >= 0:  # real roots
        y1 = math.sqrt(r)
    else:  # complex roots
        y1 = cmath.sqrt(r)
    y2 = -y1
    return y1 - t, y2 - t
def cubic(a, b, c, d=None):
    """Solve a cubic equation (Cardano-style, via quadratic/cbrt/polar).

    Called as ``cubic(a, b, c, d)`` it solves a*x**3 + b*x**2 + c*x + d = 0;
    called with three arguments it treats them as the already-normalised
    monic coefficients.  Returns the three roots as a tuple.
    """
    from math import cos
    # BUG FIX: the original tested "if d:", so a literal zero constant term
    # (e.g. cubic(1, -3, 0, 0)) skipped normalisation and solved the wrong
    # equation.  Only an absent fourth argument selects the 3-argument form.
    if d is not None:
        a, b, c = b / float(a), c / float(a), d / float(a)
    t = a / 3.0
    # Depressed-cubic coefficients after the substitution x = y - t.
    p, q = b - 3 * t**2, c - b * t + 2 * t**3
    u, v = quadratic(q, -(p/3.0)**3)
    if isinstance(u, complex):  # complex cubic root
        r, w = polar(u.real, u.imag)
        y1 = 2 * cbrt(r) * cos(w / 3.0)
    else:  # real root
        y1 = cbrt(u) + cbrt(v)
    # Remaining roots come from the quadratic factor.
    y2, y3 = quadratic(y1, p + y1**2)
    return (y1 - t, y2 - t, y3 - t)
#===============================================================================
def linear_interpolation(x, y):
    """Return a function that linearly interpolates the given (x, y) data.

    Queries below x[0] clamp to y[0]; queries above x[-1] clamp to y[-1].
    The input sequences are copied, so later mutation has no effect.
    """
    xs = x[:]
    ys = y[:]
    def interp(v):
        j = bisect.bisect_left(xs, v)
        if j <= 0:
            return ys[0]
        if j >= len(xs):
            return ys[-1]
        i = j - 1
        return ys[i] + (v - xs[i]) * (ys[j] - ys[i]) / (xs[j] - xs[i])
    return interp
def boxevopmin2d(funct, x0, deltax, xtol, MaxIter):
    """Minimise a 2d function with a simple evolutionary (pattern) search.

    Arguments: funct  - objective taking a 2-element [x, y] list
               x0     - starting point, 2-element list
               deltax - initial step sizes per axis (NOTE: mutated in place)
               xtol   - stop once max step size falls below this
               MaxIter - iteration cap
    Returns: (xmin, fmin), the best point found and its objective value.
    """
    # A simple, 2d evolutionary optimizer to eliminate the need for scipy (only
    # compatible at the moment on 32-bit Windows installations of Rhino)
    def _evopmatrix2d(x0,deltax):
        # Internal function - generates search matrix around current iteration
        x1 = [x0[0]+deltax[0],x0[1]]
        x2 = [x0[0]+deltax[0],x0[1]+deltax[1]]
        x3 = [x0[0]+deltax[0],x0[1]-deltax[1]]
        x4 = [x0[0],x0[1]+deltax[1]]
        x5 = [x0[0],x0[1]-deltax[1]]
        x6 = [x0[0]-deltax[0],x0[1]]
        x7 = [x0[0]-deltax[0],x0[1]+deltax[1]]
        x8 = [x0[0]-deltax[0],x0[1]-deltax[1]]
        return [x0, x1, x2, x3, x4, x5, x6, x7, x8]
    Iter = 1
    f = []
    # Evaluate the starting point
    list.append(f,funct(x0))
    while (Iter < MaxIter):
        # Python 2 print statement - this module runs in Rhino's IronPython.
        print "EVOP optimizer iteration", Iter
        # Generate the eight points around it
        xmat = _evopmatrix2d(x0,deltax)
        # ...and evaluate them
        for i in range(1,9):
            list.append(f, funct(xmat[i]))
        # f and xmat should now contain 9 points
        m = min(f)
        mi = [i for i, j in enumerate(f) if j == m]
        # Point mi is the best and should form the basis of the next iteration
        x0 = xmat[mi[0]]
        f = []
        list.append(f, m)
        # Ready to construct new matrix around this point or finish
        if mi[0]==0:
            # We are stuck, step size reduction is needed
            # - cut both by the golden ratio
            deltax[0] = 0.618*deltax[0]
            deltax[1] = 0.618*deltax[1]
            print "Step size reduced to", deltax[0], deltax[1]
            if max(deltax) < xtol:
                # Trigger an exit if tolerance xtol is reached
                print "Step size is now below xtol - stopping"
                Iter = MaxIter
        Iter = Iter + 1
    xmin = x0
    fmin = f[0]
    return xmin, fmin
def AssignMaterial(object_id, Material):
    """Assign one of a set of pre-defined visual material presets.

    Adds simple, pre-defined material surface property sets to objects. Note
    that these are simply visual properties - they do not make the model
    suitable for centre of gravity, etc. calculations.  Unknown material
    names leave the new material at Rhino's defaults.
    """
    # Each preset maps a material name to (colour, shine, transparency).
    presets = {
        "White_composite_external": ((255, 255, 255), 100, 0),
        "Plexiglass": ((255, 255, 255), 255, 0.8),
        "Skin": ((229, 184, 143), 0, 0),
        "Panel": ((0, 0, 0), 0, 0),
        "PropDisk": ((255, 255, 255), 0, 0.9),
        "Structure": ((0, 0, 0), 0, 0),
        "ShinyBABlueMetal": ((0, 32, 91), 150, 0),
        "ShinyBARedMetal": ((218, 41, 28), 150, 0),
        "UnpaintedMetal": ((188, 198, 204), 30, 0),
        "FanDisk": ((0, 0, 0), 30, 0.2),
        "ShinyBlack": ((0, 0, 0), 120, 0),
    }
    MatInd = rs.AddMaterialToObject(object_id)
    if Material in presets:
        colour, shine, transparency = presets[Material]
        rs.MaterialColor(MatInd, colour)
        rs.MaterialShine(MatInd, shine)
        rs.MaterialTransparency(MatInd, transparency)
    # The external composite preset additionally sets a reflective colour.
    if Material == "White_composite_external":
        rs.MaterialReflectiveColor(MatInd, (255, 255, 255))
def CutSect(SurfaceId, SpanStation):
    """Cut a spanwise section through a surface and find its apparent chord.

    SpanStation is assumed to be along the y direction, in the range [0,1].
    Returns (Section, Chord): the intersection curve at that station and a
    line from its aftmost to its foremost point (by x coordinate).
    """
    (Xmin,Ymin,Zmin,Xmax,Ymax,Zmax) = ObjectsExtents(SurfaceId)
    YStation = Ymin + (Ymax-Ymin)*SpanStation
    # Pad the cutting plane so it safely overhangs the surface on all sides.
    OriginX = Xmin -1
    OriginZ = Zmin -1
    CutPlane = rs.PlaneFromPoints((OriginX, YStation, OriginZ), (Xmax+1, YStation, OriginZ), (OriginX, YStation, Zmax + 1))
    CutPlaneSrf = rs.AddPlaneSurface(CutPlane, max([(Xmax-Xmin),(Ymax-Ymin),(Zmax-Zmin)])+1, max([(Xmax-Xmin),(Ymax-Ymin),(Zmax-Zmin)])+1 )
    # NOTE(review): only the first intersection curve is kept; a multi-loop
    # intersection would be truncated here - confirm for complex surfaces.
    I = rs.IntersectBreps(CutPlaneSrf, SurfaceId)
    Section = I[0]
    rs.DeleteObject(CutPlaneSrf)
    (Xmin,Ymin,Zmin,Xmax,Ymax,Zmax) = ObjectsExtents(Section)
    # Find the apparent chord of the section (that is, the line connecting the fore
    # most and aftmost points on the curve
    DivPoints = rs.DivideCurve(Section, 200)
    Xs = []
    Ys = []
    Zs = []
    for DP in DivPoints:
        list.append(Xs, DP[0])
        list.append(Ys, DP[1])
        list.append(Zs, DP[2])
    # Leading edge = minimum x sample, trailing edge = maximum x sample.
    val, idx = min((val, idx) for (idx, val) in enumerate(Xs))
    LeadingPoint = [Xs[idx], Ys[idx], Zs[idx]]
    val, idx = max((val, idx) for (idx, val) in enumerate(Xs))
    TrailingPoint = [Xs[idx], Ys[idx], Zs[idx]]
    Chord = rs.AddLine(TrailingPoint, LeadingPoint)
    return Section, Chord
"repo_name": "teknologika/foil-generator",
"path": "airconicsv021/AirCONICStools.py",
"copies": "1",
"size": "13188",
"license": "apache-2.0",
"hash": 3198392840716299300,
"line_mean": 32.170984456,
"line_max": 139,
"alpha_frac": 0.5794661814,
"autogenerated": false,
"ratio": 3.2260273972602738,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4305493578660274,
"avg_score": null,
"num_lines": null
} |
"""
This module contains a library of classes devoted to modeling aircraft parts.
The main purpose of this library is to model various types of aircraft parts.
Currently only wing objects are suported, however in the future it is possible
that fuselages as well as other parts will be added.
:SUMARRY OF THE CLASSES:
- `Wing`: Creates a wing aircraft. This wing is capable of modeling the
structures of the aircraft wing as well as the aerodynamics. The structures
are modeled with a combination of beam models currently, however it is
likely that Ritz method laminates will also incorporated for buckling
prediction purposes. The aerodynamics are currently modeled with potential
flow doublet panels.
"""
__docformat__ = 'restructuredtext'
# =============================================================================
# AeroComBAT MODULES
# =============================================================================
from Structures import WingSection, Laminate
from Aerodynamics import CAERO1
# =============================================================================
# IMPORT ANACONDA ASSOCIATED MODULES
# =============================================================================
import numpy as np
import mayavi.mlab as mlab
# =============================================================================
# DEFINE AeroComBAT AIRCRAFT PART CLASSES
# =============================================================================
class Wing:
    """An aircraft wing part combining a beam-based structural model with
    doublet-lattice aerodynamic panel surfaces.

    NOTE(review): this class uses Python 2 idioms (``dict.iteritems``), so it
    will not run unmodified on Python 3.
    """
    def __init__(self,PID,p1,p2,croot,ctip,x0_spar,xf_spar,Y_rib,n_ply,m_ply,mat_lib,**kwargs):
        """Creates a wing object.
        This object represents a wing and contains both structural and
        aerodynamic models.
        :Args:
        - `PID (int)`: The integer ID linked to this part.
        - `p1 (1x3 np.array[float])`: The initial x,y,z coordinates of the wing.
        - `p2 (1x3 np.array[float]`: The final x,y,z coordinates of the wing.
        - `croot (float)`: The root chord length.
        - `ctip (float)`: The tip chord length.
        - `x0_spar (float)`: The non-dimensional starting location of the cross
        section.
        - `xf_spar (float)`: The non-dimensional ending location of the cross
        section.
        - `Y_rib (1xN Array[float])`: The non-dimensional rib locations within
        the wing. This dimension is primarily used to create wing-sections
        which primarily define the buckling span's for laminate objects.
        - `n_ply (1xM Array[int])`: An array of integers specifying the number
        plies to be used in the model. Each integer refers to the number of
        plies to be used for at a given orientation.
        - `m_ply (1xM Array[int])`: An array of integers specifying the
        material ID to be used for the corresponding number of plies in
        n_ply at a given orientation.
        - `th_ply (1xM Array[int])`: An array of floats specifying the
        degree orientations of the plies used by the lamiantes in the
        model.
        - `mat_lib (obj)`: A material library containing all of the material
        objets to be used in the model.
        - `name (str)`: The name of the airfoil section to be used for cross
        section generation.
        - `wing_SNID (int)`: The first node ID associated with the wing.
        - `wing_SEID (int)`: The first beam element ID associated with the wing.
        - `wing_SSBID (int)`: The first superbeam ID associated with the wing.
        - `SXID (int)`: The starting cross-section ID used by the wing.
        - `noe (float)`: The number of beam elements to be used in the wing per
        unit length.
        - `n_orients (int)`: The number of fiber orientations to be used in
        each laminate.
        - `n_lams (int)`: The number of laminates required to mesh the desired
        cross-section.
        - `meshSize (float)`: The maximum aspect ratio a 2D element may have in
        the cross-section.
        - `ref_ax (str)`: The reference axis to be loaded in the wing.
        - `chordVec (1x3 np.array[float])`: This numpy array is used to orient
        the cross-section in 3D space. It corresponds to the local unit x-
        vector in the cross-section, expressed in the global frame.
        - `typeXSect (str)`: The type of cross-section to be used by the wing
        structure. Currently the suported typed are 'boxbeam', 'laminate',
        and 'rectBoxBeam'. See the meshing class in the structures module
        for more details.
        :Returns:
        - None
        """
        #The type of the object
        self.type='wing'
        # Initialize the array holding wing sections
        self.wingSects = []
        # Initialize the wing ID
        self.PID = PID
        # Initialize Lifting surface Array
        self.liftingSurfaces = {}
        # Name of the airfoil section (used to generate the OML shape of the x-section)
        name = kwargs.pop('name','NACA0012')
        # The initial starting node ID for the structural generation of the wing
        tmp_SB_SNID = kwargs.pop('wing_SNID',0)
        # The initial beam element EID for the first superbeam ID
        tmp_SB_SEID = kwargs.pop('wing_SEID',0)
        # The initial starting superbeam ID
        tmp_SB_SBID = kwargs.pop('wing_SSBID',0)
        # The starting cross-section ID
        SXID = kwargs.pop('SXID',0)
        # The number of beam elements to be used per unit length
        noe = kwargs.pop('noe',10)
        # The number of fiber orientations to be used in each laminate
        n_orients = kwargs.pop('n_orients',4)
        # The number of laminates required to mesh the desired cross-section
        n_lams = kwargs.pop('n_lams',4)
        # Laminate symmetry
        lam_sym = kwargs.pop('lam_sym',False)
        # The maximum aspect ratio a 2D element may have in the cross-section
        meshSize = kwargs.pop('meshSize',4)
        # The reference axis to be loaded in the wing
        ref_ax = kwargs.pop('ref_ax','shearCntr')
        # Chord vector for wing
        chordVec = kwargs.pop('chordVec',np.array([1.,0.,0.]))
        # Orientations of each ply in the lamiante
        th_ply = kwargs.pop('th_ply',[0]*len(n_ply))
        # Type of cross-section
        typeXSect = kwargs.pop('typeXSect','box')
        # Calculate the wing span:
        b_s = np.linalg.norm(p2-p1)
        # Lambda function to calculate average panel chord length on on the fly.
        chord = lambda y: (ctip-croot)*y/b_s+croot
        # Create wing sections between each rib:
        for i in range(0,len(Y_rib)-1):
            # Create a wing panel object based on the average chord length
            # Determine the laminate schedule beam section
            section_lams = []
            for j in range(0,n_lams):
                # Select vectors of thicknesses and MID's:
                # NOTE(review): the slice start advances by i*n_lams per
                # section but each section consumes n_lams*n_orients entries;
                # confirm the intended ply bookkeeping when n_orients > 1.
                n_i_tmp = n_ply[i*n_lams+n_orients*j:i*n_lams+n_orients*j+n_orients]
                m_i_tmp = m_ply[i*n_lams+n_orients*j:i*n_lams+n_orients*j+n_orients]
                th_i_tmp = th_ply[i*n_lams+n_orients*j:i*n_lams+n_orients*j+n_orients]
                section_lams += [Laminate(n_i_tmp,m_i_tmp,mat_lib,sym=lam_sym,th=th_i_tmp)]
            # Compile all information needed to create xsection and beams
            # Starting coordiante of super beam
            tmp_x1 = p1+Y_rib[i]*(p2-p1)
            # Ending coordiante of super beam
            tmp_x2 = p1+Y_rib[i+1]*(p2-p1)
            tmpWingSect = WingSection(tmp_x1,tmp_x2,chord,name,x0_spar,xf_spar,\
                section_lams,mat_lib,noe,SSBID=tmp_SB_SBID,SNID=tmp_SB_SNID,\
                SEID=tmp_SB_SEID,meshSize=meshSize,SXID=SXID,ref_ax=ref_ax,\
                chordVec=chordVec,typeXSect=typeXSect)
            # Prepare ID values for next iteration
            tmp_SB_SNID = tmpWingSect.SuperBeams[-1].enid
            tmp_SB_SEID = max(tmpWingSect.SuperBeams[-1].elems.keys())+1
            tmp_SB_SBID = tmpWingSect.SuperBeams[-1].SBID+1
            self.wingSects += [tmpWingSect]
            SXID = max(self.wingSects[i].XIDs)+1
        #self.model.addElements(tmpWingSect.SuperBeams)
    def addLiftingSurface(self,SID,x1,x2,x3,x4,nspan,nchord):
        """Adds a potential flow lifting surface to the model.
        This method adds a potential flow panel aerodynamic model to the wing
        part. The x1,x2,x3, and x4 points correspond to the root leading edge,
        root trailing edge, tip trailing edge, and tip leading edge of the wing
        respectively. Currently the only suported types of panels are doublet-
        lattice panels to be used for unsteady aerodynamic models.
        :Args:
        - `SID (int)`: The lifting surface integer identifier corresponding to
        the lifting surface.
        - `x1 (1x3 numpy array)`: The point in 3D space corresponding to the
        root leading edge point of the lifting surface.
        - `x2 (1x3 numpy array)`: The point in 3D space corresponding to the
        root trailing edge point of the lifting surface.
        - `x3 (1x3 numpy array)`: The point in 3D space corresponding to the
        tip trailing edge point of the lifting surface.
        - `x4 (1x3 numpy array)`: The point in 3D space corresponding to the
        tip leading edge point of the lifting surface.
        - `nspan (int)`: The number of boxes to be used in the spanwise
        direction.
        - `nchord (int)`: The number of boxes to be used in the chordwise
        direction.
        :Returns:
        - None
        .. Note:: Mutliple surfaces could be added to the wing part.
        .. Warning:: In order to use the doublet lattice method, the chord
        lines of the lifting surface must run in the x-direction, and there
        can be no geometric angles of attack present. The geometry of a
        general wing can be seen in the figure below:
        .. image:: images/DoubletLatticeWing.png
        :align: center
        """
        # Create the lifting surface
        tmpLiftSurf = CAERO1(SID,x1,x2,x3,x4,nspan,nchord)
        # Create a temporary dictionary of CQUADA's to iterate through later
        Dict = tmpLiftSurf.CQUADAs.copy()
        # Store it in the wing object
        self.liftingSurfaces[SID]=tmpLiftSurf
        # CONNECT AERO BOXES TO ELEMENTS
        # For all elements in the wing
        # NOTE(review): dict.iteritems below is Python 2 only.
        for wingSect in self.wingSects:
            for superBeam in wingSect.SuperBeams:
                for EID, elem in superBeam.elems.iteritems():
                    # For all panels in the lifting surface
                    tmpDict = Dict.copy()
                    for PANID, panel in tmpDict.iteritems():
                        # Determine the y-coord of the recieving point
                        y34pan = panel.y(-0.5,0)
                        # If the panel y-coord is between the nodes of the elem
                        if (y34pan-elem.n1.x[1])*(y34pan-elem.n2.x[1])<0:
                            # The panel's displacement can be described by the
                            # displacements of the nodes used by the element.
                            # Determine the x-coord in the elem corresponding
                            # to the location of the panel recieving point
                            t = (y34pan-elem.n1.x[1])/(elem.n2.x[1]-elem.n1.x[1])
                            # Determine the nodal contributions of the displacements
                            panel.DOF[elem.n1.NID] = t
                            panel.DOF[elem.n2.NID] = 1-t
                            # Determine the moment arm of the box acting on the
                            # beam
                            x34elem = elem.n1.x[0]+t*(elem.n2.x[0]-elem.n1.x[0])
                            x34pan = panel.x(-0.5,0)
                            panel.xarm = x34pan-x34elem
                            # Remove this reference from the dictionary so it
                            # is not iterated over again for the next element
                            del Dict[PANID]
                for NID, node in superBeam.nodes.iteritems():
                    # For all panels in the lifting surface
                    tmpDict = Dict.copy()
                    for PANID, panel in tmpDict.iteritems():
                        # Determine the y-coord of the recieving point
                        y34pan = panel.y(-0.5,0)
                        # If the panel y-coord is between the nodes of the elem
                        if abs(y34pan-node.x[1])<1e-6:
                            # The panel's displacement can be described by the
                            # displacements of the nodes used by the element.
                            # Determine the x-coord in the elem corresponding
                            # to the location of the panel recieving point
                            t = 1
                            # Determine the nodal contributions of the displacements
                            panel.DOF[node.NID] = t
                            # Determine the moment arm of the box acting on the
                            # beam
                            x34node = node.x[0]
                            x34pan = panel.x(-0.5,0)
                            panel.xarm = x34pan-x34node
                            # Remove this reference from the dictionary so it
                            # is not iterated over again for the next element
                            del Dict[PANID]
        if len(Dict.keys())>0:
            print('Warning, some elements could not have their displacements'+
                ' matched by beam elements. This includes:')
            for PANID, panel in Dict.iteritems():
                #panel.DOF[-1] = None
                print('CQUADA %d' %(PANID))
    def plotRigidWing(self,**kwargs):
        """Plots the rigid wing.
        This method plots the rigid model of the wing. This includes the
        reference axis of the beams, the cross-sections of the beams, and the
        lifting surfaces that make up the wing. This is an excellent check to
        perform before adding the part to a FEM model.
        :Args:
        - `figName (str)`: The name of the MayaVi figure. 'Rigid Wing' by
        default.
        - `numXSects (int)`: The number of cross-sections that each wing
        section will display. By default it is 2.
        - `color (1x3 tuple(int))`: This is a length 3 tuple to be used as the
        color of the beam reference axes. Black by default.
        :Returns:
        - None
        """
        figName = kwargs.pop('figName','Rigid Wing')
        # Chose the color of the beam, defaults to black, accepts tuple
        clr = kwargs.pop('color',(0,0,0))
        # Chose the number of cross-sections to be plotted. By default this is 2
        # One at the beggining and one at the end of the super beam
        numXSects = kwargs.pop('numXSects',2)
        mlab.figure(figure=figName)
        for sects in self.wingSects:
            sects.plotRigid(figName=figName,clr=clr,numXSects=numXSects)
        if len(self.liftingSurfaces)>0:
            for SID, surface in self.liftingSurfaces.iteritems():
                surface.plotLiftingSurface(figName=figName)
"repo_name": "bennames/AeroComBAT-Project",
"path": "AeroComBAT/AircraftParts.py",
"copies": "1",
"size": "15495",
"license": "mit",
"hash": 4831329771102902000,
"line_mean": 51.1750841751,
"line_max": 95,
"alpha_frac": 0.5699903195,
"autogenerated": false,
"ratio": 3.8776276276276276,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49476179471276277,
"avg_score": null,
"num_lines": null
} |
"""Aireplay-ng"""
import asyncio
from parse import parse
from .executor import ExecutorHelper
class AireplayNg(ExecutorHelper):
    """ Aireplay-ng 1.6 - (C) 2006-2020 Thomas d'Otreppe
    https://www.aircrack-ng.org
    Usage: aireplay-ng <options> <replay interface>
    Options:
    -b bssid : MAC address, Access Point
    -d dmac : MAC address, Destination
    -s smac : MAC address, Source
    -m len : minimum packet length
    -n len : maximum packet length
    -u type : frame control, type field
    -v subt : frame control, subtype field
    -t tods : frame control, To DS bit
    -f fromds : frame control, From DS bit
    -w iswep : frame control, WEP bit
    -D : disable AP detection
    -x nbpps : number of packets per second
    -p fctrl : set frame control word (hex)
    -a bssid : set Access Point MAC address
    -c dmac : set Destination MAC address
    -h smac : set Source MAC address
    -g value : change ring buffer size (default: 8)
    -F : choose first matching packet
    -e essid : set target AP SSID
    -o npckts : number of packets per burst (0=auto, default: 1)
    -q sec : seconds between keep-alives
    -Q : send reassociation requests
    -y prga : keystream for shared key auth
    -T n : exit after retry fake auth request n time
    -j : inject FromDS packets
    -k IP : set destination IP in fragments
    -l IP : set source IP in fragments
    -B : activates the bitrate test
    -i iface : capture packets from this interface
    -r file : extract packets from this pcap file
    -R : disable /dev/rtc usage
    --ignore-negative-one : if the interface's channel can't be determined
    --deauth-rc <rc> : Deauthentication reason code [0-254]
    --deauth <count> : deauthenticate 1 or all stations (-0)
    --fakeauth <delay> : fake authentication with AP (-1)
    --interactive : interactive frame selection (-2)
    --arpreplay : standard ARP-request replay (-3)
    --chopchop : decrypt/chopchop WEP packet (-4)
    --fragment : generates valid keystream (-5)
    --caffe-latte : query a client for new IVs (-6)
    --cfrag : fragments against a client (-7)
    --migmode : attacks WPA migration mode (-8)
    --test : tests injection and quality (-9)
    --help : Displays this usage screen
    """
    # NOTE: the class docstring doubles as the CLI option spec that
    # ExecutorHelper parses at runtime — do not edit it cosmetically.
    command = 'aireplay-ng'
    requires_tempfile = False
    requires_tempdir = False

    async def run(self, *args, **kwargs):
        """Run async, with prefix stablished as tempdir."""
        # Start a background poller that mirrors parsed output onto self.meta.
        # NOTE(review): the task reference is not retained; the event loop may
        # drop it before completion in some setups — confirm this is intended.
        asyncio.create_task(self.result_updater())
        return await super().run(*args, **kwargs)

    async def result_updater(self):
        """Set result on local object."""
        # Wait until the subprocess handle has been created by run().
        while not self.proc:
            await asyncio.sleep(1)
        # Poll for new results every 2 seconds while the process is alive.
        while self.proc.returncode is None:
            # NOTE(review): get_results() awaits proc.communicate(), which is
            # normally only valid once per process — confirm repeated polling
            # works with ExecutorHelper's process wrapper.
            self.meta['result'] = list(await self.get_results())
            await asyncio.sleep(2)

    async def get_results(self):
        """Get results list."""
        # Drain stdout, keep only lines that mention a BSSID, and parse each
        # into a {date, message, bssid} match object (None if no match).
        results = (await self.proc.communicate())[0].decode()
        res = (a for a in results.split('\n') if 'BSSID' in a)
        return [parse("{date} {message} -- BSSID: [{bssid}]", a) for a in res]
| {
"repo_name": "XayOn/pyrcrack",
"path": "pyrcrack/aireplay.py",
"copies": "1",
"size": "3513",
"license": "isc",
"hash": 1506223169662656800,
"line_mean": 40.3294117647,
"line_max": 79,
"alpha_frac": 0.5701679476,
"autogenerated": false,
"ratio": 3.947191011235955,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 85
} |
from random import uniform as rand
from struct import pack, unpack
from time import time
import unittest
from bisect import insort_left
from twisted.internet import protocol
from twisted.internet import abstract
from twisted.internet import reactor
from twisted.internet import app
from twisted.internet import interfaces
# Packet header flag bits (first byte of every Airhook datagram).
FLAG_AIRHOOK = 128   # marker bit for the Airhook protocol
FLAG_OBSERVED = 16   # packet carries the peer-observed 32-bit session id
FLAG_SESSION = 8     # packet carries the sender's own 32-bit session id
FLAG_MISSED = 4      # packet carries a list of missed message numbers
FLAG_NEXT = 2        # packet carries the next-expected message number (+ payload)
FLAG_INTERVAL = 1
# Maximum datagram size we will assemble in sendNext().
MAX_PACKET_SIZE = 1450
# Connection handshake states (see AirhookConnection.state).
pending = 0
sent = 1
confirmed = 2
class Airhook:
    """UDP endpoint that multiplexes Airhook connections by peer address."""

    def __init__(self):
        self.noisy = None
        # this should be changed to storage that drops old entries
        self.connections = {}

    def datagramReceived(self, datagram, addr):
        """Route an incoming datagram to the connection for its sender."""
        #print `addr`, `datagram`
        #if addr != self.addr:
        self.connectionForAddr(addr).datagramReceived(datagram)

    def connectionForAddr(self, addr):
        """Return the connection for ``addr``, creating one on first contact."""
        if addr == self.addr:
            # Refuse to create a connection to ourselves.
            raise Exception
        try:
            return self.connections[addr]
        except KeyError:
            conn = self.connection()
            conn.addr = addr
            conn.makeConnection(self.transport)
            conn.protocol = self.factory.buildProtocol(addr)
            conn.protocol.makeConnection(conn)
            self.connections[addr] = conn
            return conn

    def makeConnection(self, transport):
        """Record the transport and cache our own (host, port) pair."""
        self.transport = transport
        host = transport.getHost()
        self.addr = (host.host, host.port)

    def doStart(self):
        pass

    def doStop(self):
        pass
class AirhookPacket:
    """Decoded view of one raw Airhook datagram.

    Wire layout (big-endian): flags byte, peer's observed-sequence low byte,
    16-bit sequence number, then optional fields gated by the flag bits.
    """

    def __init__(self, msg):
        self.datagram = msg
        # Low byte of the highest sequence the peer has seen from us.
        self.oseq = ord(msg[1])
        # Sender's own 16-bit packet sequence number.
        self.seq = unpack("!H", msg[2:4])[0]
        self.flags = ord(msg[0])
        self.session = None
        self.observed = None
        self.next = None
        self.missed = []
        self.msgs = []
        # Offset of the next optional field; fixed header is 4 bytes.
        skip = 4
        if self.flags & FLAG_OBSERVED:
            # 32-bit session id of ours, echoed back by the peer.
            self.observed = unpack("!L", msg[skip:skip+4])[0]
            skip += 4
        if self.flags & FLAG_SESSION:
            # Peer's own 32-bit session id.
            self.session = unpack("!L", msg[skip:skip+4])[0]
            skip += 4
        if self.flags & FLAG_NEXT:
            # Next message number (mod 256) the peer expects to send.
            self.next = ord(msg[skip])
            skip += 1
        if self.flags & FLAG_MISSED:
            # Count byte is stored minus one, followed by that many numbers.
            num = ord(msg[skip]) + 1
            skip += 1
            for i in range(num):
                self.missed.append( ord(msg[skip+i]))
            skip += num
        if self.flags & FLAG_NEXT:
            # Remaining bytes are (length-1 byte, body) message records.
            while len(msg) - skip > 0:
                n = ord(msg[skip]) + 1
                skip+=1
                self.msgs.append( msg[skip:skip+n])
                skip += n
class AirhookConnection:
    """Reliable-datagram state machine for a single remote peer.

    Maintains 16-bit packet sequence numbers and 8-bit message numbers,
    negotiates session ids (pending -> sent -> confirmed), retransmits
    messages the peer reports missing, and schedules outgoing packets via
    the twisted reactor.  Python 2 code: relies on byte strings being str.
    """

    def __init__(self):
        self.outSeq = 0  # highest sequence we have sent, can't be 255 more than obSeq
        self.observed = None  # their session id
        self.sessionID = long(rand(0, 2**32))  # our session id
        self.lastTransmit = 0  # time we last sent a packet with messages
        self.lastReceived = 0  # time we last received a packet with messages
        self.lastTransmitSeq = -1  # last sequence we sent a packet
        self.state = pending  # one of pending, sent, confirmed
        self.sendSession = None  # send session/observed fields until obSeq > sendSession
        self.response = 0  # if we know we have a response now (like resending missed packets)
        self.noisy = 0
        self.resetConnection()

    def makeConnection(self, transport):
        # Transport must support write(packet, addr); set by the Airhook mux.
        self.transport = transport

    def resetConnection(self):
        """Reset all per-session transfer state (session ids are kept)."""
        self.weMissed = []
        self.outMsgs = [None] * 256  # outgoing messages (seq sent, message), index = message number
        self.inMsg = 0  # next incoming message number
        self.outMsgNums = [0] * 256  # outgoing message numbers i = outNum % 256
        self.next = 0  # next outgoing message number
        self.scheduled = 0  # a sendNext is scheduled, don't schedule another
        self.omsgq = []  # list of messages to go out
        self.imsgq = []  # list of messages coming in
        self.obSeq = 0  # highest sequence confirmed by remote
        self.inSeq = 0  # last received sequence

    def datagramReceived(self, datagram):
        """Process one raw datagram: handshake, loss detection, payload."""
        if not datagram:
            return
        if self.noisy:
            print `datagram`
        p = AirhookPacket(datagram)
        # check for state change
        if self.state == pending:
            # We have not sent anything yet; learn the peer's session id.
            if p.observed != None and p.session != None:
                if p.observed == self.sessionID:
                    # Peer echoed our id correctly: handshake complete.
                    self.observed = p.session
                    self.state = confirmed
                    self.response = 1
                else:
                    self.observed = p.session
                    self.response = 1
            elif p.session != None:
                self.observed = p.session
                self.response = 1
            else:
                self.response = 1
        elif self.state == sent:
            # We sent our session id and are waiting for it to be echoed.
            if p.observed != None and p.session != None:
                if p.observed == self.sessionID:
                    self.observed = p.session
                    self.sendSession = self.outSeq
                    self.state = confirmed
            if p.session != None:
                if not self.observed:
                    self.observed = p.session
                elif self.observed != p.session:
                    # Peer restarted with a new session id: start over.
                    self.state = pending
                    self.observed = p.session
                    self.resetConnection()
                    self.response = 1
                    if hasattr(self.protocol, "resetConnection") and callable(self.protocol.resetConnection):
                        self.protocol.resetConnection()
                    self.inSeq = p.seq
                    self.schedule()
                    return
            elif p.session == None and p.observed == None:
                self.response = 1
                self.schedule()
        elif self.state == confirmed:
            # Any unexpected session/observed id means the peer reset.
            if p.session != None or p.observed != None :
                if (p.session != None and p.session != self.observed) or (p.observed != None and p.observed != self.sessionID):
                    self.state = pending
                    self.observed = p.session
                    self.resetConnection()
                    self.inSeq = p.seq
                    if hasattr(self.protocol, "resetConnection") and callable(self.protocol.resetConnection):
                        self.protocol.resetConnection()
                    self.schedule()
                    return
        # check to make sure sequence number isn't out of order
        # (a jump of >= 256 in modular 16-bit space is treated as stale)
        if (p.seq - self.inSeq) % 2**16 >= 256:
            return
        if self.state == confirmed:
            msgs = []
            missed = []
            # see if they need us to resend anything
            for i in p.missed:
                if self.outMsgs[i] != None:
                    self.omsgq.append(self.outMsgs[i])
                    self.outMsgs[i] = None
            # see if we missed any messages
            if p.next != None:
                missed_count = (p.next - self.inMsg) % 256
                if missed_count:
                    self.lastReceived = time()
                    # Remember each missing (our-seq, message-number) pair so
                    # the next outgoing packet can request retransmission.
                    for i in range(missed_count):
                        missed += [(self.outSeq, (self.inMsg + i) % 256)]
                    self.weMissed += missed
                    self.response = 1
                # record highest message number seen
                self.inMsg = (p.next + len(p.msgs)) % 256
                # append messages, update sequence
                self.imsgq += p.msgs
        if self.state == confirmed:
            # unpack the observed sequence: the packet only carries the low
            # byte, so reconstruct the full 16-bit value near our outSeq.
            tseq = unpack('!H', pack('!H', self.outSeq)[0] + chr(p.oseq))[0]
            if ((self.outSeq - tseq)) % 2**16 > 255:
                tseq = unpack('!H', chr(ord(pack('!H', self.outSeq)[0]) - 1) + chr(p.oseq))[0]
            self.obSeq = tseq
        self.inSeq = p.seq
        self.lastReceived = time()
        self.dataCameIn()
        self.schedule()

    def sendNext(self):
        """Assemble and transmit the next outgoing packet."""
        flags = 0
        # Header: low byte of last-seen incoming seq + our 16-bit out seq.
        header = chr(self.inSeq & 255) + pack("!H", self.outSeq)
        ids = ""
        missed = ""
        msgs = ""
        # session / observed logic
        if self.state == pending:
            if self.observed != None:
                flags = flags | FLAG_OBSERVED
                ids += pack("!L", self.observed)
            flags = flags | FLAG_SESSION
            ids += pack("!L", self.sessionID)
            self.state = sent
        elif self.state == sent:
            if self.observed != None:
                flags = flags | FLAG_SESSION | FLAG_OBSERVED
                ids += pack("!LL", self.observed, self.sessionID)
            else:
                flags = flags | FLAG_SESSION
                ids += pack("!L", self.sessionID)
        else:
            # Confirmed: keep echoing ids until the peer acks past sendSession.
            if self.state == sent or self.sendSession != None:
                if self.state == confirmed and (self.obSeq - self.sendSession) % 2**16 < 256:
                    self.sendSession = None
                else:
                    flags = flags | FLAG_SESSION | FLAG_OBSERVED
                    ids += pack("!LL", self.observed, self.sessionID)
        # missed header: drop entries already covered by the peer's acks,
        # then list the message numbers we still want retransmitted.
        if self.obSeq >= 0:
            self.weMissed = filter(lambda a: a[0] > self.obSeq, self.weMissed)
        if len(self.weMissed) > 0:
            flags = flags | FLAG_MISSED
            missed += chr(len(self.weMissed) - 1)
            for i in self.weMissed:
                missed += chr(i[1])
        # append any outgoing messages
        if self.state == confirmed and self.omsgq:
            first = self.next
            # Number of sent-but-unacked messages (window occupancy, mod 256).
            outstanding = (256 + (((self.next - 1) % 256) - self.outMsgNums[self.obSeq % 256])) % 256
            while len(self.omsgq) and outstanding < 255 and len(self.omsgq[-1]) + len(msgs) + len(missed) + len(ids) + len(header) + 1 <= MAX_PACKET_SIZE:
                msg = self.omsgq.pop()
                # Each record: length-1 byte followed by the message body.
                msgs += chr(len(msg) - 1) + msg
                self.outMsgs[self.next] = msg
                self.next = (self.next + 1) % 256
                outstanding+=1
        # update outgoing message stat
        if msgs:
            flags = flags | FLAG_NEXT
            ids += chr(first)
            self.lastTransmitSeq = self.outSeq
            #self.outMsgNums[self.outSeq % 256] = first
        #else:
        self.outMsgNums[self.outSeq % 256] = (self.next - 1) % 256
        # do we need a NEXT flag despite not having sent any messages?
        if not flags & FLAG_NEXT and (256 + (((self.next - 1) % 256) - self.outMsgNums[self.obSeq % 256])) % 256 > 0:
            flags = flags | FLAG_NEXT
            ids += chr(self.next)
        # update stats and send packet
        packet = chr(flags) + header + ids + missed + msgs
        self.outSeq = (self.outSeq + 1) % 2**16
        self.lastTransmit = time()
        self.transport.write(packet, self.addr)
        self.scheduled = 0
        self.schedule()

    def timeToSend(self):
        """Return (should_send, delay_seconds) for the scheduler."""
        if self.state == pending:
            return (1, 0)
        outstanding = (256 + (((self.next - 1) % 256) - self.outMsgNums[self.obSeq % 256])) % 256
        # any outstanding messages and are we not too far ahead of our counterparty?
        if len(self.omsgq) > 0 and self.state != sent and (self.next + 1) % 256 != self.outMsgNums[self.obSeq % 256] and (self.outSeq - self.obSeq) % 2**16 < 256:
            #if len(self.omsgq) > 0 and self.state != sent and (self.next + 1) % 256 != self.outMsgNums[self.obSeq % 256] and not outstanding:
            return (1, 0)
        # do we explicitly need to send a response?
        if self.response:
            self.response = 0
            return (1, 0)
        # have we not sent anything in a while?
        #elif time() - self.lastTransmit > 1.0:
        #return (1, 1)
        # nothing to send
        return (0, 0)

    def schedule(self):
        """Arrange a sendNext() via the reactor if one isn't pending."""
        tts, t = self.timeToSend()
        if tts and not self.scheduled:
            self.scheduled = 1
            reactor.callLater(t, self.sendNext)

    def write(self, data):
        """Queue one micropacket (<= 255 bytes) for transmission."""
        # micropackets can only be 255 bytes or less
        if len(data) <= 255:
            self.omsgq.insert(0, data)
        self.schedule()

    def dataCameIn(self):
        """
        called when we get a packet bearing messages
        """
        for msg in self.imsgq:
            self.protocol.dataReceived(msg)
        self.imsgq = []
class ustr(str):
    """
    this subclass of string encapsulates each ordered message, caches its
    sequence number, and has comparison functions to sort by sequence number

    Ordering is modular over the 16-bit sequence space: a difference of more
    than 255 is treated as wrap-around, so "smaller" means "further behind".
    """
    def getseq(self):
        # Lazily parse and cache the big-endian 16-bit sequence prefix.
        if not hasattr(self, 'seq'):
            self.seq = unpack("!H", self[0:2])[0]
        return self.seq
    def __lt__(self, other):
        # self is behind other when the modular gap exceeds the 255 window.
        return (self.getseq() - other.getseq()) % 2**16 > 255
    def __le__(self, other):
        return (self.getseq() - other.getseq()) % 2**16 > 255 or self.__eq__(other)
    def __eq__(self, other):
        # Equality is by sequence number only, not by string content.
        return self.getseq() == other.getseq()
    def __ne__(self, other):
        return self.getseq() != other.getseq()
    def __gt__(self, other):
        return (self.getseq() - other.getseq()) % 2**16 < 256 and not self.__eq__(other)
    def __ge__(self, other):
        return (self.getseq() - other.getseq()) % 2**16 < 256
class StreamConnection(AirhookConnection):
    """
    this implements a simple protocol for a stream over airhook
    this is done for convenience, instead of making it a twisted.internet.protocol....
    the first two octets of each message are interpreted as a 16-bit sequence number
    253 bytes are used for payload
    """
    def __init__(self):
        AirhookConnection.__init__(self)
        self.resetStream()
    def resetStream(self):
        # Stream-level counters, separate from the packet-level ones.
        self.oseq = 0  # next outgoing stream sequence number
        self.iseq = 0  # next expected incoming stream sequence number
        self.q = []    # out-of-order chunks, kept sorted by sequence
    def resetConnection(self):
        # A connection reset also discards any partially reassembled stream.
        AirhookConnection.resetConnection(self)
        self.resetStream()
    def loseConnection(self):
        pass
    def dataCameIn(self):
        # put 'em together: insert each chunk in sequence order, then deliver
        # the longest contiguous run starting at iseq as one byte string.
        for msg in self.imsgq:
            insort_left(self.q, ustr(msg))
        self.imsgq = []
        data = ""
        while self.q and self.iseq == self.q[0].getseq():
            data += self.q[0][2:]
            self.q = self.q[1:]
            self.iseq = (self.iseq + 1) % 2**16
        if data != '':
            self.protocol.dataReceived(data)
    def write(self, data):
        # chop it up and queue it up: 2-byte sequence header + 253-byte payload
        # per micropacket (255-byte micropacket limit).
        while data:
            p = pack("!H", self.oseq) + data[:253]
            self.omsgq.insert(0, p)
            data = data[253:]
            self.oseq = (self.oseq + 1) % 2**16
        self.schedule()
    def writeSequence(self, sequence):
        for data in sequence:
            self.write(data)
def listenAirhook(port, factory):
    """Listen on a UDP port with a message-oriented Airhook endpoint."""
    hook = Airhook()
    hook.factory = factory
    hook.connection = AirhookConnection
    reactor.listenUDP(port, hook)
    return hook
def listenAirhookStream(port, factory):
    """Listen on a UDP port with a stream-oriented Airhook endpoint."""
    hook = Airhook()
    hook.factory = factory
    hook.connection = StreamConnection
    reactor.listenUDP(port, hook)
    return hook
| {
"repo_name": "csm/khashmir",
"path": "airhook.py",
"copies": "1",
"size": "15578",
"license": "mit",
"hash": -7172027387011201000,
"line_mean": 34.8940092166,
"line_max": 162,
"alpha_frac": 0.5299139813,
"autogenerated": false,
"ratio": 4.0305304010349285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5060444382334929,
"avg_score": null,
"num_lines": null
} |
# Airline / Hotel Reservation System - Create a reservation system which books airline
# seats or hotel rooms. It charges various rates for particular sections of the plane or hotel.
# Example, first class is going to cost more than coach. Hotel rooms have penthouse suites which
# cost more. Keep track of when rooms will be available and can be scheduled.
class HotelReservationSystem:
    """A single hotel-room reservation: room, section (room type) and rate."""

    # Class-wide running count of reservations created.
    num_of_hotel_reservations = 0

    def __init__(self, hotel_room, section, rate):
        """Store the reservation details and bump the class-wide counter.

        Bug fix: this constructor was previously misspelled ``__int__``, so it
        was never invoked on instantiation — instances had no attributes and
        the counter never incremented.
        """
        self.hotel_room = hotel_room
        self.section = section
        self.rate = rate
        HotelReservationSystem.num_of_hotel_reservations += 1

    def get_hotel_room(self):
        """Return the reserved room identifier."""
        return self.hotel_room

    def get_section(self):
        """Return the section/room-type code (e.g. 'A' for Single)."""
        return self.section

    def get_rate(self):
        """Return the rate charged for this reservation."""
        return self.rate
# --- Interactive console menu (module-level script) ---
menuOption = 0
section = 'A'  # default room-type selection
print("____________________________")
print("| Hotel Reservation System |")
print("|__________________________|")
print("| 1) Reserve A Hotel |")
print("| 2) Exit Program |")
print("|__________________________|")
# NOTE: input is not validated; a non-numeric entry raises ValueError,
# and any option other than 1 simply falls through and exits.
menuOption = int(input("input: "))
if menuOption == 1:
    # Show the room-type catalogue and read the user's choice.
    print("___________________________________________________________________________________________________________")
    print("| Reserve A Hotel |")
    print("___________________________________________________________________________________________________________")
    print("Enter the section you wish to stay in: ")
    print("A) Single: A room assigned to one person. May have one or more beds.")
    print("B) Double: A room assigned to two people. May have one or more beds.")
    print("C) Triple: A room assigned to three people. May have two or more beds.")
    print("D) Quad: A room assigned to four people. May have two or more beds.")
    print("E) Queen: A room with a queen-sized bed. May be occupied by one or more people.")
    print("F) King: A room with a king-sized bed. May be occupied by one or more people.")
    print("G) Twin: A room with two beds. May be occupied by one or more people.")
    print("H) Double-double: A room with two double (or perhaps queen) beds. May be occupied by one or more people.")
    print("I) Studio: A room with a studio bed – a couch that can be converted into a bed. "
          "\nMay also have an additional bed.")
    print("J) Master Suite: A parlour or living room connected to one or more bedrooms.")
    print("K) Mini-Suite or Junior Suite: A single room with a bed and sitting area."
          "\nSometimes the sleeping area is in a bedroom separate from the parlour or living room.")
    section = str(input("Please select a character: "))
| {
"repo_name": "so0p/unity2dgame",
"path": "schoolGame/Assets/Sprites/Projects/HotelReservationProgram/HotelReservationSystem.py",
"copies": "1",
"size": "2718",
"license": "mit",
"hash": -8921679489183505000,
"line_mean": 50.2452830189,
"line_max": 120,
"alpha_frac": 0.5784241532,
"autogenerated": false,
"ratio": 3.9824046920821115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0048675113654517675,
"num_lines": 53
} |
"""Airmon-ng"""
from .executor import ExecutorHelper
from .models import Interfaces
class AirmonNg(ExecutorHelper):
    """ Airmon-NG
    Usage: airmon-ng <start|stop|check> <interface> [channel or frequency]
    """
    # NOTE: the class docstring doubles as the CLI usage spec parsed by
    # ExecutorHelper — keep it in sync with the real tool's help output.
    command = 'airmon-ng'
    requires_tempfile = False
    requires_tempdir = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # dirty flags that run() has executed since the last interfaces read.
        self.dirty = False
        # Interfaces we have switched to monitor mode (restored on exit).
        self.monitor_enabled = []

    async def run(self, *args, **kwargs):
        """Check argument position. Forced for this one."""
        self.dirty = True
        if args:
            # First positional argument must contain the airmon-ng verb.
            # NOTE(review): this is an assert, stripped under `python -O`.
            assert any(a in args[0] for a in ('start', 'stop', 'check'))
        return await super().run(*args, **kwargs)

    async def __aenter__(self):
        """Put selected interface in monitor mode."""
        if not self.run_args[0][0]:
            raise RuntimeError('Should be called (airmon()) first.')
        ifaces = await self.interfaces
        if not any(a.interface == self.run_args[0][0] for a in ifaces):
            raise ValueError('Invalid interface selected')
        await self.run('start', self.run_args[0][0])
        # Save interface data while we're on the async cm.
        self._interface_data = await self.interfaces
        return self

    async def __aexit__(self, *args, **kwargs):
        """Set monitor-enabled interfaces back to normal"""
        await self.run('stop', self.monitor_interface)

    @property
    def monitor_interface(self):
        # Monitor-mode name (as reported by airmon-ng) of the interface
        # we put into monitor mode in __aenter__.
        iface = next(a for a in self._interface_data
                     if a.interface == self.run_args[0][0])
        return iface.monitor

    @property
    async def interfaces(self):
        """List of currently available interfaces as reported by airmon-ng

        This is an awaitable property, use it as in::

            async with AirmonNg() as airmon:
                await airmon.interfaces

        Returns: Interfaces model built from the command's output lines.
        """
        # Re-run the bare command only if no run happened since last read.
        if not self.dirty:
            await self.run()
        self.dirty = False
        return Interfaces(await self.readlines())
| {
"repo_name": "XayOn/pyrcrack",
"path": "pyrcrack/airmon.py",
"copies": "1",
"size": "2047",
"license": "isc",
"hash": -5517832959096703000,
"line_mean": 31.4920634921,
"line_max": 74,
"alpha_frac": 0.5959941378,
"autogenerated": false,
"ratio": 4.110441767068273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 63
} |
"""Airodump."""
import os
import asyncio
import xml
from contextlib import suppress
from .executor import ExecutorHelper
from async_timeout import timeout
import xmltodict
import dotmap
from .models import Result
from .models import AccessPoint
class AirodumpNg(ExecutorHelper):
    """Airodump-ng 1.6 - (C) 2006-2020 Thomas d'Otreppe
    https://www.aircrack-ng.org
    usage: airodump-ng <options> <interface>[,<interface>,...]
    Options:
    --ivs : Save only captured IVs
    --gpsd : Use GPSd
    --write <prefix> : Dump file prefix
    --beacons : Record all beacons in dump file
    --update <secs> : Display update delay in seconds
    --showack : Prints ack/cts/rts statistics
    -h : Hides known stations for --showack
    -f <msecs> : Time in ms between hopping channels
    --berlin <secs> : Time before removing the AP/client
    from the screen when no more packets
    are received (Default: 120 seconds)
    -r <file> : Read packets from that file
    -T : While reading packets from a file,
    simulate the arrival rate of them
    as if they were "live".
    -x <msecs> : Active Scanning Simulation
    --manufacturer : Display manufacturer from IEEE OUI list
    --uptime : Display AP Uptime from Beacon Timestamp
    --wps : Display WPS information (if any)
    --output-format <formats> : Output format. Possible values:
    pcap, ivs, csv, gps, kismet, netxml,
    logcsv
    --ignore-negative-one : Removes the message that says
    fixed channel <interface>: -1
    --write-interval <seconds> : Output file(s) write interval in
    seconds
    --background <enable> : Override background detection.
    -n <int> : Minimum AP packets recv'd before for
    displaying it
    --encrypt <suite> : Filter APs by cipher suite
    --netmask <netmask> : Filter APs by mask
    --bssid <bssid> : Filter APs by BSSID
    --essid <essid> : Filter APs by ESSID
    --essid-regex <regex> : Filter APs by ESSID using a regular
    expression
    -a : Filter unassociated clients
    --ht20 : Set channel to HT20 (802.11n)
    --ht40- : Set channel to HT40- (802.11n)
    --ht40+ : Set channel to HT40+ (802.11n)
    --channel <channels> : Capture on specific channels
    --band <abg> : Band on which airodump-ng should hop
    -C <frequencies> : Uses these frequencies in MHz to hop
    --cswitch <method> : Set channel switching method
    -s : same as --cswitch
    --help : Displays this usage screen
    """
    # NOTE: the class docstring doubles as the CLI option spec that
    # ExecutorHelper parses at runtime — do not edit it cosmetically.
    requires_tempfile = False
    requires_tempdir = True
    command = "airodump-ng"

    async def run(self, *args, **kwargs):
        """Run async, with prefix stablished as tempdir."""
        # Each invocation bumps execn so output files get a fresh suffix.
        self.execn += 1
        # Defaults: background mode, dump into our tempdir with a 1-second
        # write interval, emitting netxml + logcsv. Caller kwargs override.
        kwargs = {
            'background': 1,
            'write': self.tempdir.name + '/' + self.uuid,
            'write-interval': 1,
            'output-format': 'netxml,logcsv',
            **kwargs
        }
        if 'kismet' not in kwargs.get('output-format', ''):
            # results depends on the kismet netxml file, so force it on.
            kwargs['output-format'] += ',netxml'
        return await super().run(*args, **kwargs)

    def get_file(self, format) -> str:
        """Return csv file, not kismet one.

        Arguments:
            format: File extension to retrieve (kismet.csv kismet.xml csv
                    log.csv or pcap)

        Returns: full filename
        """
        # Files are named <tempdir>/<uuid>-<2-digit run number>.<extension>.
        return f"{self.tempdir.name}/{self.uuid}-{self.execn:02}.{format}"

    @property
    async def results(self) -> list:
        """Return a list of currently detected access points

        Returns: List of AccessPoint instances
        """
        file = self.get_file('kismet.netxml')
        try:
            # Wait for a sensible 3 seconds for netxml file to be generated and
            # process to be running
            async with timeout(3):
                while not os.path.exists(file):
                    await asyncio.sleep(1)
                while not self.proc:
                    # Check if airodump is running, otherwise wait more.
                    await asyncio.sleep(1)
        except asyncio.exceptions.TimeoutError:
            # No file had been generated or process hadn't started in 3
            # seconds.
            # NOTE(review): if the process never started, self.proc may still
            # be falsy here, making this raise fail with an AttributeError —
            # confirm ExecutorHelper always sets proc before this point.
            raise Exception(await self.proc.communicate())
        while self.running:
            # Avoid crashing on file creation: airodump may rewrite the xml
            # mid-read, producing transient parse errors we simply retry past.
            with suppress(ValueError, xml.parsers.expat.ExpatError):
                xmla = xmltodict.parse(open(file).read())
                dotmap_data = dotmap.DotMap(xmla)
                results = dotmap_data['detection-run']['wireless-network']
                if results:
                    if isinstance(results, list):
                        # Multiple APs: best-scored first.
                        return Result(
                            sorted([AccessPoint(ap) for ap in results],
                                   key=lambda x: x.score,
                                   reverse=True))
                    else:
                        # xmltodict collapses a single entry to a dict.
                        return Result([AccessPoint(results)])
                return Result([])
            await asyncio.sleep(1)
| {
"repo_name": "XayOn/pyrcrack",
"path": "pyrcrack/airodump.py",
"copies": "1",
"size": "5875",
"license": "isc",
"hash": 5396743684128508000,
"line_mean": 40.6666666667,
"line_max": 79,
"alpha_frac": 0.5057021277,
"autogenerated": false,
"ratio": 4.722668810289389,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5728370937989389,
"avg_score": null,
"num_lines": null
} |
# Load the airplane records and expose them to Spark SQL.
airplanes = spark.read.json('data/airplanes.json')
airplanes.registerTempTable("airplanes")
# Distinct manufacturer names, alphabetized so near-duplicate spellings
# end up adjacent in the listing below.
manufacturer_variety = spark.sql(
    """SELECT
    DISTINCT(Manufacturer) AS Manufacturer
    FROM
    airplanes
    ORDER BY
    Manufacturer"""
)
# Small result set: safe to collect to the driver.
manufacturer_variety_local = manufacturer_variety.collect()
# We need to print these left justified
for mfr in manufacturer_variety_local:
    print(mfr.Manufacturer)
# Detect the longest common beginning string in a pair of strings
def longest_common_beginning(s1, s2):
    """Return the longest prefix shared by ``s1`` and ``s2``."""
    if s1 == s2:
        return s1
    # Walk both strings in lockstep; zip stops at the shorter one.
    prefix_len = 0
    for ch_a, ch_b in zip(s1, s2):
        if ch_a != ch_b:
            break
        prefix_len += 1
    return s1[:prefix_len]
# Compare two manufacturers, returning a tuple describing the result
def compare_manufacturers(mfrs):
    """Describe a pair of manufacturer names: shared prefix, its length,
    and whether the two names are exactly equal."""
    left, right = mfrs[0], mfrs[1]
    # Whitespace-trimmed longest common prefix of the pair.
    prefix = longest_common_beginning(left, right).strip()
    return {
        'mfr1': left,
        'mfr2': right,
        'lcb': prefix,
        'len_lcb': len(prefix),
        'eq': left == right
    }
# Pair every unique instance of Manufacturer field with every other for comparison
comparison_pairs = manufacturer_variety.crossJoin(manufacturer_variety)
# Do the comparisons
comparisons = comparison_pairs.rdd.map(compare_manufacturers)
# Matches have > 5 starting chars in common
matches = comparisons.filter(lambda f: f['eq'] == False and f['len_lcb'] > 5)
#
# Now we create a mapping of duplicate keys from their raw value to the one we're going to use
#
# 1) Group the matches by the longest common beginning ('lcb')
common_lcbs = matches.groupBy(lambda x: x['lcb'])
# 2) Emit the raw value for each side of the match along with the key, our 'lcb'
mfr1_map = common_lcbs.map(lambda x: [(y['mfr1'], x[0]) for y in x[1]]).flatMap(lambda x: x)
mfr2_map = common_lcbs.map(lambda x: [(y['mfr2'], x[0]) for y in x[1]]).flatMap(lambda x: x)
# 3) Combine the two sides of the comparison's records
map_with_dupes = mfr1_map.union(mfr2_map)
# 4) Remove duplicates
mfr_dedupe_mapping = map_with_dupes.distinct()
# 5) Convert mapping to dataframe to join to airplanes dataframe
mapping_dataframe = mfr_dedupe_mapping.toDF()
# 6) Give the mapping column names
mapping_dataframe.registerTempTable("mapping_dataframe")
mapping_dataframe = spark.sql(
"SELECT _1 AS Raw, _2 AS NewManufacturer FROM mapping_dataframe"
)
# JOIN our mapping left outer...
airplanes_w_mapping = airplanes.join(
mapping_dataframe,
on=airplanes.Manufacturer == mapping_dataframe.Raw,
how='left_outer'
)
# Now replace Manufacturer with NewManufacturer where needed
airplanes_w_mapping.registerTempTable("airplanes_w_mapping")
resolved_airplanes = spark.sql("""SELECT
TailNum,
SerialNumber,
Owner,
OwnerState,
IF(NewManufacturer IS NOT null,NewManufacturer,Manufacturer) AS Manufacturer,
Model,
ManufacturerYear,
EngineManufacturer,
EngineModel
FROM
airplanes_w_mapping""")
# Store for later use, in place of airplanes.json
resolved_airplanes.repartition(1).write.mode("overwrite").json("data/resolved_airplanes.json")
| {
"repo_name": "rjurney/Agile_Data_Code_2",
"path": "ch06/resolve_airplane_manufacturers.py",
"copies": "1",
"size": "3067",
"license": "mit",
"hash": 2595555408379708000,
"line_mean": 28.4903846154,
"line_max": 94,
"alpha_frac": 0.7241604173,
"autogenerated": false,
"ratio": 3.060878243512974,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42850386608129737,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.