text stringlengths 0 1.05M | meta dict |
|---|---|
# A simple Keras model with a single hidden layer that learns logical AND.
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Activation
# X has shape (num_rows, num_cols), where the training data are stored
# as row vectors.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
# y must have an output vector for each input vector (AND truth table).
y = np.array([[0], [0], [0], [1]], dtype=np.float32)
# Create the Sequential model.
model = Sequential()
# 1st layer - fully connected, 32 nodes, input width matching X's columns.
model.add(Dense(32, input_dim=X.shape[1]))
# 2nd layer - softmax activation (kept from the original tutorial; ReLU is
# the more usual choice for a hidden layer).
model.add(Activation('softmax'))
# 3rd layer - fully connected output layer with a single unit.
model.add(Dense(1))
# 4th layer - sigmoid activation squashes the output to a probability.
model.add(Activation('sigmoid'))
# BUG FIX: with a single sigmoid output the loss must be binary
# crossentropy; categorical_crossentropy over one "class" is degenerate
# (the loss is zero for y=0 rows), so the model could never learn AND.
model.compile(loss="binary_crossentropy", optimizer="adam",
              metrics=["accuracy"])
model.summary()
# nb_epoch is the Keras 1.x spelling (renamed to `epochs` in Keras 2),
# matching the keras.layers.core import style used above.
model.fit(X, y, nb_epoch=1000, verbose=0)
# BUG FIX: evaluate() needs the data to evaluate against.
model.evaluate(X, y)
"repo_name": "coolsgupta/machine_learning_nanodegree",
"path": "Deep_Learning/Deep_neural_networks/keras_steps.py",
"copies": "1",
"size": "1055",
"license": "mit",
"hash": 3788796258949207000,
"line_mean": 28.3333333333,
"line_max": 88,
"alpha_frac": 0.6947867299,
"autogenerated": false,
"ratio": 3.414239482200647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4609026212100647,
"avg_score": null,
"num_lines": null
} |
"""A simple module for dealing with preferences that are used by scripts. Based almost entirely on MacPrefs.
To save some preferences:
myPrefs = RFPrefs(drive/directory/directory/myPrefs.plist)
myPrefs.myString = 'xyz'
myPrefs.myInteger = 1234
myPrefs.myList = ['a', 'b', 'c']
myPrefs.myDict = {'a':1, 'b':2}
myPrefs.save()
To retrieve some preferences:
myPrefs = RFPrefs(drive/directory/directory/myPrefs.plist)
myString = myPrefs.myString
myInteger = myPrefs.myInteger
myList = myPrefs.myList
myDict = myPrefs.myDict
When using this module within FontLab, it is not necessary to
provide the RFPrefs class with a path. If a path is not given,
it will look for a file in FontLab/RoboFab Data/RFPrefs.plist.
If that file does not exist, it will make it.
"""
from robofab import RoboFabError
from robofab.plistlib import Plist
from cStringIO import StringIO
import os
class _PrefObject:
    """Dictionary-backed attribute container for preference values.
    Attribute access is proxied to the internal ``self._prefs`` dict.
    NOTE: Python 2 era code (``has_key``, ``raise Exc, arg`` syntax).
    """
    def __init__(self, dict=None):
        # NOTE(review): the parameter shadows the builtin `dict`; kept
        # as-is for backward compatibility with existing callers.
        if not dict:
            self._prefs = {}
        else:
            self._prefs = dict
    def __len__(self):
        # Number of stored preference values.
        return len(self._prefs)
    def __delattr__(self, attr):
        # Deleting an attribute removes the corresponding preference entry.
        if self._prefs.has_key(attr):
            del self._prefs[attr]
        else:
            raise AttributeError, 'delete non-existing instance attribute'
    def __getattr__(self, attr):
        # '__members__' supports old-style dir()-like introspection:
        # return the sorted preference keys.
        if attr == '__members__':
            keys = self._prefs.keys()
            keys.sort()
            return keys
        try:
            return self._prefs[attr]
        except KeyError:
            raise AttributeError, attr
    def __setattr__(self, attr, value):
        # Underscore-prefixed names are real instance attributes; every
        # other assignment is stored as a preference value.
        if attr[0] != '_':
            self._prefs[attr] = value
        else:
            self.__dict__[attr] = value
    def asDict(self):
        """Return the underlying preferences dictionary."""
        return self._prefs
class RFPrefs(_PrefObject):
"""The main preferences object to call"""
def __init__(self, path=None):
from robofab.world import world
self.__path = path
self._prefs = {}
if world.inFontLab:
#we don't have a path, but we know where we can put it
if not path:
from robofab.tools.toolsFL import makeDataFolder
settingsPath = makeDataFolder()
path = os.path.join(settingsPath, 'RFPrefs.plist')
self.__path = path
self._makePrefsFile()
#we do have a path, make sure it exists and load it
else:
self._makePrefsFile()
else:
#no path, raise error
if not path:
raise RoboFabError, "no preferences path defined"
#we do have a path, make sure it exists and load it
else:
self._makePrefsFile()
self._prefs = Plist.fromFile(path)
def _makePrefsFile(self):
if not os.path.exists(self.__path):
self.save()
def __getattr__(self, attr):
if attr[0] == '__members__':
keys = self._prefs.keys()
keys.sort()
return keys
try:
return self._prefs[attr]
except KeyError:
raise AttributeError, attr
#if attr[0] != '_':
# self._prefs[attr] = _PrefObject()
# return self._prefs[attr]
#else:
# raise AttributeError, attr
def save(self):
"""save the plist file"""
f = StringIO()
pl = Plist()
for i in self._prefs.keys():
pl[i] = self._prefs[i]
pl.write(f)
data = f.getvalue()
f = open(self.__path, 'wb')
f.write(data)
f.close()
| {
"repo_name": "moyogo/robofab",
"path": "Lib/robofab/tools/rfPrefs.py",
"copies": "8",
"size": "3021",
"license": "bsd-3-clause",
"hash": -7774102452557674000,
"line_mean": 23.762295082,
"line_max": 108,
"alpha_frac": 0.6689837802,
"autogenerated": false,
"ratio": 2.9851778656126484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7654161645812648,
"avg_score": null,
"num_lines": null
} |
"""A simple money counting game for kids."""
import random
import sys
class Money:
    """Interactive driver for the money-counting game."""
    def __init__(self):
        pass
    @staticmethod
    def display_intro():
        """Display the introduction at the start of program execution."""
        print('*' * 75)
        # Typo fix: "countinggame" -> "counting game".
        print('This is a simple money counting game to help kids learn '
              'to count money.')
        print('The program helps kids learn various change amounts in US '
              'currency.')
        print('*' * 75)
    @staticmethod
    def _coin_word(count, singular, plural):
        """Return *singular* for exactly one coin, else *plural*."""
        return singular if count == 1 else plural
    def start(self):
        """Randomly display an amount of change and ask how many of each coin
        type are needed to equal the amount displayed.
        """
        self.display_intro()
        currency_amt = random.randint(1, 99)
        print('\nHow much change is needed to equal .{0} cents?\n'
              .format(str(currency_amt)))
        your_total_amt = get_input_values(currency_amt)
        if sum(your_total_amt) == 0:
            # An all-zero tally means the player quit without entering coins.
            print('Thank you for playing.')
            sys.exit(0)
        else:
            # Choose singular/plural names for each denomination entered.
            quarter_spelling = self._coin_word(your_total_amt[0], 'quarter',
                                               'quarters')
            dime_spelling = self._coin_word(your_total_amt[1], 'dime',
                                            'dimes')
            nickel_spelling = self._coin_word(your_total_amt[2], 'nickel',
                                              'nickels')
            penny_spelling = self._coin_word(your_total_amt[3], 'penny',
                                             'pennies')
            print('\nCorrect! You entered {0:d} {1}, {2:d} {3},'
                  ' {4:d} {5} and {6:d} {7}.'.format(your_total_amt[0],
                                                     quarter_spelling,
                                                     your_total_amt[1],
                                                     dime_spelling,
                                                     your_total_amt[2],
                                                     nickel_spelling,
                                                     your_total_amt[3],
                                                     penny_spelling))
            print('Which equals .{0} cents. Nice job!'
                  .format(str(currency_amt)))
            response = input('\nWould you like to try again? ')
            if response.lower() != 'y':
                print('Thanks for playing.')
                sys.exit(0)
            # NOTE(review): replaying recurses into start(); fine for casual
            # play but could hit the recursion limit after many rounds.
            self.start()
def get_input_values(currency_amt):
    """Main logic of the program that tallies the value of each entered
    coin. Validation on the values entered is also performed.
    """
    # Coin denominations in cents.
    quarter = 25
    dime = 10
    nickel = 5
    penny = 1
    # Running value of all accepted coins, plus per-denomination counts.
    total_amt = 0
    total_quarters = 0
    total_dimes = 0
    total_nickels = 0
    total_pennies = 0
    print('Enter change in the form of (25 = quarter, 10 = dime,'
          ' 5 = nickel, 1 = penny)')
    coin_value = input('Enter coin amount: ')
    # An empty entry ends the loop (treated by the caller as quitting).
    while len(coin_value) > 0:
        try:
            coin_amt = int(coin_value)
            # Double negative: `not (coin_amt not in ...)` means the coin
            # IS one of the valid denominations.
            if not coin_amt not in (quarter, dime, nickel, penny):
                # NOTE(review): this guard appears intended to route coins
                # smaller than the target through the incremental path —
                # the `coin_amt < total_amt` half is unclear; verify intent.
                if coin_amt < currency_amt or coin_amt < total_amt:
                    if (coin_amt + total_amt) <= currency_amt:
                        if (coin_amt + total_amt) != currency_amt:
                            # Coin accepted but total not yet reached:
                            # tally it and keep prompting.
                            if coin_amt == 25:
                                total_quarters += 1
                                total_amt += quarter
                            elif coin_amt == 10:
                                total_dimes += 1
                                total_amt += dime
                            elif coin_amt == 5:
                                total_nickels += 1
                                total_amt += nickel
                            elif coin_amt == 1:
                                total_pennies += 1
                                total_amt += penny
                            else:
                                print('This is not a valid amount!\n')
                                print('Enter change in the form of (25 = quarter,'
                                      ' 10 = dime, 5 = nickel, 1 = penny)')
                            coin_value = input('\nEnter coin amount: ')
                        else:
                            # This coin lands exactly on the target: count
                            # it and finish.
                            if coin_amt == 25:
                                total_quarters += 1
                            elif coin_amt == 10:
                                total_dimes += 1
                            elif coin_amt == 5:
                                total_nickels += 1
                            elif coin_amt == 1:
                                total_pennies += 1
                            break
                    else:
                        # Coin would overshoot the target amount.
                        print('You have entered more than I currently have'
                              ' totalled up!')
                        print('\nI currently have a total of .{0} and need to get to .{1}'
                              .format(str(total_amt), str(currency_amt)))
                        print('Enter change in the form of (25 = quarter,'
                              ' 10 = dime, 5 = nickel, 1 = penny)')
                        coin_value = input('\nEnter coin amount: ')
                else:
                    # Coin is at least as large as the remaining target.
                    if (coin_amt + total_amt) > currency_amt:
                        print('You entered more than what I need')
                        print('Enter change in the form of (25 = quarter,'
                              ' 10 = dime, 5 = nickel, 1 = penny)')
                        coin_value = input('\nEnter coin amount: ')
                    if (coin_amt + total_amt) != currency_amt:
                        print('\nEnter change in the form of (25 = quarter,'
                              ' 10 = dime, 5 = nickel, 1 = penny)')
                        coin_value = input('\nEnter coin amount: ')
                    else:
                        # Single coin exactly matches the target: done.
                        if coin_amt == 25:
                            total_quarters += 1
                        elif coin_amt == 10:
                            total_dimes += 1
                        elif coin_amt == 5:
                            total_nickels += 1
                        elif coin_amt == 1:
                            total_pennies += 1
                        break
            else:
                # Entered number is not a recognized denomination.
                print('This is not a valid amount!\n')
                print('\nEnter change in the form of (25 = quarter,'
                      ' 10 = dime, 5 = nickel, 1 = penny)')
                coin_value = input('\nEnter coin amount: ')
        except ValueError:
            # Non-numeric input: re-prompt.
            print('This is not a valid amount!')
            coin_value = input('\nEnter coin amount: ')
    # Per-denomination counts in (quarters, dimes, nickels, pennies) order.
    currency_totals = (total_quarters, total_dimes, total_nickels,
                       total_pennies)
    return currency_totals
if __name__ == '__main__':
    # Script entry point: build the game object and run one round.
    money_game = Money()
    money_game.start()
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/578988_Money_Game/recipe-578988.py",
"copies": "1",
"size": "7075",
"license": "mit",
"hash": 2561880338605069000,
"line_mean": 40.3742690058,
"line_max": 90,
"alpha_frac": 0.4130035336,
"autogenerated": false,
"ratio": 4.609120521172638,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000664053295632243,
"num_lines": 171
} |
"""A simple monitor with alerts for Unix
"""
import argparse
import logging
import sys
from simple_monitor_alert.sma import SMA, SMAService
SMA_INI_FILE = '/etc/simple-monitor-alert/sma.ini'
MONITORS_DIR = '/etc/simple-monitor-alert/monitors-enabled/'
ALERTS_DIR = '/etc/simple-monitor-alert/alerts/'
def create_logger(name, level=logging.INFO):
    """Create (or fetch) a named logger with a console handler attached.

    :param name: logger name passed to ``logging.getLogger``.
    :param level: level applied to both the logger and its handler.
    :return: the configured ``logging.Logger`` (the original returned None,
        which made the function awkward to reuse; callers that ignore the
        return value are unaffected).
    """
    logger = logging.getLogger(name)
    logger.setLevel(level)
    # BUG FIX: guard against stacking a duplicate StreamHandler (and hence
    # duplicated log lines) when this is called more than once per name.
    if not logger.handlers:
        # create console handler and set level to debug
        ch = logging.StreamHandler()
        ch.setLevel(level)
        # create formatter and attach it to the handler
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)-7s - %(message)s')
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    return logger
def set_default_subparser(self, name, args=None):
    """default subparser selection. Call after setup, just before parse_args()
    name: is the name of the subparser to call by default
    args: if set is the argument list handed to parse_args()
    , tested with 2.7, 3.2, 3.3, 3.4
    it works with 2.6 assuming argparse is installed
    """
    subparser_found = False
    for arg in sys.argv[1:]:
        if arg in ['-h', '--help']:  # global help if no subparser
            break
    else:
        # for-else: only runs when no --help flag short-circuited the scan.
        # Walk argparse's (private) internals to see whether any registered
        # subcommand name already appears on the command line.
        for x in self._subparsers._actions:
            if not isinstance(x, argparse._SubParsersAction):
                continue
            for sp_name in x._name_parser_map.keys():
                if sp_name in sys.argv[1:]:
                    subparser_found = True
        if not subparser_found:
            # insert default in first position, this implies no
            # global options without a sub_parsers specified
            if args is None:
                sys.argv.insert(1, name)
            else:
                args.insert(0, name)
# Monkey-patch ArgumentParser so parser.set_default_subparser(...) works.
argparse.ArgumentParser.set_default_subparser = set_default_subparser
def execute_from_command_line(argv=None):
    """Parse the command line and run the requested SMA sub-command.

    :param argv: full argument vector including the program name; falls
        back to ``sys.argv`` when None.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--monitors-dir', default=MONITORS_DIR)
    parser.add_argument('--alerts-dir', default=ALERTS_DIR)
    parser.add_argument('--config', default=SMA_INI_FILE)
    # The four verbosity switches all write to args.loglevel; with several
    # given, the last one parsed wins.
    parser.add_argument('--warning', help='set logging to warning', action='store_const', dest='loglevel',
                        const=logging.WARNING, default=logging.INFO)
    parser.add_argument('--quiet', help='set logging to ERROR', action='store_const', dest='loglevel',
                        const=logging.ERROR, default=logging.INFO)
    parser.add_argument('--debug', help='set logging to DEBUG',
                        action='store_const', dest='loglevel',
                        const=logging.DEBUG, default=logging.INFO)
    parser.add_argument('--verbose', help='set logging to COMM',
                        action='store_const', dest='loglevel',
                        const=5, default=logging.INFO)
    parser.sub = parser.add_subparsers()
    parse_service = parser.sub.add_parser('service', help='Run SMA as service (daemon).')
    parse_service.set_defaults(which='service')
    parse_oneshot = parser.sub.add_parser('one-shot', help='Run SMA once and exit')
    parse_oneshot.set_defaults(which='one-shot')
    parse_alerts = parser.sub.add_parser('alerts', help='Alerts options.')
    parse_alerts.set_defaults(which='alerts')
    parse_alerts.add_argument('--test', help='Test alert', action='store_true')
    parse_alerts.add_argument('alert_section', nargs='?', help='Alert section to see')
    parse_results = parser.sub.add_parser('results', help='Monitors results')
    parse_results.set_defaults(which='results')
    # Provided by the set_default_subparser monkey-patch defined above.
    parser.set_default_subparser('one-shot')
    # BUG FIX: argv defaults to None, but the original unconditionally did
    # argv[1:], raising TypeError whenever called without arguments.
    if argv is None:
        argv = sys.argv
    args = parser.parse_args(argv[1:])
    create_logger('sma', args.loglevel)
    # Dispatch on the selected sub-command.
    if not getattr(args, 'which', None) or args.which == 'one-shot':
        sma = SMA(args.monitors_dir, args.alerts_dir, args.config)
        sma.evaluate_and_alert()
    elif args.which == 'service':
        sma = SMAService(args.monitors_dir, args.alerts_dir, args.config)
        sma.start()
    elif args.which == 'alerts' and args.test:
        sma = SMA(args.monitors_dir, args.alerts_dir, args.config)
        sma.alerts.test()
    elif args.which == 'results':
        print(SMA(args.monitors_dir, args.alerts_dir, args.config).results)
| {
"repo_name": "Nekmo/simple-monitor-alert",
"path": "simple_monitor_alert/management.py",
"copies": "1",
"size": "4378",
"license": "mit",
"hash": 4622013368238354000,
"line_mean": 36.4188034188,
"line_max": 106,
"alpha_frac": 0.6407035176,
"autogenerated": false,
"ratio": 3.761168384879725,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4901871902479725,
"avg_score": null,
"num_lines": null
} |
"""A simple mool extension to bump maven dependency version in a BLD file."""
import argparse
import mool.shared_utils as su
import utils.bld_parser as bp
import utils.file_utils as fu
def _parse_command_line(program_name, cmd_line):
"""Parse command line to generate arguments."""
parser = argparse.ArgumentParser(prog=program_name)
parser.add_argument('-r', '--rule_name', type=str,
help='build rule name, example HadoopAnnotations')
parser.add_argument('-a', '--artifact', type=str, default='',
help='artifact id, example hadoop-client')
parser.add_argument('-c', '--classifier', type=str,
help='classifier, example source')
parser.add_argument('-g', '--group', type=str, default='',
help='group id, example org.apache.hadoop')
parser.add_argument('-ov', '--old_version', type=str,
help='old version number to update, example 1.2.3')
parser.add_argument('-f', '--bld_file', type=str, required=True,
help='full path to BLD file to update')
parser.add_argument('-nv', '--new_version', type=str, required=True,
help='new version number to set, example 1.2.3')
return parser.parse_args(cmd_line)
def main(program_name, cmd_line):
    """Main function for this module.

    Locates the single maven build rule matching the command-line filters in
    the given BLD file and rewrites its version number in place.

    Returns a (status, message) tuple where status 0 means success.
    Raises su.Error when zero or more than one rule matches.
    """
    args = _parse_command_line(program_name, cmd_line)
    match_indexes = []
    bld_list = bp.bld_to_list(args.bld_file)
    # enumerate replaces the original Python-2-only xrange(len(...)) loop
    # and works identically on Python 2 and 3.
    for index, item in enumerate(bld_list):
        if isinstance(item, dict):
            if su.MAVEN_SPECS_KEY in item[bp.RULE_BODY_KEY]:
                specs = item[bp.RULE_BODY_KEY][su.MAVEN_SPECS_KEY]
                # A rule matches on exact rule name, OR (when no rule name
                # was given) on all of group/artifact plus any optional
                # classifier/old-version filters supplied.
                match = any([
                    item[bp.RULE_NAME_KEY] == args.rule_name,
                    all([args.rule_name is None,
                         specs[su.MAVEN_ARTIFACT_ID_KEY] == args.artifact,
                         specs[su.MAVEN_GROUP_ID_KEY] == args.group,
                         (args.classifier is None or
                          specs.get(su.MAVEN_CLASSIFIER_KEY, '') == args.classifier),
                         (args.old_version is None or
                          specs[su.MAVEN_VERSION_KEY] == args.old_version)])])
                if match:
                    match_indexes.append(index)
    if not match_indexes:
        raise su.Error('Couldn\'t find requested build rule in {} file.'.format(
            args.bld_file))
    if len(match_indexes) > 1:
        raise su.Error('Update failed, more than 1 matches found. '
                       'Provide more info!!')
    # Exactly one match: swap in the new version and write the file back.
    index = match_indexes[0]
    rule_body = bld_list[index][bp.RULE_BODY_KEY]
    old_version = rule_body[su.MAVEN_SPECS_KEY][su.MAVEN_VERSION_KEY]
    rule_body[su.MAVEN_SPECS_KEY][su.MAVEN_VERSION_KEY] = args.new_version
    bld_list[index][bp.RULE_BODY_KEY] = rule_body
    fu.write_file(args.bld_file, bp.list_to_bld_string(bld_list))
    # Grammar fix in the success message: "update" -> "updated".
    msg = 'Successfully updated version from {} to {}'.format(old_version,
                                                              args.new_version)
    return (0, msg)
| {
"repo_name": "rocketfuel/mool",
"path": "build_tool/bu.scripts/extensions/bump_mvn_version.py",
"copies": "1",
"size": "2989",
"license": "bsd-3-clause",
"hash": -2826822727679306000,
"line_mean": 44.2878787879,
"line_max": 77,
"alpha_frac": 0.6045500167,
"autogenerated": false,
"ratio": 3.5498812351543942,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4654431251854394,
"avg_score": null,
"num_lines": null
} |
"""A simple multi-agent env with two agents playing rock paper scissors.
This demonstrates running the following policies in competition:
(1) heuristic policy of repeating the same move
(2) heuristic policy of beating the last opponent move
(3) LSTM/feedforward PG policies
(4) LSTM policy with custom entropy loss
"""
import random
from gym.spaces import Discrete
from ray import tune
from ray.rllib.agents.pg.pg import PGTrainer
from ray.rllib.agents.pg.pg_tf_policy import PGTFPolicy
from ray.rllib.policy.policy import Policy
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
ROCK = 0
PAPER = 1
SCISSORS = 2
class RockPaperScissorsEnv(MultiAgentEnv):
    """Two-player environment for rock paper scissors.
    The observation is simply the last opponent action."""
    # Zero-sum payoff table keyed by (player1 move, player2 move); each
    # value is the (reward1, reward2) pair for that joint move.
    PAYOFFS = {
        (ROCK, ROCK): (0, 0),
        (ROCK, PAPER): (-1, 1),
        (ROCK, SCISSORS): (1, -1),
        (PAPER, ROCK): (1, -1),
        (PAPER, PAPER): (0, 0),
        (PAPER, SCISSORS): (-1, 1),
        (SCISSORS, ROCK): (-1, 1),
        (SCISSORS, PAPER): (1, -1),
        (SCISSORS, SCISSORS): (0, 0),
    }
    def __init__(self, _):
        self.action_space = Discrete(3)
        self.observation_space = Discrete(3)
        self.player1 = "player1"
        self.player2 = "player2"
        self.last_move = None
        self.num_moves = 0
    def _observations(self):
        """Each player observes the other's most recent move."""
        move1, move2 = self.last_move
        return {self.player1: move2, self.player2: move1}
    def reset(self):
        self.last_move = (0, 0)
        self.num_moves = 0
        return self._observations()
    def step(self, action_dict):
        self.last_move = (action_dict[self.player1],
                          action_dict[self.player2])
        reward1, reward2 = self.PAYOFFS[self.last_move]
        rewards = {self.player1: reward1, self.player2: reward2}
        self.num_moves += 1
        # Episodes last exactly ten simultaneous moves.
        dones = {"__all__": self.num_moves >= 10}
        return self._observations(), rewards, dones, {}
class AlwaysSameHeuristic(Policy):
    """Pick a random move and stick with it for the entire episode."""
    def get_initial_state(self):
        # The single state slot carries the move chosen for this episode.
        return [random.choice([ROCK, PAPER, SCISSORS])]
    def compute_actions(self,
                        obs_batch,
                        state_batches=None,
                        prev_action_batch=None,
                        prev_reward_batch=None,
                        info_batch=None,
                        episodes=None,
                        **kwargs):
        # Replay the stored move for every observation in the batch; the
        # state passes through unchanged.
        return list(state_batches[0]), state_batches, {}
    def learn_on_batch(self, samples):
        # Heuristic policy: nothing to learn.
        pass
    def get_weights(self):
        # No weights to checkpoint.
        pass
    def set_weights(self, weights):
        # No weights to restore.
        pass
class BeatLastHeuristic(Policy):
    """Play the move that would beat the last move of the opponent."""
    def compute_actions(self,
                        obs_batch,
                        state_batches=None,
                        prev_action_batch=None,
                        prev_reward_batch=None,
                        info_batch=None,
                        episodes=None,
                        **kwargs):
        def counter_move(one_hot_obs):
            # The observation appears to be one-hot over the opponent's
            # last move; answer with the move that beats it.  An all-zero
            # vector falls through to an implicit None, exactly as the
            # original elif chain did.
            if one_hot_obs[ROCK] == 1:
                return PAPER
            if one_hot_obs[PAPER] == 1:
                return SCISSORS
            if one_hot_obs[SCISSORS] == 1:
                return ROCK
        actions = [counter_move(observed) for observed in obs_batch]
        return actions, [], {}
    def learn_on_batch(self, samples):
        """No-op: heuristic policies do not learn."""
        pass
    def get_weights(self):
        """No weights to export."""
        pass
    def set_weights(self, weights):
        """No weights to restore."""
        pass
def run_same_policy():
    """Use the same policy for both agents (trivial case)."""
    experiment_config = {"env": RockPaperScissorsEnv}
    tune.run("PG", config=experiment_config)
def run_heuristic_vs_learned(use_lstm=False, trainer="PG"):
    """Run heuristic policies vs a learned agent.
    The learned agent should eventually reach a reward of ~5 with
    use_lstm=False, and ~7 with use_lstm=True. The reason the LSTM policy
    can perform better is since it can distinguish between the always_same vs
    beat_last heuristics.
    """
    def select_policy(agent_id):
        # player1 always maps to the trained policy; player2 is assigned a
        # random heuristic opponent per episode.
        if agent_id == "player1":
            return "learned"
        else:
            return random.choice(["always_same", "beat_last"])
    tune.run(
        trainer,
        stop={"timesteps_total": 400000},
        config={
            "env": RockPaperScissorsEnv,
            "gamma": 0.9,
            # 4 rollout workers, each running 4 environment copies.
            "num_workers": 4,
            "num_envs_per_worker": 4,
            "sample_batch_size": 10,
            "train_batch_size": 200,
            "multiagent": {
                # Only "learned" receives gradient updates.
                "policies_to_train": ["learned"],
                "policies": {
                    # Tuple layout: (policy_cls, obs_space, act_space, conf).
                    "always_same": (AlwaysSameHeuristic, Discrete(3),
                                    Discrete(3), {}),
                    "beat_last": (BeatLastHeuristic, Discrete(3), Discrete(3),
                                  {}),
                    # None -> use the trainer's default policy class.
                    "learned": (None, Discrete(3), Discrete(3), {
                        "model": {
                            "use_lstm": use_lstm
                        }
                    }),
                },
                "policy_mapping_fn": select_policy,
            },
        })
def run_with_custom_entropy_loss():
    """Example of customizing the loss function of an existing policy.
    This performs about the same as the default loss does."""
    def entropy_policy_gradient_loss(policy, model, dist_class, train_batch):
        # Standard policy-gradient objective plus an entropy bonus
        # (weight 0.1) that encourages exploration.
        logits, _ = model.from_batch(train_batch)
        action_dist = dist_class(logits, model)
        return (-0.1 * action_dist.entropy() - tf.reduce_mean(
            action_dist.logp(train_batch["actions"]) *
            train_batch["advantages"]))
    # Derive a PG policy/trainer pair that swaps in the custom loss.
    EntropyPolicy = PGTFPolicy.with_updates(
        loss_fn=entropy_policy_gradient_loss)
    EntropyLossPG = PGTrainer.with_updates(
        name="EntropyPG", get_policy_class=lambda _: EntropyPolicy)
    run_heuristic_vs_learned(use_lstm=True, trainer=EntropyLossPG)
if __name__ == "__main__":
    # Alternative demo entry points, kept commented for reference:
    # run_same_policy()
    # run_heuristic_vs_learned(use_lstm=False)
    run_heuristic_vs_learned(use_lstm=False)
    # run_with_custom_entropy_loss()
| {
"repo_name": "stephanie-wang/ray",
"path": "rllib/examples/rock_paper_scissors_multiagent.py",
"copies": "1",
"size": "6325",
"license": "apache-2.0",
"hash": -485597076072613440,
"line_mean": 29.7038834951,
"line_max": 78,
"alpha_frac": 0.5397628458,
"autogenerated": false,
"ratio": 3.7271655863288156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4766928432128815,
"avg_score": null,
"num_lines": null
} |
"""A simple multi-step wizard that uses the flask application session.
Creating multi-step forms of arbitrary length is simple and intuitive.
Example usage:
```
from flask.ext.wtf import FlaskForm
class MultiStepTest1(FlaskForm):
field1 = StringField(validators=[validators.DataRequired()],)
field2 = StringField(validators=[validators.DataRequired()],)
class MultiStepTest2(FlaskForm):
field3 = StringField(validators=[validators.DataRequired()],)
field4 = StringField(validators=[validators.DataRequired()],)
class MyCoolForm(MultiStepWizard):
__forms__ = [
MultiStepTest1,
MultiStepTest2,
]
```
"""
from flask import session
from flask_wtf import FlaskForm
class MultiStepWizard(FlaskForm):
    """Generates a multi-step wizard.
    The wizard uses the app specified session backend to store both
    form data and current step.
    TODO: make sure all the expected features of the typical form
    are exposed here, but automatically determining the active form
    and deferring to it. See __iter__ and data for examples.
    """
    # Subclasses list their per-step FlaskForm classes here, in order.
    __forms__ = []
    def __iter__(self):
        """Get the specific forms' fields for standard WTForm iteration."""
        _, form = self.get_active()
        return form.__iter__()
    def __len__(self):
        """Override the len method to emulate standard wtforms."""
        # NOTE(review): this is the number of step forms, not of fields.
        return len(self.__forms)
    def __getitem__(self, key):
        """Override getitem to emulate standard wtforms."""
        return self.active_form.__getitem__(key)
    def __contains__(self, item):
        """Override contains to emulate standard wtforms."""
        return self.active_form.__contains__(item)
    def __init__(self, *args, **kwargs):
        """Do all the required setup for managing the forms."""
        super(MultiStepWizard, self).__init__(*args, **kwargs)
        # Store the name and session key by a user specified kwarg,
        # or fall back to this class name.
        self.name = kwargs.get('session_key', self.__class__.__name__)
        # Get the sessions' current step if it exists.
        curr_step = session.get(self.name, {}).get('curr_step', 1)
        # if the user specified a step, we'll use that instead. Form validation
        # will still occur, but this is useful for when the user may need
        # to go back a step or more.
        if 'curr_step' in kwargs:
            curr_step = int(kwargs.pop('curr_step'))
            if curr_step > len(self.__forms__):
                curr_step = 1
        self.step = curr_step
        # Store forms in a dunder because we want to avoid conflicts
        # with any WTForm objects or third-party libs.
        self.__forms = []
        self._setup_session()
        self._populate_forms()
        invalid_forms_msg = 'Something happened during form population.'
        assert len(self.__forms) == len(self.__forms__), invalid_forms_msg
        assert len(self.__forms) > 0, 'Need at least one form!'
        self.active_form = self.get_active()[1]
        # Inject the required fields for the active form.
        # The multiform class will always be instantiated once
        # on account of separate POST requests, and so the previous form
        # values will no longer be attributes to be concerned with.
        self._setfields()
    def _setfields(self):
        """Dynamically set fields for this particular form step."""
        _, form = self.get_active()
        # Copy only WTForms unbound field descriptors onto this instance.
        for name, val in vars(form).items():
            if repr(val).startswith('<UnboundField'):
                setattr(self, name, val)
    def alldata(self, combine_fields=False, flush_after=False):
        """Get the specific forms data."""
        _alldata = dict()
        # Get all session data, combine if specified,
        # and delete session if specified.
        if self.name in session:
            _alldata = session[self.name].get('data')
            if combine_fields:
                # Merge every step's field dict into one flat dict.
                combined = dict()
                for formname, data in _alldata.items():
                    if data is not None:
                        combined.update(data)
                _alldata = combined
            if flush_after:
                self.flush()
        return _alldata
    @property
    def data(self):
        """Get the specific forms data."""
        _, form = self.get_active()
        return form.data
    @property
    def forms(self):
        """Get all forms."""
        return self.__forms
    def _setup_session(self):
        """Setup session placeholders for later use."""
        # We will populate these values as the form progresses,
        # but only if it doesn't already exist from a previous step.
        if self.name not in session:
            session[self.name] = dict(
                curr_step=self.curr_step,
                data={f.__name__: None for f in self.__forms__})
    def _populate_forms(self):
        """Populate all forms with existing data for validation.
        This will only be done if the session data exists for a form.
        """
        # We've already populated these forms, don't do it again.
        if len(self.__forms) > 0:
            return
        for form in self.__forms__:
            # Re-instantiate each step with any data saved in the session.
            data = session[self.name]['data'].get(form.__name__)
            init_form = form(**data) if data is not None else form()
            self.__forms.append(init_form)
    def _update_session_formdata(self, form):
        """Update session data for a given form key."""
        # Add data to session for this current form!
        name = form.__class__.__name__
        data = form.data
        # Update the session data for this particular form step.
        # The policy here is to always clobber old data.
        session[self.name]['data'][name] = data
    @property
    def active_name(self):
        """Return the nice name of this form class."""
        return self.active_form.__class__.__name__
    def next_step(self):
        """Set the step number in the session to the next value."""
        next_step = session[self.name]['curr_step'] + 1
        self.curr_step = next_step
        if self.name in session:
            session[self.name]['curr_step'] += 1
    @property
    def step(self):
        """Get the current step."""
        if self.name in session:
            return session[self.name]['curr_step']
    @step.setter
    def step(self, step_val):
        """Set the step number in the session."""
        # Keep the instance attribute and the session entry in sync.
        self.curr_step = step_val
        if self.name in session:
            session[self.name]['curr_step'] = step_val
    def validate_on_submit(self, *args, **kwargs):
        """Override validator and setup session updates for persistence."""
        # Update the step to the next form automagically for the user
        step, form = self.get_active()
        self._update_session_formdata(form)
        if not form.validate_on_submit():
            # Validation failed: roll the step back so the user retries.
            self.step = step - 1
            return False
        # Update to next form if applicable.
        if step - 1 < len(self.__forms):
            self.curr_step += 1
            self.active_form = self.__forms[self.curr_step - 1]
            self.next_step()
        # Mark the current step as -1 to indicate it has been
        # fully completed, if the current step is the final step.
        elif step - 1 == len(self.__forms):
            self.step = -1
        return True
    @property
    def remaining(self):
        """Get the number of steps remaining."""
        return len(self.__forms[self.curr_step:]) + 1
    @property
    def total_steps(self):
        """Get the number of steps for this form in a (non-zero index)."""
        return len(self.__forms)
    @property
    def steps(self):
        """Get a list of the steps for iterating in views, html, etc."""
        return range(1, self.total_steps + 1)
    def get_active(self):
        """Get active step."""
        # Clamp negative/zero steps (e.g. the -1 "complete" marker) to the
        # first form.
        form_index = self.curr_step - 1 if self.curr_step > 0 else 0
        return self.curr_step + 1, self.__forms[form_index]
    def flush(self):
        """Clear data and reset."""
        del session[self.name]
    def is_complete(self):
        """Determine if all forms have been completed."""
        if self.name not in session:
            return False
        # Make the current step index something unique for being "complete"
        completed = self.step == -1
        if completed:
            # Reset.
            self.curr_step = 1
        return completed
| {
"repo_name": "christabor/flask_extras",
"path": "flask_extras/forms/wizard.py",
"copies": "2",
"size": "8426",
"license": "mit",
"hash": -8154990187578291000,
"line_mean": 34.7033898305,
"line_max": 79,
"alpha_frac": 0.5954189414,
"autogenerated": false,
"ratio": 4.26417004048583,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5859588981885829,
"avg_score": null,
"num_lines": null
} |
"""A simple music-representation language suitable for hymn tunes,
part-songs, and other brief, vocal-style works.
"""
# TODO: figure out how to make fermatas in bass staves upside-down in
# the template
import codecs
import copy
from parsimonious import Grammar, NodeVisitor
from doremi.lilypond import *
from doremi.lyric_parser import Lyric, LyricParser
class RepeatMarker(object):
    """A repeat/barline token (e.g. ``|:``) that renders itself as Lilypond."""
    # Mapping from doremi repeat/bar tokens to their Lilypond markup.
    _LILYPOND_FOR_TOKEN = {
        "|:": r"\repeat volta 2 {",
        ":|": r"}",
        "!": r"} \alternative { {",
        "1!": r"} {",
        "2!": r"} }",
        "|.": r'\bar "|."',
        "||": r'\bar "||"',
    }
    def __init__(self, text):
        self.text = text
    def to_lilypond(self, *args, **kwargs):
        """Return the Lilypond markup for this marker (None when the token
        is unrecognized, matching the original if/elif fall-through)."""
        return self._LILYPOND_FOR_TOKEN.get(self.text)
class Note(object):
    """Represents a note (or rest) in a musical work, including scale
    degree, duration, octave, and other information"""
    def __init__(self,  # initialize with empty properties
                 pitch=None,  # because they are built on-the-fly
                 duration=None,
                 octave=None,
                 modifiers=None):
        self.pitch = pitch
        self.duration = duration
        self.octave = octave
        # BUG FIX: the original default was the mutable `list()`, so every
        # Note built without explicit modifiers shared ONE list — appending
        # a modifier to one note leaked into all of them.
        self.modifiers = modifiers if modifiers is not None else []
    def to_lilypond(self, key, octave_offset=0):
        """
        Convert to an equivalent Lilypond representation
        """
        # short-circuit if this is a rest
        if self.pitch == "r":
            return "%s%s" % (self.pitch, self.duration)
        pitch = syllable_to_note(self.pitch, key)
        octave = self.octave + octave_offset + 1
        # convert internal octave representation to Lilypond, which
        # uses c->b
        offset = key_octave_offset[key.lower()]
        local_pitch_level = copy.copy(pitch_level)
        # adjust the local copy of the pitch-level order to go from
        # la->sol if key is minor
        if "minor" in key.lower():
            for k in local_pitch_level.keys():
                local_pitch_level[k] = local_pitch_level[k] + 2
                if local_pitch_level[k] > 6:
                    local_pitch_level[k] -= 7
        # shift octave when the pitch wraps past the key's octave boundary
        if local_pitch_level[self.pitch] - offset < 0:
            octave -= 1
        elif local_pitch_level[self.pitch] - offset > 6:
            octave += 1
        # Lilypond octaves: "," marks below middle, "'" above
        if octave < 0:
            octave = "," * abs(octave)
        else:
            octave = "'" * octave
        # start or end slurs (or beams) as indicated by modifiers
        slur = ""
        if "slur" in self.modifiers:
            # eighth/sixteenth notes get beams instead of slur marks
            if self.duration in ["8", "8.", "16"]:
                slur = "["
            else:
                slur = "("
        elif "end slur" in self.modifiers:
            if self.duration in ["8", "8.", "16"]:
                slur = "]"
            else:
                slur = ")"
        # ties only ever connect two notes, so need not be explicitly
        # terminated
        tie = ""
        if "tie" in self.modifiers:
            tie = "~"
        # add a fermata
        if "fermata" in self.modifiers:
            fermata = r"\fermata"
        else:
            fermata = ""
        # assemble and return the Lilypond string
        return "%s%s%s%s%s%s" % (pitch,
                                 octave,
                                 self.duration,
                                 tie,
                                 slur,
                                 fermata)
class Voice(list):
    """Represents a named part in a vocal-style composition"""
    def __init__(self,
                 name="",
                 octave=""):
        list.__init__(self)
        self.name = name
        self.octave = octave  # the starting octave for the part
    def last_note(self):
        """Return the most recent Note in this voice, skipping any
        non-Note items (e.g. repeat markers).
        Raises IndexError when the voice contains no notes."""
        index = -1
        try:
            while type(self[index]) != Note:
                index -= 1
            return self[index]
        except IndexError:
            raise IndexError("No previous notes")
    def to_lilypond(self,
                    time,
                    key,
                    octave_offset=0,
                    shapes=None,
                    template="default"):
        """A representation of the voice as a Lilypond string"""
        # association of doremi shape args and Lilypond shape commands
        shape_dic = {"round": ("", ""),
                     "aikin": (r"\aikenHeads", "Minor"),
                     "sacredharp": (r"\sacredHarpHeads", "Minor"),
                     "southernharmony": (r"\southernHarmonyHeads", "Minor"),
                     "funk": (r"\funkHeads", "Minor"),
                     "walker": (r"\walkerHeads", "Minor")}
        # build the lilypond shape command (idiom fix: `is None`, not `== None`)
        if shapes is None:
            lshapes = ""
        else:
            lparts = shape_dic[shapes.lower()]
            lshapes = lparts[0]
            # there's a different command for minor
            if "minor" in key:
                lshapes += lparts[1]
        # BUG FIX: the original never closed the template file handle; the
        # context manager closes it deterministically.
        with codecs.open("templates/%s-voice.tmpl" % template,
                         "r",
                         "utf-8") as tmpl_file:
            tmpl = tmpl_file.read()
        return tmpl % {"name": self.name,
                       "key": key.replace(" ", " \\"),  # a minor -> a \minor
                       "time": time,
                       "shapes": lshapes,
                       "notes": " ".join(
                           [note.to_lilypond(
                               key,
                               octave_offset=octave_offset)
                            for note in self])}
class Tune(list):
    """Represents a vocal-style tune, e.g. a hymn-tune or partsong;
    the list contents are the tune's voices."""
    def __init__(self,
                 title="",
                 scripture="",
                 composer="",
                 key="",
                 time=None,
                 partial=None):
        """title/scripture/composer: header metadata strings
        key: key name (e.g. "a minor"); time: time-signature string
        partial: duration of a partial (pickup) first measure, or None
        """
        # initialize the list base explicitly, consistent with Voice
        list.__init__(self)
        self.title = title
        self.scripture = scripture
        self.composer = composer
        self.key = key
        self.time = time
        self.partial = partial
    def to_lilypond(self,
                    key,
                    octave_offset=0,
                    shapes=None,
                    lyric=None,
                    template="default"):
        """Return a Lilypond version of the tune.

        key: key to render in; octave_offset/shapes/template are passed
        through to each voice; lyric supplies author/title/meter and
        per-voice lyrics (NOTE(review): a None lyric would fail on the
        attribute accesses below -- confirm callers always pass one).
        """
        key = key_to_lilypond(key)
        # represent the partial beginning measure a la Lilypond if
        # necessary
        if self.partial:
            partial = r"\partial %s" % self.partial
        else:
            partial = ""
        # read the template under a context manager so the file handle
        # is closed promptly (the original leaked it)
        with codecs.open("templates/%s.tmpl" % template, "r", "utf-8") as tmpl_file:
            ly = tmpl_file.read()
        tmpl_data = {"voices": "\n".join(
            [voice.to_lilypond(self.time,
                               key,
                               octave_offset=octave_offset,
                               shapes=shapes,
                               template=template)
             for voice in self]),
                     "author": lyric.author,
                     "lyrictitle": lyric.title,
                     "meter": lyric.meter,
                     "title": self.title,
                     "scripture": self.scripture,
                     "composer": self.composer,
                     "partial": partial}
        # every voice gets a lyrics slot; fill from the lyric's voices
        for voice in self:
            tmpl_data["%s_lyrics" % voice.name] = ""
        for lvoice in lyric.voices:
            tmpl_data["%s_lyrics" % lvoice.name] = lvoice.to_lilypond()
        return ly % tmpl_data
def get_node_val(node, val_type):
    """Return the text (surrounding double quotes stripped) of the
    first child of *node* whose expression name is *val_type*; raise
    ValueError if no such child exists."""
    match = next((child for child in node.children
                  if child.expr_name == val_type), None)
    if match is None:
        raise ValueError("No value of specified type.")
    return match.text.strip('"')
def get_string_val(node):
    """Return the value of the node's string child; raise ValueError
    if the node has no string-typed child."""
    try:
        return get_node_val(node, "string")
    except ValueError as err:
        # Narrowed from a bare ``except:`` (which also swallowed
        # unrelated errors such as AttributeError) and chained so the
        # original cause is preserved.
        raise ValueError("No string value.") from err
class DoremiParser(NodeVisitor):
    """Parses a Doremi tune file and builds the internal Tune/Voice/
    Note representation by walking the resulting parse tree."""
    def __init__(self, tune_fn):
        """tune_fn: path to the Doremi source file to parse"""
        NodeVisitor.__init__(self)
        # start with an empty tune, voice, note, and list of modifiers
        self.tune = Tune()
        self.voice = Voice()
        self.note = Note()
        self.note_modifiers = []
        # at the outset, we are not in a voice's content
        self.in_content = False
        # set up the actual parser; context managers close both file
        # handles promptly (the original leaked them)
        with open("doremi-grammar", "r") as grammar_file:
            grammar = Grammar(grammar_file.read())
        # read and parse the tune
        with codecs.open(tune_fn, "r", "utf-8") as tune_file:
            tune_text = tune_file.read()
        self.syntax = grammar.parse(tune_text)
    def convert(self):
        """Convert the parse tree to the internal music representation"""
        self.visit(self.syntax)
        return self.tune
    # title, composer, key, and partial value can only occur at the
    # tune level, so they always are added to the tune
    def visit_title(self, node, vc):
        self.tune.title = get_string_val(node)
    def visit_scripture(self, node, vc):
        self.tune.scripture = get_string_val(node)
    def visit_composer(self, node, vc):
        self.tune.composer = get_string_val(node)
    def visit_key(self, node, vc):
        # join the name children, e.g. "a" and "minor" -> "a minor"
        text = " ".join([child.text for child in node.children
                         if child.expr_name == "name"])
        self.tune.key = text
    def visit_partial(self, node, vc):
        self.tune.partial = int(get_node_val(node, "number"))
    def visit_time(self, node, vc):
        time = get_node_val(node, "fraction")
        # if it occurs inside a voice's note array, it is a mid-voice
        # time change and is stored as a note modifier
        if self.in_content:
            self.note_modifiers.append(time)
        else: # otherwise, it's at the tune level
            self.tune.time = time
    # octave and voice-name only occur at the voice level
    def visit_octave(self, node, vc):
        self.voice.octave = int(get_node_val(node, "number"))
    def visit_voice_name(self, node, vc):
        self.voice.name = node.children[-1].text
    # modifiers only occur in a collection of notes, and are stored at
    # the note level
    def visit_note_modifier(self, node, vc):
        self.note_modifiers.append(node.text)
    def visit_voice(self, node, vc):
        # a voice is only visited when fully parsed, so the voice is
        # already fully constructed; add it to the tune and start a
        # new one
        self.tune.append(self.voice)
        self.voice = Voice()
    def visit_note(self, node, vc):
        # a note is only visited after its modifiers have been
        # visited, so we finalize it and add it to the voice here.
        # if there's no explicit duration, it's the same as the
        # previous note in the same voice
        if not self.note.duration:
            self.note.duration = self.voice.last_note().duration
        self.note.modifiers = self.note_modifiers
        self.note.pitch = node.text
        # if there's a previous note, start from its octave; if not,
        # start from the voice's octave
        try:
            self.note.octave = self.voice.last_note().octave
        except IndexError:
            self.note.octave = self.voice.octave
        # alter the octave according to octave modifiers
        for mod in self.note.modifiers:
            if mod == "-":
                self.note.octave -= 1
            elif mod == "+":
                self.note.octave += 1
        # if a slur started on the previous note and is not continued
        # by this one, explicitly end it
        try:
            if "slur" in self.voice.last_note().modifiers:
                if not "slur" in self.note.modifiers:
                    self.note.modifiers.append("end slur")
        except IndexError:
            pass
        # add the note to the voice and start a new one with no
        # modifiers
        self.voice.append(self.note)
        self.note = Note()
        self.note_modifiers = []
    def visit_repeat(self, node, vc):
        self.voice.append(RepeatMarker(node.text))
    def visit_number(self, node, vc):
        # all numbers except note durations are handled at a higher level
        if self.in_content:
            self.note.duration = node.text
    def generic_visit(self, node, vc):
        # set whether we're in the note-content of a voice based on
        # open- and close-brackets
        if node.text == "[":
            self.in_content = True
        elif node.text == "]":
            self.in_content = False
| {
"repo_name": "JasonFruit/doremi",
"path": "doremi/doremi_parser.py",
"copies": "1",
"size": "12775",
"license": "mit",
"hash": -1205490265218397200,
"line_mean": 33.3413978495,
"line_max": 77,
"alpha_frac": 0.5135029354,
"autogenerated": false,
"ratio": 4.109038275972981,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5122541211372982,
"avg_score": null,
"num_lines": null
} |
"""A simple Naive-Bayes classifier designed to train on any number of features,
and then classify documents into one of two mutually exclusive categories.
Implements the algorithm described for spam filtering here:
http://en.wikipedia.org/wiki/Naive_Bayes_classifier#Document_classification
"""
from collections import defaultdict
from math import log, exp
class NaiveBayesClassifier(object):
    """Two-category naive-Bayes document classifier with add-one
    (Laplace) smoothing, per the Wikipedia spam-filtering algorithm
    referenced in the module docstring."""
    def __init__(self):
        # per-label occurrence count of each individual feature
        self.label_feature_lookup = defaultdict(lambda: defaultdict(int))
        # per-label total of all feature occurrences
        self.label_total_feature_counts = defaultdict(int)
        # per-label document count (used for the prior odds)
        self.label_total_document_counts = defaultdict(int)
    def train(self, labeled_features):
        """Accepts a list of labeled features -- tuples of format (label,
        feature_vector), and learns feature weights"""
        for label, features in labeled_features:
            self.label_total_document_counts[label] += 1
            counts = self.label_feature_lookup[label]
            for feature in features:
                counts[feature] += 1
                self.label_total_feature_counts[label] += 1
        self.all_labels = self.label_total_document_counts.keys()
    def classify(self, feature_vec, label1, label2):
        """Return whichever of label1/label2 the model deems more
        probable for feature_vec (ties go to label2)."""
        def smoothed(label, feature):
            # add-one smoothed estimate of P(feature | label)
            return ((self.label_feature_lookup[label][feature] + 1.0) /
                    (self.label_total_feature_counts[label] + 1.0))
        total_weight = sum(
            log(smoothed(label1, feature) / smoothed(label2, feature))
            for feature in feature_vec)
        prior_factor = log((self.label_total_document_counts[label1] + 1.0) /
                           (self.label_total_document_counts[label2] + 1.0))
        return label1 if prior_factor + total_weight > 0 else label2
| {
"repo_name": "sbrother/chn-machine-learning-talk",
"path": "classifier.py",
"copies": "1",
"size": "1960",
"license": "mit",
"hash": -5624954487269544000,
"line_mean": 48,
"line_max": 89,
"alpha_frac": 0.6336734694,
"autogenerated": false,
"ratio": 3.9918533604887982,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0027049435239090416,
"num_lines": 40
} |
# A simple neural network which learns XOR using back propagation.
import random
from math import exp
# See wikipedia for the neural network structure
# http://en.wikipedia.org/wiki/Feedforward_neural_network
# http://en.wikipedia.org/wiki/File:XOR_perceptron_net.png
# Pitiful inline diagram:
# z-b3 Output
# / | \
# w4 w5 w6
# / | \
# h1-b0 h2-b1 h3-b2 Hidden Units
# / / \ /
# w0 w1 w2 w3
# \ / \ /
# x0 x1 Input
# activation function: logistic
def g(z):
    """Logistic (sigmoid) activation function."""
    return 1.0 / (1.0 + exp(-z))
def g_prime(z):
    """Derivative of the logistic, via the identity g'(z) = g(z) * g(-z)."""
    slope_left = g(z)
    return slope_left * g(-z)
# Training / Testing data
# (Train, test are same since we are modeling a known boolean function)
# Each example is [x0, x1, target] where target = x0 XOR x1.
ex1 = [0,0,0]
ex2 = [0,1,1]
ex3 = [1,0,1]
ex4 = [1,1,0]
ex = [ex1, ex2, ex3, ex4]
class net:
    """Feedforward network with 2 inputs, 3 hidden units, and 1 output,
    wired to learn XOR (topology per the Wikipedia diagram in the
    module comments)."""
    def __init__(self):
        self.eta = 10 # learning rate; TODO: automate search for optimal parameter values
        self.w = [random.random() for _ in range(7)] # connection weights w0..w6
        self.b = [random.random() for _ in range(4)] # biases for h1, h2, h3, z
        self.delta = [0, 0, 0, 0] # back-propagated error terms
        self.net = [0, 0, 0, 0]   # pre-activation sums for h1, h2, h3, z
    # Working weights according the the reference diagram from Wikipedia.
    # This allows us to test the network structure and classification
    # independently of training.
    def set_weights(self):
        """Install hand-derived weights known to compute XOR exactly."""
        self.w = [1,1,1,1,1,-2,1]
        self.b = [-1,-2,-1,0]
    def classify(self, x, p = False):
        """Forward-propagate example x (x[0], x[1] are the inputs) and
        return the thresholded output (0 or 1); print the raw output
        activation when p is True."""
        self.net[0] = self.w[0] * x[0] + self.b[0] * 1
        self.net[1] = self.w[1] * x[0] + self.w[2] * x[1] + self.b[1] * 1
        self.net[2] = self.w[3] * x[1] + self.b[2] * 1
        self.net[3] = (self.w[4] * g(self.net[0])
                       + self.w[5] * g(self.net[1])
                       + self.w[6] * g(self.net[2])
                       + self.b[3] * 1)
        if p:
            print(g(self.net[3]))
        if g(self.net[3]) > .5:
            return 1
        else:
            return 0
    def train(self, x):
        """Run one online backpropagation update on example x, where
        x = [x0, x1, target]."""
        y = self.classify(x)
        t = x[2]
        # Back propagation of error.
        # Note that we calculate ALL deltas before updating any weights:
        # updating in place would let new weights leak into deltas that
        # must be computed from the old forward pass.
        self.delta[3] = (t-y) * g_prime(self.net[3])
        self.delta[0] = (self.delta[3] * self.w[4]) * g_prime(self.net[0])
        self.delta[1] = (self.delta[3] * self.w[5]) * g_prime(self.net[1])
        self.delta[2] = (self.delta[3] * self.w[6]) * g_prime(self.net[2])
        # Weight update
        self.w[4] = self.w[4] + self.eta * self.delta[3] * g(self.net[0])
        self.w[5] = self.w[5] + self.eta * self.delta[3] * g(self.net[1])
        self.w[6] = self.w[6] + self.eta * self.delta[3] * g(self.net[2])
        self.b[3] = self.b[3] + self.eta * self.delta[3] * 1
        self.w[0] = self.w[0] + self.eta * self.delta[0] * x[0]
        self.b[0] = self.b[0] + self.eta * self.delta[0] * 1
        self.w[1] = self.w[1] + self.eta * self.delta[1] * x[0]
        self.w[2] = self.w[2] + self.eta * self.delta[1] * x[1]
        self.b[1] = self.b[1] + self.eta * self.delta[1] * 1
        self.w[3] = self.w[3] + self.eta * self.delta[2] * x[1]
        self.b[2] = self.b[2] + self.eta * self.delta[2] * 1
    def details(self):
        """Print activations, classifications, biases, and weights for
        the module-level training examples ``ex``.
        (Fixed: the original body referenced the module-level instance
        ``nn`` instead of ``self``.)"""
        print('output activations')
        for x in ex:
            self.classify(x, True)
        print("classifications")
        for x in ex:
            print('{0}: {1}'.format(x[0:2], self.classify(x)))
        print('biases')
        print(self.b)
        print('weights')
        print(self.w)
# Online training loop: keep a sliding window of the last m targets
# and predictions and stop once windowed accuracy reaches 99% (or
# after max_iter updates).
nn = net()
m = 30 # sliding-window size for the running accuracy estimate
t = [0 for _ in range(m)] # targets of the last m sampled examples
y = [0 for _ in range(m)] # network outputs for the same examples
# seed the window with the untrained network's predictions
for i in range(m):
    x = random.choice(ex)
    t[i] = x[2]
    y[i] = nn.classify(x)
max_iter = 50000 # safety cap on training iterations
accuracy = sum(t[i] == y[i] for i in range(m)) / float(m)
n = 0
while accuracy < .99 and n < max_iter:
    x = random.choice(ex)
    guess = nn.classify(x)
    i = n % m # overwrite the oldest slot in the window
    t[i] = x[2]
    y[i] = guess
    accuracy = sum(t[i] == y[i] for i in range(m)) / float(m)
    n = n+1
    nn.train(x)
accuracy = sum(t[i] == y[i] for i in range(m)) / float(m)
print('number trials: {0}'.format(n))
print('accuracy: {0}'.format(accuracy))
nn.details()
| {
"repo_name": "devinplatt/XOR-neural-network",
"path": "xor.py",
"copies": "1",
"size": "3954",
"license": "mit",
"hash": 2425651085025210000,
"line_mean": 28.7293233083,
"line_max": 71,
"alpha_frac": 0.5510875063,
"autogenerated": false,
"ratio": 2.496212121212121,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35472996275121216,
"avg_score": null,
"num_lines": null
} |
"""A simple number and datetime addition JSON API.
Demonstrates different strategies for parsing arguments
with the FalconParser.
Run the app:
$ pip install gunicorn
$ gunicorn examples.falcon_example:app
Try the following with httpie (a cURL-like utility, http://httpie.org):
$ pip install httpie
$ http GET :8000/
$ http GET :8000/ name==Ada
$ http POST :8000/add x=40 y=2
$ http POST :8000/dateadd value=1973-04-10 addend=63
$ http POST :8000/dateadd value=2014-10-23 addend=525600 unit=minutes
"""
import datetime as dt
from webargs.core import json
import falcon
from webargs import fields, validate
from webargs.falconparser import use_args, use_kwargs, parser
### Middleware and hooks ###
class JSONTranslator(object):
    """Falcon middleware that serializes ``req.context['result']``
    (when present) into the response body as JSON."""
    def process_response(self, req, resp, resource):
        if "result" in req.context:
            resp.body = json.dumps(req.context["result"])
def add_args(argmap, **kwargs):
    """Build a Falcon ``before`` hook that parses *argmap* from the
    request (via the webargs parser) and stores the parsed values in
    ``req.context['args']``."""
    def _hook(req, resp, params):
        req.context["args"] = parser.parse(argmap, req=req, **kwargs)
    return _hook
### Resources ###
class HelloResource(object):
    """A welcome page that greets by the optional ``name`` query arg."""
    hello_args = {"name": fields.Str(missing="Friend", location="query")}
    @use_args(hello_args)
    def on_get(self, req, resp, args):
        greeting = "Welcome, {}!".format(args["name"])
        req.context["result"] = {"message": greeting}
class AdderResource(object):
    """An addition endpoint: sums two required float parameters."""
    adder_args = {"x": fields.Float(required=True), "y": fields.Float(required=True)}
    @use_kwargs(adder_args)
    def on_post(self, req, resp, x, y):
        total = x + y
        req.context["result"] = {"result": total}
class DateAddResource(object):
    """A datetime adder endpoint: adds ``addend`` minutes or days to
    ``value`` (defaulting to the current UTC time)."""
    dateadd_args = {
        "value": fields.Date(required=False),
        "addend": fields.Int(required=True, validate=validate.Range(min=1)),
        "unit": fields.Str(
            missing="days", validate=validate.OneOf(["minutes", "days"])
        ),
    }
    @falcon.before(add_args(dateadd_args))
    def on_post(self, req, resp):
        args = req.context["args"]
        base = args["value"] or dt.datetime.utcnow()
        if args["unit"] == "minutes":
            delta = dt.timedelta(minutes=args["addend"])
        else:
            delta = dt.timedelta(days=args["addend"])
        req.context["result"] = {"result": (base + delta).isoformat()}
# wire up the WSGI application: JSON middleware plus the three routes
app = falcon.API(middleware=[JSONTranslator()])
app.add_route("/", HelloResource())
app.add_route("/add", AdderResource())
app.add_route("/dateadd", DateAddResource())
| {
"repo_name": "sloria/webargs",
"path": "examples/falcon_example.py",
"copies": "1",
"size": "2623",
"license": "mit",
"hash": 3976010312239829500,
"line_mean": 26.9042553191,
"line_max": 85,
"alpha_frac": 0.6359130766,
"autogenerated": false,
"ratio": 3.492676431424767,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9622525045250014,
"avg_score": 0.0012128925549503731,
"num_lines": 94
} |
"""A simple number and datetime addition JSON API.
Run the app:
$ python examples/aiohttp_example.py
Try the following with httpie (a cURL-like utility, http://httpie.org):
$ pip install httpie
$ http GET :5001/
$ http GET :5001/ name==Ada
$ http POST :5001/add x=40 y=2
$ http POST :5001/dateadd value=1973-04-10 addend=63
$ http POST :5001/dateadd value=2014-10-23 addend=525600 unit=minutes
"""
import asyncio
import datetime as dt
import json
from aiohttp import web
from webargs import fields, validate
from webargs.aiohttpparser import use_args, use_kwargs
def jsonify(data, **kwargs):
    """Serialize *data* to JSON and wrap it in an aiohttp Response.

    Any extra keyword arguments are forwarded to web.Response;
    content_type defaults to application/json."""
    kwargs.setdefault('content_type', 'application/json')
    payload = json.dumps(data).encode('utf-8')
    return web.Response(body=payload, **kwargs)
hello_args = {
    'name': fields.Str(missing='Friend')
}
@asyncio.coroutine
@use_args(hello_args)
def index(request, args):
    """A welcome page greeting the (optional) ``name`` argument."""
    greeting = 'Welcome, {}!'.format(args['name'])
    return jsonify({'message': greeting})
add_args = {
    'x': fields.Float(required=True),
    'y': fields.Float(required=True),
}
@asyncio.coroutine
@use_kwargs(add_args)
def add(request, x, y):
    """An addition endpoint: return the sum of x and y as JSON."""
    total = x + y
    return jsonify({'result': total})
dateadd_args = {
    'value': fields.DateTime(required=False),
    'addend': fields.Int(required=True, validate=validate.Range(min=1)),
    'unit': fields.Str(missing='days', validate=validate.OneOf(['minutes', 'days']))
}
@asyncio.coroutine
@use_kwargs(dateadd_args)
def dateadd(request, value, addend, unit):
    """Add ``addend`` minutes or days to ``value`` (default: utcnow)."""
    base = value or dt.datetime.utcnow()
    if unit == 'minutes':
        delta = dt.timedelta(minutes=addend)
    else:
        delta = dt.timedelta(days=addend)
    return jsonify({'result': (base + delta).isoformat()})
def create_app():
    """Build the aiohttp application and register the three routes."""
    app = web.Application()
    routes = [('GET', '/', index),
              ('POST', '/add', add),
              ('POST', '/dateadd', dateadd)]
    for method, path, handler in routes:
        app.router.add_route(method, path, handler)
    return app
def run(app, port=5001):
    """Serve *app* on 0.0.0.0:*port* until interrupted, then shut the
    handler, listening socket, and application down in order.

    NOTE(review): uses pre-1.0 aiohttp APIs (make_handler,
    finish_connections, app.finish) -- confirm against the installed
    aiohttp version.
    """
    loop = asyncio.get_event_loop()
    handler = app.make_handler()
    f = loop.create_server(handler, '0.0.0.0', port)
    srv = loop.run_until_complete(f)
    print('serving on', srv.sockets[0].getsockname())
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        # Ctrl-C falls through to the orderly shutdown below
        pass
    finally:
        # drain in-flight connections (1s grace), then close the
        # listening socket and finish the application
        loop.run_until_complete(handler.finish_connections(1.0))
        srv.close()
        loop.run_until_complete(srv.wait_closed())
        loop.run_until_complete(app.finish())
        loop.close()
if __name__ == '__main__':
    # build the application and serve it (blocks until Ctrl-C)
    app = create_app()
    run(app)
| {
"repo_name": "Basis/webargs",
"path": "examples/aiohttp_example.py",
"copies": "1",
"size": "2624",
"license": "mit",
"hash": -8257689671009240000,
"line_mean": 26.914893617,
"line_max": 84,
"alpha_frac": 0.6459603659,
"autogenerated": false,
"ratio": 3.3173198482932995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9437557208268635,
"avg_score": 0.005144601184932906,
"num_lines": 94
} |
"""A simple number and datetime addition JSON API.
Run the app:
$ python examples/bottle_example.py
Try the following with httpie (a cURL-like utility, http://httpie.org):
$ pip install httpie
$ http GET :5001/
$ http GET :5001/ name==Ada
$ http POST :5001/add x=40 y=2
$ http POST :5001/dateadd value=1973-04-10 addend=63
$ http POST :5001/dateadd value=2014-10-23 addend=525600 unit=minutes
"""
import datetime as dt
import json
from bottle import route, run, error, response
from webargs import fields, ValidationError
from webargs.bottleparser import use_args, use_kwargs
hello_args = {
    'name': fields.Str(missing='Friend')
}
@route('/', method='GET')
@use_args(hello_args)
def index(args):
    """A welcome page greeting the (optional) ``name`` argument."""
    greeting = 'Welcome, {}!'.format(args['name'])
    return {'message': greeting}
add_args = {
    'x': fields.Float(required=True),
    'y': fields.Float(required=True),
}
@route('/add', method='POST')
@use_kwargs(add_args)
def add(x, y):
    """An addition endpoint: return the sum of x and y."""
    total = x + y
    return {'result': total}
def validate_unit(val):
    """Reject any unit value other than 'minutes' or 'days'."""
    if val not in ('minutes', 'days'):
        raise ValidationError("Unit must be either 'minutes' or 'days'.")
dateadd_args = {
    'value': fields.DateTime(required=False),
    'addend': fields.Int(required=True, validate=lambda val: val >= 0),
    'unit': fields.Str(validate=validate_unit)
}
@route('/dateadd', method='POST')
@use_kwargs(dateadd_args)
def dateadd(value, addend, unit):
    """Add ``addend`` minutes or days to ``value`` (default: utcnow)."""
    base = value or dt.datetime.utcnow()
    if unit == 'minutes':
        delta = dt.timedelta(minutes=addend)
    else:
        delta = dt.timedelta(days=addend)
    return {'result': (base + delta).isoformat()}
# Return validation errors as JSON
@error(422)
def error422(err):
    """Render 422 validation failures as a JSON message body."""
    payload = {'message': str(err.body)}
    response.content_type = 'application/json'
    return json.dumps(payload)
if __name__ == '__main__':
    # serve on port 5001 with auto-reload in debug mode
    run(port=5001, reloader=True, debug=True)
| {
"repo_name": "hyunchel/webargs",
"path": "examples/bottle_example.py",
"copies": "1",
"size": "1970",
"license": "mit",
"hash": 2207214155787856000,
"line_mean": 26.7464788732,
"line_max": 73,
"alpha_frac": 0.6563451777,
"autogenerated": false,
"ratio": 3.272425249169435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4428770426869435,
"avg_score": null,
"num_lines": null
} |
"""A simple number and datetime addition JSON API.
Run the app:
$ python examples/flask_example.py
Try the following with httpie (a cURL-like utility, http://httpie.org):
$ pip install httpie
$ http GET :5001/
$ http GET :5001/ name==Ada
$ http POST :5001/add x=40 y=2
$ http POST :5001/dateadd value=1973-04-10 addend=63
$ http POST :5001/dateadd value=2014-10-23 addend=525600 unit=minutes
"""
import datetime as dt
from dateutil import parser
from flask import Flask, jsonify
from webargs import Arg, ValidationError
from webargs.flaskparser import use_args, use_kwargs
app = Flask(__name__) # the WSGI application the route handlers below attach to
hello_args = {
    'name': Arg(str, default='Friend')
}
@app.route('/', methods=['GET'])
@use_args(hello_args)
def index(args):
    """A welcome page greeting the (optional) ``name`` argument."""
    greeting = 'Welcome, {}!'.format(args['name'])
    return jsonify({'message': greeting})
add_args = {
    'x': Arg(float, required=True),
    'y': Arg(float, required=True),
}
@app.route('/add', methods=['POST'])
@use_kwargs(add_args)
def add(x, y):
    """An addition endpoint: return the sum of x and y as JSON."""
    total = x + y
    return jsonify({'result': total})
def string_to_datetime(val):
    """Parse a date/time string into a datetime via dateutil's parser."""
    return parser.parse(val)
def validate_unit(val):
    """Reject any unit value other than 'minutes' or 'days'."""
    if val not in ('minutes', 'days'):
        raise ValidationError("Unit must be either 'minutes' or 'days'.")
dateadd_args = {
    'value': Arg(default=dt.datetime.utcnow, use=string_to_datetime),
    'addend': Arg(int, required=True, validate=lambda val: val >= 0),
    'unit': Arg(str, validate=validate_unit)
}
@app.route('/dateadd', methods=['POST'])
@use_kwargs(dateadd_args)
def dateadd(value, addend, unit):
    """Add ``addend`` minutes or days to ``value`` (default supplied by
    the Arg: current UTC time)."""
    if unit == 'minutes':
        delta = dt.timedelta(minutes=addend)
    else:
        delta = dt.timedelta(days=addend)
    return jsonify({'result': (value + delta).isoformat()})
# Return validation errors as JSON
@app.errorhandler(400)
def handle_validation_error(err):
    """Render webargs validation failures as a JSON 400 response."""
    return jsonify({'message': str(err.data['exc'])}), 400
if __name__ == '__main__':
    # serve on port 5001 in debug mode
    app.run(port=5001, debug=True)
| {
"repo_name": "yufeiminds/webargs",
"path": "examples/flask_example.py",
"copies": "4",
"size": "2061",
"license": "mit",
"hash": 8704752024359293000,
"line_mean": 26.1184210526,
"line_max": 73,
"alpha_frac": 0.6521106259,
"autogenerated": false,
"ratio": 3.2610759493670884,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003981734672415393,
"num_lines": 76
} |
"""A simple number and datetime addition JSON API.
Run the app:
$ python examples/pyramid_example.py
Try the following with httpie (a cURL-like utility, http://httpie.org):
$ pip install httpie
$ http GET :5001/
$ http GET :5001/ name==Ada
$ http POST :5001/add x=40 y=2
$ http POST :5001/dateadd value=1973-04-10 addend=63
$ http POST :5001/dateadd value=2014-10-23 addend=525600 unit=minutes
"""
import datetime as dt
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.view import view_config
from pyramid.renderers import JSON
from webargs import fields, validate
from webargs.pyramidparser import use_args, use_kwargs
hello_args = {"name": fields.Str(missing="Friend")}
@view_config(route_name="hello", request_method="GET", renderer="json")
@use_args(hello_args)
def index(request, args):
    """A welcome page greeting the (optional) ``name`` argument."""
    greeting = "Welcome, {}!".format(args["name"])
    return {"message": greeting}
add_args = {"x": fields.Float(required=True), "y": fields.Float(required=True)}
@view_config(route_name="add", request_method="POST", renderer="json")
@use_kwargs(add_args)
def add(request, x, y):
    """An addition endpoint: return the sum of x and y."""
    total = x + y
    return {"result": total}
dateadd_args = {
    "value": fields.Date(required=False),
    "addend": fields.Int(required=True, validate=validate.Range(min=1)),
    "unit": fields.Str(missing="days", validate=validate.OneOf(["minutes", "days"])),
}
@view_config(route_name="dateadd", request_method="POST", renderer="json")
@use_kwargs(dateadd_args)
def dateadd(request, value, addend, unit):
    """A date adder endpoint: add ``addend`` minutes or days to
    ``value`` (default: utcnow); the JSON renderer serializes the
    resulting datetime."""
    base = value or dt.datetime.utcnow()
    if unit == "minutes":
        delta = dt.timedelta(minutes=addend)
    else:
        delta = dt.timedelta(days=addend)
    return {"result": base + delta}
if __name__ == "__main__":
    config = Configurator()
    # teach the JSON renderer to serialize datetimes as ISO-8601
    json_renderer = JSON()
    json_renderer.add_adapter(dt.datetime, lambda v, request: v.isoformat())
    config.add_renderer("json", json_renderer)
    # route table; the views register themselves via @view_config + scan
    config.add_route("hello", "/")
    config.add_route("add", "/add")
    config.add_route("dateadd", "/dateadd")
    config.scan(__name__)
    app = config.make_wsgi_app()
    port = 5001
    server = make_server("0.0.0.0", port, app)
    print("Serving on port {}".format(port))
    server.serve_forever()
| {
"repo_name": "sloria/webargs",
"path": "examples/pyramid_example.py",
"copies": "1",
"size": "2374",
"license": "mit",
"hash": 1906335846003752200,
"line_mean": 27.9512195122,
"line_max": 85,
"alpha_frac": 0.6655433867,
"autogenerated": false,
"ratio": 3.2835408022130013,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4449084188913001,
"avg_score": null,
"num_lines": null
} |
"""A simple number and datetime addition JSON API.
Run the app:
$ python examples/tornado_example.py
Try the following with httpie (a cURL-like utility, http://httpie.org):
$ pip install httpie
$ http GET :5001/
$ http GET :5001/ name==Ada
$ http POST :5001/add x=40 y=2
$ http POST :5001/dateadd value=1973-04-10 addend=63
$ http POST :5001/dateadd value=2014-10-23 addend=525600 unit=minutes
"""
import datetime as dt
from dateutil import parser
import tornado.ioloop
from tornado.web import RequestHandler
from webargs import Arg, ValidationError
from webargs.tornadoparser import use_args, use_kwargs
class BaseRequestHandler(RequestHandler):
    """Shared base handler that renders errors as JSON bodies."""
    def write_error(self, status_code, **kwargs):
        """Write errors as JSON."""
        self.set_header('Content-Type', 'application/json')
        if 'exc_info' in kwargs:
            _etype, value, _traceback = kwargs['exc_info']
            message = value.log_message or str(value)
            self.write({'message': message})
            self.finish()
class HelloHandler(BaseRequestHandler):
    """A welcome page greeting the (optional) ``name`` argument."""
    hello_args = {
        'name': Arg(str, default='Friend')
    }
    @use_args(hello_args)
    def get(self, args):
        greeting = 'Welcome, {}!'.format(args['name'])
        self.write({'message': greeting})
class AdderHandler(BaseRequestHandler):
    """An addition endpoint: sums two required float parameters."""
    add_args = {
        'x': Arg(float, required=True),
        'y': Arg(float, required=True),
    }
    @use_kwargs(add_args)
    def post(self, x, y):
        total = x + y
        self.write({'result': total})
def string_to_datetime(val):
    """Parse a date/time string into a datetime via dateutil's parser."""
    return parser.parse(val)
def validate_unit(val):
    """Reject any unit value other than 'minutes' or 'days'."""
    if val not in ('minutes', 'days'):
        raise ValidationError("Unit must be either 'minutes' or 'days'.")
class DateAddHandler(BaseRequestHandler):
    """A datetime adder endpoint: adds ``addend`` minutes or days to
    ``value`` (default supplied by the Arg: current UTC time)."""
    dateadd_args = {
        'value': Arg(default=dt.datetime.utcnow, use=string_to_datetime),
        'addend': Arg(int, required=True, validate=lambda val: val >= 0),
        'unit': Arg(str, validate=validate_unit)
    }
    @use_kwargs(dateadd_args)
    def post(self, value, addend, unit):
        """A datetime adder endpoint."""
        if unit == 'minutes':
            delta = dt.timedelta(minutes=addend)
        else:
            delta = dt.timedelta(days=addend)
        self.write({'result': (value + delta).isoformat()})
if __name__ == '__main__':
    # map URLs to handlers and serve on port 5001 (debug mode)
    app = tornado.web.Application([
        (r'/', HelloHandler),
        (r'/add', AdderHandler),
        (r'/dateadd', DateAddHandler),
    ], debug=True)
    app.listen(5001)
    tornado.ioloop.IOLoop.instance().start()
| {
"repo_name": "jmcarp/webargs",
"path": "examples/tornado_example.py",
"copies": "4",
"size": "2673",
"license": "mit",
"hash": 6587536953654920000,
"line_mean": 27.4361702128,
"line_max": 73,
"alpha_frac": 0.6210250655,
"autogenerated": false,
"ratio": 3.6024258760107815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6223450941510782,
"avg_score": null,
"num_lines": null
} |
"""A simple object database. As long as the server is not running in
multiprocess mode that's good enough.
"""
import dbm
from pickle import dumps
from pickle import loads
from threading import Lock
class Database:
def __init__(self, filename):
self.filename = filename
self._fs = dbm.open(filename, "cf")
self._local = {}
self._lock = Lock()
def __getitem__(self, key):
with self._lock:
return self._load_key(key)
def _load_key(self, key):
if key in self._local:
return self._local[key]
rv = loads(self._fs[key])
self._local[key] = rv
return rv
def __setitem__(self, key, value):
self._local[key] = value
def __delitem__(self, key):
with self._lock:
self._local.pop(key, None)
if key in self._fs:
del self._fs[key]
def __del__(self):
self.close()
def __contains__(self, key):
with self._lock:
try:
self._load_key(key)
except KeyError:
pass
return key in self._local
def setdefault(self, key, factory):
with self._lock:
try:
rv = self._load_key(key)
except KeyError:
self._local[key] = rv = factory()
return rv
def sync(self):
with self._lock:
for key, value in self._local.items():
self._fs[key] = dumps(value, 2)
self._fs.sync()
def close(self):
try:
self.sync()
self._fs.close()
except Exception:
pass
| {
"repo_name": "pallets/werkzeug",
"path": "examples/cupoftee/db.py",
"copies": "1",
"size": "1668",
"license": "bsd-3-clause",
"hash": 2953181254301571000,
"line_mean": 23.8955223881,
"line_max": 68,
"alpha_frac": 0.5041966427,
"autogenerated": false,
"ratio": 4.149253731343284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5153450374043284,
"avg_score": null,
"num_lines": null
} |
'''A simple parallax rendering module'''
# -*- coding: utf-8 -*-
# Copyright (C) , 2012 Åke Forslund (ake.forslund@gmail.com)
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import pygame
class _subsurface:
    '''Container for one parallax layer: its surface, current scroll
    offset, and slowdown factor'''
    def __init__(self, surface, factor):
        self.scroll = 0
        self.surface = surface
        self.factor = factor
class ParallaxSurface:
    """Handles parallax scrolling of a stack of layer surfaces.

    Layers are added back-to-front: the first layer added is the deepest
    one (furthest into the screen); every layer above it gets a magenta
    colorkey so the deeper layers show through.
    """

    def __init__(self, size, colorkey_flags=0):
        """``size`` is the (width, height) viewport size in pixels.

        ``colorkey_flags`` is forwarded to ``Surface.set_colorkey`` for
        every layer above the deepest one (e.g. ``pygame.RLEACCEL``).
        """
        # NOTE: removed a leftover debug print ("parllaxSurface inited!")
        # that used Python-2-only print-statement syntax.
        self.colorkey_flags = colorkey_flags
        self.scroller = 0   # global scroll offset shared by all layers
        self.levels = []    # _subsurface instances, deepest first
        self.size = size

    def add(self, image_path, scroll_factor):
        """Adds a parallax level; the first added level is the deepest
        one, i.e. furthest back into the "screen".

        ``image_path`` is the path to the image to be used.
        ``scroll_factor`` is the slowdown factor for this level.

        Raises SystemExit when the image cannot be loaded.
        """
        try:
            image = pygame.image.load(image_path)
        except pygame.error:
            # Abort with a readable message instead of a traceback.
            raise SystemExit("couldn't open image:" + image_path)
        image = image.convert()
        if len(self.levels) > 0:
            # Magenta is treated as transparent on all but the deepest layer.
            image.set_colorkey((0xff, 0x00, 0xea), self.colorkey_flags)
        self.levels.append(_subsurface(image, scroll_factor))

    def add_surface(self, surface, scroll_factor):
        """Like add(), but takes an already-loaded pygame surface."""
        surface = surface.convert()
        if len(self.levels) > 0:
            surface.set_colorkey((0xff, 0x00, 0xea), self.colorkey_flags)
        self.levels.append(_subsurface(surface, scroll_factor))

    def draw(self, surface):
        """Draws all parallax levels onto ``surface``, deepest first.

        Each level is blitted in two parts so it wraps horizontally:
        the slice starting at its scroll offset, then the strip from its
        left edge.
        """
        s_width = self.size[0]
        s_height = self.size[1]
        for lvl in self.levels:
            surface.blit(lvl.surface, (0, 0),
                         (lvl.scroll, 0, s_width, s_height))
            # NOTE(review): the wrap destination uses the image width, not
            # the screen width — looks intentional for full-width layers;
            # confirm for layers narrower than the viewport.
            surface.blit(lvl.surface,
                         (lvl.surface.get_width() - lvl.scroll, 0),
                         (0, 0, lvl.scroll, s_height))

    def scroll(self, offset):
        """Moves each level ``offset`` pixels divided by its factor."""
        self.scroller = (self.scroller + offset)
        for lvl in self.levels:
            # Floor division keeps the offset integral (matches the
            # original Python 2 integer-division behavior).
            lvl.scroll = (self.scroller // lvl.factor) % lvl.surface.get_width()
| {
"repo_name": "lewiscowper/inferno",
"path": "parallax.py",
"copies": "1",
"size": "3083",
"license": "mit",
"hash": -3160580409265154600,
"line_mean": 39.025974026,
"line_max": 79,
"alpha_frac": 0.6278390655,
"autogenerated": false,
"ratio": 3.8333333333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9877551121619592,
"avg_score": 0.01672425544274822,
"num_lines": 77
} |
"""A simple parallel nearest neighbors classifier.
This one is parallelized using multiprocessing. It splits the training data into
chunks and processes them in parallel using a pool of worker processes.
"""
import multiprocessing
import numpy as np
from shredsim import classifiers
def _init_nn_global(init_all_samples):
    """Pool-worker initializer: publishes the full training-sample array
    as a module-level global (``_all_samples``) so each worker process
    receives it once instead of re-pickling it for every task."""
    global _all_samples
    _all_samples = init_all_samples
def _find_closest_idx(arg):
    """Finds the training sample nearest to an image within one chunk.

    ``arg`` is an ``(image, chunk)`` tuple, where ``chunk`` is a slice
    into the process-global ``_all_samples`` array installed by
    ``_init_nn_global``.  Returns a ``(distance, global_index)`` tuple
    for the closest sample in that chunk, so results from several chunks
    can be compared directly with ``min()``.
    """
    # Renamed the second element from ``range`` (shadowed the builtin).
    image, chunk = arg
    # BUG FIX: the original declared ``global all_samples`` — the wrong
    # name (the data lives in ``_all_samples``).  No ``global`` statement
    # is needed for a read anyway.
    all_distances = [(np.linalg.norm(image - sample), idx)
                     for idx, sample in enumerate(_all_samples[chunk])]
    closest = min(all_distances)
    # Translate the chunk-local index back to a global sample index.
    closest = closest[0], closest[1] + chunk.start
    return closest
class NNClassifier(classifiers.StatelessClassifierBase):
    """1-nearest-neighbour classifier parallelized with multiprocessing.

    train() splits the sample set into one slice per CPU; predict()
    farms the distance search for each slice out to a pool of worker
    processes, each of which holds the full sample array (installed by
    the pool initializer ``_init_nn_global``).
    NOTE: Python 2 code (``print`` statement, list-returning ``map``).
    """

    # Class-level default so close()/__del__ are safe even when train()
    # was never called.
    _pool = None

    def __init__(self):
        pass

    def train(self, dataset):
        # dataset is a (samples, labels) pair — presumably parallel
        # sequences; TODO confirm against StatelessClassifierBase.
        self._all_samples, self._all_labels = dataset
        num_ranges = multiprocessing.cpu_count()
        # Ceiling division so the last slice picks up the remainder.
        items_per_range = int(np.ceil(
            float(len(self._all_samples)) / num_ranges))
        self._ranges = [slice(i * items_per_range, (i+1)*items_per_range)
                        for i in range(num_ranges)]
        self._init_pool()

    def _init_pool(self):
        # Lazily create one worker per CPU (processes=None); each worker
        # receives the sample array once, via the pool initializer.
        if self._pool is None:
            self._pool = multiprocessing.Pool(processes=None,
                                              initializer=_init_nn_global,
                                              initargs=(self._all_samples,))

    def _predict_one(self, x):
        # NOTE(review): ``range`` here shadows the builtin — each value is
        # a slice object from self._ranges, not the range() function.
        args = ((x, range) for range in self._ranges)
        # Each worker returns (distance, global_index) for its slice;
        # min() picks the overall nearest by distance.
        closest_per_range = self._pool.map(_find_closest_idx, args)
        res = min(closest_per_range)[1]
        # Debug output (Python 2 print statement).
        print 'predict idx', res
        return self._all_labels[res]

    def predict(self, X):
        # Python 2: map returns a list of labels, one per input row.
        return map(self._predict_one, X)

    def close(self):
        # Shut the pool down and wait for the workers to exit.
        self._pool.close()
        self._pool.join()
        self._pool = None

    def __del__(self):
        if self._pool is not None:
            self.close()
| {
"repo_name": "xa4a/shredsim",
"path": "shredsim/classifiers/nn.py",
"copies": "1",
"size": "1987",
"license": "mit",
"hash": -4332657686122779000,
"line_mean": 26.985915493,
"line_max": 80,
"alpha_frac": 0.5933568193,
"autogenerated": false,
"ratio": 3.974,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9996064870073778,
"avg_score": 0.014258389845244307,
"num_lines": 71
} |
"""a simple 'parser' for :ex commands
"""
from collections import namedtuple
import re
from itertools import takewhile
from vex import ex_error
from vex import parsers
# Data used to parse strings into ex commands and map them to an actual
# Sublime Text command.
#
# command
# The Sublime Text command to be executed.
# invocations
# Tuple of regexes representing valid calls for this command.
# error_on
# Tuple of error codes. The parsed command is checked for errors based
# on this information.
# For example: on_error=(ex_error.ERR_TRAILING_CHARS,) would make the
# command fail if it was followed by any arguments.
ex_cmd_data = namedtuple('ex_cmd_data', 'command invocations error_on')
# Holds a parsed ex command data.
# TODO: elaborate on params info.
EX_CMD = namedtuple('ex_command', 'name command forced args parse_errors line_range can_have_range')
# Address that can only appear after a command.
POSTFIX_ADDRESS = r'[.$]|(?:/.*?(?<!\\)/|\?.*?(?<!\\)\?){1,2}|[+-]?\d+|[\'][a-zA-Z0-9<>]'
ADDRESS_OFFSET = r'[-+]\d+'
# Can only appear standalone.
OPENENDED_SEARCH_ADDRESS = r'^[/?].*'
# ** IMPORTANT **
# Vim's documentation on valid addresses is wrong. For postfixed addresses,
# as in :copy10,20, only the left end is parsed and used; the rest is discarded
# and not even errors are thrown if the right end is bogus, like in :copy10XXX.
EX_POSTFIX_ADDRESS = re.compile(
r'''(?x)
^(?P<address>
(?:
# A postfix address...
(?:%(address)s)
# optionally followed by offsets...
(?:%(offset)s)*
)|
# or an openended search-based address.
%(openended)s
)
''' % {'address': POSTFIX_ADDRESS,
'offset': ADDRESS_OFFSET,
'openended': OPENENDED_SEARCH_ADDRESS}
)
# Every entry maps a (long_name, short_name) key to the ex_cmd_data that
# drives parsing and validation for that ex command.
EX_COMMANDS = {
    ('write', 'w'): ex_cmd_data(
        command='ex_write_file',
        invocations=(
            re.compile(r'^\s*$'),
            re.compile(r'(?P<plusplus_args> *\+\+[a-zA-Z0-9_]+)* *(?P<operator>>>) *(?P<target_redirect>.+)?'),
            # fixme: raises an error when it shouldn't
            re.compile(r'(?P<plusplus_args> *\+\+[a-zA-Z0-9_]+)* *!(?P<subcmd>.+)'),
            re.compile(r'(?P<plusplus_args> *\+\+[a-zA-Z0-9_]+)* *(?P<file_name>.+)?'),
        ),
        error_on=()
    ),
    ('wall', 'wa'): ex_cmd_data(
        command='ex_write_all',
        invocations=(),
        error_on=(ex_error.ERR_TRAILING_CHARS,
                  ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('pwd', 'pw'): ex_cmd_data(
        command='ex_print_working_dir',
        invocations=(),
        error_on=(ex_error.ERR_NO_RANGE_ALLOWED,
                  ex_error.ERR_NO_BANG_ALLOWED,
                  ex_error.ERR_TRAILING_CHARS)
    ),
    # buffers/files/ls are aliases for the same buffer picker.
    ('buffers', 'buffers'): ex_cmd_data(
        command='ex_prompt_select_open_file',
        invocations=(),
        error_on=(ex_error.ERR_TRAILING_CHARS,
                  ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('files', 'files'): ex_cmd_data(
        command='ex_prompt_select_open_file',
        invocations=(),
        error_on=(ex_error.ERR_TRAILING_CHARS,
                  ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('ls', 'ls'): ex_cmd_data(
        command='ex_prompt_select_open_file',
        invocations=(),
        error_on=(ex_error.ERR_TRAILING_CHARS,
                  ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('registers', 'reg'): ex_cmd_data(
        command='ex_list_registers',
        invocations=(),
        error_on=(ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('map', 'map'): ex_cmd_data(
        command='ex_map',
        invocations=(),
        error_on=(ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('abbreviate', 'ab'): ex_cmd_data(
        command='ex_abbreviate',
        invocations=(),
        error_on=(ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('quit', 'q'): ex_cmd_data(
        command='ex_quit',
        invocations=(),
        error_on=(ex_error.ERR_TRAILING_CHARS,
                  ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('qall', 'qa'): ex_cmd_data(
        command='ex_quit_all',
        invocations=(),
        error_on=(ex_error.ERR_TRAILING_CHARS,
                  ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    # TODO: add invocations
    ('wq', 'wq'): ex_cmd_data(
        command='ex_write_and_quit',
        invocations=(),
        error_on=()
    ),
    ('read', 'r'): ex_cmd_data(
        command='ex_read_shell_out',
        invocations=(
            # xxx: works more or less by chance. fix the command code
            re.compile(r'(?P<plusplus> *\+\+[a-zA-Z0-9_]+)* *(?P<name>.+)'),
            re.compile(r' *!(?P<name>.+)'),
        ),
        # fixme: add error category for ARGS_REQUIRED
        error_on=()
    ),
    ('enew', 'ene'): ex_cmd_data(
        command='ex_new_file',
        invocations=(),
        error_on=(ex_error.ERR_TRAILING_CHARS,
                  ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('ascii', 'as'): ex_cmd_data(
        # This command is implemented in Packages/Vintage.
        command='show_ascii_info',
        invocations=(),
        error_on=(ex_error.ERR_NO_RANGE_ALLOWED,
                  ex_error.ERR_NO_BANG_ALLOWED,
                  ex_error.ERR_TRAILING_CHARS)
    ),
    # vim help doesn't say this command takes any args, but it does
    ('file', 'f'): ex_cmd_data(
        command='ex_file',
        invocations=(),
        error_on=(ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    # Commands below take a postfix address (:move10, :copy.+2, ...).
    ('move', 'move'): ex_cmd_data(
        command='ex_move',
        invocations=(
            EX_POSTFIX_ADDRESS,
        ),
        error_on=(ex_error.ERR_NO_BANG_ALLOWED,
                  ex_error.ERR_ADDRESS_REQUIRED,)
    ),
    ('copy', 'co'): ex_cmd_data(
        command='ex_copy',
        invocations=(
            EX_POSTFIX_ADDRESS,
        ),
        error_on=(ex_error.ERR_NO_BANG_ALLOWED,
                  ex_error.ERR_ADDRESS_REQUIRED,)
    ),
    ('t', 't'): ex_cmd_data(
        command='ex_copy',
        invocations=(
            EX_POSTFIX_ADDRESS,
        ),
        error_on=(ex_error.ERR_NO_BANG_ALLOWED,
                  ex_error.ERR_ADDRESS_REQUIRED,)
    ),
    ('substitute', 's'): ex_cmd_data(
        command='ex_substitute',
        invocations=(re.compile(r'(?P<pattern>.+)'),
                     ),
        error_on=()
    ),
    ('&&', '&&'): ex_cmd_data(
        command='ex_double_ampersand',
        # We don't want to maintain flag values here, so accept anything and
        # let :substitute handle the values.
        invocations=(re.compile(r'(?P<flags>.+?)\s*(?P<count>[0-9]+)'),
                     re.compile(r'\s*(?P<flags>.+?)\s*'),
                     re.compile(r'\s*(?P<count>[0-9]+)\s*'),
                     re.compile(r'^$'),
                     ),
        error_on=()
    ),
    ('shell', 'sh'): ex_cmd_data(
        command='ex_shell',
        invocations=(),
        error_on=(ex_error.ERR_NO_RANGE_ALLOWED,
                  ex_error.ERR_NO_BANG_ALLOWED,
                  ex_error.ERR_TRAILING_CHARS)
    ),
    ('delete', 'd'): ex_cmd_data(
        command='ex_delete',
        invocations=(
            re.compile(r' *(?P<register>[a-zA-Z0-9])? *(?P<count>\d+)?'),
        ),
        error_on=(ex_error.ERR_NO_BANG_ALLOWED,)
    ),
    ('global', 'g'): ex_cmd_data(
        command='ex_global',
        invocations=(
            re.compile(r'(?P<pattern>.+)'),
        ),
        error_on=()
    ),
    ('print', 'p'): ex_cmd_data(
        command='ex_print',
        invocations=(
            re.compile(r'\s*(?P<count>\d+)?\s*(?P<flags>[l#p]+)?'),
        ),
        error_on=(ex_error.ERR_NO_BANG_ALLOWED,)
    ),
    ('Print', 'P'): ex_cmd_data(
        command='ex_print',
        invocations=(
            re.compile(r'\s*(?P<count>\d+)?\s*(?P<flags>[l#p]+)?'),
        ),
        error_on=(ex_error.ERR_NO_BANG_ALLOWED,)
    ),
    ('browse', 'bro'): ex_cmd_data(
        command='ex_browse',
        invocations=(),
        error_on=(ex_error.ERR_NO_BANG_ALLOWED,
                  ex_error.ERR_NO_RANGE_ALLOWED,
                  ex_error.ERR_TRAILING_CHARS,)
    ),
    ('edit', 'e'): ex_cmd_data(
        command='ex_edit',
        invocations=(re.compile(r"^$"),),
        error_on=(ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('cquit', 'cq'): ex_cmd_data(
        command='ex_cquit',
        invocations=(),
        error_on=(ex_error.ERR_TRAILING_CHARS,
                  ex_error.ERR_NO_RANGE_ALLOWED,
                  ex_error.ERR_NO_BANG_ALLOWED,)
    ),
    # TODO: implement all arguments, etc.
    ('xit', 'x'): ex_cmd_data(
        command='ex_exit',
        invocations=(),
        error_on=()
    ),
    # TODO: implement all arguments, etc.
    ('exit', 'exi'): ex_cmd_data(
        command='ex_exit',
        invocations=(),
        error_on=()
    ),
    ('only', 'on'): ex_cmd_data(
        command='ex_only',
        invocations=(re.compile(r'^$'),),
        error_on=(ex_error.ERR_TRAILING_CHARS,
                  ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('new', 'new'): ex_cmd_data(
        command='ex_new',
        invocations=(re.compile(r'^$',),
                     ),
        error_on=(ex_error.ERR_TRAILING_CHARS,)
    ),
    ('yank', 'y'): ex_cmd_data(
        command='ex_yank',
        invocations=(re.compile(r'^(?P<register>\d|[a-z])$'),
                     re.compile(r'^(?P<register>\d|[a-z]) (?P<count>\d+)$'),
                     ),
        error_on=(),
    ),
    (':', ':'): ex_cmd_data(
        command='ex_goto',
        invocations=(),
        error_on=(),
    ),
    ('!', '!'): ex_cmd_data(
        command='ex_shell_out',
        invocations=(
            re.compile(r'(?P<shell_cmd>.+)$'),
        ),
        # FIXME: :!! is a different command to :!
        error_on=(ex_error.ERR_NO_BANG_ALLOWED,),
    ),
    # Tab management commands.
    ('tabedit', 'tabe'): ex_cmd_data(
        command='ex_tab_open',
        invocations=(
            re.compile(r'^(?P<file_name>.+)$'),
        ),
        error_on=(ex_error.ERR_NO_RANGE_ALLOWED,),
    ),
    ('tabnext', 'tabn'): ex_cmd_data(command='ex_tab_next',
                                     invocations=(),
                                     error_on=(ex_error.ERR_NO_RANGE_ALLOWED,)
                                     ),
    ('tabprev', 'tabp'): ex_cmd_data(command='ex_tab_prev',
                                     invocations=(),
                                     error_on=(ex_error.ERR_NO_RANGE_ALLOWED,)
                                     ),
    ('tabfirst', 'tabf'): ex_cmd_data(command='ex_tab_first',
                                      invocations=(),
                                      error_on=(ex_error.ERR_NO_RANGE_ALLOWED,)
                                      ),
    ('tablast', 'tabl'): ex_cmd_data(command='ex_tab_last',
                                     invocations=(),
                                     error_on=(ex_error.ERR_NO_RANGE_ALLOWED,)
                                     ),
    ('tabonly', 'tabo'): ex_cmd_data(command='ex_tab_only',
                                     invocations=(),
                                     error_on=(
                                         ex_error.ERR_NO_RANGE_ALLOWED,
                                         ex_error.ERR_TRAILING_CHARS,)
                                     )
}
def find_command(cmd_name):
    """Resolves ``cmd_name`` against EX_COMMANDS.

    Returns the (long_name, short_name) key whose long or short form is
    exactly ``cmd_name``; failing that, the first key whose long name
    starts with ``cmd_name``; or None when nothing matches.
    """
    candidates = [name for name in EX_COMMANDS.keys()
                  if name[0].startswith(cmd_name)]
    if not candidates:
        return None
    for long_name, short_name in candidates:
        if cmd_name in (long_name, short_name):
            return (long_name, short_name)
    # No exact match: fall back to the first prefix match.
    return candidates[0]
def parse_command(cmd):
    """Parses a raw command line (leading ':' included) into an EX_CMD.

    Returns None when the input cannot be interpreted as an ex command.
    """
    stripped = cmd.strip()
    # Drop the leading ':'; a bare ':' (go-to-line prompt) stays valid.
    if len(stripped) > 1:
        stripped = stripped[1:]
    elif stripped != ':':
        return None
    parse_result = parsers.cmd_line.CommandLineParser(cmd[1:]).parse_cmd_line()
    first = parse_result['commands'][0]
    name = first['cmd']
    forced = first['forced']
    arg_text = first['args']
    key = find_command(name)
    if not key:
        return
    data = EX_COMMANDS[key]
    range_allowed = ex_error.ERR_NO_RANGE_ALLOWED not in data.error_on
    parsed_args = {}
    for pattern in data.invocations:
        match = pattern.search(arg_text)
        if not match:
            continue
        # Drop unset groups so they don't clobber the command's defaults.
        parsed_args.update(dict((group, value) for group, value
                                in match.groupdict().iteritems()
                                if value is not None))
        break
    errors = []
    # Run the checks in the order the command declares them.
    for check in data.error_on:
        if check == ex_error.ERR_NO_BANG_ALLOWED and forced:
            errors.append(ex_error.ERR_NO_BANG_ALLOWED)
        if check == ex_error.ERR_TRAILING_CHARS and arg_text:
            errors.append(ex_error.ERR_TRAILING_CHARS)
        if check == ex_error.ERR_NO_RANGE_ALLOWED and parse_result['range']['text_range']:
            errors.append(ex_error.ERR_NO_RANGE_ALLOWED)
        if check == ex_error.ERR_INVALID_RANGE and not parsed_args:
            errors.append(ex_error.ERR_INVALID_RANGE)
        if check == ex_error.ERR_ADDRESS_REQUIRED and not parsed_args:
            errors.append(ex_error.ERR_ADDRESS_REQUIRED)
    return EX_CMD(name=name,
                  command=data.command,
                  forced=forced,
                  args=parsed_args,
                  parse_errors=errors,
                  line_range=parse_result['range'],
                  can_have_range=range_allowed,)
| {
"repo_name": "SublimeText/VintageEx",
"path": "vex/ex_command_parser.py",
"copies": "1",
"size": "19121",
"license": "mit",
"hash": -4063670113969387000,
"line_mean": 47.2853535354,
"line_max": 135,
"alpha_frac": 0.3578264735,
"autogenerated": false,
"ratio": 4.980724146913259,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.583855062041326,
"avg_score": null,
"num_lines": null
} |
"""a simple 'parser' for :ex commands
"""
from collections import namedtuple
import re
from itertools import takewhile
from VintageousEx.vex import ex_error
from VintageousEx.vex import parsers
# Data used to parse strings into ex commands and map them to an actual
# Sublime Text command.
#
# command
# The Sublime Text command to be executed.
# invocations
# Tuple of regexes representing valid calls for this command.
# error_on
# Tuple of error codes. The parsed command is checked for errors based
# on this information.
# For example: on_error=(ex_error.ERR_TRAILING_CHARS,) would make the
# command fail if it was followed by any arguments.
ex_cmd_data = namedtuple('ex_cmd_data', 'command invocations error_on')
# Holds a parsed ex command data.
# TODO: elaborate on params info.
EX_CMD = namedtuple('ex_command', 'name command forced args parse_errors line_range can_have_range')
# Address that can only appear after a command.
POSTFIX_ADDRESS = r'[.$]|(?:/.*?(?<!\\)/|\?.*?(?<!\\)\?){1,2}|[+-]?\d+|[\'][a-zA-Z0-9<>]'
ADDRESS_OFFSET = r'[-+]\d+'
# Can only appear standalone.
OPENENDED_SEARCH_ADDRESS = r'^[/?].*'
# ** IMPORTANT **
# Vim's documentation on valid addresses is wrong. For postfixed addresses,
# as in :copy10,20, only the left end is parsed and used; the rest is discarded
# and not even errors are thrown if the right end is bogus, like in :copy10XXX.
EX_POSTFIX_ADDRESS = re.compile(
r'''(?x)
^(?P<address>
(?:
# A postfix address...
(?:%(address)s)
# optionally followed by offsets...
(?:%(offset)s)*
)|
# or an openended search-based address.
%(openended)s
)
''' % {'address': POSTFIX_ADDRESS,
'offset': ADDRESS_OFFSET,
'openended': OPENENDED_SEARCH_ADDRESS}
)
# Every entry maps a (long_name, short_name) key to the ex_cmd_data that
# drives parsing and validation for that ex command.
EX_COMMANDS = {
    ('write', 'w'): ex_cmd_data(
        command='ex_write_file',
        invocations=(
            re.compile(r'^\s*$'),
            re.compile(r'(?P<plusplus_args> *\+\+[a-zA-Z0-9_]+)* *(?P<operator>>>) *(?P<target_redirect>.+)?'),
            # fixme: raises an error when it shouldn't
            re.compile(r'(?P<plusplus_args> *\+\+[a-zA-Z0-9_]+)* *!(?P<subcmd>.+)'),
            re.compile(r'(?P<plusplus_args> *\+\+[a-zA-Z0-9_]+)* *(?P<file_name>.+)?'),
        ),
        error_on=()
    ),
    ('wall', 'wa'): ex_cmd_data(
        command='ex_write_all',
        invocations=(),
        error_on=(ex_error.ERR_TRAILING_CHARS,
                  ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('pwd', 'pw'): ex_cmd_data(
        command='ex_print_working_dir',
        invocations=(),
        error_on=(ex_error.ERR_NO_RANGE_ALLOWED,
                  ex_error.ERR_NO_BANG_ALLOWED,
                  ex_error.ERR_TRAILING_CHARS)
    ),
    # buffers/files/ls are aliases for the same buffer picker.
    ('buffers', 'buffers'): ex_cmd_data(
        command='ex_prompt_select_open_file',
        invocations=(),
        error_on=(ex_error.ERR_TRAILING_CHARS,
                  ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('files', 'files'): ex_cmd_data(
        command='ex_prompt_select_open_file',
        invocations=(),
        error_on=(ex_error.ERR_TRAILING_CHARS,
                  ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('ls', 'ls'): ex_cmd_data(
        command='ex_prompt_select_open_file',
        invocations=(),
        error_on=(ex_error.ERR_TRAILING_CHARS,
                  ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('registers', 'reg'): ex_cmd_data(
        command='ex_list_registers',
        invocations=(),
        error_on=(ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('map', 'map'): ex_cmd_data(
        command='ex_map',
        invocations=(),
        error_on=(ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('abbreviate', 'ab'): ex_cmd_data(
        command='ex_abbreviate',
        invocations=(),
        error_on=(ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('quit', 'q'): ex_cmd_data(
        command='ex_quit',
        invocations=(),
        error_on=(ex_error.ERR_TRAILING_CHARS,
                  ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('qall', 'qa'): ex_cmd_data(
        command='ex_quit_all',
        invocations=(),
        error_on=(ex_error.ERR_TRAILING_CHARS,
                  ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    # TODO: add invocations
    ('wq', 'wq'): ex_cmd_data(
        command='ex_write_and_quit',
        invocations=(),
        error_on=()
    ),
    ('read', 'r'): ex_cmd_data(
        command='ex_read_shell_out',
        invocations=(
            # xxx: works more or less by chance. fix the command code
            re.compile(r'(?P<plusplus> *\+\+[a-zA-Z0-9_]+)* *(?P<name>.+)'),
            re.compile(r' *!(?P<name>.+)'),
        ),
        # fixme: add error category for ARGS_REQUIRED
        error_on=()
    ),
    ('enew', 'ene'): ex_cmd_data(
        command='ex_new_file',
        invocations=(),
        error_on=(ex_error.ERR_TRAILING_CHARS,
                  ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('ascii', 'as'): ex_cmd_data(
        # This command is implemented in Packages/Vintage.
        command='show_ascii_info',
        invocations=(),
        error_on=(ex_error.ERR_NO_RANGE_ALLOWED,
                  ex_error.ERR_NO_BANG_ALLOWED,
                  ex_error.ERR_TRAILING_CHARS)
    ),
    # vim help doesn't say this command takes any args, but it does
    ('file', 'f'): ex_cmd_data(
        command='ex_file',
        invocations=(),
        error_on=(ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    # Commands below take a postfix address (:move10, :copy.+2, ...).
    ('move', 'move'): ex_cmd_data(
        command='ex_move',
        invocations=(
            EX_POSTFIX_ADDRESS,
        ),
        error_on=(ex_error.ERR_NO_BANG_ALLOWED,
                  ex_error.ERR_ADDRESS_REQUIRED,)
    ),
    ('copy', 'co'): ex_cmd_data(
        command='ex_copy',
        invocations=(
            EX_POSTFIX_ADDRESS,
        ),
        error_on=(ex_error.ERR_NO_BANG_ALLOWED,
                  ex_error.ERR_ADDRESS_REQUIRED,)
    ),
    ('t', 't'): ex_cmd_data(
        command='ex_copy',
        invocations=(
            EX_POSTFIX_ADDRESS,
        ),
        error_on=(ex_error.ERR_NO_BANG_ALLOWED,
                  ex_error.ERR_ADDRESS_REQUIRED,)
    ),
    ('substitute', 's'): ex_cmd_data(
        command='ex_substitute',
        invocations=(re.compile(r'(?P<pattern>.+)'),
                     ),
        error_on=()
    ),
    ('&&', '&&'): ex_cmd_data(
        command='ex_double_ampersand',
        # We don't want to maintain flag values here, so accept anything and
        # let :substitute handle the values.
        invocations=(re.compile(r'(?P<flags>.+?)\s*(?P<count>[0-9]+)'),
                     re.compile(r'\s*(?P<flags>.+?)\s*'),
                     re.compile(r'\s*(?P<count>[0-9]+)\s*'),
                     re.compile(r'^$'),
                     ),
        error_on=()
    ),
    ('shell', 'sh'): ex_cmd_data(
        command='ex_shell',
        invocations=(),
        error_on=(ex_error.ERR_NO_RANGE_ALLOWED,
                  ex_error.ERR_NO_BANG_ALLOWED,
                  ex_error.ERR_TRAILING_CHARS)
    ),
    ('delete', 'd'): ex_cmd_data(
        command='ex_delete',
        invocations=(
            re.compile(r' *(?P<register>[a-zA-Z0-9])? *(?P<count>\d+)?'),
        ),
        error_on=(ex_error.ERR_NO_BANG_ALLOWED,)
    ),
    ('global', 'g'): ex_cmd_data(
        command='ex_global',
        invocations=(
            re.compile(r'(?P<pattern>.+)'),
        ),
        error_on=()
    ),
    ('print', 'p'): ex_cmd_data(
        command='ex_print',
        invocations=(
            re.compile(r'\s*(?P<count>\d+)?\s*(?P<flags>[l#p]+)?'),
        ),
        error_on=(ex_error.ERR_NO_BANG_ALLOWED,)
    ),
    ('Print', 'P'): ex_cmd_data(
        command='ex_print',
        invocations=(
            re.compile(r'\s*(?P<count>\d+)?\s*(?P<flags>[l#p]+)?'),
        ),
        error_on=(ex_error.ERR_NO_BANG_ALLOWED,)
    ),
    ('browse', 'bro'): ex_cmd_data(
        command='ex_browse',
        invocations=(),
        error_on=(ex_error.ERR_NO_BANG_ALLOWED,
                  ex_error.ERR_NO_RANGE_ALLOWED,
                  ex_error.ERR_TRAILING_CHARS,)
    ),
    ('edit', 'e'): ex_cmd_data(
        command='ex_edit',
        invocations=(re.compile(r"^$"),),
        error_on=(ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('cquit', 'cq'): ex_cmd_data(
        command='ex_cquit',
        invocations=(),
        error_on=(ex_error.ERR_TRAILING_CHARS,
                  ex_error.ERR_NO_RANGE_ALLOWED,
                  ex_error.ERR_NO_BANG_ALLOWED,)
    ),
    # TODO: implement all arguments, etc.
    ('xit', 'x'): ex_cmd_data(
        command='ex_exit',
        invocations=(),
        error_on=()
    ),
    # TODO: implement all arguments, etc.
    ('exit', 'exi'): ex_cmd_data(
        command='ex_exit',
        invocations=(),
        error_on=()
    ),
    ('only', 'on'): ex_cmd_data(
        command='ex_only',
        invocations=(re.compile(r'^$'),),
        error_on=(ex_error.ERR_TRAILING_CHARS,
                  ex_error.ERR_NO_RANGE_ALLOWED,)
    ),
    ('new', 'new'): ex_cmd_data(
        command='ex_new',
        invocations=(re.compile(r'^$',),
                     ),
        error_on=(ex_error.ERR_TRAILING_CHARS,)
    ),
    ('yank', 'y'): ex_cmd_data(
        command='ex_yank',
        invocations=(re.compile(r'^(?P<register>\d|[a-z])$'),
                     re.compile(r'^(?P<register>\d|[a-z]) (?P<count>\d+)$'),
                     ),
        error_on=(),
    ),
    (':', ':'): ex_cmd_data(
        command='ex_goto',
        invocations=(),
        error_on=(),
    ),
    ('!', '!'): ex_cmd_data(
        command='ex_shell_out',
        invocations=(
            re.compile(r'(?P<shell_cmd>.+)$'),
        ),
        # FIXME: :!! is a different command to :!
        error_on=(ex_error.ERR_NO_BANG_ALLOWED,),
    ),
    # Tab management commands.
    ('tabedit', 'tabe'): ex_cmd_data(
        command='ex_tab_open',
        invocations=(
            re.compile(r'^(?P<file_name>.+)$'),
        ),
        error_on=(ex_error.ERR_NO_RANGE_ALLOWED,),
    ),
    ('tabnext', 'tabn'): ex_cmd_data(command='ex_tab_next',
                                     invocations=(),
                                     error_on=(ex_error.ERR_NO_RANGE_ALLOWED,)
                                     ),
    ('tabprev', 'tabp'): ex_cmd_data(command='ex_tab_prev',
                                     invocations=(),
                                     error_on=(ex_error.ERR_NO_RANGE_ALLOWED,)
                                     ),
    ('tabfirst', 'tabf'): ex_cmd_data(command='ex_tab_first',
                                      invocations=(),
                                      error_on=(ex_error.ERR_NO_RANGE_ALLOWED,)
                                      ),
    ('tablast', 'tabl'): ex_cmd_data(command='ex_tab_last',
                                     invocations=(),
                                     error_on=(ex_error.ERR_NO_RANGE_ALLOWED,)
                                     ),
    ('tabonly', 'tabo'): ex_cmd_data(command='ex_tab_only',
                                     invocations=(),
                                     error_on=(
                                         ex_error.ERR_NO_RANGE_ALLOWED,
                                         ex_error.ERR_TRAILING_CHARS,)
                                     )
}
def find_command(cmd_name):
    """Resolves ``cmd_name`` against EX_COMMANDS.

    Returns the (long_name, short_name) key whose long or short form is
    exactly ``cmd_name``; failing that, the first key whose long name
    starts with ``cmd_name``; or None when nothing matches.
    """
    candidates = [name for name in EX_COMMANDS.keys()
                  if name[0].startswith(cmd_name)]
    if not candidates:
        return None
    for long_name, short_name in candidates:
        if cmd_name in (long_name, short_name):
            return (long_name, short_name)
    # No exact match: fall back to the first prefix match.
    return candidates[0]
def parse_command(cmd):
    """Parses a raw command line (leading ':' included) into an EX_CMD.

    Returns None when the input cannot be interpreted as an ex command.
    """
    cmd_name = cmd.strip()
    # Drop the leading ':'; a bare ':' (go-to-line prompt) stays valid.
    if len(cmd_name) > 1:
        cmd_name = cmd_name[1:]
    elif not cmd_name == ':':
        return None
    parser = parsers.cmd_line.CommandLineParser(cmd[1:])
    r_ = parser.parse_cmd_line()
    command = r_['commands'][0]['cmd']
    bang = r_['commands'][0]['forced']
    args = r_['commands'][0]['args']
    cmd_data = find_command(command)
    if not cmd_data:
        return
    cmd_data = EX_COMMANDS[cmd_data]
    can_have_range = ex_error.ERR_NO_RANGE_ALLOWED not in cmd_data.error_on
    cmd_args = {}
    for pattern in cmd_data.invocations:
        found_args = pattern.search(args)
        if found_args:
            found_args = found_args.groupdict()
            # BUG FIX: the original iterated the dict directly, which
            # yields only the *keys* and breaks the (k, v) unpacking.
            # Iterate the key/value pairs (Python 2: iteritems), dropping
            # unset groups so they don't clobber the command's defaults.
            found_args = dict((k, v) for k, v in found_args.iteritems()
                              if v is not None)
            cmd_args.update(found_args)
            break
    parse_errors = []
    # Run the checks in the order the command declares them.
    for err in cmd_data.error_on:
        if err == ex_error.ERR_NO_BANG_ALLOWED and bang:
            parse_errors.append(ex_error.ERR_NO_BANG_ALLOWED)
        if err == ex_error.ERR_TRAILING_CHARS and args:
            parse_errors.append(ex_error.ERR_TRAILING_CHARS)
        if err == ex_error.ERR_NO_RANGE_ALLOWED and r_['range']['text_range']:
            parse_errors.append(ex_error.ERR_NO_RANGE_ALLOWED)
        if err == ex_error.ERR_INVALID_RANGE and not cmd_args:
            parse_errors.append(ex_error.ERR_INVALID_RANGE)
        if err == ex_error.ERR_ADDRESS_REQUIRED and not cmd_args:
            parse_errors.append(ex_error.ERR_ADDRESS_REQUIRED)
    return EX_CMD(name=command,
                  command=cmd_data.command,
                  forced=bang,
                  args=cmd_args,
                  parse_errors=parse_errors,
                  line_range=r_['range'],
                  can_have_range=can_have_range,)
| {
"repo_name": "gak/VintageousEx",
"path": "vex/ex_command_parser.py",
"copies": "1",
"size": "19079",
"license": "mit",
"hash": -7971690527752942000,
"line_mean": 47.3012658228,
"line_max": 135,
"alpha_frac": 0.3594003879,
"autogenerated": false,
"ratio": 4.962028608582575,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5821428996482575,
"avg_score": null,
"num_lines": null
} |
"""A simple parser for "new ascii format" cpio archives"""
from . import *
from ..io import *
from ..util import *
# Header layout of the "new ASCII" (SVR4, magic "070701") cpio format:
# a 6-byte magic followed by 8-character ASCII hex fields.
CpioHeader = Struct('CpioHeader', [
    ('magic', Struct.STR % 6),
    ('inode', Struct.STR % 8),
    ('mode', Struct.STR % 8),
    ('uid', Struct.STR % 8),
    ('gid', Struct.STR % 8),
    ('nlink', Struct.STR % 8),
    ('mtime', Struct.STR % 8),
    ('size', Struct.STR % 8),
    # 32 skipped bytes — presumably the dev/rdev major/minor fields,
    # which readCpio does not use; TODO confirm against the cpio spec.
    ('...', 32),
    ('nameSize', Struct.STR % 8),
    ('check', Struct.STR % 8),
])
# Magic identifying the new ASCII cpio format.
cpioHeaderMagic = b'070701'
def isCpio(file):
    """Checks whether ``file`` starts with a new-ASCII cpio header."""
    header = CpioHeader.unpack(file)
    if not header:
        # Preserve the falsy unpack result rather than coercing to bool.
        return header
    return header.magic == cpioHeaderMagic
def _roundUp(n, i):
    """Rounds ``n`` up to the next multiple of ``i``."""
    remainder = n % i
    if remainder:
        return n + (i - remainder)
    return n
def readCpio(file):
    """Unpacks a cpio archive and returns the contained files"""
    offset = 0
    while True:
        # Entries are packed back to back: header, file name, data.
        header = CpioHeader.unpack(file, offset)
        if header.magic != cpioHeaderMagic:
            raise Exception('Wrong magic')
        # Every header field is an ASCII hex string; re-parse the whole
        # record into integers.
        header = CpioHeader.tuple._make(int(i, 16) for i in header)
        file.seek(offset + CpioHeader.size)
        name = file.read(header.nameSize).rstrip(b'\0').decode('ascii')
        if name == 'TRAILER!!!':
            # Sentinel entry marking the end of the archive.
            break
        # File data and the following header are both 4-byte aligned.
        dataStart = _roundUp(offset + CpioHeader.size + header.nameSize, 4)
        offset = _roundUp(dataStart + header.size, 4)
        yield UnixFile(
            path = '/' + name,
            size = header.size,
            mtime = header.mtime,
            mode = header.mode,
            uid = header.uid,
            gid = header.gid,
            # Lazy view into the archive; nothing is copied up front.
            contents = FilePart(file, dataStart, header.size),
        )
| {
"repo_name": "ma1co/fwtool.py",
"path": "fwtool/archive/cpio.py",
"copies": "1",
"size": "1443",
"license": "mit",
"hash": 1320602102924121000,
"line_mean": 24.7678571429,
"line_max": 69,
"alpha_frac": 0.6403326403,
"autogenerated": false,
"ratio": 3.0443037974683542,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4184636437768354,
"avg_score": null,
"num_lines": null
} |
"""A simple parser for tar archives"""
import io
from stat import *
import tarfile
from . import *
from ..util import *
# A tar header occupies one 512-byte block; the only field needed to
# sniff the format is the magic at offset 257.
TarHeader = Struct('TarHeader', [
    ('...', 257),
    ('magic', Struct.STR % 8),
    ('...', 235),
])
# POSIX ustar magic + version, and the old GNU tar variant.
tarHeaderMagic = [b'ustar\x0000', b'ustar \0']
def _convertFileType(type):
    """Maps a tarfile member type byte to the stat ``S_IF*`` file-type
    bits, treating anything unrecognized as a regular file."""
    mapping = {
        tarfile.REGTYPE: S_IFREG,
        tarfile.LNKTYPE: S_IFLNK,
        tarfile.SYMTYPE: S_IFLNK,
        tarfile.CHRTYPE: S_IFCHR,
        tarfile.BLKTYPE: S_IFBLK,
        tarfile.DIRTYPE: S_IFDIR,
        tarfile.FIFOTYPE: S_IFIFO,
    }
    return mapping.get(type, S_IFREG)
def isTar(file):
    """Checks for a ustar/GNU tar magic at the standard header offset."""
    header = TarHeader.unpack(file)
    if not header:
        # Preserve the falsy unpack result rather than coercing to bool.
        return header
    return header.magic in tarHeaderMagic
def readTar(file):
    """Unpacks a .tar archive, yielding a UnixFile for every member.

    Symlink members carry their target path as the file contents.
    """
    file.seek(0)
    archive = tarfile.TarFile(fileobj=file)
    for entry in archive:
        if entry.issym():
            # Symlinks have no data in the archive; expose the link
            # target as the contents instead.
            contents = io.BytesIO(entry.linkname.encode('latin1'))
        else:
            contents = archive.extractfile(entry)
        yield UnixFile(
            path = '/' + entry.name,
            size = entry.size,
            mtime = entry.mtime,
            mode = _convertFileType(entry.type) | entry.mode,
            uid = entry.uid,
            gid = entry.gid,
            contents = contents,
        )
| {
"repo_name": "ma1co/fwtool.py",
"path": "fwtool/archive/tar.py",
"copies": "1",
"size": "1139",
"license": "mit",
"hash": 8760037356466184000,
"line_mean": 23.7608695652,
"line_max": 110,
"alpha_frac": 0.678665496,
"autogenerated": false,
"ratio": 2.981675392670157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9048835700404968,
"avg_score": 0.02230103765303773,
"num_lines": 46
} |
"""A simple parser for zip archives"""
from collections import namedtuple
import time
import zipfile
# What readZip yields for each archive member.
ZipFile = namedtuple('ZipFile', 'path, size, mtime, contents')
from ..util import *
# Just enough of the local-file-header layout to sniff the magic.
ZipHeader = Struct('ZipHeader', [
    ('magic', Struct.STR % 4),
    ('...', 26),
])
# Local file header signature ("PK\x03\x04").
zipHeaderMagic = b'PK\x03\x04'
class _MySharedFile(object):
    """Python 2 stand-in for zipfile's internal ``_SharedFile``: tracks
    its own read position so several readers can share one underlying
    file object without disturbing each other."""

    def __init__(self, file):
        self._file = file
        self._pos = file.tell()

    def read(self, n=-1):
        # Restore our position, read, then remember where we stopped.
        self._file.seek(self._pos)
        chunk = self._file.read(n)
        self._pos = self._file.tell()
        return chunk

    def close(self):
        underlying, self._file = self._file, None
        if underlying is not None:
            underlying.close()
def isZip(file):
    """Checks whether ``file`` starts with a zip local-file header."""
    header = ZipHeader.unpack(file)
    if not header:
        # Preserve the falsy unpack result rather than coercing to bool.
        return header
    return header.magic == zipHeaderMagic
def readZip(file):
    """Takes a .zip file and yields the contained files as ZipFile tuples."""
    zip = zipfile.ZipFile(file, 'r')
    for member in zip.infolist():
        contents = zip.open(member)
        # zipfile's reader normally wraps the underlying file in an
        # internal _SharedFile so members can be read independently.
        # On Python 2 that class does not exist, so substitute our own
        # position-tracking wrapper.  NOTE(review): relies on the private
        # ``_fileobj`` attribute — fragile across zipfile versions.
        if contents._fileobj.__class__.__name__ != '_SharedFile':
            # Python 2
            contents._fileobj = _MySharedFile(contents._fileobj)
        yield ZipFile(
            path = member.filename,
            size = member.file_size,
            # date_time is a naive local-time 6-tuple; the -1s let
            # mktime infer weekday, yearday and DST.
            mtime = time.mktime(member.date_time + (-1, -1, -1)),
            contents = contents,
        )
| {
"repo_name": "ma1co/fwtool.py",
"path": "fwtool/zip/__init__.py",
"copies": "1",
"size": "1241",
"license": "mit",
"hash": -2969467289764412000,
"line_mean": 22.4150943396,
"line_max": 62,
"alpha_frac": 0.6615632554,
"autogenerated": false,
"ratio": 3.3181818181818183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44797450735818184,
"avg_score": null,
"num_lines": null
} |
# A simple Pascal interpreter that just adds two single-digit integers
# Token types
#
# EOF token indicates no more input left for lexical analysis
# Each tag string doubles as the type's display name in Token.__str__.
INTEGER, PLUS, EOF = 'INTEGER', 'PLUS', 'EOF'
class Token(object):
    """A single lexical token: a (type, value) pair.

    ``type`` is one of INTEGER, PLUS or EOF; ``value`` is the digit
    0-9, the '+' character, or None for EOF.
    """

    def __init__(self, type, value):
        self.type = type
        self.value = value

    def __str__(self):
        """String representation of the instance.

        Examples:
            Token(INTEGER, 3)
            Token(PLUS, '+')
        """
        return 'Token(%s, %s)' % (self.type, repr(self.value))

    def __repr__(self):
        return self.__str__()
class Interpreter(object):
    """Parses and evaluates a single INTEGER PLUS INTEGER expression."""

    def __init__(self, text):
        # Raw client input, e.g. "3+5".
        self.text = text
        # Cursor into self.text used by the tokenizer.
        self.pos = 0
        # Token currently under consideration.
        self.current_token = None

    def error(self):
        """Signals a lexing/parsing failure."""
        raise Exception('Error parsing output')

    def get_next_token(self):
        """Lexical analyzer (scanner/tokenizer).

        Produces the next token from the input, or an EOF token once
        the input is exhausted.
        """
        source = self.text
        # Past the end of the input: nothing left to tokenize.
        if self.pos > len(source) - 1:
            return Token(EOF, None)
        char = source[self.pos]
        if char.isdigit():
            # Single-digit integer token.
            self.pos += 1
            return Token(INTEGER, int(char))
        if char == '+':
            self.pos += 1
            return Token(PLUS, char)
        # Anything else is a lexing error.
        self.error()

    def eat(self, token_type):
        """Consumes the current token if its type matches ``token_type``;
        otherwise raises via error()."""
        if self.current_token.type != token_type:
            self.error()
        self.current_token = self.get_next_token()

    def expr(self):
        """expr -> INTEGER PLUS INTEGER; returns the sum of the digits."""
        # Prime the parser with the first token.
        self.current_token = self.get_next_token()
        left = self.current_token
        self.eat(INTEGER)
        # The operator itself carries no information beyond its type.
        self.eat(PLUS)
        right = self.current_token
        self.eat(INTEGER)
        # current_token is now EOF; the parse succeeded, so interpret.
        return left.value + right.value
def main():
    """Simple REPL: read an expression, evaluate it, print the result."""
    while True:
        try:
            text = raw_input('calc> ')
        except EOFError:
            break
        # Ignore empty lines and keep prompting.
        if not text:
            continue
        print(Interpreter(text).expr())
if __name__ == "__main__":
    main()
| {
"repo_name": "RagBillySandstone/google-python-exercises",
"path": "my_own_exercises/pascal_interpreter/calc1.py",
"copies": "1",
"size": "3487",
"license": "apache-2.0",
"hash": 5737986754006318000,
"line_mean": 25.6183206107,
"line_max": 70,
"alpha_frac": 0.6389446516,
"autogenerated": false,
"ratio": 3.8830734966592426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5022018148259243,
"avg_score": null,
"num_lines": null
} |
"""A simple pet shop
Shows a list of animals, and you can click on each one.
Image resizing and page view tracking performed using lightbus.
"""
import lightbus
from flask import Flask
from lightbus_examples.ex03_worked_example.store.bus import bus
# Web front-end for the worked example; heavy lifting (image resizing,
# page-view tracking) is delegated over the lightbus bus imported above.
app = Flask(__name__)
lightbus.configure_logging()
# Source images for the pets on sale; thumbnails are produced on demand
# through the bus's image.resize RPC.
PETS = (
    "http://store.company.com/image1.jpg",
    "http://store.company.com/image2.jpg",
    "http://store.company.com/image3.jpg",
)
@app.route("/")
def home():
    """Render the store front page: one thumbnail link per pet."""
    parts = ["<h1>Online pet store</h1><br>"]
    for pet_num, image_url in enumerate(PETS):
        # Resize each pet image via the bus RPC before linking to it.
        resized_url = bus.image.resize(url=image_url, width=200, height=200)
        parts.append(f'<a href="/pet/{pet_num}">' f'<img src="{resized_url}">' f"</a> ")
    # Record the page view asynchronously on the bus.
    bus.store.page_view.fire(url="/")
    return "".join(parts)
@app.route("/pet/<int:pet_num>")
def pet(pet_num):
    """Render the detail page for a single pet.

    Resizes the pet's image via the bus, records the page view, and
    returns the heading plus image HTML.
    """
    resized_url = bus.image.resize(url=PETS[pet_num], width=200, height=200)
    bus.store.page_view.fire(url=f"/pet/{pet_num}")
    html = f"<h1>Pet {pet_num}</h1>"
    # BUG FIX: this line previously used '=' and overwrote the heading
    # assigned just above; append instead.
    html += f'<img src="{resized_url}"><br />'
    return html
| {
"repo_name": "adamcharnock/lightbus",
"path": "lightbus_examples/ex03_worked_example/store/web.py",
"copies": "1",
"size": "1064",
"license": "apache-2.0",
"hash": -2600018102164512000,
"line_mean": 24.3333333333,
"line_max": 82,
"alpha_frac": 0.6447368421,
"autogenerated": false,
"ratio": 2.8679245283018866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40126613704018865,
"avg_score": null,
"num_lines": null
} |
""" A simple PID controller class.
This is a mostly literal C++ -> Python translation of the ROS
control_toolbox Pid class: http://ros.org/wiki/control_toolbox.
"""
import time
import math
# from http://w3.cs.jmu.edu/spragunr/CS354_S13/labs/pa1/pid.py
#*******************************************************************
# Translated from pid.cpp by Nathan Sprague
# Jan. 2013
# See below for original license information:
#*******************************************************************
#*******************************************************************
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#*******************************************************************
class PID(object):
    """ A basic pid class.
    This class implements a generic structure that can be used to
    create a wide range of pid controllers. It can function
    independently or be subclassed to provide more specific controls
    based on a particular control loop.
    In particular, this class implements the standard pid equation:
    $command = -p_{term} - i_{term} - d_{term} $
    where:
    $ p_{term} = p_{gain} * p_{error} $
    $ i_{term} = i_{gain} * i_{error} $
    $ d_{term} = d_{gain} * d_{error} $
    $ i_{error} = i_{error} + p_{error} * dt $
    $ d_{error} = (p_{error} - p_{error last}) / dt $
    given:
    $ p_{error} = p_{state} - p_{target} $.
    """
    def __init__(self, p_gain, i_gain, d_gain, i_max, i_min):
        """Constructor, zeros out Pid values when created and
        initialize Pid-gains and integral term limits.
        Parameters:
          p_gain     The proportional gain.
          i_gain     The integral gain.
          d_gain     The derivative gain.
          i_max      The integral upper limit.
          i_min      The integral lower limit.
        """
        self.set_gains(p_gain, i_gain, d_gain, i_max, i_min)
        self.reset()
    def reset(self):
        """ Reset the state of this PID controller """
        self._p_error_last = 0.0 # Save position state for derivative
                                 # state calculation.
        self._p_error = 0.0      # Position error.
        self._d_error = 0.0      # Derivative error.
        self._i_error = 0.0      # Integator error.
        self._cmd = 0.0          # Command to send.
        self._last_time = None   # Used for automatic calculation of dt.
    def set_gains(self, p_gain, i_gain, d_gain, i_max, i_min):
        """ Set PID gains for the controller.
        Parameters:
          p_gain     The proportional gain.
          i_gain     The integral gain.
          d_gain     The derivative gain.
          i_max      The integral upper limit.
          i_min      The integral lower limit.
        """
        self._p_gain = p_gain
        self._i_gain = i_gain
        self._d_gain = d_gain
        self._i_max = i_max
        self._i_min = i_min
    @property
    def p_gain(self):
        """ Read-only access to p_gain. """
        return self._p_gain
    @property
    def i_gain(self):
        """ Read-only access to i_gain. """
        return self._i_gain
    @property
    def d_gain(self):
        """ Read-only access to d_gain. """
        return self._d_gain
    @property
    def i_max(self):
        """ Read-only access to i_max. """
        return self._i_max
    @property
    def i_min(self):
        """ Read-only access to i_min. """
        return self._i_min
    @property
    def p_error(self):
        """ Read-only access to p_error. """
        return self._p_error
    @property
    def i_error(self):
        """ Read-only access to i_error. """
        return self._i_error
    @property
    def d_error(self):
        """ Read-only access to d_error. """
        return self._d_error
    @property
    def cmd(self):
        """ Read-only access to the latest command. """
        return self._cmd
    def __str__(self):
        """ String representation of the current state of the controller. """
        result = ""
        result += "p_gain:  " + str(self.p_gain) + "\n"
        result += "i_gain:  " + str(self.i_gain) + "\n"
        result += "d_gain:  " + str(self.d_gain) + "\n"
        result += "i_max:   " + str(self.i_max) + "\n"
        result += "i_min:   " + str(self.i_min) + "\n"
        result += "p_error: " + str(self.p_error) + "\n"
        result += "i_error: " + str(self.i_error) + "\n"
        result += "d_error: " + str(self.d_error) + "\n"
        result += "cmd:     " + str(self.cmd) + "\n"
        return result
    def update_PID(self, p_error, dt=None):
        """ Update the Pid loop with nonuniform time step size.
        Parameters:
          p_error  Error since last call (p_state - p_target)
          dt       Change in time since last call, in seconds, or None.
                   If dt is None, then the system clock will be used to
                   calculate the time since the last update.
        """
        # FIX: compare to the None singleton with 'is', not '==' (PEP 8);
        # '==' can be hijacked by a custom __eq__ on p_error-like types.
        if dt is None:
            cur_time = time.time()
            if self._last_time is None:
                self._last_time = cur_time
            dt = cur_time - self._last_time
            self._last_time = cur_time
        self._p_error = p_error # this is pError = pState-pTarget
        # Guard against a degenerate time step (first call, bad clock).
        if dt == 0 or math.isnan(dt) or math.isinf(dt):
            return 0.0
        # Calculate proportional contribution to command
        p_term = self._p_gain * self._p_error
        # Calculate the integral error
        self._i_error += dt * self._p_error
        # Calculate integral contribution to command
        i_term = self._i_gain * self._i_error
        # Limit i_term so that the limit is meaningful in the output
        if i_term > self._i_max and self._i_gain != 0:
            i_term = self._i_max
            self._i_error = i_term / self._i_gain
        elif i_term < self._i_min and self._i_gain != 0:
            i_term = self._i_min
            self._i_error = i_term / self._i_gain
        # Calculate the derivative error
        self._d_error = (self._p_error - self._p_error_last) / dt
        self._p_error_last = self._p_error
        # Calculate derivative contribution to command
        d_term = self._d_gain * self._d_error
        self._cmd = -p_term - i_term - d_term
        return self._cmd
if __name__ == "__main__":
    # Smoke test: print the controller state before and after updates.
    # FIX: use the print() function (works on Python 2 and 3); the bare
    # Python 2 print statement makes the whole file a SyntaxError on Python 3.
    controller = PID(1.0, 2.0, 3.0, 1.0, -1.0)
    print(controller)
    controller.update_PID(-1)
    print(controller)
    controller.update_PID(-.5)
    print(controller)
| {
"repo_name": "smaria/autonomous-sailing-robot",
"path": "src/boat_pid_control/src/boat_pid_control/pid_controller_class.py",
"copies": "2",
"size": "8112",
"license": "bsd-2-clause",
"hash": -40225032037028410,
"line_mean": 33.8154506438,
"line_max": 77,
"alpha_frac": 0.5658284024,
"autogenerated": false,
"ratio": 3.81203007518797,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0065131289445763945,
"num_lines": 233
} |
"""A simple plotting tool to create spectral diagnostics plots similar to those
originally proposed by M. Kromer (see, for example, Kromer et al. 2013, figure
4).
"""
import logging
import numpy as np
import astropy.units as units
import astropy.constants as csts
import pandas as pd
try:
import astropy.modeling.blackbody as abb
except ImportError: # for astropy version < 2.0
import astropy.analytic_functions as abb
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.lines as lines
import matplotlib.cm as cm
from tardis_minimal_model import minimal_model
from tardis.util.base import (
species_string_to_tuple,
species_tuple_to_string,
roman_to_int,
int_to_roman,
)
# Restore matplotlib defaults so the plot style is reproducible.
plt.rcdefaults()
logger = logging.getLogger(__name__)
# Lookup tables mapping chemical symbols <-> atomic numbers, loaded from a
# two-column CSV expected in the working directory.
elements = pd.read_csv("elements.csv", names=["chem_symbol", "atomic_no"])
inv_elements = pd.Series(
    elements["chem_symbol"], index=elements["atomic_no"]
).to_dict()
class tardis_kromer_plotter(object):
"""A plotter, generating spectral diagnostics plots as proposed by M.
Kromer.
With this tool, a specific visualisation of Tardis spectra may be produced.
It illustrates which elements predominantly contribute to the emission and
absorption part of the emergent (virtual) packet spectrum.
Once a model is defined, a series of queries is performed on the packet
property arrays. The results are cached and the "Kromer" plot is produced
with the main method of this class, namely with ~generate_plot.
Parameters
----------
mdl : minimal_model
a minimal_model object containing the Tardis run
mode : str, optional
'real' (default) or 'virtual'; determines which packet population is
used to generate the Kromer plot.
Notes
-----
For this to work, the model must be generated by a Tardis calculation using
the virtual packet logging capability. This requires a compilation with the
--with-vpacket-logging flag.
This way of illustrating the spectral synthesis process was introduced by
M. Kromer (see e.g. [1]_).
References
----------
.. [1] Kromer et al. "SN 2010lp - Type Ia Supernova from a Violent Merger
of Two Carbon-Oxygen White Dwarfs" ApjL, 2013, 778, L18
"""
    def __init__(self, mdl, mode="real"):
        # ``mode`` must be assigned before ``mdl``: the mdl setter validates
        # that the model's packet mode matches self.mode.
        self._mode = None
        self.mode = mode
        self._mdl = None
        # plot configuration; overwritten by generate_plot()
        self._zmax = 100
        self._cmap = cm.jet
        self._xlim = None
        self._ylim = None
        self._twinx = False
        self._bins = None
        self._ax = None
        self._pax = None
        # lazily-computed packet masks and derived quantities (see the
        # matching properties below); None means "not computed yet"
        self._noint_mask = None
        self._escat_mask = None
        self._escatonly_mask = None
        self._line_mask = None
        self._lam_escat = None
        self._lam_noint = None
        self._weights_escat = None
        self._weights_noint = None
        self._line_in_infos = None
        self._line_in_nu = None
        self._line_in_L = None
        self._line_out_infos = None
        self._line_out_nu = None
        self._line_out_L = None
        # assigned last: the setter validates the model and resets caches
        self.mdl = mdl
@property
def mode(self):
"""packet mode - use real or virtual packets for plotting"""
return self._mode
@mode.setter
def mode(self, val):
known_modes = ["real", "virtual"]
try:
assert val in known_modes
except AssertionError:
raise ValueError("unknown mode")
self._mode = val
@property
def mdl(self):
"""Tardis model object"""
return self._mdl
@mdl.setter
def mdl(self, val):
try:
assert type(val) == minimal_model
except AssertionError:
raise ValueError("'mdl' must be either a minimal_model")
if val.mode != self.mode:
raise ValueError(
"packet mode of minimal_model doesn't" " match requested mode"
)
if not val.readin:
raise ValueError("passing empty minimal_model; read in data first")
self._reset_cache()
self._mdl = val
    # --- simple read-only accessors for plot configuration ---
    @property
    def zmax(self):
        """Maximum atomic number"""
        return self._zmax
    @property
    def cmap(self):
        """Colour map, used to highlight the different atoms"""
        return self._cmap
    @property
    def ax(self):
        """Main axes, containing the emission part of the Kromer plot"""
        return self._ax
    @property
    def pax(self):
        """Secondary axes, containing the absorption part of the Kromer plot"""
        return self._pax
    @property
    def bins(self):
        """frequency binning for the spectral visualisation"""
        return self._bins
    @property
    def xlim(self):
        """wavelength limits"""
        return self._xlim
    @property
    def ylim(self):
        """Flux limits"""
        return self._ylim
    @property
    def twinx(self):
        """switch to decide where to place the absorption part of the Kromer
        plot"""
        return self._twinx
    # --- lazily-computed packet masks (cached after first access) ---
    @property
    def noint_mask(self):
        """Masking array, highlighting the packets that never interacted"""
        # last_interaction_type == -1 marks packets that escaped without
        # any interaction
        if self._noint_mask is None:
            self._noint_mask = self.mdl.last_interaction_type == -1
        return self._noint_mask
    @property
    def escat_mask(self):
        """Masking array, highlighting the packets that performed Thomson
        scatterings"""
        # last_interaction_type == 1 marks electron (Thomson) scattering
        if self._escat_mask is None:
            self._escat_mask = self.mdl.last_interaction_type == 1
        return self._escat_mask
@property
def escatonly_mask(self):
"""Masking array, highlighting the packets that only performed Thomson
scatterings"""
if self._escatonly_mask is None:
tmp = (
(self.mdl.last_line_interaction_in_id == -1) * (self.escat_mask)
).astype(np.bool)
self._escatonly_mask = tmp
return self._escatonly_mask
    @property
    def line_mask(self):
        """Mask array, highlighting packets whose last interaction was with a
        line"""
        if self._line_mask is None:
            self._line_mask = (self.mdl.last_interaction_type > -1) * (
                self.mdl.last_line_interaction_in_id > -1
            )
        return self._line_mask
    @property
    def lam_noint(self):
        """Wavelength of the non-interacting packets"""
        # convert packet frequencies to wavelengths (lambda = c / nu)
        if self._lam_noint is None:
            self._lam_noint = (csts.c.cgs / (self.mdl.packet_nus[self.noint_mask])).to(
                units.AA
            )
        return self._lam_noint
    @property
    def lam_escat(self):
        """Wavelength of the purely electron scattering packets"""
        if self._lam_escat is None:
            self._lam_escat = (
                csts.c.cgs / (self.mdl.packet_nus[self.escatonly_mask])
            ).to(units.AA)
        return self._lam_escat
    @property
    def weights_escat(self):
        """luminosity of the only electron scattering packets"""
        # packet energy / simulation time = luminosity contribution
        if self._weights_escat is None:
            self._weights_escat = (
                self.mdl.packet_energies[self.escatonly_mask]
                / self.mdl.time_of_simulation
            )
        return self._weights_escat
    @property
    def weights_noint(self):
        """luminosity of the non-interacting packets"""
        if self._weights_noint is None:
            self._weights_noint = (
                self.mdl.packet_energies[self.noint_mask] / self.mdl.time_of_simulation
            )
        return self._weights_noint
    @property
    def line_out_infos(self):
        """Line ids of the transitions packets were emitted last"""
        if self._line_out_infos is None:
            tmp = self.mdl.last_line_interaction_out_id
            ids = tmp[self.line_mask]
            self._line_out_infos = self.mdl.lines.iloc[ids]
        return self._line_out_infos
    @property
    def line_out_nu(self):
        """frequency of the transitions packets were emitted last"""
        if self._line_out_nu is None:
            self._line_out_nu = self.mdl.packet_nus[self.line_mask]
        return self._line_out_nu
    @property
    def line_out_L(self):
        """luminosity of the line interaction packets"""
        if self._line_out_L is None:
            tmp = self.mdl.packet_energies
            self._line_out_L = tmp[self.line_mask]
        return self._line_out_L
    @property
    def line_in_infos(self):
        """Line ids of the transitions packets were last absorbed"""
        if self._line_in_infos is None:
            tmp = self.mdl.last_line_interaction_in_id
            ids = tmp[self.line_mask]
            self._line_in_infos = self.mdl.lines.iloc[ids]
        return self._line_in_infos
    @property
    def line_in_nu(self):
        """frequencies of the transitions packets were last absorbed"""
        if self._line_in_nu is None:
            nus = self.mdl.last_interaction_in_nu
            self._line_in_nu = nus[self.line_mask]
        return self._line_in_nu
    @property
    def line_in_L(self):
        """luminosity of the line interaction packets"""
        if self._line_in_L is None:
            tmp = self.mdl.packet_energies
            self._line_in_L = tmp[self.line_mask]
        return self._line_in_L
    @property
    def line_info(self):
        """produces list of elements to be included in the kromer plot

        Side effects: stores the in/out interaction tables restricted to the
        current wavelength window on ``self`` and fills ``self.keep_colour``
        and (when a species list is active) ``self.requested_species_ids``.
        Returns a 2-column array of (identifier, packet count), where the
        identifier is an atomic number or, with a species list, the 4-digit
        ion id (Z * 100 + ion).
        """
        # gets list of elements and number of emitted packets
        self.last_line_interaction_out_id = self.line_out_infos
        self.last_line_interaction_out_angstrom = self.line_out_nu.to(
            units.Angstrom, equivalencies=units.spectral()
        )
        self.last_line_interaction_out_id[
            "emitted_wavelength"
        ] = self.last_line_interaction_out_angstrom
        self.line_out_infos_within_xlims = self.last_line_interaction_out_id.loc[
            (
                self.last_line_interaction_out_id.emitted_wavelength
                >= self._xlim[0]
            )
            & (
                self.last_line_interaction_out_id.emitted_wavelength
                <= self._xlim[1]
            )
        ]
        # gets list of elements and number of absorbed packets
        self.last_line_interaction_in_id = self.line_in_infos
        self.last_line_interaction_in_angstrom = self.line_in_nu.to(
            units.Angstrom, equivalencies=units.spectral()
        )
        self.last_line_interaction_in_id[
            "emitted_wavelength"
        ] = self.last_line_interaction_in_angstrom
        self.line_in_infos_within_xlims = self.last_line_interaction_in_id.loc[
            (
                self.last_line_interaction_in_id.emitted_wavelength
                >= self._xlim[0]
            )
            & (
                self.last_line_interaction_in_id.emitted_wavelength
                <= self._xlim[1]
            )
        ]
        self.line_in_and_out_infos_within_xlims = pd.concat([self.line_in_infos_within_xlims, self.line_out_infos_within_xlims])
        # this generates the 4-digit ID for all transitions in the model
        # (e.g. Fe III line --> 2602)
        self.line_in_and_out_infos_within_xlims["ion_id"] = (
            self.line_in_and_out_infos_within_xlims["atomic_number"] * 100
            + self.line_in_and_out_infos_within_xlims["ion_number"]
        )
        # this is a list that will hold which elements should all be in the
        # same colour. This is used if the user requests a mix of ions and
        # elements.
        self.keep_colour = []
        # this reads in the species specified by user and generates the 4-digit
        # ID keys for them
        if self._species_list is not None:
            # create a list of the ions ids requested by species_list
            requested_species_ids = []
            # check if there are any digits in the species list. If there are
            # then exit
            # species_list should only contain species in the Roman numeral
            # format, e.g. Si II, and each ion must contain a space
            if any(char.isdigit() for char in " ".join(self._species_list)) == True:
                raise ValueError(
                    "All species must be in Roman numeral form, e.g. Si II"
                )
            else:
                # go through each of the request species. Check whether it is
                # an element or ion (ions have spaces). If it is an element,
                # add all possible ions to the ions list. Otherwise just add
                # the requested ion
                for species in self._species_list:
                    if " " in species:
                        requested_species_ids.append(
                            [
                                species_string_to_tuple(species)[0] * 100
                                + species_string_to_tuple(species)[1]
                            ]
                        )
                    else:
                        # bare element: look up Z and request every ion stage
                        atomic_number = elements.loc[elements['chem_symbol'] == species.lower(), 'atomic_no'].values[0]
                        requested_species_ids.append(
                            [atomic_number * 100 + i for i in np.arange(atomic_number)]
                        )
                        self.keep_colour.append(atomic_number)
            # flatten the per-species lists into one list of ion ids
            self.requested_species_ids = [
                species_id for list in requested_species_ids for species_id in list
            ]
        # now we are getting the list of unique values for 'ion_id' if we would
        # like to use species. Otherwise we get unique atomic numbers
        if self._species_list is not None:
            self._elements_in_kromer_plot = np.c_[
                np.unique(
                    self.line_in_and_out_infos_within_xlims.ion_id.values,
                    return_counts=True,
                )
            ]
        else:
            self._elements_in_kromer_plot = np.c_[
                np.unique(
                    self.line_in_and_out_infos_within_xlims.atomic_number.values,
                    return_counts=True,
                )
            ]
        return self._elements_in_kromer_plot
def _reset_cache(self):
"""Reset cached variables - only needed in case the model is changed
after initialisation"""
self._noint_mask = None
self._escat_mask = None
self._escatonly_mask = None
self._line_mask = None
self._lam_escat = None
self._lam_noint = None
self._weights_escat = None
self._weights_noint = None
self._line_in_infos = None
self._line_in_nu = None
self._line_in_L = None
self._line_out_infos = None
self._line_out_nu = None
self._line_out_L = None
    def generate_plot(
        self,
        ax=None,
        cmap=cm.jet,
        bins=None,
        xlim=None,
        ylim=None,
        nelements=None,
        twinx=False,
        species_list=None,
    ):
        """Generate the actual "Kromer" plot
        Parameters
        ----------
        ax : matplotlib.axes or None
            axes object into which the emission part of the Kromer plot should
            be plotted; if None, a new one is generated (default None)
        cmap : matplotlib.cm.ListedColormap or None
            color map object used for the illustration of the different atomic
            contributions (default matplotlib.cm.jet)
        bins : np.ndarray or None
            array of the wavelength bins used for the illustration of the
            atomic contributions; if None, the same binning as for the stored
            virtual spectrum is used (default None)
        xlim : tuple or array-like or None
            wavelength limits for the display; if None, the x-axis is
            automatically scaled (default None)
        ylim : tuple or array-like or None
            flux limits for the display; if None, the y-axis is automatically
            scaled (default None)
        nelements: int or None
            number of elements that should be included in the Kromer plots.
            The top nelements are determined based on those with the most packet
            interactions
        twinx : boolean
            determines where the absorption part of the Kromer plot is placed,
            if True, the absorption part is attached at the top of the main
            axes box, otherwise it is placed below the emission part (default
            False)
        species_list: list of strings or None
            list of strings containing the names of species that should be included in the Kromer plots,
            e.g. ['Si II', 'Ca II']
        Returns
        -------
        fig : matplotlib.figure
            figure instance containing the plot
        """
        # store the display configuration on self for the helper methods
        self._ax = None
        self._pax = None
        self._cmap = cmap
        self._ax = ax
        self._ylim = ylim
        self._twinx = twinx
        # the species list can contain either a specific element, a specific
        # ion, a range of ions, or any combination of these if the list
        # contains a range of ions, separate each one into a new entry in the
        # species list
        full_species_list = []
        if species_list is not None:
            for species in species_list:
                # check if a hyphen is present. If it is, then it indicates a
                # range of ions. Add each ion in that range to the list
                if "-" in species:
                    element = species.split(" ")[0]
                    first_ion_numeral = roman_to_int(
                        species.split(" ")[-1].split("-")[0]
                    )
                    second_ion_numeral = roman_to_int(
                        species.split(" ")[-1].split("-")[-1]
                    )
                    for i in np.arange(first_ion_numeral, second_ion_numeral + 1):
                        full_species_list.append(element + " " + int_to_roman(i))
                else:
                    full_species_list.append(species)
            self._species_list = full_species_list
        else:
            self._species_list = None
        if xlim is None:
            self._xlim = [
                np.min(self.mdl.spectrum_wave).value,
                np.max(self.mdl.spectrum_wave).value,
            ]
        else:
            self._xlim = xlim
        if bins is None:
            # spectrum_wave is stored in descending order; reverse for bins
            self._bins = self.mdl.spectrum_wave[::-1]
        else:
            self._bins = bins
        # get the elements/species to be included in the plot
        self._elements_in_kromer_plot = self.line_info
        # if no nelements and no species list is specified, then the number of
        # elements to be included in the colourbar is determined from the list
        # of unique elements that appear in the model
        if nelements is None and species_list is None:
            self._nelements = len(np.unique(self.line_in_and_out_infos_within_xlims.atomic_number.values))
        elif nelements is None and species_list is not None:
            # if species_list has been specified, then the number of elements
            # to be included is set to the length of that list
            self._nelements = len(self._species_list)
        else:
            # if nelements has been specified, then the number of elements to
            # be included is set to the length of that list
            self._nelements = nelements
        # if the length of self._elements_in_kromer_plot exceeds the requested
        # number of elements to be included in the colourbar, then this if
        # statement applies
        if self._species_list is not None:
            # if we have specified a species list then only take those species
            # that are requested
            mask = np.in1d(
                self._elements_in_kromer_plot[:, 0], self.requested_species_ids
            )
            self._elements_in_kromer_plot = self._elements_in_kromer_plot[mask]
        elif len(self._elements_in_kromer_plot) > self._nelements:
            # if nelements is specified, then sort to find the top contributing
            # elements, pick the top nelements, and sort back by atomic number
            self._elements_in_kromer_plot = self._elements_in_kromer_plot[
                np.argsort(self._elements_in_kromer_plot[:, 1])[::-1]
            ]
            self._elements_in_kromer_plot = self._elements_in_kromer_plot[
                : self._nelements
            ]
            self._elements_in_kromer_plot = self._elements_in_kromer_plot[
                np.argsort(self._elements_in_kromer_plot[:, 0])
            ]
        else:
            # if the length of self._elements_in_kromer_plot is less than the
            # requested number of elements in the model, then this requested
            # length is updated to be the length of length of
            # self._elements_in_kromer_plot
            self._nelements = len(self._elements_in_kromer_plot)
        # this will reset nelements if species_list is turned on
        # it's possible to request a species that doesn't appear in the plot
        # this will ensure that species isn't counted when determining labels
        # and colours
        if self._species_list is not None:
            labels = []
            for species in self._species_list:
                if " " in species:
                    atomic_number = species_string_to_tuple(species)[0]
                    ion_number = species_string_to_tuple(species)[1]
                    species_id = atomic_number * 100 + ion_number
                    if species_id in self._elements_in_kromer_plot:
                        labels.append(species)
                else:
                    labels.append(species)
            self._nelements = len(labels)
        # assemble the figure: emission panel, photosphere curve, colour
        # bar, legend, then the absorption panel
        self._axes_handling_preparation()
        self._generate_emission_part()
        self._generate_photosphere_part()
        self._generate_and_add_colormap()
        self._generate_and_add_legend()
        self._paxes_handling_preparation()
        self._generate_absorption_part()
        self._axis_handling_label_rescale()
        return plt.gcf()
    def _axes_handling_preparation(self):
        """prepare the main axes; create a new axes if none exists"""
        if self._ax is None:
            self._ax = plt.figure().add_subplot(111)
    def _paxes_handling_preparation(self):
        """prepare the axes for the absorption part of the Kromer plot
        according to the twinx value"""
        # with twinx the absorption panel shares the x-axis but gets its own
        # y-axis; otherwise it is drawn into the same axes as the emission
        if self.twinx:
            self._pax = self._ax.twinx()
        else:
            self._pax = self._ax
    def _generate_emission_part(self):
        """generate the emission part of the Kromer plot

        Builds a stacked histogram: non-interacting packets (black),
        electron-scattering-only packets (grey), unselected species
        (silver), then one colour per selected element/ion; overplots
        the emergent spectrum in blue.
        """
        lams = [self.lam_noint, self.lam_escat]
        weights = [self.weights_noint, self.weights_escat]
        colors = ["black", "grey"]
        # if species_list is entered, the ion_id will be used to determine the
        # colours, etc
        if self._species_list is not None:
            values_to_compare = np.unique(
                self.line_in_and_out_infos_within_xlims.ion_id.values,
                return_counts=False,
            )
        else:
            # otherwise, if there is no species_list, then the atomic_number
            # is used for colours, etc.
            values_to_compare = np.unique(
                self.line_in_and_out_infos_within_xlims.atomic_number.values,
                return_counts=False,
            )
        # this first for loop is to go through all elements and colour all
        # elements as 'Other' if they weren't requested or among the top
        # nelements. The reason to do it twice is to ensure that the colours
        # are stacked appropriately, e.g. all 'other' are together
        other_species_lams = []
        other_species_weights = []
        for zi in values_to_compare:
            # zi is the unique 4-digit code for the species in the model
            # determining the atomic and ion numbers for all ions in our model
            if self._species_list is not None:
                ion_number = zi % 100
                atomic_number = (zi - ion_number) / 100
            else:
                atomic_number = zi
            # if the ion is not included in our list for the colourbar, then
            # its contribution is added here to the miscellaneous grey shaded
            # region of the plot
            if zi not in self._elements_in_kromer_plot[:, 0]:
                # if species_list is given then use the atomic number and
                # ion_number to perform the masking
                if self._species_list is not None:
                    mask = (
                        self.line_out_infos.atomic_number.values == atomic_number
                    ) & (self.line_out_infos.ion_number.values == ion_number)
                else:
                    # otherwise only elements are plotted, so only use the
                    # atomic number
                    mask = self.line_out_infos.atomic_number.values == atomic_number
                other_species_lams += (csts.c.cgs / (self.line_out_nu[mask])).to(units.AA).value.tolist()
                other_species_weights += (self.line_out_L[mask] / self.mdl.time_of_simulation).value.tolist()
        # re-attach units stripped for list accumulation above
        other_species_lams = other_species_lams * units.AA
        other_species_weights = other_species_weights * units.erg / units.s
        lams.append(other_species_lams)
        weights.append(other_species_weights)
        colors.append("silver")
        ii = 0
        # this is a variable that will allow for situations where elements and
        # ions are requested in the same list this will ensure that any ions
        # for a requested element will all be coloured the same
        previous_atomic_number = 0
        for zi in values_to_compare:
            # zi is the unique 4-digit code for the species in the model
            # determining the atomic and ion numbers for all ions in our model
            if self._species_list is not None:
                ion_number = zi % 100
                atomic_number = (zi - ion_number) / 100
            else:
                atomic_number = zi
            # if the ion is included in our list for the colourbar, then its
            # contribution is added here as a colour to the plot
            if zi in self._elements_in_kromer_plot[:, 0]:
                # if this is the first ion, don't update the colour
                if (previous_atomic_number == 0):
                    ii = ii
                    previous_atomic_number = atomic_number
                elif atomic_number in self.keep_colour:
                    # if this ion is grouped into an element, check whether
                    # this is the first ion of that element to occur if it is,
                    # then update the colour. If it isn't then don't update the
                    # colour
                    if previous_atomic_number == atomic_number:
                        ii = ii
                        previous_atomic_number = atomic_number
                    else:
                        ii = ii +1
                        previous_atomic_number = atomic_number
                else:
                    ii = ii + 1
                    previous_atomic_number = atomic_number
                if self._species_list is not None:
                    mask = (
                        self.line_out_infos.atomic_number.values == atomic_number
                    ) & (self.line_out_infos.ion_number.values == ion_number)
                else:
                    mask = self.line_out_infos.atomic_number.values == atomic_number
                lams.append((csts.c.cgs / (self.line_out_nu[mask])).to(units.AA))
                weights.append(self.line_out_L[mask] / self.mdl.time_of_simulation)
                colors.append(self.cmap(float(ii) / float(self._nelements)))
        # total luminosity inside the binned range; used to undo the
        # density normalisation of the histogram below
        Lnorm = 0
        for w, lam in zip(weights, lams):
            Lnorm += np.sum(w[(lam >= self.bins[0]) * (lam <= self.bins[-1])])
        lams = [tmp_lam.value for tmp_lam in lams]
        weights = [tmp_wt.value for tmp_wt in weights]
        ret = self.ax.hist(
            lams,
            bins=self.bins.value,
            stacked=True,
            histtype="stepfilled",
            density=True,
            weights=weights,
        )
        # recolour the stacked patches and rescale from density to
        # luminosity units
        for i, col in enumerate(ret[-1]):
            for reti in col:
                reti.set_facecolor(colors[i])
                reti.set_edgecolor(colors[i])
                reti.set_linewidth(0)
                reti.xy[:, 1] *= Lnorm.to("erg / s").value
        self.ax.plot(
            self.mdl.spectrum_wave,
            self.mdl.spectrum_luminosity,
            color="blue",
            drawstyle="steps-post",
            lw=0.5,
        )
def _generate_photosphere_part(self):
"""generate the photospheric input spectrum part of the Kromer plot"""
Lph = (
abb.blackbody_lambda(self.mdl.spectrum_wave, self.mdl.t_inner)
* 4
* np.pi ** 2
* self.mdl.R_phot ** 2
* units.sr
).to("erg / (AA s)")
self.ax.plot(self.mdl.spectrum_wave, Lph, color="red", ls="dashed")
    def _generate_absorption_part(self):
        """Generate the absorption part of the Kromer plot.

        Builds a stacked histogram on ``self.pax`` of the wavelengths at
        which packets were last absorbed, grouped by species.  Species not
        selected for the colourbar are lumped into one silver "other
        species" group; each remaining species gets a colour from
        ``self.cmap``.  The histogram is rescaled to a negative luminosity
        so it is drawn downwards, mirroring the emission panel.
        """
        # Per-group histogram inputs: wavelength samples, luminosity
        # weights, and the facecolour used for each stacked layer.
        lams = []
        weights = []
        colors = []
        # Species codes present within the plotted wavelength range.
        if self._species_list is not None:
            values_to_compare = np.unique(
                self.line_in_and_out_infos_within_xlims.ion_id.values,
                return_counts=False,
            )
        else:
            values_to_compare = np.unique(
                self.line_in_and_out_infos_within_xlims.atomic_number.values,
                return_counts=False,
            )
        # First pass: accumulate every species NOT shown in the colourbar
        # into a single miscellaneous group.
        other_species_lams = []
        other_species_weights = []
        for zi in values_to_compare:
            # zi is the unique 4-digit code for the species in the model
            # determining the atomic and ion numbers for all ions in our model
            if self._species_list is not None:
                ion_number = zi % 100
                atomic_number = (zi - ion_number) / 100
            else:
                atomic_number = zi
            # if the ion is not included in our list for the colourbar, then
            # its contribution is added here to the miscellaneous grey shaded
            # region of the plot
            if zi not in self._elements_in_kromer_plot[:, 0]:
                # NOTE(review): the mask is built from line_out_infos but is
                # applied to the line_in_* arrays below; this assumes both
                # tables are aligned row-for-row — confirm upstream.
                if self._species_list is not None:
                    mask = (
                        self.line_out_infos.atomic_number.values == atomic_number
                    ) & (self.line_out_infos.ion_number.values == ion_number)
                else:
                    mask = self.line_out_infos.atomic_number.values == atomic_number
                # Convert absorbed-packet frequencies to wavelengths, and
                # luminosities to time-averaged weights (plain floats).
                other_species_lams += (csts.c.cgs / (self.line_in_nu[mask])).to(units.AA).value.tolist()
                other_species_weights += (self.line_in_L[mask] / self.mdl.time_of_simulation).value.tolist()
        # Re-attach units to the accumulated plain-float lists.
        other_species_lams = other_species_lams * units.AA
        other_species_weights = other_species_weights * units.erg / units.s
        lams.append(other_species_lams)
        weights.append(other_species_weights)
        colors.append("silver")
        # Second pass: one coloured group per colourbar species.  `ii`
        # indexes the colour and only advances when a species gets its own
        # colour (ions of a keep_colour element share one colour).
        ii = 0
        previous_atomic_number = 0
        for zi in values_to_compare:
            # zi is the unique 4-digit code for the species in the model
            # determining the atomic and ion numbers for all ions in our model
            if self._species_list is not None:
                ion_number = zi % 100
                atomic_number = (zi - ion_number) / 100
            else:
                atomic_number = zi
            # if the ion is included in our list for the colourbar, then its
            # contribution is added here as a unique colour to the plot
            if zi in self._elements_in_kromer_plot[:, 0]:
                # if this is the first ion, don't update the colour
                if (previous_atomic_number == 0):
                    ii = ii
                    previous_atomic_number = atomic_number
                elif atomic_number in self.keep_colour:
                    # if this ion is grouped into an element, check whether
                    # this is the first ion of that element to occur if it is,
                    # then update the colour. If it isn't then don't update the
                    # colour
                    if previous_atomic_number == atomic_number:
                        ii = ii
                        previous_atomic_number = atomic_number
                    else:
                        ii = ii +1
                        previous_atomic_number = atomic_number
                else:
                    ii = ii + 1
                    previous_atomic_number = atomic_number
                # NOTE(review): as above, mask comes from line_out_infos but
                # indexes the line_in_* arrays — verify alignment.
                if self._species_list is not None:
                    mask = (
                        self.line_out_infos.atomic_number.values == atomic_number
                    ) & (self.line_out_infos.ion_number.values == ion_number)
                else:
                    mask = self.line_out_infos.atomic_number.values == atomic_number
                lams.append((csts.c.cgs / (self.line_in_nu[mask])).to(units.AA))
                weights.append(self.line_in_L[mask] / self.mdl.time_of_simulation)
                colors.append(self.cmap(float(ii) / float(self._nelements)))
        # Accumulate the normalisation with a minus sign so the rescaled
        # histogram points downwards (absorption mirrors emission).
        Lnorm = 0
        for w, lam in zip(weights, lams):
            Lnorm -= np.sum(w[(lam >= self.bins[0]) * (lam <= self.bins[-1])])
        # Strip units: matplotlib's hist wants plain float arrays.
        lams = [tmp_l.value for tmp_l in lams]
        weights = [tmp_wt.value for tmp_wt in weights]
        ret = self.pax.hist(
            lams,
            bins=self.bins.value,
            stacked=True,
            histtype="stepfilled",
            density=True,
            weights=weights,
        )
        # Recolour each stacked layer and rescale the normalised (density)
        # histogram back to an absolute, negative luminosity scale.
        for i, col in enumerate(ret[-1]):
            for reti in col:
                reti.set_facecolor(colors[i])
                reti.set_edgecolor(colors[i])
                reti.set_linewidth(0)
                reti.xy[:, 1] *= Lnorm.to("erg / s").value
def _generate_and_add_colormap(self):
"""generate the custom color map, linking colours with atomic
numbers"""
values = [
self.cmap(float(i) / float(self._nelements)) for i in range(self._nelements)
]
custcmap = matplotlib.colors.ListedColormap(values)
bounds = np.arange(self._nelements) + 0.5
norm = matplotlib.colors.Normalize(vmin=0, vmax=self._nelements)
mappable = cm.ScalarMappable(norm=norm, cmap=custcmap)
mappable.set_array(np.linspace(1, self.zmax + 1, 256))
# if a species_list has been specified...
if self._species_list is not None:
labels = []
for zi in self._elements_in_kromer_plot:
ion_number = zi[0] % 100
atomic_number = (zi[0] - ion_number) / 100
ion_numeral = int_to_roman(ion_number + 1)
# using elements dictionary to get atomic symbol for the
# species
atomic_symbol = inv_elements[atomic_number].capitalize()
# if the element was requested, and not a specific ion, then
# add the element symbol to the label list
if (atomic_number in self.keep_colour) & (atomic_symbol not in labels):
# compiling the label, and adding it to the list
label = f"{atomic_symbol}"
labels.append(label)
elif atomic_number not in self.keep_colour:
# otherwise add the ion to the label list
label = f"{atomic_symbol}$\,${ion_numeral}"
labels.append(label)
else:
# if no species_list specified, generate the labels this way
labels = [
inv_elements[zi].capitalize()
for zi in self._elements_in_kromer_plot[:, 0]
]
mainax = self.ax
cbar = plt.colorbar(mappable, ax=mainax)
cbar.set_ticks(bounds)
cbar.set_ticklabels(labels)
def _generate_and_add_legend(self):
"""add legend"""
spatch = patches.Patch(color="silver", label="Other species")
gpatch = patches.Patch(color="grey", label="e-scattering")
bpatch = patches.Patch(color="black", label="Photosphere")
bline = lines.Line2D([], [], color="blue", label="Virtual spectrum")
phline = lines.Line2D(
[], [], color="red", ls="dashed", label="L at photosphere"
)
self.ax.legend(handles=[phline, bline, spatch, gpatch, bpatch])
    def _axis_handling_label_rescale(self):
        """Set axis labels and apply the stored x/y limits to both axes."""
        # Main (emission) axis y-limits: autoscale unless fixed by the user.
        if self.ylim is None:
            self.ax.autoscale(axis="y")
        else:
            self.ax.set_ylim(self.ylim)
        # Remember the limits actually chosen by matplotlib.
        self._ylim = self.ax.get_ylim()
        if self.xlim is None:
            self.ax.autoscale(axis="x")
        else:
            self.ax.set_xlim(self.xlim)
        self._xlim = self.ax.get_xlim()
        # Mirror the limits onto the absorption axis (drawn downwards).
        # NOTE(review): this indexes self.ylim/self.xlim directly; if those
        # are plain attributes rather than properties backed by
        # _ylim/_xlim, the autoscale branches above leave them None and
        # this would raise — confirm against the class definition.
        if self.twinx:
            self.pax.set_ylim([-self.ylim[-1], -self.ylim[0]])
            self.pax.set_yticklabels([])
        else:
            self.pax.set_ylim([-self.ylim[-1], self.ylim[-1]])
            self.pax.set_xlim(self.xlim)
        self.ax.set_xlabel(r"$\lambda$ [$\mathrm{\AA}$]")
        ylabel = r"$L_{\mathrm{\lambda}}$ [$\mathrm{erg\,s^{-1}\,\AA^{-1}}$]"
        self.ax.set_ylabel(ylabel)
| {
"repo_name": "tardis-sn/tardisanalysis",
"path": "tardis_kromer_plot.py",
"copies": "1",
"size": "37750",
"license": "bsd-3-clause",
"hash": 5773500574254749000,
"line_mean": 37.4811416922,
"line_max": 128,
"alpha_frac": 0.5572980132,
"autogenerated": false,
"ratio": 4.0499946357686945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5107292648968694,
"avg_score": null,
"num_lines": null
} |
#A simple program illustration chaotic behavior.
#Example from Python programing
def showGrid(n, firstForm, secondForm, firstDisplay, secondDisplay):
    """Print n rows of the two result sequences side by side under a header."""
    print("formula " + str(firstForm) + " " + str(secondForm))
    print("------------------------------")
    for row in range(n):
        # One line per iteration: left column from the first formula,
        # right column from the second.
        print(" " + str(firstDisplay[row]) + " " + str(secondDisplay[row]))
def exp1(x, n):
    """Iterate x -> 3.9 * x * (1 - x) n times and return the successive values."""
    print("We will use formula 3.9 * x * (1 - x)")
    values = []
    step = 0
    while step < n:
        x = 3.9 * x * (1 - x)
        values.append(x)
        step += 1
    return values
def exp2(x, n):
    """Iterate x -> 3.9 * (x - x * x) n times, returning the list of iterates."""
    print("We will use formula 3.9 * (x - x * x)")
    history = []
    for _ in range(n):
        x = 3.9 * (x - x * x)
        history.append(x)
    return history
def exp3(x, n):
    """Iterate x -> 3.9 * x - 3.9 * x * x n times, returning every iterate."""
    print("We will use formula 3.9 * x - 3.9 * x * x")
    trajectory = []
    current = x
    for _ in range(n):
        current = 3.9 * current - 3.9 * current * current
        trajectory.append(current)
    return trajectory
def main():
    """Ask the user for a seed value, an iteration count and two formula
    choices, run both chaotic formulas and print the results side by side."""
    print("this program illustrates a chaotic function")
    # float()/int() instead of eval(): never execute raw user input
    # (eval on input() runs arbitrary Python — a security hole).
    x = float(input("Enter a number between 0 and 1: "))
    n = int(input("How many numbers should I print? "))
    num1 = int(input("Choose the first algebraic formula would you like to use? Choose 1, 2, or 3: "))
    num2 = int(input("Choose the second algebraic formula would you like to use? Choose 1, 2, or 3: "))
    # Dispatch table replaces the old if/elif ladder, which silently mapped
    # equal choices (e.g. 1 and 1) to formulas 3 and 2.  Out-of-range
    # choices keep the old fallback behaviour (exp3 / exp2).
    formulas = {1: exp1, 2: exp2, 3: exp3}
    ans1 = formulas.get(num1, exp3)(x, n)
    ans2 = formulas.get(num2, exp2)(x, n)
    showGrid(n, num1, num2, ans1, ans2)
main() #call main function | {
"repo_name": "src053/violentPy",
"path": "chap1/chaosmulti.py",
"copies": "2",
"size": "2470",
"license": "cc0-1.0",
"hash": -2581088859241928700,
"line_mean": 34.8115942029,
"line_max": 163,
"alpha_frac": 0.6356275304,
"autogenerated": false,
"ratio": 2.7353266888150607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4370954219215061,
"avg_score": null,
"num_lines": null
} |
# A simple program that implements the solution to the phrase generation problem using
# genetic algorithms as given in the search.ipynb notebook.
#
# Type on the home screen to change the target phrase
# Click on the slider to change genetic algorithm parameters
# Click 'GO' to run the algorithm with the specified variables
# Displays best individual of the current generation
# Displays a progress bar that indicates the amount of completion of the algorithm
# Displays the first few individuals of the current generation
import os.path
import sys

from tkinter import *
from tkinter import ttk

import search
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
LARGE_FONT = ('Verdana', 12)
EXTRA_LARGE_FONT = ('Consolas', 36, 'bold')
canvas_width = 800
canvas_height = 600
black = '#000000'
white = '#ffffff'
p_blue = '#042533'
lp_blue = '#0c394c'
# genetic algorithm variables
# feel free to play around with these
target = 'Genetic Algorithm' # the phrase to be generated
max_population = 100 # number of samples in each population
mutation_rate = 0.1 # probability of mutation
f_thres = len(target) # fitness threshold
ngen = 1200 # max number of generations to run the genetic algorithm
generation = 0 # counter to keep track of generation number
u_case = [chr(x) for x in range(65, 91)] # list containing all uppercase characters
l_case = [chr(x) for x in range(97, 123)] # list containing all lowercase characters
punctuations1 = [chr(x) for x in range(33, 48)] # lists containing punctuation symbols
punctuations2 = [chr(x) for x in range(58, 65)]
punctuations3 = [chr(x) for x in range(91, 97)]
numerals = [chr(x) for x in range(48, 58)] # list containing numbers
# extend the gene pool with the required lists and append the space character
gene_pool = []
gene_pool.extend(u_case)
gene_pool.extend(l_case)
gene_pool.append(' ')
# callbacks to update global variables from the slider values
def update_max_population(slider_value):
    # Slider callback: store the chosen population size in the
    # module-level variable read when the GA is (re)started.
    global max_population
    max_population = slider_value
def update_mutation_rate(slider_value):
    # Slider callback: store the chosen mutation probability in the
    # module-level variable used by the GA loop.
    global mutation_rate
    mutation_rate = slider_value
def update_f_thres(slider_value):
    # Slider callback: store the fitness threshold at which the GA stops.
    global f_thres
    f_thres = slider_value
def update_ngen(slider_value):
    # Slider callback: store the maximum number of generations to run.
    global ngen
    ngen = slider_value
# fitness function
def fitness_fn(_list):
    """Count positions where the candidate phrase matches the target.

    `_list` is a list of single characters (one GA individual); the score
    is the number of characters agreeing with the module-level `target`
    at the same index.
    """
    # create string from list of characters
    phrase = ''.join(_list)
    # zip truncates at the shorter sequence, so a malformed (shorter or
    # longer) individual no longer raises IndexError as the old indexed
    # loop could; extra characters simply score zero.
    return sum(1 for want, got in zip(target, phrase) if want == got)
# function to bring a new frame on top
def raise_frame(frame, init=False, update_target=False, target_entry=None, f_thres_slider=None):
    # Bring `frame` to the top of the stacked frames.
    frame.tkraise()
    global target
    if update_target and target_entry is not None:
        # Refresh the module-level target phrase from the entry widget and
        # resize the fitness-threshold slider to the new phrase length.
        target = target_entry.get()
        f_thres_slider.config(to=len(target))
    if init:
        # Seed a fresh population and start the stepwise GA animation.
        population = search.init_population(max_population, gene_pool, len(target))
        genetic_algorithm_stepwise(population)
# defining root and child frames
root = Tk()
f1 = Frame(root)
f2 = Frame(root)
# pack frames on top of one another
for frame in (f1, f2):
frame.grid(row=0, column=0, sticky='news')
# Home Screen (f1) widgets
target_entry = Entry(f1, font=('Consolas 46 bold'), exportselection=0, foreground=p_blue, justify=CENTER)
target_entry.insert(0, target)
target_entry.pack(expand=YES, side=TOP, fill=X, padx=50)
target_entry.focus_force()
max_population_slider = Scale(f1, from_=3, to=1000, orient=HORIZONTAL, label='Max population',
command=lambda value: update_max_population(int(value)))
max_population_slider.set(max_population)
max_population_slider.pack(expand=YES, side=TOP, fill=X, padx=40)
mutation_rate_slider = Scale(f1, from_=0, to=1, orient=HORIZONTAL, label='Mutation rate', resolution=0.0001,
command=lambda value: update_mutation_rate(float(value)))
mutation_rate_slider.set(mutation_rate)
mutation_rate_slider.pack(expand=YES, side=TOP, fill=X, padx=40)
f_thres_slider = Scale(f1, from_=0, to=len(target), orient=HORIZONTAL, label='Fitness threshold',
command=lambda value: update_f_thres(int(value)))
f_thres_slider.set(f_thres)
f_thres_slider.pack(expand=YES, side=TOP, fill=X, padx=40)
ngen_slider = Scale(f1, from_=1, to=5000, orient=HORIZONTAL, label='Max number of generations',
command=lambda value: update_ngen(int(value)))
ngen_slider.set(ngen)
ngen_slider.pack(expand=YES, side=TOP, fill=X, padx=40)
button = ttk.Button(f1, text='RUN',
command=lambda: raise_frame(f2, init=True, update_target=True, target_entry=target_entry,
f_thres_slider=f_thres_slider)).pack(side=BOTTOM, pady=50)
# f2 widgets
canvas = Canvas(f2, width=canvas_width, height=canvas_height)
canvas.pack(expand=YES, fill=BOTH, padx=20, pady=15)
button = ttk.Button(f2, text='EXIT', command=lambda: raise_frame(f1)).pack(side=BOTTOM, pady=15)
# function to run the genetic algorithm and update text on the canvas
def genetic_algorithm_stepwise(population):
    """Run the GA one generation at a time, redrawing the canvas after
    each generation; returns to the home frame when done."""
    root.title('Genetic Algorithm')
    for generation in range(ngen):
        # generating new population after selecting, recombining and mutating the existing population
        population = [
            search.mutate(search.recombine(*search.select(2, population, fitness_fn)), gene_pool, mutation_rate) for i
            in range(len(population))]
        # genome with the highest fitness in the current generation
        current_best = ''.join(max(population, key=fitness_fn))
        # collecting first few examples from the current population
        members = [''.join(x) for x in population][:48]
        # clear the canvas
        canvas.delete('all')
        # displays current best on top of the screen
        canvas.create_text(canvas_width / 2, 40, fill=p_blue, font='Consolas 46 bold', text=current_best)
        # displaying a part of the population on the screen
        # (three columns of up to 16 members each)
        for i in range(len(members) // 3):
            canvas.create_text((canvas_width * .175), (canvas_height * .25 + (25 * i)), fill=lp_blue,
                               font='Consolas 16', text=members[3 * i])
            canvas.create_text((canvas_width * .500), (canvas_height * .25 + (25 * i)), fill=lp_blue,
                               font='Consolas 16', text=members[3 * i + 1])
            canvas.create_text((canvas_width * .825), (canvas_height * .25 + (25 * i)), fill=lp_blue,
                               font='Consolas 16', text=members[3 * i + 2])
        # displays current generation number
        canvas.create_text((canvas_width * .5), (canvas_height * 0.95), fill=p_blue, font='Consolas 18 bold',
                           text=f'Generation {generation}')
        # displays blue bar that indicates current maximum fitness compared to maximum possible fitness
        scaling_factor = fitness_fn(current_best) / len(target)
        canvas.create_rectangle(canvas_width * 0.1, 90, canvas_width * 0.9, 100, outline=p_blue)
        canvas.create_rectangle(canvas_width * 0.1, 90, canvas_width * 0.1 + scaling_factor * canvas_width * 0.8, 100,
                                fill=lp_blue)
        canvas.update()
        # checks for completion: stop early once an individual reaches
        # the fitness threshold chosen on the home screen
        fittest_individual = search.fitness_threshold(fitness_fn, f_thres, population)
        if fittest_individual:
            break
    raise_frame(f1)
# Enter the Tk event loop; blocks here until the window is closed.
root.mainloop()
| {
"repo_name": "Chipe1/aima-python",
"path": "gui/genetic_algorithm_example.py",
"copies": "2",
"size": "7497",
"license": "mit",
"hash": 8644554138786683000,
"line_mean": 38.6666666667,
"line_max": 118,
"alpha_frac": 0.6733360011,
"autogenerated": false,
"ratio": 3.523026315789474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5196362316889473,
"avg_score": null,
"num_lines": null
} |
# A simple program that resembles the falling of stars or snow on a screen
# Coded in Python 2.7.10 with PyGame
# by Brett Burley-Inners :: 11/7/2015
import pygame, time, random, sys
pygame.init()
# Default dimensions of the game window (px)
display_width = 1280
display_height = 720
# Create a canvas to display the game on
gameScreen = pygame.display.set_mode((display_width, display_height))
# Title of the game Window
pygame.display.set_caption('Falling Stars')
# Class that creates a star object
class Star:
    """A square that drifts downward from the top of the screen."""

    def __init__(self, starSize, xCoordinate, yCoordinate, starColor, fallSpeed, fallDirection):
        self.starSize = starSize
        self.xCoordinate = xCoordinate
        self.yCoordinate = yCoordinate
        self.starColor = starColor
        self.fallSpeed = fallSpeed
        self.fallDirection = fallDirection

    def fall(self):
        """Advance one frame: move, draw, self-remove once below the screen."""
        # fallSpeed carries the star down; fallDirection adds sideways drift.
        self.yCoordinate = self.yCoordinate + self.fallSpeed
        self.xCoordinate = self.xCoordinate + self.fallDirection
        rect = [self.xCoordinate, self.yCoordinate, self.starSize, self.starSize]
        pygame.draw.rect(gameScreen, self.starColor, rect)
        if self.yCoordinate > display_height:
            fallingStars.remove(self)
# Class that creates a star object
class upStar:
    """A square that rises from the bottom of the screen toward the top."""

    def __init__(self, starSize, xCoordinate, yCoordinate, starColor, fallSpeed, fallDirection):
        self.starSize = starSize
        self.xCoordinate = xCoordinate
        self.yCoordinate = yCoordinate
        self.starColor = starColor
        self.fallSpeed = fallSpeed
        self.fallDirection = fallDirection

    def fall(self):
        """Advance one frame: move upward, draw, self-remove above the screen."""
        # fallSpeed carries the star up; fallDirection adds sideways drift.
        self.yCoordinate = self.yCoordinate - self.fallSpeed
        self.xCoordinate = self.xCoordinate + self.fallDirection
        rect = [self.xCoordinate, self.yCoordinate, self.starSize, self.starSize]
        pygame.draw.rect(gameScreen, self.starColor, rect)
        if self.yCoordinate < 0:
            fallingStars.remove(self)
# Class that creates a star object
class lStar:
    """A square that streams from the right edge of the screen to the left."""

    def __init__(self, starSize, xCoordinate, yCoordinate, starColor, fallSpeed, fallDirection):
        self.starSize = starSize
        self.xCoordinate = xCoordinate
        self.yCoordinate = yCoordinate
        self.starColor = starColor
        self.fallSpeed = fallSpeed
        self.fallDirection = fallDirection

    def fall(self):
        """Advance one frame: move leftward, draw, self-remove off the left edge."""
        # fallSpeed carries the star left; fallDirection adds vertical drift.
        self.xCoordinate = self.xCoordinate - self.fallSpeed
        self.yCoordinate = self.yCoordinate + self.fallDirection
        rect = [self.xCoordinate, self.yCoordinate, self.starSize, self.starSize]
        pygame.draw.rect(gameScreen, self.starColor, rect)
        if self.xCoordinate < 0:
            fallingStars.remove(self)
# Class that creates a star object
class rStar:
    """A square that streams from the left edge of the screen to the right."""

    def __init__(self, starSize, xCoordinate, yCoordinate, starColor, fallSpeed, fallDirection):
        self.starSize = starSize
        self.xCoordinate = xCoordinate
        self.yCoordinate = yCoordinate
        self.starColor = starColor
        self.fallSpeed = fallSpeed
        self.fallDirection = fallDirection

    def fall(self):
        """Advance one frame: move rightward, draw, self-remove off the right edge."""
        # fallSpeed carries the star right; fallDirection adds vertical drift.
        self.xCoordinate = self.xCoordinate + self.fallSpeed
        self.yCoordinate = self.yCoordinate + self.fallDirection
        rect = [self.xCoordinate, self.yCoordinate, self.starSize, self.starSize]
        pygame.draw.rect(gameScreen, self.starColor, rect)
        if self.xCoordinate > display_width:
            fallingStars.remove(self)
# Colors
white = (255, 255, 255)
darkGray = (50, 50, 50)
darkerGray = (25, 25, 25)
darkestGray = (10, 10, 10)
lightGray = (150, 150, 150)
rLightGray = (200, 200, 200)
rrLightGray = (220, 220, 220)
black = (0, 0, 0)
red = (245, 0, 0)
darkRed = (150, 0, 0)
green = (0, 235, 0)
darkGreen = (0, 150, 0)
lightBlue = (55, 210, 225)
blue = (0, 0, 215)
darkBlue = (0, 0, 115)
pink = (225, 55, 135)
# List of colors
colorList = []
colorList.append(darkerGray)
colorList.append(darkestGray)
colorList.append(lightGray)
colorList.append(rLightGray)
colorList.append(rrLightGray)
colorList.append(lightBlue)
# Clock and FPS stuff
clock = pygame.time.Clock()
# List to maintain star objects
fallingStars = []
# variables for the while loop... 1's and 0's work too
starFall = True
makeStars = True
# Main loop for the falling star effect.
# (The large blocks of commented-out duplicate spawn lines were dead code
# and have been removed.)
while starFall:
    # Cap the frame rate at 60 FPS.
    clock.tick(60)
    # Handle the window close ('x') button.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            starFall = False
            sys.exit()
    # Background is drawn first each frame, stars on top of it.
    gameScreen.fill(darkGray)
    if makeStars:
        # Spawn two stars per frame from each edge of the screen.
        # Downward-falling stars enter just above the top edge.
        fallingStars.append(Star(random.randrange(1, 20), random.randrange(1, display_width), -5, colorList[random.randrange(0, 6)], random.randrange(1, 10), random.randrange(-3, 3)))
        fallingStars.append(Star(random.randrange(1, 20), random.randrange(1, display_width), -5, colorList[random.randrange(0, 6)], random.randrange(1, 10), random.randrange(-2, 2)))
        # Upward-moving stars enter just below the bottom edge.
        fallingStars.append(upStar(random.randrange(1, 20), random.randrange(1, display_width), display_height + 5, colorList[random.randrange(0, 6)], random.randrange(1, 10), random.randrange(-3, 3)))
        fallingStars.append(upStar(random.randrange(1, 20), random.randrange(1, display_width), display_height + 5, colorList[random.randrange(0, 6)], random.randrange(1, 10), random.randrange(-2, 2)))
        # Leftward stars enter just past the right edge.
        fallingStars.append(lStar(random.randrange(1, 20), display_width + 5, random.randrange(1, display_height), colorList[random.randrange(0, 6)], random.randrange(1, 10), random.randrange(-2, 2)))
        fallingStars.append(lStar(random.randrange(1, 20), display_width + 5, random.randrange(1, display_height), colorList[random.randrange(0, 6)], random.randrange(1, 10), random.randrange(-2, 2)))
        # Rightward stars enter just past the left edge.
        fallingStars.append(rStar(random.randrange(1, 20), -5, random.randrange(1, display_height), colorList[random.randrange(0, 6)], random.randrange(1, 10), random.randrange(-2, 2)))
        fallingStars.append(rStar(random.randrange(1, 20), -5, random.randrange(1, display_height), colorList[random.randrange(0, 6)], random.randrange(1, 10), random.randrange(-2, 2)))
    # Advance and draw every star.  Iterate over a snapshot: fall() removes
    # off-screen stars from fallingStars, and mutating a list while
    # iterating it directly silently skips the next element.
    for star in fallingStars[:]:
        star.fall()
    # Safety valve so the list cannot grow without bound.
    if len(fallingStars) > 10000:
        del fallingStars[0]
    # draw the frame
    pygame.display.update()
# That's all, folks!
| {
"repo_name": "burleyinnersbm07/python_fallingStars",
"path": "FallingStars.py",
"copies": "1",
"size": "16995",
"license": "mit",
"hash": -6557350294473700000,
"line_mean": 60.3537906137,
"line_max": 202,
"alpha_frac": 0.6955575169,
"autogenerated": false,
"ratio": 3.296799224054316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9459574046002938,
"avg_score": 0.006556538990275649,
"num_lines": 277
} |
# A simple program that resembles the falling of stars or snow on a screen
# Coded in Python 2.7.10 with PyGame
# by Brett Burley-Inners :: 11/7/2015
import pygame, time, random, sys
pygame.init()
# Default dimensions of the game window (px) test
display_width = 320
display_height = 240
# Create a canvas to display the game on
gameScreen = pygame.display.set_mode((display_width, display_height))
# Title of the game Window
pygame.display.set_caption('Avoid the Falling Stuff')
# This is the player. He is a square. #sadlife
class Player:
    """The player's avatar: a colored square pinned near the bottom of the screen.

    Knows how to paint itself, report its geometry, and tell whether it is
    touching the left or right edge of the window.
    """
    def __init__(self, playerSize, xPosition, yPosition, playerColor, display_width):
        self.xPosition = xPosition
        self.yPosition = yPosition
        self.playerSize = playerSize
        self.playerColor = playerColor
        self.display_width = display_width
        self._draw()
    def _draw(self):
        # Paint the player square at its current position.
        pygame.draw.rect(gameScreen, self.playerColor,
                         [self.xPosition, self.yPosition, self.playerSize, self.playerSize])
    def getPlayerSize(self):
        """Side length of the square, in pixels."""
        return self.playerSize
    def getPlayerX(self):
        """Current left edge of the square."""
        return self.xPosition
    def getPlayerY(self):
        """Current top edge of the square."""
        return self.yPosition
    def redrawPlayer(self, newXPosition):
        """Move the player horizontally and repaint it."""
        self.xPosition = newXPosition
        self._draw()
    def isOverLeftBound(self):
        """True when the square touches or passes the left window edge."""
        return self.xPosition <= 0
    def isOverRightBound(self):
        """True when the square touches or passes the right window edge."""
        return self.xPosition >= self.display_width - self.playerSize
# Class that creates a star object
class Star:
    """A falling square obstacle.

    Each frame a star moves down by `fallSpeed` and sideways by
    `fallDirection`; once it drops below the bottom of the window it removes
    itself from the global `fallingStars` list and bumps its score counter.
    """
    def __init__(self, starSize, xCoordinate, yCoordinate, starColor, fallSpeed, fallDirection, score):
        self.starSize = starSize
        self.xCoordinate = xCoordinate
        self.yCoordinate = yCoordinate
        self.starColor = starColor
        self.fallSpeed = fallSpeed          # vertical pixels per frame
        self.fallDirection = fallDirection  # horizontal drift per frame
        # NOTE(review): the `score` argument is accepted but intentionally not
        # stored (the caller passes the running game total); every star starts
        # its own counter at 0 so returnScore() reports only this star.
        self.score = 0
    def fall(self):
        """Advance one frame: move, repaint, and self-remove once off-screen."""
        self.yCoordinate += self.fallSpeed
        self.xCoordinate += self.fallDirection
        pygame.draw.rect(gameScreen, self.starColor, [self.xCoordinate, self.yCoordinate, self.starSize, self.starSize])
        if self.yCoordinate > display_height:
            # Mutates the global list while the main loop iterates it -- kept
            # as-is to preserve behavior; the counter pays out exactly once.
            fallingStars.remove(self)
            self.score += 1
    def returnScore(self):
        """Return 1 once this star has fallen off the bottom, else 0."""
        return self.score
    def collideWithPlayer(self, objectX, objectY, objectSize):
        """Return True when this star's square overlaps the given square.

        Fix: the original returned None (not False) whenever the vertical
        ranges did not overlap, and duplicated a redundant "fully inside"
        check; this version always returns a bool with identical truthiness.
        """
        overlaps_y = (self.yCoordinate + self.starSize >= objectY
                      and self.yCoordinate <= objectY + objectSize)
        overlaps_x = (self.xCoordinate <= objectX + objectSize
                      and self.xCoordinate + self.starSize >= objectX)
        return overlaps_y and overlaps_x
# --- One-time game setup: fonts, palette, clock, and initial state. ---
font = pygame.font.SysFont(None, 25)
# Colors
white = (255, 255, 255)
darkGray = (50, 50, 50)
darkerGray = (25, 25, 25)
darkestGray = (10, 10, 10)
lightGray = (150, 150, 150)
rLightGray = (200, 200, 200)
rrLightGray = (220, 220, 220)
black = (0, 0, 0)
red = (245, 0, 0)
darkRed = (150, 0, 0)
green = (0, 235, 0)
darkGreen = (0, 150, 0)
lightBlue = (55, 210, 225)
blue = (0, 0, 215)
darkBlue = (0, 0, 115)
pink = (225, 55, 135)
# List of colors
# Stars pick their color with randrange(0, 6), so exactly these six entries
# (indexes 0-5) are ever used.
colorList = []
colorList.append(darkerGray)
colorList.append(darkestGray)
colorList.append(lightGray)
colorList.append(rLightGray)
colorList.append(rrLightGray)
colorList.append(lightBlue)
# Game clock
clock = pygame.time.Clock()
# List to maintain star objects
fallingStars = []
clockTickTimer = 0
# Booleans for the game loop(s)
RUNNING = True
makeStars = True
score = 0
xChange = 0
# NOTE(review): under Python 3 this is a float (160.0); pygame accepts it.
xPosition = display_width / 2
size = 20
# Repeat held-down keys: 1ms delay, one KEYDOWN every 5ms.
pygame.key.set_repeat(1, 5)
player = Player(30, xPosition, display_height - 50, pink, display_width)
# Main loop to run the game
while RUNNING:
    # refresh rate of gameScreen (times per second)
    clock.tick(30)
    # make the 'close'/'x' button work
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            starFall = False
            sys.exit()
        if event.type == pygame.KEYDOWN:
            # NOTE(review): isOverLeftBound/isOverRightBound are first assigned
            # further down this loop body, so a key event during the very first
            # frame would raise NameError -- confirm before relying on this.
            if event.key == pygame.K_LEFT and not isOverLeftBound:
                xChange -= 10
                #print("left")
            if event.key == pygame.K_RIGHT and not isOverRightBound:
                xChange += 10
                #print("right")
    # background color, first thing drawn
    gameScreen.fill(darkGray)
    #print(clock.tick())
    # Score readout in the top-left corner (font rebuilt every frame).
    font = pygame.font.SysFont("monospace", 25)
    message = font.render(str(score), True, lightGray)
    gameScreen.blit(message, (15, 15))
    clockTickTimer += 1
    #print (clock.get_fps())
    # Apply this frame's accumulated horizontal movement, then reset it.
    xPosition += xChange
    #print(xPosition)
    player.redrawPlayer(xPosition)
    isOverLeftBound = player.isOverLeftBound()
    isOverRightBound = player.isOverRightBound()
    #print(isOverLeftBound)
    #print(isOverRightBound)
    xChange = 0
    #print(xPosition)
    #print(display_width)
    # loop to constantly generate stars
    # Spawn one star roughly every 21 frames while the game is live.
    if makeStars and clockTickTimer > 20:
        # make a star
        fallingStars.append(Star(random.randrange(1, 20), random.randrange(1, display_width), -5, colorList[random.randrange(0, 6)], random.randrange(1, 2), random.randrange(-1, 2)/2, score))
        clockTickTimer = 0
    # make all of the stars fall
    # NOTE(review): Star.fall() removes off-screen stars from fallingStars
    # while this loop iterates it, which skips the following element; confirm
    # whether that frame-skip is acceptable.
    for i in fallingStars:
        i.fall()
        score += i.returnScore()
        #print(score)
        #print(len(fallingStars))
        # if the list is too big, remove the first item
        # for the computer's sake
        if len(fallingStars) > 10000:
            del fallingStars[0]
        if i.collideWithPlayer(player.getPlayerX(), player.getPlayerY(), player.getPlayerSize()):
            # Collision: stop spawning and clear all stars (ends the round).
            makeStars = False
            del fallingStars[:]
    # refresh/update the screen
    pygame.display.update()
| {
"repo_name": "burleyinnersbm07/DontTouchThat",
"path": "DontTouchThat.py",
"copies": "1",
"size": "6041",
"license": "mit",
"hash": 8681486047025737000,
"line_mean": 28.1835748792,
"line_max": 191,
"alpha_frac": 0.651878828,
"autogenerated": false,
"ratio": 3.506094022054556,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9639097542664432,
"avg_score": 0.0037750614780248134,
"num_lines": 207
} |
"""A simple "pull API" for HTML parsing, after Perl's HTML::TokeParser.
Examples
This program extracts all links from a document. It will print one
line for each link, containing the URL and the textual description
between the <A>...</A> tags:
import pullparser, sys
f = file(sys.argv[1])
p = pullparser.PullParser(f)
for token in p.tags("a"):
if token.type == "endtag": continue
url = dict(token.attrs).get("href", "-")
text = p.get_compressed_text(endat=("endtag", "a"))
print "%s\t%s" % (url, text)
This program extracts the <TITLE> from the document:
import pullparser, sys
f = file(sys.argv[1])
p = pullparser.PullParser(f)
if p.get_tag("title"):
title = p.get_compressed_text()
print "Title: %s" % title
Copyright 2003-2006 John J. Lee <jjl@pobox.com>
Copyright 1998-2001 Gisle Aas (original libwww-perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses.
"""
import re, htmlentitydefs
import sgmllib, HTMLParser
from xml.sax import saxutils
from _html import unescape, unescape_charref
class NoMoreTokensError(Exception): pass
class Token:
    """One parsed HTML event: a tag, declaration, PI, data run, comment,
    or character/entity reference.

    A Token acts both as an iterable triple and as an attribute holder, so
    it compares equal to the tuple (type, data, attrs).

    Public attributes:

    type: one of "starttag", "endtag", "startendtag", "charref",
        "entityref", "data", "comment", "decl", "pi" -- mirroring the
        handler names of HTMLParser.HTMLParser
    data: the element name for tags; otherwise the token's payload text
    attrs: list of (name, value) attribute pairs, or None when the token
        is not an opening tag
    """
    def __init__(self, type, data, attrs=None):
        self.type = type
        self.data = data
        self.attrs = attrs
    def __iter__(self):
        # Yield the triple so tuple(token) and unpacking both work.
        yield self.type
        yield self.data
        yield self.attrs
    def __eq__(self, other):
        kind, payload, attributes = other
        return (self.type == kind and
                self.data == payload and
                self.attrs == attributes)
    def __ne__(self, other): return not self.__eq__(other)
    def __repr__(self):
        return "%s(%r, %r, %r)" % (
            self.__class__.__name__, self.type, self.data, self.attrs)
    def __str__(self):
        """Render the token back into HTML-ish source text.

        Opening tags get their attributes re-serialized with
        saxutils.quoteattr; e.g. an "endtag" for "p" renders as "</p>" and
        a "charref" for "38" renders as "&#38;".
        """
        if self.attrs is not None:
            rendered_attrs = "".join(
                " %s=%s" % (name, saxutils.quoteattr(value))
                for name, value in self.attrs)
        else:
            rendered_attrs = ""
        # Tag-like tokens carry the attribute string; everything else is a
        # simple template keyed by token type.
        if self.type == "starttag":
            return "<%s%s>" % (self.data, rendered_attrs)
        if self.type == "startendtag":
            return "<%s%s />" % (self.data, rendered_attrs)
        templates = {
            "endtag": "</%s>",
            "charref": "&#%s;",
            "entityref": "&%s;",
            "data": "%s",
            "comment": "<!--%s-->",
            "decl": "<!%s>",
            "pi": "<?%s>",
        }
        if self.type in templates:
            return templates[self.type] % self.data
        assert False
def iter_until_exception(fn, exception, *args, **kwds):
    """Yield fn(*args, **kwds) repeatedly until it raises `exception`.

    The generator then ends cleanly.  Fix: the original did
    `raise StopIteration`, which inside a generator is converted to
    RuntimeError under PEP 479 (Python 3.7+); a plain `return` has the
    identical effect on Python 2 and is correct on Python 3.
    """
    while 1:
        try:
            yield fn(*args, **kwds)
        except exception:
            return
class _AbstractParser:
    """Shared pull-API machinery for PullParser/TolerantPullParser.

    Subclasses mix in an event-driven parser (HTMLParser.HTMLParser or
    sgmllib.SGMLParser); its handle_* callbacks below push Token objects
    onto a FIFO that get_token()/get_tag()/get_text() pop on demand.
    """
    # Number of bytes read from the underlying file per feed.
    chunk = 1024
    # Collapses runs of whitespace in get_compressed_text().
    compress_re = re.compile(r"\s+")
    # NOTE(review): the mutable default `textify` dict is shared by every
    # instance constructed without an explicit textify argument -- mutating
    # self.textify in place would affect other parsers.
    def __init__(self, fh, textify={"img": "alt", "applet": "alt"},
                 encoding="ascii", entitydefs=None):
        """
        fh: file-like object (only a .read() method is required) from which to
        read HTML to be parsed
        textify: mapping used by .get_text() and .get_compressed_text() methods
        to represent opening tags as text
        encoding: encoding used to encode numeric character references by
        .get_text() and .get_compressed_text() ("ascii" by default)
        entitydefs: mapping like {"amp": "&", ...} containing HTML entity
        definitions (a sensible default is used). This is used to unescape
        entities in .get_text() (and .get_compressed_text()) and attribute
        values. If the encoding can not represent the character, the entity
        reference is left unescaped. Note that entity references (both
        numeric - e.g. &#123; or &#x7b; - and non-numeric - e.g. &amp;) are
        unescaped in attribute values and the return value of .get_text(), but
        not in data outside of tags. Instead, entity references outside of
        tags are represented as tokens. This is a bit odd, it's true :-/
        If the element name of an opening tag matches a key in the textify
        mapping then that tag is converted to text. The corresponding value is
        used to specify which tag attribute to obtain the text from. textify
        maps from element names to either:
        - an HTML attribute name, in which case the HTML attribute value is
        used as its text value along with the element name in square
        brackets (eg."alt text goes here[IMG]", or, if the alt attribute
        were missing, just "[IMG]")
        - a callable object (eg. a function) which takes a Token and returns
        the string to be used as its text value
        If textify has no key for an element name, nothing is substituted for
        the opening tag.
        Public attributes:
        encoding and textify: see above
        """
        self._fh = fh
        self._tokenstack = []  # FIFO
        self.textify = textify
        self.encoding = encoding
        if entitydefs is None:
            entitydefs = htmlentitydefs.name2codepoint
        self._entitydefs = entitydefs
    def __iter__(self): return self
    def tags(self, *names):
        # Iterator over tag tokens only (optionally restricted to `names`).
        return iter_until_exception(self.get_tag, NoMoreTokensError, *names)
    def tokens(self, *tokentypes):
        # Iterator over all tokens (optionally restricted to `tokentypes`).
        return iter_until_exception(self.get_token, NoMoreTokensError,
                                    *tokentypes)
    def next(self):
        # Python 2 iterator protocol (Python 3 would require __next__).
        try:
            return self.get_token()
        except NoMoreTokensError:
            raise StopIteration()
    def get_token(self, *tokentypes):
        """Pop the next Token object from the stack of parsed tokens.
        If arguments are given, they are taken to be token types in which the
        caller is interested: tokens representing other elements will be
        skipped. Element names must be given in lower case.
        Raises NoMoreTokensError.
        """
        while 1:
            # Drain the FIFO first; refill it by feeding the mixed-in parser
            # another chunk only when the FIFO runs dry.
            while self._tokenstack:
                token = self._tokenstack.pop(0)
                if tokentypes:
                    if token.type in tokentypes:
                        return token
                else:
                    return token
            data = self._fh.read(self.chunk)
            if not data:
                raise NoMoreTokensError()
            self.feed(data)
    def unget_token(self, token):
        """Push a Token back onto the stack."""
        self._tokenstack.insert(0, token)
    def get_tag(self, *names):
        """Return the next Token that represents an opening or closing tag.
        If arguments are given, they are taken to be element names in which the
        caller is interested: tags representing other elements will be skipped.
        Element names must be given in lower case.
        Raises NoMoreTokensError.
        """
        while 1:
            tok = self.get_token()
            if tok.type not in ["starttag", "endtag", "startendtag"]:
                continue
            if names:
                if tok.data in names:
                    return tok
            else:
                return tok
    def get_text(self, endat=None):
        """Get some text.
        endat: stop reading text at this tag (the tag is included in the
        returned text); endtag is a tuple (type, name) where type is
        "starttag", "endtag" or "startendtag", and name is the element name of
        the tag (element names must be given in lower case)
        If endat is not given, .get_text() will stop at the next opening or
        closing tag, or when there are no more tokens (no exception is raised).
        Note that .get_text() includes the text representation (if any) of the
        opening tag, but pushes the opening tag back onto the stack. As a
        result, if you want to call .get_text() again, you need to call
        .get_tag() first (unless you want an empty string returned when you
        next call .get_text()).
        Entity references are translated using the value of the entitydefs
        constructor argument (a mapping from names to characters like that
        provided by the standard module htmlentitydefs). Named entity
        references that are not in this mapping are left unchanged.
        The textify attribute is used to translate opening tags into text: see
        the class docstring.
        """
        text = []
        tok = None
        while 1:
            try:
                tok = self.get_token()
            except NoMoreTokensError:
                # unget last token (not the one we just failed to get)
                if tok: self.unget_token(tok)
                break
            if tok.type == "data":
                text.append(tok.data)
            elif tok.type == "entityref":
                t = unescape("&%s;"%tok.data, self._entitydefs, self.encoding)
                text.append(t)
            elif tok.type == "charref":
                t = unescape_charref(tok.data, self.encoding)
                text.append(t)
            elif tok.type in ["starttag", "endtag", "startendtag"]:
                tag_name = tok.data
                if tok.type in ["starttag", "startendtag"]:
                    # Translate the opening tag to text per self.textify.
                    alt = self.textify.get(tag_name)
                    if alt is not None:
                        if callable(alt):
                            text.append(alt(tok))
                        elif tok.attrs is not None:
                            for k, v in tok.attrs:
                                if k == alt:
                                    text.append(v)
                            text.append("[%s]" % tag_name.upper())
                if endat is None or endat == (tok.type, tag_name):
                    # Stop here; push the terminating tag back for the caller.
                    self.unget_token(tok)
                    break
        return "".join(text)
    def get_compressed_text(self, *args, **kwds):
        """
        As .get_text(), but collapses each group of contiguous whitespace to a
        single space character, and removes all initial and trailing
        whitespace.
        """
        text = self.get_text(*args, **kwds)
        text = text.strip()
        return self.compress_re.sub(" ", text)
    # The handle_*/unknown_* callbacks below are invoked by the mixed-in
    # event-driven parser during feed(); each simply records a Token.
    def handle_startendtag(self, tag, attrs):
        self._tokenstack.append(Token("startendtag", tag, attrs))
    def handle_starttag(self, tag, attrs):
        self._tokenstack.append(Token("starttag", tag, attrs))
    def handle_endtag(self, tag):
        self._tokenstack.append(Token("endtag", tag))
    def handle_charref(self, name):
        self._tokenstack.append(Token("charref", name))
    def handle_entityref(self, name):
        self._tokenstack.append(Token("entityref", name))
    def handle_data(self, data):
        self._tokenstack.append(Token("data", data))
    def handle_comment(self, data):
        self._tokenstack.append(Token("comment", data))
    def handle_decl(self, decl):
        self._tokenstack.append(Token("decl", decl))
    def unknown_decl(self, data):
        # XXX should this call self.error instead?
        #self.error("unknown declaration: " + `data`)
        self._tokenstack.append(Token("decl", data))
    def handle_pi(self, data):
        self._tokenstack.append(Token("pi", data))
    def unescape_attr(self, name):
        # Unescape entity/char references using the configured entitydefs.
        return unescape(name, self._entitydefs, self.encoding)
    def unescape_attrs(self, attrs):
        escaped_attrs = []
        for key, val in attrs:
            escaped_attrs.append((key, self.unescape_attr(val)))
        return escaped_attrs
class PullParser(_AbstractParser, HTMLParser.HTMLParser):
    """Pull parser built on the strict stdlib HTMLParser tokenizer."""
    def __init__(self, *args, **kwds):
        HTMLParser.HTMLParser.__init__(self)
        _AbstractParser.__init__(self, *args, **kwds)
    def unescape(self, name):
        # Use the entitydefs passed into constructor, not
        # HTMLParser.HTMLParser's entitydefs.
        return self.unescape_attr(name)
class TolerantPullParser(_AbstractParser, sgmllib.SGMLParser):
    """Pull parser built on sgmllib, which tolerates sloppier markup."""
    def __init__(self, *args, **kwds):
        sgmllib.SGMLParser.__init__(self)
        _AbstractParser.__init__(self, *args, **kwds)
    def unknown_starttag(self, tag, attrs):
        # SGMLParser reports raw attribute values; unescape them here so the
        # Token carries decoded values, matching PullParser's behavior.
        attrs = self.unescape_attrs(attrs)
        self._tokenstack.append(Token("starttag", tag, attrs))
    def unknown_endtag(self, tag):
        self._tokenstack.append(Token("endtag", tag))
def _test():
    """Run this module's doctests (imported under its installed name)."""
    import doctest, _pullparser
    return doctest.testmod(_pullparser)
if __name__ == "__main__":
    _test()
| {
"repo_name": "deanhiller/databus",
"path": "webapp/play1.3.x/samples-and-tests/i-am-a-developer/mechanize/_pullparser.py",
"copies": "15",
"size": "14326",
"license": "mpl-2.0",
"hash": 4533353619315691500,
"line_mean": 35.7333333333,
"line_max": 79,
"alpha_frac": 0.5743403602,
"autogenerated": false,
"ratio": 4.08963745361119,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A simple Python callstack sampler."""
import contextlib
import datetime
import signal
import traceback
class CallstackSampler(object):
  """Samples the Python call stack via SIGVTALRM at a fixed interval.

  Each sample stored in `self.stacks` is a list whose first entry is the
  UTC capture time and whose remaining entries are formatted
  "file:line(function)(source)" frame strings.
  """

  def __init__(self, interval=None):
    # Collected samples, one list per captured stack.
    self.stacks = []
    # Sampling period in (virtual/CPU) seconds; defaults to 1ms.
    self.interval = interval if interval is not None else 0.001

  def _sample(self, signum, frame):
    """Signal handler: record the current stack and re-arm the timer."""
    del signum  # unused; required by the signal-handler signature
    sample = [datetime.datetime.utcnow()]
    for filename, lineno, func_name, text in traceback.extract_stack(frame):
      sample.append('{}:{}({})({})'.format(filename, lineno, func_name, text))
    self.stacks.append(sample)
    # The timer is one-shot; schedule the next sample explicitly.
    signal.setitimer(signal.ITIMER_VIRTUAL, self.interval, 0)

  @contextlib.contextmanager
  def profile(self):
    """Context manager that samples the stack while its body runs."""
    signal.signal(signal.SIGVTALRM, self._sample)
    signal.setitimer(signal.ITIMER_VIRTUAL, self.interval, 0)
    try:
      yield
    finally:
      # Stop sampling whether the body exits normally or via an exception.
      signal.setitimer(signal.ITIMER_VIRTUAL, 0)

  def save(self, fname):
    """Write collected samples to fname, blank-line-separated per sample."""
    with open(fname, 'w') as out:
      for sample in self.stacks:
        for entry in sample:
          out.write('{}\n'.format(entry))
        out.write('\n')
@contextlib.contextmanager
def callstack_sampling(filename, interval=None):
  """Periodically samples the Python callstack while the body runs.

  On exit the collected samples are written to `filename`.

  Args:
    filename: the file the samples are saved to
    interval: the sampling interval, in seconds. Defaults to 0.001.

  Yields:
    nothing
  """
  profiler = CallstackSampler(interval=interval)
  with profiler.profile():
    yield
  # Persist whatever was captured once sampling has stopped.
  profiler.save(filename)
| {
"repo_name": "tombstone/models",
"path": "official/utils/misc/callstack_sampler.py",
"copies": "6",
"size": "1640",
"license": "apache-2.0",
"hash": -9065000327958715000,
"line_mean": 25.4516129032,
"line_max": 79,
"alpha_frac": 0.6615853659,
"autogenerated": false,
"ratio": 3.8770685579196216,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.025369854625517583,
"num_lines": 62
} |
# A simple Pythone program to analysis addon _common directory and copy each files to firefox or chrome directory to form different addons
#coding=utf-8
import os,shutil
import sys
import datetime
SOURCE_DIR = '_common'
# Different build target configs
# file mark is used to set file specified to single platform. e.g. myscript.fx.js will only be copied to Firefox dir and the name will be changed to myscript.js
class TargetConfig:
    """Describes one build target (browser platform) for the addon.

    `fileMark` tags platform-specific files: e.g. ``myscript.fx.js`` is
    copied only into the Firefox tree and renamed to ``myscript.js``.
    """
    # Class-level defaults; each instance overwrites them in __init__.
    name = ''
    directory = ''
    fileMark = ''
    def __init__(self, name, dir, fileMark):
        self.name = name
        self.directory = dir
        self.fileMark = fileMark
    def getExtMark(self):
        """Return the mark as an extension-style string, e.g. '.fx'."""
        return '.%s' % self.fileMark
# The known build targets; allConfigs drives both mark detection and the
# default build (when no target name is passed on the command line).
ignoreConfig = TargetConfig('ignore', '', 'ignore') #special, ignore config, do not need to copy
firefoxConfig = TargetConfig('firefox', os.path.join('firefox', 'kekule'), 'fx')
chromeConfig = TargetConfig('chrome', os.path.join('chrome', 'kekule'), 'cr')
allConfigs = [ignoreConfig, firefoxConfig, chromeConfig]
#builder class
class Builder:
    """Copies files from the shared source tree into each target's tree.

    Files carrying a platform mark (e.g. ``name.fx.js``) are copied only
    into the matching target, renamed without the mark; unmarked files are
    copied into every target.
    """
    # Class-level defaults (instances overwrite these in __init__).
    srcDir = ''
    targets = []
    def __init__(self, srcDir, targets):
        self.srcDir = srcDir
        self.targets = targets
    # build all targets
    def build(self):
        """Copy the source tree into every configured target directory."""
        allExtMarks = self.getAllTargetExtMarks()
        for t in self.targets:
            print('handle target', t.name)
            self.iteratePath(self.srcDir, t.directory, t.getExtMark(), allExtMarks)
    # Copy files to a specified target
    def buildTarget(self, targetConfig, allTargetExtMarks):
        """Copy the source tree into one target; False if either dir is missing."""
        srcRootDir = self.srcDir
        targetRootDir = targetConfig.directory
        if not os.path.isdir(srcRootDir) or not os.path.isdir(targetRootDir):
            return False
        self.iteratePath(srcRootDir, targetRootDir, targetConfig.getExtMark(), allTargetExtMarks)
    def getAllTargetExtMarks(self):
        """Return the ext marks of every known config (not just self.targets)."""
        global allConfigs
        result = []
        for c in allConfigs:
            result.append(c.getExtMark())
        return result
    # extract all exts from file name, returns an array. e.g. 'file.ext1.ext2' returns ['file', '.ext1', '.ext2']
    def splitAllExts(self, filename):
        """Recursively split *filename* into its stem plus every extension.

        Extensions keep their leading dot; a name with no extension yields
        a single-element list.
        """
        result = os.path.splitext(filename)
        ext = result[1]
        if ext == '': # no ext
            ret = [result[0]]
        else:
            ret = self.splitAllExts(result[0])
            ret.append(ext)
        #print('split', filename, ret)
        return ret
    # Analysis target mark of a file, returns a tuple (fileNameWithoutMark, extMark). If no mark found, extMark will be set to None.
    def analysisFileExtMark(self, filename, allTargetExtMarks):
        """Detect a platform mark in *filename*.

        Returns (fileNameWithoutMark, extMark); extMark is None when the
        name carries no recognized mark.
        """
        result = self.splitAllExts(filename)
        length = len(result)
        if length == 1: # no ext
            return (filename, None)
        elif length == 2: # only one ext, usually not marked
            ext = result[1]
            try:
                index = allTargetExtMarks.index(ext)
            # Fix: was a bare `except:` (would also swallow SystemExit /
            # KeyboardInterrupt); list.index only raises ValueError here.
            except ValueError:
                index = -1
            if index >= 0:
                return (result[0], ext)
            else:
                return (filename, None)
        else: # more than one, usually regard the second-to-last ext as mark
            print('result', result)
            extIndex = len(result) - 2
            ext = result[extIndex]
            try:
                index = allTargetExtMarks.index(ext)
            except ValueError:  # same bare-except fix as above
                index = -1
            if index >= 0:
                result.pop(extIndex)
                # Rejoin the remaining pieces (stem plus kept extensions).
                fname = ''.join(result)
                return (fname, ext)
            else:
                return (filename, None)
    def iteratePath(self, srcDir, targetDir, targetExtMark, allExtMarks):
        """Recursively copy srcDir into targetDir, honoring platform marks."""
        print('iterate path', srcDir, targetDir)
        if not os.path.isdir(targetDir): # target not exists, create
            print('create target path', targetDir)
            os.makedirs(targetDir)
        for file in os.listdir(srcDir):
            fileAnalysisResult = self.analysisFileExtMark(file, allExtMarks)
            extMark = fileAnalysisResult[1]
            coreFileName = fileAnalysisResult[0]
            srcFileName = os.path.join(srcDir, file)
            print('curr file', file, extMark, coreFileName)
            if extMark != None: # has ext mark, handle
                if targetExtMark != extMark: # not for this target, bypass
                    continue
            if os.path.isdir(srcFileName):
                newSrcDir = srcFileName
                newTargetDir = os.path.join(targetDir, coreFileName)
                print('copy path', newSrcDir, newTargetDir)
                self.iteratePath(newSrcDir, newTargetDir, targetExtMark, allExtMarks)
            elif os.path.isfile(srcFileName):
                targetFileName = os.path.join(targetDir, coreFileName)
                # if os.path.isfile(targetFileName) # file already exists
                shutil.copy(srcFileName, targetFileName)
                print('copy file', srcFileName, targetFileName)
# run
# Entry point: `python build.py [targetName]`; with no argument every
# known target is built.
print('====begin======')
# get args
argCount = len(sys.argv)
configs = []
if (argCount <= 1): # no extra arg
    configs = allConfigs
else: # specified the target
    targetName = sys.argv[1]
    for c in allConfigs:
        if c.name == targetName:
            configs = [c]
            break
#print('curr targets', configs)
# NOTE(review): an unrecognized target name leaves `configs` empty, so the
# build silently does nothing -- confirm whether that is intended.
builder = Builder(SOURCE_DIR, configs)
builder.build()
#print(builder.analysisFileExtMark('chemObjImport.addon.fx.js', ['.fx', '.cr']))
print('====end======') | {
"repo_name": "partridgejiang/Kekule.js",
"path": "src/_extras/browserAddOns/build.py",
"copies": "2",
"size": "4779",
"license": "mit",
"hash": 6141471993913872000,
"line_mean": 29.253164557,
"line_max": 160,
"alpha_frac": 0.6913580247,
"autogenerated": false,
"ratio": 3.181757656458056,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.937548143090121,
"avg_score": 0.09952685005136917,
"num_lines": 158
} |
"""A simple python library for getting caller ID name information."""
from re import sub
from requests import get
from requests.exceptions import ConnectionError, Timeout
from .errors import InvalidPhoneNumberError
class Phone(object):
    """Holds one phone number's caller ID information.

    :attr str OPENCNAM_API_URL: Current OpenCNAM API endpoint.
    :attr str number: The phone number as supplied by the caller.
    :attr str cnam: The caller ID name for this phone.
    :attr str account_sid: Your Account SID.
    :attr str auth_token: Your Auth Token.
    """
    OPENCNAM_API_URL = 'https://api.opencnam.com/v3/phone/%s'
    def __init__(self, number, account_sid, auth_token, cnam=''):
        """Create a Phone and immediately try to look up its caller ID name
        through OpenCNAM's public API.

        :param str number: The phone number to query in any format.
        :param str account_sid: Your Account SID (from the OpenCNAM dashboard).
        :param str auth_token: Your Auth Token (from the OpenCNAM dashboard).
        :param str cnam: Optionally pre-set the caller ID name; when given,
            no network lookup is performed.

        Usage::

            from opencnam import Phone

            phone = Phone('+18182179229')
            ...
        """
        self.cnam = cnam
        self.number = number
        self.account_sid = account_sid
        self.auth_token = auth_token
        # Best-effort lookup; a failed attempt is not retried automatically,
        # callers may invoke get_cnam() again at any time.
        self.get_cnam()
    def get_cnam(self):
        """Query the OpenCNAM API and retrieve the caller ID name string
        associated with this phone.

        A name obtained once is cached on the instance; subsequent calls
        are no-ops. Network errors and timeouts are swallowed, leaving
        `cnam` unchanged.
        """
        if self.cnam:
            return
        query = {'format': 'pbx'}
        # If the user supplied API creds, use them.
        if self.account_sid and self.auth_token:
            query['account_sid'] = self.account_sid
            query['auth_token'] = self.auth_token
        try:
            resp = get(self.OPENCNAM_API_URL % self.number, params=query, timeout=3)
            if resp.status_code == 200:
                self.cnam = str(resp.text)
        except (ConnectionError, Timeout):
            pass
| {
"repo_name": "telephonyresearch/python-opencnam",
"path": "opencnam/opencnam.py",
"copies": "1",
"size": "2551",
"license": "unlicense",
"hash": -7880276795674655000,
"line_mean": 35.4428571429,
"line_max": 93,
"alpha_frac": 0.6232849863,
"autogenerated": false,
"ratio": 4.2026359143327845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004962597389760263,
"num_lines": 70
} |
"""A simple python module to add a retry function decorator"""
import functools
import itertools
import logging
import os
from select import poll, POLLIN
from threading import Timer
import time
from decorator import decorator
# Placeholder exception type used so `except exceptions:` matches nothing
# when the caller passes an empty/None `exceptions` tuple.
class _DummyException(Exception):
    pass
# Raised when max_retries attempts all fail the success criteria.
class MaximumRetriesExceeded(Exception):
    pass
# Raised when the overall timeout elapses before a successful attempt.
class MaximumTimeoutExceeded(Exception):
    pass
def _timeout(pipe_w):
with os.fdopen(pipe_w, 'w') as p:
p.write('stop')
def retry(
        exceptions=(Exception,), interval=0, max_retries=10, success=None,
        timeout=-1):
    """Decorator to retry a function 'max_retries' amount of times
    :param tuple exceptions: Exceptions to be caught for retries
    :param int interval: Interval between retries in seconds
    :param int max_retries: Maximum number of retries to have, if
        set to -1 the decorator will loop forever
    :param function success: Function to indicate success criteria
    :param int timeout: Timeout interval in seconds, if -1 will retry forever
    :raises MaximumRetriesExceeded: Maximum number of retries hit without
        reaching the success criteria
    :raises TypeError: Both exceptions and success were left None causing the
        decorator to have no valid exit criteria.
    Example:
        Use it to decorate a function!
        .. sourcecode:: python
            from retry import retry
            @retry(exceptions=(ArithmeticError,), success=lambda x: x > 0)
            def foo(bar):
                if bar < 0:
                    raise ArithmeticError('testing this')
                return bar
            foo(5)
            # Should return 5
            foo(-1)
            # Should raise ArithmeticError
            foo(0)
            # Should raise MaximumRetriesExceeded
    """
    if not exceptions and success is None:
        raise TypeError(
            '`exceptions` and `success` parameter can not both be None')
    # For python 3 compatability
    # An empty `exceptions` becomes a never-raised dummy so the except
    # clause below stays syntactically valid but matches nothing.
    exceptions = exceptions or (_DummyException,)
    _retries_error_msg = ('Exceeded maximum number of retries {} at '
                          'an interval of {}s for function {}')
    _timeout_error_msg = 'Maximum timeout of {}s reached for function {}'
    @decorator
    def wrapper(func, *args, **kwargs):
        run_func = functools.partial(func, *args, **kwargs)
        logger = logging.getLogger(func.__module__)
        # -1 (or any negative) max_retries means retry forever.
        if max_retries < 0:
            iterator = itertools.count()
        else:
            iterator = range(max_retries)
        # The timeout is implemented as a background Timer that writes to a
        # pipe; the retry loop polls the pipe's read end between attempts.
        # NOTE(review): the read fd `r` is never closed and the Timer is only
        # cancelled on the success path -- confirm whether that leak matters
        # for long-running callers.
        timer = None
        if timeout > 0:
            r, w = os.pipe()
            timer = Timer(timeout, _timeout, [w])
            timer.start()
            p = poll()
            p.register(r, POLLIN)
        for num, _ in enumerate(iterator, 1):
            try:
                result = run_func()
                # No success predicate means any non-raising call succeeds.
                if success is None or success(result):
                    if timer:
                        timer.cancel()
                    return result
            except exceptions:
                logger.exception(
                    'Exception experienced when trying function {}'.format(
                        func.__name__))
                # Re-raise the original exception on the final attempt.
                if num == max_retries:
                    raise
            logger.warning(
                'Retrying {} in {}s...'.format(
                    func.__name__, interval))
            if timer:
                # Sleep via poll() so a timeout write wakes us immediately.
                r_state = p.poll(interval * 1000)
                if r_state and r_state[0][1] & POLLIN:
                    raise MaximumTimeoutExceeded(
                        _timeout_error_msg.format(timeout, func.__name__)
                    )
            else:
                time.sleep(interval)
        else:
            # Loop finished without returning: success criteria never met.
            raise MaximumRetriesExceeded(
                _retries_error_msg.format(
                    max_retries, interval, func.__name__))
    return wrapper
| {
"repo_name": "seemethere/retry.it",
"path": "retry.py",
"copies": "1",
"size": "3885",
"license": "mit",
"hash": -3026290403476526000,
"line_mean": 31.6470588235,
"line_max": 77,
"alpha_frac": 0.5608751609,
"autogenerated": false,
"ratio": 4.880653266331659,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 119
} |
"""A simple Python object browser.
This will create a tree that can be used to browse the objects in a given
Python namespace. Like Console.py, this is a demo only, and needs a lot
of work before it would be a truly valuable tool.
"""
from pawt import swing
from types import *
import java
leaves = (None, TypeType, IntType, StringType, FloatType, NoneType,
BuiltinFunctionType, BuiltinMethodType)
class PyEnumeration(java.util.Enumeration):
    """Adapts a Python sequence to Java's Enumeration interface (Jython)."""
    def __init__(self, seq):
        self.seq = seq
        self.index = 0
    def hasMoreElements(self):
        """True while elements remain to be enumerated."""
        return self.index < len(self.seq)
    def nextElement(self):
        """Return the next element and advance the cursor."""
        self.index = self.index+1
        return self.seq[self.index-1]
def classattrs(c, attrs):
    """Collect every attribute name defined on class *c* or its bases.

    Names are recorded as keys of the *attrs* mapping (all mapped to 1),
    base classes first, so the dict doubles as an ordered set.
    """
    for base in c.__bases__:
        classattrs(base, attrs)
    attrs.update(dict.fromkeys(c.__dict__, 1))

def mydir(obj):
    """Return a sorted list of attribute names visible on *obj*.

    Combines names from the object's class hierarchy (via classattrs) with
    the instance's own __dict__.  Fix: the original did
    ``ret = attrs.keys(); ret.sort()``, which fails on Python 3 where
    dict.keys() returns a view without a .sort() method; ``sorted(attrs)``
    is equivalent on Python 2 and correct on Python 3.
    """
    attrs = {}
    if hasattr(obj, '__class__'):
        classattrs(obj.__class__, attrs)
    if hasattr(obj, '__dict__'):
        for name in obj.__dict__.keys():
            attrs[name] = 1
    return sorted(attrs)
def shortrepr(obj):
    """repr() capped at 80 characters, with a '...' ellipsis when truncated."""
    text = repr(obj)
    if len(text) <= 80:
        return text
    return text[:77] + "..."
class ObjectNode(swing.tree.TreeNode):
    """One node of the browser tree: a named Python object plus lazy children.

    Implements the Swing TreeNode interface so a JTree can display it.
    Children are computed on first demand and cached in self.mychildren.
    """
    def __init__(self, parent, name, object):
        self.myparent = parent
        self.name = name
        self.object = object
    def getChildren(self):
        # Lazy + cached: the attribute only exists after the first call.
        if hasattr(self, 'mychildren'):
            return self.mychildren
        if self.isLeaf():
            self.mychildren = None
            return None
        children = []
        for name in mydir(self.object):
            # Skip dunder attributes to keep the tree readable.
            if name[:2] == '__':
                continue
            try:
                children.append(ObjectNode(self, name,
                                           getattr(self.object, name)))
            except TypeError:
                print 'type error on', name, self.object
        self.mychildren = children
        return children
    def children(self):
        # TreeNode API: children must be a java.util.Enumeration.
        return PyEnumeration(self.getChildren())
    def getAllowsChildren(self):
        return not self.isLeaf()
    def isLeaf(self):
        # Leaf when the object's class is one of the simple types in `leaves`.
        if hasattr(self.object, '__class__'):
            myclass = self.object.__class__
        else:
            myclass = None
        return myclass in leaves
    def getChildAt(self, i):
        return self.getChildren()[i]
    def getChildCount(self):
        return len(self.getChildren())
    def getIndex(self, node):
        # Linear search; -1 when `node` is not a child (TreeNode convention).
        index = 0
        for child in self.getChildren():
            if child == node:
                return index
            index = index+1
        return -1
    def getParent(self):
        return self.myparent
    def toString(self):
        # Label shown by the JTree: "name = <repr capped at 80 chars>".
        return self.name+' = '+shortrepr(self.object)
if __name__ == '__main__':
    # Build a small namespace with nested objects to demo the browser.
    class foo:
        bar=99
        eggs='hello'
        class baz:
            x,y,z=1,2,3
        func = range
    import __main__
    f = foo()
    f.pyfunc = mydir
    # Browse the __main__ module itself inside a scrollable Swing JTree.
    root = ObjectNode(None, 'foo', __main__)
    tree = swing.JTree(root)
    swing.test(swing.JScrollPane(tree))
| {
"repo_name": "JaDogg/__py_playground",
"path": "reference/examples-v3/JavaScript/python/tests/jython/ObjectTree.py",
"copies": "2",
"size": "3026",
"license": "mit",
"hash": -1167417185375021800,
"line_mean": 21.25,
"line_max": 73,
"alpha_frac": 0.570720423,
"autogenerated": false,
"ratio": 3.8062893081761007,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.53770097311761,
"avg_score": null,
"num_lines": null
} |
""" A simple python script that uses the leastsq function from SciPy to carry out a nonlinear
regression. Outputs a bunch of different measures to be used in judging the fit. Requires an input
file with x and y columns, no headings. Pass filename as an argument.
"""
import numpy, sys
from scipy.optimize import leastsq
# Define the function here, using p as an array of parameters. Functions must be able to take arrays
# as arguments (use numpy version of exp, for example).
def func(p, x):
    """Model function: p = [Pd, Pm, Ke]; returns the predicted y at x.

    Works elementwise on numpy arrays as well as on scalars.
    """
    pd, pm, ke = p[0], p[1], p[2]
    root = numpy.sqrt(8 * ke * x + 1)
    return (pd - pm) * (1 + (1 - root) / (4 * ke * x)) + pm
# Initial guesses for the parameters.
p0 = [10, 5, 600] # Pd Pm Ke
# Read whitespace-separated x/y pairs from the file named on the command line.
datafile = open(sys.argv[1])
x, y = numpy.array([]), numpy.array([])
for line in datafile:
    curline = line.replace("\n", "").split() # Splits at any whitespace.
    x = numpy.append(x, float(curline[0]))
    y = numpy.append(y, float(curline[1]))
# Error function handed to leastsq; for a plain regression it is simply the
# vector of residuals.
def func_res(p, x, y):
    """Residuals (observed minus model prediction) minimized by leastsq."""
    predicted = func(p, x)
    return y - predicted
dof = len(x) - len(p0) # Degrees of freedom
# Nonlinear least-squares fit; full_output also returns the covariance
# matrix and the per-point residuals (info["fvec"]).
fit_parameters, covariance_matrix, info, msg, success \
    = leastsq(func_res, p0, args=(x,y), full_output=True)
sum_squares_residuals = sum(info["fvec"]*info["fvec"])
sum_squares_mean_dev = sum((y - numpy.mean(y))**2)
# The errors for each parameter are obtained by multiplying the covariance matrix by the residual
# variance (= sum_squares_residuals / dof).
errors = []
for i in range(len(covariance_matrix)):
    errors.append(numpy.sqrt(covariance_matrix[i,i]*sum_squares_residuals/dof))
print("**Regression results for file \"{}\"**".format(sys.argv[1]))
print()
print("Data (x, y, yfit)")
print("=================")
# yfit = y - residual, i.e. the model prediction at each x.
for n in range(len(x)):
    print("{}, {}, {}".format(x[n], y[n], y[n] - info["fvec"][n]))
print()
print("Optimized parameters")
print("====================")
for n in range(len(fit_parameters)):
    print("{} +/- {}".format(fit_parameters[n], errors[n]))
print()
print("Regression data")
print("===============")
# See leastsq documentation for descriptions of flags.
print("Flag: {}".format(success))
print("Std Deviation of residuals: {}".format(numpy.sqrt(sum_squares_residuals/dof)))
print("chi2 (sum square residuals): {}".format(sum_squares_residuals))
# Ideally, (reduced chi2)/(std dev of measurement) = 1.
print("Reduced chi2 (chi2/dof): {}".format(sum_squares_residuals/dof))
print("R2 = {}".format(1 - sum_squares_residuals/sum_squares_mean_dev))
print("Adjusted R2 = {}".format(1 - (sum_squares_residuals/dof)/(sum_squares_mean_dev/(len(x)-1))))
print("Covariance matrix:")
print(covariance_matrix*sum_squares_residuals/dof)
print("Residuals:")
print(info["fvec"])
| {
"repo_name": "scotthartley/NMR_dilution_fit",
"path": "NMR_dilution_fit.py",
"copies": "1",
"size": "2684",
"license": "mit",
"hash": 8867641672662189000,
"line_mean": 34.7866666667,
"line_max": 100,
"alpha_frac": 0.6717585693,
"autogenerated": false,
"ratio": 3.1173054587688735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9262839999527294,
"avg_score": 0.0052448057083159675,
"num_lines": 75
} |
"""A simple Python template renderer, for a nano-subset of Django syntax."""
# Coincidentally named the same as http://code.activestate.com/recipes/496702/
import re
from coverage.backward import set # pylint: disable=W0622
class CodeBuilder(object):
    """Accumulate nicely-indented source code and compile it on demand."""

    def __init__(self, indent=0):
        # `code` holds string fragments and nested CodeBuilder sections.
        self.code = []
        self.indent_amount = indent

    def add_line(self, line):
        """Add one line of source (no newline or indentation needed)."""
        self.code.extend([" " * self.indent_amount, line, "\n"])

    def add_section(self):
        """Add and return a nested CodeBuilder at the current indent."""
        section = CodeBuilder(self.indent_amount)
        self.code.append(section)
        return section

    def indent(self):
        """Increase the current indent for following lines."""
        self.indent_amount += 4

    def dedent(self):
        """Decrease the current indent for following lines."""
        self.indent_amount -= 4

    def __str__(self):
        # Sections stringify recursively.
        return "".join([str(fragment) for fragment in self.code])

    def get_function(self, fn_name):
        """Compile the accumulated code and return the function `fn_name`."""
        assert self.indent_amount == 0
        namespace = {}
        source = str(self)
        exec(source, namespace)
        return namespace[fn_name]
class Templite(object):
    """A simple template renderer, for a nano-subset of Django syntax.

    Supported constructs are extended variable access::

        {{var.modifer.modifier|filter|filter}}

    loops::

        {% for var in list %}...{% endfor %}

    and ifs::

        {% if var %}...{% endif %}

    Comments are within curly-hash markers::

        {# This will be ignored #}

    Construct a Templite with the template text, then use `render` against a
    dictionary context to create a finished string.
    """
    def __init__(self, text, *contexts):
        """Construct a Templite with the given `text`.

        `contexts` are dictionaries of values to use for future renderings.
        These are good for filters and global values.
        """
        self.text = text
        self.context = {}
        for context in contexts:
            self.context.update(context)
        # We construct a function in source form, then compile it and hold onto
        # it, and execute it to render the template.
        code = CodeBuilder()
        code.add_line("def render(ctx, dot):")
        code.indent()
        # Placeholder section: the context-variable extractions are filled in
        # after the whole template has been parsed (only then is all_vars known).
        vars_code = code.add_section()
        self.all_vars = set()
        self.loop_vars = set()
        code.add_line("result = []")
        # Short local aliases keep the generated code small.
        code.add_line("a = result.append")
        code.add_line("e = result.extend")
        code.add_line("s = str")
        buffered = []
        def flush_output():
            """Force `buffered` to the code builder."""
            if len(buffered) == 1:
                code.add_line("a(%s)" % buffered[0])
            elif len(buffered) > 1:
                code.add_line("e([%s])" % ",".join(buffered))
            del buffered[:]
        # Split the text to form a list of tokens.
        toks = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)
        ops_stack = []
        for tok in toks:
            if tok.startswith('{{'):
                # An expression to evaluate.
                buffered.append("s(%s)" % self.expr_code(tok[2:-2].strip()))
            elif tok.startswith('{#'):
                # Comment: ignore it and move on.
                continue
            elif tok.startswith('{%'):
                # Action tag: split into words and parse further.
                flush_output()
                words = tok[2:-2].strip().split()
                if words[0] == 'if':
                    # An if statement: evaluate the expression to determine if.
                    assert len(words) == 2
                    ops_stack.append('if')
                    code.add_line("if %s:" % self.expr_code(words[1]))
                    code.indent()
                elif words[0] == 'for':
                    # A loop: iterate over expression result.
                    assert len(words) == 4 and words[2] == 'in'
                    ops_stack.append('for')
                    self.loop_vars.add(words[1])
                    code.add_line(
                        "for c_%s in %s:" % (
                            words[1],
                            self.expr_code(words[3])
                        )
                    )
                    code.indent()
                elif words[0].startswith('end'):
                    # Endsomething. Pop the ops stack
                    end_what = words[0][3:]
                    if ops_stack[-1] != end_what:
                        raise SyntaxError("Mismatched end tag: %r" % end_what)
                    ops_stack.pop()
                    code.dedent()
                else:
                    raise SyntaxError("Don't understand tag: %r" % words[0])
            else:
                # Literal content. If it isn't empty, output it.
                if tok:
                    buffered.append("%r" % tok)
        flush_output()
        # Only variables that aren't loop-locals need extracting from ctx.
        for var_name in self.all_vars - self.loop_vars:
            vars_code.add_line("c_%s = ctx[%r]" % (var_name, var_name))
        if ops_stack:
            raise SyntaxError("Unmatched action tag: %r" % ops_stack[-1])
        code.add_line("return ''.join(result)")
        code.dedent()
        self.render_function = code.get_function('render')
    def expr_code(self, expr):
        """Generate a Python expression for `expr`."""
        if "|" in expr:
            # Filters: a|f|g compiles to c_g(c_f(c_a)).
            pipes = expr.split("|")
            code = self.expr_code(pipes[0])
            for func in pipes[1:]:
                self.all_vars.add(func)
                code = "c_%s(%s)" % (func, code)
        elif "." in expr:
            # Dotted access is resolved at render time by the `dot` helper.
            dots = expr.split(".")
            code = self.expr_code(dots[0])
            args = [repr(d) for d in dots[1:]]
            code = "dot(%s, %s)" % (code, ", ".join(args))
        else:
            self.all_vars.add(expr)
            code = "c_%s" % expr
        return code
    def render(self, context=None):
        """Render this template by applying it to `context`.

        `context` is a dictionary of values to use in this rendering.
        """
        # Make the complete context we'll use.
        ctx = dict(self.context)
        if context:
            ctx.update(context)
        return self.render_function(ctx, self.do_dots)
    def do_dots(self, value, *dots):
        """Evaluate dotted expressions at runtime."""
        for dot in dots:
            try:
                value = getattr(value, dot)
            except AttributeError:
                # Fall back to item access for dicts and the like.
                value = value[dot]
            if hasattr(value, '__call__'):
                value = value()
        return value
| {
"repo_name": "nicolargo/intellij-community",
"path": "python/helpers/coverage/templite.py",
"copies": "160",
"size": "6868",
"license": "apache-2.0",
"hash": 3202730924089401000,
"line_mean": 32.0192307692,
"line_max": 79,
"alpha_frac": 0.5039312755,
"autogenerated": false,
"ratio": 4.195479535736102,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A simple Python template renderer, for a nano-subset of Django syntax."""
# Coincidentally named the same as http://code.activestate.com/recipes/496702/
import re, sys
class Templite(object):
    """A simple template renderer, for a nano-subset of Django syntax.

    Supported constructs are extended variable access::

        {{var.modifer.modifier|filter|filter}}

    loops::

        {% for var in list %}...{% endfor %}

    and ifs::

        {% if var %}...{% endif %}

    Comments are within curly-hash markers::

        {# This will be ignored #}

    Construct a Templite with the template text, then use `render` against a
    dictionary context to create a finished string.
    """
    def __init__(self, text, *contexts):
        """Construct a Templite with the given `text`.

        `contexts` are dictionaries of values to use for future renderings.
        These are good for filters and global values.
        """
        self.text = text
        self.context = {}
        for context in contexts:
            self.context.update(context)
        # Split the text to form a list of tokens.
        toks = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)
        # Parse the tokens into a nested list of operations. Each item in the
        # list is a tuple with an opcode, and arguments. They'll be
        # interpreted by TempliteEngine.
        #
        # When parsing an action tag with nested content (if, for), the current
        # ops list is pushed onto ops_stack, and the parsing continues in a new
        # ops list that is part of the arguments to the if or for op.
        ops = []
        ops_stack = []
        for tok in toks:
            if tok.startswith('{{'):
                # Expression: ('exp', expr)
                ops.append(('exp', tok[2:-2].strip()))
            elif tok.startswith('{#'):
                # Comment: ignore it and move on.
                continue
            elif tok.startswith('{%'):
                # Action tag: split into words and parse further.
                words = tok[2:-2].strip().split()
                if words[0] == 'if':
                    # If: ('if', (expr, body_ops))
                    if_ops = []
                    assert len(words) == 2
                    ops.append(('if', (words[1], if_ops)))
                    ops_stack.append(ops)
                    ops = if_ops
                elif words[0] == 'for':
                    # For: ('for', (varname, listexpr, body_ops))
                    assert len(words) == 4 and words[2] == 'in'
                    for_ops = []
                    ops.append(('for', (words[1], words[3], for_ops)))
                    ops_stack.append(ops)
                    ops = for_ops
                elif words[0].startswith('end'):
                    # Endsomething. Pop the ops stack
                    ops = ops_stack.pop()
                    assert ops[-1][0] == words[0][3:]
                else:
                    raise SyntaxError("Don't understand tag %r" % words)
            else:
                # Literal text between tags: ('lit', text)
                ops.append(('lit', tok))
        assert not ops_stack, "Unmatched action tag: %r" % ops_stack[-1][0]
        self.ops = ops
    def render(self, context=None):
        """Render this template by applying it to `context`.

        `context` is a dictionary of values to use in this rendering.
        """
        # Make the complete context we'll use.
        ctx = dict(self.context)
        if context:
            ctx.update(context)
        # Run it through an engine, and return the result.
        engine = _TempliteEngine(ctx)
        engine.execute(self.ops)
        return "".join(engine.result)
class _TempliteEngine(object):
"""Executes Templite objects to produce strings."""
def __init__(self, context):
self.context = context
self.result = []
def execute(self, ops):
"""Execute `ops` in the engine.
Called recursively for the bodies of if's and loops.
"""
for op, args in ops:
if op == 'lit':
self.result.append(args)
elif op == 'exp':
try:
self.result.append(str(self.evaluate(args)))
except:
exc_class, exc, _ = sys.exc_info()
new_exc = exc_class("Couldn't evaluate {{ %s }}: %s"
% (args, exc))
raise new_exc
elif op == 'if':
expr, body = args
if self.evaluate(expr):
self.execute(body)
elif op == 'for':
var, lis, body = args
vals = self.evaluate(lis)
for val in vals:
self.context[var] = val
self.execute(body)
else:
raise AssertionError("TempliteEngine doesn't grok op %r" % op)
def evaluate(self, expr):
"""Evaluate an expression.
`expr` can have pipes and dots to indicate data access and filtering.
"""
if "|" in expr:
pipes = expr.split("|")
value = self.evaluate(pipes[0])
for func in pipes[1:]:
value = self.evaluate(func)(value)
elif "." in expr:
dots = expr.split('.')
value = self.evaluate(dots[0])
for dot in dots[1:]:
try:
value = getattr(value, dot)
except AttributeError:
value = value[dot]
if hasattr(value, '__call__'):
value = value()
else:
value = self.context[expr]
return value
| {
"repo_name": "xin3liang/platform_external_chromium_org_third_party_WebKit",
"path": "Tools/Scripts/webkitpy/thirdparty/coverage/templite.py",
"copies": "123",
"size": "5701",
"license": "bsd-3-clause",
"hash": -2725975340414378500,
"line_mean": 33.343373494,
"line_max": 79,
"alpha_frac": 0.4935976145,
"autogenerated": false,
"ratio": 4.440031152647975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
INF = 1e30  # "no intersection" sentinel distance
EPS = 1e-6  # tolerance used to avoid self-intersection and near-zero division
class Vec:
    """A 3-component vector; also reused for RGB colours."""

    def __init__(self, x, y, z):
        self.x, self.y, self.z = x, y, z

    def __neg__(self):
        return Vec(-self.x, -self.y, -self.z)

    def __add__(self, rhs):
        return Vec(self.x + rhs.x, self.y + rhs.y, self.z + rhs.z)

    def __sub__(self, rhs):
        return Vec(self.x - rhs.x, self.y - rhs.y, self.z - rhs.z)

    def __mul__(self, rhs):
        # Scalar multiplication only.
        return Vec(self.x * rhs, self.y * rhs, self.z * rhs)

    def length(self):
        return self.dot(self) ** 0.5

    def normalise(self):
        size = self.length()
        return Vec(self.x / size, self.y / size, self.z / size)

    def dot(self, rhs):
        return self.x * rhs.x + self.y * rhs.y + self.z * rhs.z
RGB = Vec
class Ray:
    """A ray: origin point `p` and direction `d`."""

    def __init__(self, p, d):
        self.p = p
        self.d = d
class View:
    """Camera description: image-plane size and depth plus position and basis."""

    def __init__(self, width, height, depth, pos, xdir, ydir, zdir):
        self.width = width
        self.height = height
        self.depth = depth      # distance from the eye to the image plane
        self.pos = pos
        self.xdir = xdir        # image-plane basis vectors
        self.ydir = ydir
        self.zdir = zdir        # viewing direction

    def calc_dir(self, dx, dy):
        """Unit direction through image-plane offset (dx, dy)."""
        ray_dir = self.xdir * dx + self.ydir * dy + self.zdir * self.depth
        return ray_dir.normalise()
class Light:
    """A point light source."""

    def __init__(self, pos, colour, casts_shadows):
        self.pos = pos
        self.colour = colour
        # NOTE(review): casts_shadows is stored but not consulted by
        # trace_to_light — confirm whether shadowing should be conditional.
        self.casts_shadows = casts_shadows
class Surface:
    """Material properties of a scene object."""

    def __init__(self, diffuse, specular, spec_idx, reflect, transp, colour):
        self.diffuse = diffuse      # diffuse reflection coefficient
        self.specular = specular    # specular reflection coefficient
        self.spec_idx = spec_idx    # specular exponent
        self.reflect = reflect      # mirror-reflection fraction
        self.transp = transp        # transparency fraction
        self.colour = colour

    @staticmethod
    def dull(colour):
        """A matte material."""
        return Surface(0.7, 0.0, 1, 0.0, 0.0, colour * 0.6)

    @staticmethod
    def shiny(colour):
        """A glossy, mirror-reflective material."""
        return Surface(0.2, 0.9, 32, 0.8, 0.0, colour * 0.3)

    @staticmethod
    def transparent(colour):
        """A glossy, mostly see-through material."""
        return Surface(0.2, 0.9, 32, 0.0, 0.8, colour * 0.3)
class Sphere:
    """A sphere defined by a centre point and radius."""

    def __init__(self, surface, centre, radius):
        self.surface = surface
        self.centre = centre
        self.radsq = radius ** 2    # only the squared radius is ever needed

    def intersect(self, ray):
        """Distance along `ray` to the nearest hit, or INF if it misses."""
        to_centre = self.centre - ray.p
        b = to_centre.dot(ray.d)
        det = b ** 2 - to_centre.dot(to_centre) + self.radsq
        if det > 0:
            det **= 0.5
            near = b - det
            if near > EPS:
                return near
            far = b + det
            if far > EPS:
                return far
        return INF

    def surface_at(self, v):
        """Surface and outward unit normal at point `v` on the sphere."""
        return self.surface, (v - self.centre).normalise()
class Plane:
    """An infinite plane through `centre` with the given normal direction."""

    def __init__(self, surface, centre, normal):
        self.surface = surface
        self.normal = normal.normalise()
        # Use the normalised normal here too: with the raw `normal` the
        # stored plane offset was scaled wrongly whenever a non-unit normal
        # was passed in.  (All call sites in this file pass unit normals, so
        # their behaviour is unchanged.)
        self.cdotn = centre.dot(self.normal)

    def intersect(self, ray):
        """Distance along `ray` to the plane, or INF when parallel/behind."""
        ddotn = ray.d.dot(self.normal)
        if abs(ddotn) > EPS:
            t = (self.cdotn - ray.p.dot(self.normal)) / ddotn
            if t > 0:
                return t
        return INF

    def surface_at(self, p):
        """Surface and unit normal (the same everywhere on a plane)."""
        return self.surface, self.normal
class Scene:
    """Everything needed to render: ambient level, one light, and the objects."""

    def __init__(self, ambient, light, objs):
        self.ambient = ambient      # scalar ambient light intensity
        self.light = light
        self.objs = objs
def trace_scene(canvas, view, scene, max_depth):
    """Render `scene` through `view` into `canvas`, one primary ray per pixel."""
    for row in range(canvas.height):
        # Map the pixel row to an image-plane y coordinate (top row = +y).
        y = (-row + 0.5 * (canvas.height - 1)) * view.height / canvas.height
        for col in range(canvas.width):
            x = (col - 0.5 * (canvas.width - 1)) * view.width / canvas.width
            pixel_ray = Ray(view.pos, view.calc_dir(x, y))
            colour = trace_ray(scene, pixel_ray, max_depth)
            canvas.put_pix(col, row, colour)
def trace_ray(scene, ray, depth):
    """Trace `ray` through `scene` and return its RGB colour.

    `depth` limits how many further reflection/transparency bounces may
    recurse from this hit.
    """
    # Find closest intersecting object
    hit_t = INF
    hit_obj = None
    for obj in scene.objs:
        t = obj.intersect(ray)
        if t < hit_t:
            hit_t = t
            hit_obj = obj
    # Check if any objects hit
    if hit_obj is None:
        return RGB(0, 0, 0)
    # Compute location of ray intersection
    point = ray.p + ray.d * hit_t
    surf, surf_norm = hit_obj.surface_at(point)
    # Flip the normal so it always faces the incoming ray.
    if ray.d.dot(surf_norm) > 0:
        surf_norm = -surf_norm
    # Compute reflected ray
    reflected = ray.d - surf_norm * (surf_norm.dot(ray.d) * 2)
    # Ambient light
    col = surf.colour * scene.ambient
    # Diffuse, specular and shadow from light source
    light_vec = scene.light.pos - point
    light_dist = light_vec.length()
    light_vec = light_vec.normalise()
    ndotl = surf_norm.dot(light_vec)
    ldotv = light_vec.dot(reflected)
    if ndotl > 0 or ldotv > 0:
        # Shadow-ray origin is offset by EPS to avoid self-intersection.
        light_ray = Ray(point + light_vec * EPS, light_vec)
        light_col = trace_to_light(scene, light_ray, light_dist)
        if ndotl > 0:
            col += light_col * surf.diffuse * ndotl
        if ldotv > 0:
            # Specular highlight via the spec_idx exponent.
            col += light_col * surf.specular * ldotv ** surf.spec_idx
    # Reflections
    if depth > 0 and surf.reflect > 0:
        col += trace_ray(scene, Ray(point + reflected * EPS, reflected), depth - 1) * surf.reflect
    # Transparency
    if depth > 0 and surf.transp > 0:
        col += trace_ray(scene, Ray(point + ray.d * EPS, ray.d), depth - 1) * surf.transp
    return col
def trace_to_light(scene, ray, light_dist):
    """Light colour reaching along `ray`, attenuated by transparent occluders.

    Any object closer than `light_dist` scales the colour by its
    transparency, so an opaque occluder yields a fully black shadow.
    """
    colour = scene.light.colour
    for obj in scene.objs:
        if obj.intersect(ray) < light_dist:
            colour *= obj.surface.transp
    return colour
class Canvas:
    """A width x height RGB image stored as a flat bytearray, 3 bytes/pixel."""

    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.data = bytearray(3 * width * height)

    def put_pix(self, x, y, c):
        """Store colour `c` (components nominally in [0, 1]) at (x, y), clamped."""
        base = 3 * (y * self.width + x)
        for i, channel in enumerate((c.x, c.y, c.z)):
            self.data[base + i] = min(255, max(0, int(255 * channel)))

    def write_ppm(self, filename):
        """Write the image as a binary PPM (P6) file."""
        with open(filename, 'wb') as f:
            f.write(bytes('P6 %d %d 255\n' % (self.width, self.height), 'ascii'))
            f.write(self.data)
def main(w, h, d):
    """Render the demo scene at w x h pixels with ray depth d; return the Canvas."""
    canvas = Canvas(w, h)
    view = View(32, 32, 64, Vec(0, 0, 50), Vec(1, 0, 0), Vec(0, 1, 0), Vec(0, 0, -1))
    scene = Scene(
        0.5,
        Light(Vec(0, 8, 0), RGB(1, 1, 1), True),
        [
            # Five matte walls of a box, plus three differently-surfaced spheres.
            Plane(Surface.dull(RGB(1, 0, 0)), Vec(-10, 0, 0), Vec(1, 0, 0)),
            Plane(Surface.dull(RGB(0, 1, 0)), Vec(10, 0, 0), Vec(-1, 0, 0)),
            Plane(Surface.dull(RGB(1, 1, 1)), Vec(0, 0, -10), Vec(0, 0, 1)),
            Plane(Surface.dull(RGB(1, 1, 1)), Vec(0, -10, 0), Vec(0, 1, 0)),
            Plane(Surface.dull(RGB(1, 1, 1)), Vec(0, 10, 0), Vec(0, -1, 0)),
            Sphere(Surface.shiny(RGB(1, 1, 1)), Vec(-5, -4, 3), 4),
            Sphere(Surface.dull(RGB(0, 0, 1)), Vec(4, -5, 0), 4),
            Sphere(Surface.transparent(RGB(0.2, 0.2, 0.2)), Vec(6, -1, 8), 4),
        ]
    )
    trace_scene(canvas, view, scene, d)
    return canvas
# For testing
#main(256, 256, 4).write_ppm('rt.ppm')
###########################################################################
# Benchmark interface
# Parameter presets keyed by benchmark size tuples (key semantics are
# defined by the harness); values are (width, height, ray depth) for main().
bm_params = {
    (100, 100): (5, 5, 2),
    (1000, 100): (18, 18, 3),
    (5000, 100): (40, 40, 3),
}
def bm_setup(params):
    # First callable runs the render; second reports w*h*d as the work metric.
    return lambda: main(*params), lambda: (params[0] * params[1] * params[2], None)
| {
"repo_name": "trezor/micropython",
"path": "tests/perf_bench/misc_raytrace.py",
"copies": "2",
"size": "7278",
"license": "mit",
"hash": 6367417923459072000,
"line_mean": 29.0743801653,
"line_max": 98,
"alpha_frac": 0.5299532839,
"autogenerated": false,
"ratio": 3.004954582989265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.951052404798229,
"avg_score": 0.004876763781395278,
"num_lines": 242
} |
INF = 1e30  # "no intersection" sentinel distance
EPS = 1e-6  # tolerance used to avoid self-intersection and near-zero division
class Vec:
    """A 3-component vector; also reused for RGB colours."""

    def __init__(self, x, y, z):
        self.x, self.y, self.z = x, y, z

    def __neg__(self):
        return Vec(-self.x, -self.y, -self.z)

    def __add__(self, rhs):
        return Vec(self.x + rhs.x, self.y + rhs.y, self.z + rhs.z)

    def __sub__(self, rhs):
        return Vec(self.x - rhs.x, self.y - rhs.y, self.z - rhs.z)

    def __mul__(self, rhs):
        # Scalar multiplication only.
        return Vec(self.x * rhs, self.y * rhs, self.z * rhs)

    def length(self):
        return self.dot(self) ** 0.5

    def normalise(self):
        size = self.length()
        return Vec(self.x / size, self.y / size, self.z / size)

    def dot(self, rhs):
        return self.x * rhs.x + self.y * rhs.y + self.z * rhs.z
RGB = Vec
class Ray:
    """A ray: origin point `p` and direction `d`."""

    def __init__(self, p, d):
        self.p = p
        self.d = d
class View:
    """Camera description: image-plane size and depth plus position and basis."""

    def __init__(self, width, height, depth, pos, xdir, ydir, zdir):
        self.width = width
        self.height = height
        self.depth = depth      # distance from the eye to the image plane
        self.pos = pos
        self.xdir = xdir        # image-plane basis vectors
        self.ydir = ydir
        self.zdir = zdir        # viewing direction

    def calc_dir(self, dx, dy):
        """Unit direction through image-plane offset (dx, dy)."""
        ray_dir = self.xdir * dx + self.ydir * dy + self.zdir * self.depth
        return ray_dir.normalise()
class Light:
    """A point light source."""

    def __init__(self, pos, colour, casts_shadows):
        self.pos = pos
        self.colour = colour
        # NOTE(review): casts_shadows is stored but not consulted by
        # trace_to_light — confirm whether shadowing should be conditional.
        self.casts_shadows = casts_shadows
class Surface:
    """Material properties of a scene object."""

    def __init__(self, diffuse, specular, spec_idx, reflect, transp, colour):
        self.diffuse = diffuse      # diffuse reflection coefficient
        self.specular = specular    # specular reflection coefficient
        self.spec_idx = spec_idx    # specular exponent
        self.reflect = reflect      # mirror-reflection fraction
        self.transp = transp        # transparency fraction
        self.colour = colour

    @staticmethod
    def dull(colour):
        """A matte material."""
        return Surface(0.7, 0.0, 1, 0.0, 0.0, colour * 0.6)

    @staticmethod
    def shiny(colour):
        """A glossy, mirror-reflective material."""
        return Surface(0.2, 0.9, 32, 0.8, 0.0, colour * 0.3)

    @staticmethod
    def transparent(colour):
        """A glossy, mostly see-through material."""
        return Surface(0.2, 0.9, 32, 0.0, 0.8, colour * 0.3)
class Sphere:
    """A sphere defined by a centre point and radius."""

    def __init__(self, surface, centre, radius):
        self.surface = surface
        self.centre = centre
        self.radsq = radius ** 2    # only the squared radius is ever needed

    def intersect(self, ray):
        """Distance along `ray` to the nearest hit, or INF if it misses."""
        to_centre = self.centre - ray.p
        b = to_centre.dot(ray.d)
        det = b ** 2 - to_centre.dot(to_centre) + self.radsq
        if det > 0:
            det **= 0.5
            near = b - det
            if near > EPS:
                return near
            far = b + det
            if far > EPS:
                return far
        return INF

    def surface_at(self, v):
        """Surface and outward unit normal at point `v` on the sphere."""
        return self.surface, (v - self.centre).normalise()
class Plane:
    """An infinite plane through `centre` with the given normal direction."""

    def __init__(self, surface, centre, normal):
        self.surface = surface
        self.normal = normal.normalise()
        # Use the normalised normal here too: with the raw `normal` the
        # stored plane offset was scaled wrongly whenever a non-unit normal
        # was passed in.  (All call sites in this file pass unit normals, so
        # their behaviour is unchanged.)
        self.cdotn = centre.dot(self.normal)

    def intersect(self, ray):
        """Distance along `ray` to the plane, or INF when parallel/behind."""
        ddotn = ray.d.dot(self.normal)
        if abs(ddotn) > EPS:
            t = (self.cdotn - ray.p.dot(self.normal)) / ddotn
            if t > 0:
                return t
        return INF

    def surface_at(self, p):
        """Surface and unit normal (the same everywhere on a plane)."""
        return self.surface, self.normal
class Scene:
    """Everything needed to render: ambient level, one light, and the objects."""

    def __init__(self, ambient, light, objs):
        self.ambient = ambient      # scalar ambient light intensity
        self.light = light
        self.objs = objs
def trace_scene(canvas, view, scene, max_depth):
    """Render `scene` through `view` into `canvas`, one primary ray per pixel."""
    for row in range(canvas.height):
        # Map the pixel row to an image-plane y coordinate (top row = +y).
        y = (-row + 0.5 * (canvas.height - 1)) * view.height / canvas.height
        for col in range(canvas.width):
            x = (col - 0.5 * (canvas.width - 1)) * view.width / canvas.width
            pixel_ray = Ray(view.pos, view.calc_dir(x, y))
            colour = trace_ray(scene, pixel_ray, max_depth)
            canvas.put_pix(col, row, colour)
def trace_ray(scene, ray, depth):
    """Trace `ray` through `scene` and return its RGB colour.

    `depth` limits how many further reflection/transparency bounces may
    recurse from this hit.
    """
    # Find closest intersecting object
    hit_t = INF
    hit_obj = None
    for obj in scene.objs:
        t = obj.intersect(ray)
        if t < hit_t:
            hit_t = t
            hit_obj = obj
    # Check if any objects hit
    if hit_obj is None:
        return RGB(0, 0, 0)
    # Compute location of ray intersection
    point = ray.p + ray.d * hit_t
    surf, surf_norm = hit_obj.surface_at(point)
    # Flip the normal so it always faces the incoming ray.
    if ray.d.dot(surf_norm) > 0:
        surf_norm = -surf_norm
    # Compute reflected ray
    reflected = ray.d - surf_norm * (surf_norm.dot(ray.d) * 2)
    # Ambient light
    col = surf.colour * scene.ambient
    # Diffuse, specular and shadow from light source
    light_vec = scene.light.pos - point
    light_dist = light_vec.length()
    light_vec = light_vec.normalise()
    ndotl = surf_norm.dot(light_vec)
    ldotv = light_vec.dot(reflected)
    if ndotl > 0 or ldotv > 0:
        # Shadow-ray origin is offset by EPS to avoid self-intersection.
        light_ray = Ray(point + light_vec * EPS, light_vec)
        light_col = trace_to_light(scene, light_ray, light_dist)
        if ndotl > 0:
            col += light_col * surf.diffuse * ndotl
        if ldotv > 0:
            # Specular highlight via the spec_idx exponent.
            col += light_col * surf.specular * ldotv ** surf.spec_idx
    # Reflections
    if depth > 0 and surf.reflect > 0:
        col += trace_ray(scene, Ray(point + reflected * EPS, reflected), depth - 1) * surf.reflect
    # Transparency
    if depth > 0 and surf.transp > 0:
        col += trace_ray(scene, Ray(point + ray.d * EPS, ray.d), depth - 1) * surf.transp
    return col
def trace_to_light(scene, ray, light_dist):
    """Light colour reaching along `ray`, attenuated by transparent occluders.

    Any object closer than `light_dist` scales the colour by its
    transparency, so an opaque occluder yields a fully black shadow.
    """
    colour = scene.light.colour
    for obj in scene.objs:
        if obj.intersect(ray) < light_dist:
            colour *= obj.surface.transp
    return colour
class Canvas:
    """A width x height RGB image stored as a flat bytearray, 3 bytes/pixel."""

    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.data = bytearray(3 * width * height)

    def put_pix(self, x, y, c):
        """Store colour `c` (components nominally in [0, 1]) at (x, y), clamped."""
        base = 3 * (y * self.width + x)
        for i, channel in enumerate((c.x, c.y, c.z)):
            self.data[base + i] = min(255, max(0, int(255 * channel)))

    def write_ppm(self, filename):
        """Write the image as a binary PPM (P6) file."""
        with open(filename, "wb") as f:
            f.write(bytes("P6 %d %d 255\n" % (self.width, self.height), "ascii"))
            f.write(self.data)
def main(w, h, d):
    """Render the demo scene at w x h pixels with ray depth d; return the Canvas."""
    canvas = Canvas(w, h)
    view = View(32, 32, 64, Vec(0, 0, 50), Vec(1, 0, 0), Vec(0, 1, 0), Vec(0, 0, -1))
    scene = Scene(
        0.5,
        Light(Vec(0, 8, 0), RGB(1, 1, 1), True),
        [
            # Five matte walls of a box, plus three differently-surfaced spheres.
            Plane(Surface.dull(RGB(1, 0, 0)), Vec(-10, 0, 0), Vec(1, 0, 0)),
            Plane(Surface.dull(RGB(0, 1, 0)), Vec(10, 0, 0), Vec(-1, 0, 0)),
            Plane(Surface.dull(RGB(1, 1, 1)), Vec(0, 0, -10), Vec(0, 0, 1)),
            Plane(Surface.dull(RGB(1, 1, 1)), Vec(0, -10, 0), Vec(0, 1, 0)),
            Plane(Surface.dull(RGB(1, 1, 1)), Vec(0, 10, 0), Vec(0, -1, 0)),
            Sphere(Surface.shiny(RGB(1, 1, 1)), Vec(-5, -4, 3), 4),
            Sphere(Surface.dull(RGB(0, 0, 1)), Vec(4, -5, 0), 4),
            Sphere(Surface.transparent(RGB(0.2, 0.2, 0.2)), Vec(6, -1, 8), 4),
        ],
    )
    trace_scene(canvas, view, scene, d)
    return canvas
# For testing
# main(256, 256, 4).write_ppm('rt.ppm')
###########################################################################
# Benchmark interface
# Parameter presets keyed by benchmark size tuples (key semantics are
# defined by the harness); values are (width, height, ray depth) for main().
bm_params = {
    (100, 100): (5, 5, 2),
    (1000, 100): (18, 18, 3),
    (5000, 100): (40, 40, 3),
}
def bm_setup(params):
    # First callable runs the render; second reports w*h*d as the work metric.
    return lambda: main(*params), lambda: (params[0] * params[1] * params[2], None)
| {
"repo_name": "dpgeorge/micropython",
"path": "tests/perf_bench/misc_raytrace.py",
"copies": "15",
"size": "7296",
"license": "mit",
"hash": 852058171284267800,
"line_mean": 27.2790697674,
"line_max": 98,
"alpha_frac": 0.5286458333,
"autogenerated": false,
"ratio": 3.0123864574731627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
""" A simple recurrent neural network that detects parity for arbitrary sequences. """
__author__ = 'Tom Schaul (tom@idsia.ch)'
from datasets import ParityDataSet #@UnresolvedImport
from pybrain.supervised.trainers.backprop import BackpropTrainer
from pybrain.structure import RecurrentNetwork, LinearLayer, TanhLayer, BiasUnit, FullConnection
def buildParityNet():
    """Build a tiny recurrent net for the parity task, with hand-set weights."""
    net = RecurrentNetwork()
    net.addInputModule(LinearLayer(1, name = 'i'))
    net.addModule(TanhLayer(2, name = 'h'))
    net.addModule(BiasUnit('bias'))
    net.addOutputModule(TanhLayer(1, name = 'o'))
    net.addConnection(FullConnection(net['i'], net['h']))
    net.addConnection(FullConnection(net['bias'], net['h']))
    net.addConnection(FullConnection(net['bias'], net['o']))
    net.addConnection(FullConnection(net['h'], net['o']))
    # The output feeds back into the hidden layer so the net can carry the
    # running parity across time steps.
    net.addRecurrentConnection(FullConnection(net['o'], net['h']))
    net.sortModules()
    # Hand-picked weights, scaled up by 10 to saturate the tanh units.
    p = net.params
    p[:] = [-0.5, -1.5, 1, 1, -1, 1, 1, -1, 1]
    p *= 10.
    return net
def evalRnnOnSeqDataset(net, verbose = False, silent = False):
""" evaluate the network on all the sequences of a dataset. """
r = 0.
samples = 0.
for seq in DS:
net.reset()
for i, t in seq:
res = net.activate(i)
if verbose:
print t, res
r += sum((t-res)**2)
samples += 1
if verbose:
print '-'*20
r /= samples
if not silent:
print 'MSE:', r
return r
if __name__ == "__main__":
    N = buildParityNet()
    DS = ParityDataSet()
    # 1) evaluate the hand-set weights
    evalRnnOnSeqDataset(N, verbose = True)
    print '(preset weights)'
    # 2) random weights, for comparison
    N.randomize()
    evalRnnOnSeqDataset(N)
    print '(random weights)'
    # Backprop improves the network performance, and sometimes even finds the global optimum.
    N.reset()
    bp = BackpropTrainer(N, DS, verbose = True)
    bp.trainEpochs(5000)
    evalRnnOnSeqDataset(N)
    print '(backprop-trained weights)'
| {
"repo_name": "daanwierstra/pybrain",
"path": "examples/backprop/parityrnn.py",
"copies": "1",
"size": "1983",
"license": "bsd-3-clause",
"hash": -4245234454435755500,
"line_mean": 30.4761904762,
"line_max": 96,
"alpha_frac": 0.6212808875,
"autogenerated": false,
"ratio": 3.4667832167832167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4588064104283217,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Tom Schaul, tom@idsia.ch and Daan Wierstra'
from datasets import AnBnCnDataSet #@UnresolvedImport
from pybrain.supervised import BackpropTrainer
from pybrain.structure import FullConnection, RecurrentNetwork, TanhLayer, LinearLayer, BiasUnit
def testTraining():
    """Train a small recurrent network on the AnBnCn sequential dataset."""
    # the AnBnCn dataset (sequential)
    d = AnBnCnDataSet()
    # build a recurrent network to be trained
    hsize = 2
    n = RecurrentNetwork()
    # NOTE(review): no input module is added here — confirm that the task is
    # purely generative (targets only) as the dataset seems to imply.
    n.addModule(TanhLayer(hsize, name = 'h'))
    n.addModule(BiasUnit(name = 'bias'))
    n.addOutputModule(LinearLayer(1, name = 'out'))
    n.addConnection(FullConnection(n['bias'], n['h']))
    n.addConnection(FullConnection(n['h'], n['out']))
    # Hidden-to-hidden recurrence carries state across time steps.
    n.addRecurrentConnection(FullConnection(n['h'], n['h']))
    n.sortModules()
    # initialize the backprop trainer and train
    t = BackpropTrainer(n, learningrate = 0.1, momentum = 0.0, verbose = True)
    t.trainOnDataset(d, 200)
    # the resulting weights are in the network:
    print 'Final weights:', n.params
if __name__ == '__main__':
    testTraining()  # run the demo when executed as a script
"repo_name": "daanwierstra/pybrain",
"path": "examples/backprop/backpropanbncn.py",
"copies": "1",
"size": "1135",
"license": "bsd-3-clause",
"hash": -531837281751026050,
"line_mean": 33.4242424242,
"line_max": 96,
"alpha_frac": 0.6925110132,
"autogenerated": false,
"ratio": 3.3579881656804735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9435458756145195,
"avg_score": 0.023008084547055628,
"num_lines": 33
} |
# A simple recursive descent parser that implements an integer
# calculator. This parser suffers from an associativity problem
# due to using BNF-y recursion for rules like <term> ad <expr>
#
# BNF:
#
# <stmt> : set <id> = <expr>
# | <expr>
# <expr> : <term> + <expr>
# | <term> - <expr>
# | <term>
# <term> : <factor> * <term>
# | <factor> / <term>
# | <factor>
# <factor> : <id>
# | <number>
# | ( <expr> )
#
# <id> : [a-zA-Z_]\w+
# <number> : \d+
#
# This grammar is LL(1), suitable for predictive parsing.
#
#-----------------------------------------------
# Eli Bendersky (eliben@gmail.com)
# License: this code is in the public domain
# Last modified: March 2009
#-----------------------------------------------
#
try:
import eblib.lexer as lexer
except ImportError:
import lexer
class ParseError(Exception):
    """Raised when lexing or parsing of a calculator line fails."""
class CalcParser(object):
    """Recursive-descent parser and evaluator for the integer calculator
    grammar in the header comment.

    NOTE(review): because <expr> and <term> recurse on the right, '-' and
    '/' associate to the right, so 5 - 1 - 2 evaluates as 5 - (1 - 2).
    """
    def __init__(self):
        # (regex, token type) pairs for the lexer; 'set' must precede
        # IDENTIFIER so the keyword wins.
        lex_rules = [
            ('set', 'SET'),
            ('\d+', 'NUMBER'),
            ('[a-zA-Z_]\w*', 'IDENTIFIER'),
            ('\+', '+'),
            ('\-', '-'),
            ('\*', '*'),
            ('\/', '/'),
            ('\(', '('),
            ('\)', ')'),
            ('=', '='),
        ]
        self.lexer = lexer.Lexer(lex_rules, skip_whitespace=True)
        self._clear()
    def parse(self, line):
        """ Parse a new line of input and return its result.
        Variables defined in previous calls to parse can be
        used in following ones.
        ParseError can be raised in case of errors.
        """
        self.lexer.input(line)
        self._get_next_token()
        return self._stmt()
    def _clear(self):
        # cur_token holds the one-token lookahead; var_table maps
        # variable names to their integer values.
        self.cur_token = None
        self.var_table = {}
    def _error(self, msg):
        raise ParseError(msg)
    def _get_next_token(self):
        # Advance the lookahead; a Token of type None marks end of input.
        try:
            self.cur_token = self.lexer.token()
            if self.cur_token is None:
                self.cur_token = lexer.Token(None, None, None)
        except lexer.LexerError, e:
            self._error('Lexer error at position %d' % e.pos)
    def _match(self, type):
        """ The 'match' primitive of RD parsers.
        * Verifies that the current token is of the given type
        * Returns the value of the current token
        * Reads in the next token
        """
        if self.cur_token.type == type:
            val = self.cur_token.val
            self._get_next_token()
            return val
        else:
            self._error('Unmatched %s' % type)
    # The toplevel rule of the parser.
    #
    # <stmt> : set <id> = <expr>
    # | <expr>
    #
    def _stmt(self):
        if self.cur_token.type is None:
            return ''
        elif self.cur_token.type == 'SET':
            self._match('SET')
            id_name = self._match('IDENTIFIER')
            self._match('=')
            expr_val = self._expr()
            self.var_table[id_name] = expr_val
            return expr_val
        else:
            return self._expr()
    # <expr> : <term> + <expr>
    # | <term> - <expr>
    # | <term>
    #
    def _expr(self):
        lval = self._term()
        if self.cur_token.type == '+':
            self._match('+')
            op = lambda a, b: a + b
        elif self.cur_token.type == '-':
            self._match('-')
            op = lambda a, b: a - b
        else:
            # debug trace left in by the author
            print 'returning lval = %s' % lval
            return lval
        rval = self._expr()
        # debug trace left in by the author
        print 'lval = %s, rval = %s, res = %s' % (
            lval, rval, op(lval, rval))
        return op(lval, rval)
    # <term> : <factor> * <term>
    # | <factor> / <term>
    # | <factor>
    #
    def _term(self):
        lval = self._factor()
        if self.cur_token.type == '*':
            self._match('*')
            op = lambda a, b: a * b
        elif self.cur_token.type == '/':
            self._match('/')
            op = lambda a, b: a / b
        else:
            return lval
        rval = self._term()
        return op(lval, rval)
    # <factor> : <id>
    # | <number>
    # | ( <expr> )
    #
    def _factor(self):
        if self.cur_token.type == '(':
            self._match('(')
            val = self._expr()
            self._match(')')
            return val
        elif self.cur_token.type == 'NUMBER':
            return int(self._match('NUMBER'))
        elif self.cur_token.type == 'IDENTIFIER':
            id_name = self._match('IDENTIFIER')
            try:
                val = self.var_table[id_name]
            except KeyError:
                self._error('Unknown identifier `%s`' % id_name)
            return val
        else:
            self._error('Invalid factor `%s`' % self.cur_token.val)
if __name__ == '__main__':
p = CalcParser()
print p.parse('5 - 1 - 2')
print p.parse('set x = 5')
print p.parse('set y = 2 * x')
print p.parse('(5+y)*3 + 3')
| {
"repo_name": "evandrix/Splat",
"path": "doc/parser/rd_parser_bnf.py",
"copies": "1",
"size": "5304",
"license": "mit",
"hash": -1430179665722663400,
"line_mean": 27.2127659574,
"line_max": 67,
"alpha_frac": 0.4283559578,
"autogenerated": false,
"ratio": 3.837916063675832,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4766272021475832,
"avg_score": null,
"num_lines": null
} |
# A simple recursive descent parser that implements an integer
# calculator. This parser suffers from an associativity problem
# due to using BNF-y recursion for rules like <term> and <expr>
#
# BNF:
#
# <stmt> : set <id> = <expr>
# | <expr>
# <expr> : <term> + <expr>
# | <term> - <expr>
# | <term>
# <term> : <factor> * <term>
# | <factor> / <term>
# | <factor>
# <factor> : <id>
# | <number>
# | ( <expr> )
#
# <id> : [a-zA-Z_]\w+
# <number> : \d+
#
# This grammar is LL(1), suitable for predictive parsing.
#
#-----------------------------------------------
# Eli Bendersky (eliben@gmail.com)
# License: this code is in the public domain
# Last modified: March 2009
#-----------------------------------------------
#
try:
import eblib.lexer as lexer
except ImportError:
import lexer
class ParseError(Exception):
    """Raised when lexing or parsing of a calculator line fails."""
class CalcParser(object):
    """Recursive-descent parser/evaluator for the integer calculator
    grammar in the header comment.  Right recursion in <expr>/<term>
    makes '-' and '/' right-associative (5 - 1 - 2 == 5 - (1 - 2)).
    """
    def __init__(self):
        # (regex, token type) pairs for the lexer.
        lex_rules = [
            ('set', 'SET'),
            ('\d+', 'NUMBER'),
            ('[a-zA-Z_]\w*', 'IDENTIFIER'),
            ('\+', '+'),
            ('\-', '-'),
            ('\*', '*'),
            ('\/', '/'),
            ('\(', '('),
            ('\)', ')'),
            ('=', '='),
        ]
        self.lexer = lexer.Lexer(lex_rules, skip_whitespace=True)
        self._clear()
    def parse(self, line):
        """ Parse a new line of input and return its result.
        Variables defined in previous calls to parse can be
        used in following ones.
        ParseError can be raised in case of errors.
        """
        self.lexer.input(line)
        self._get_next_token()
        return self._stmt()
    def _clear(self):
        self.cur_token = None
        self.var_table = {}
    def _error(self, msg):
        raise ParseError(msg)
    def _get_next_token(self):
        # A Token of type None marks end of input.
        try:
            self.cur_token = self.lexer.token()
            if self.cur_token is None:
                self.cur_token = lexer.Token(None, None, None)
        except lexer.LexerError, e:
            self._error('Lexer error at position %d' % e.pos)
    def _match(self, type):
        """ The 'match' primitive of RD parsers.
        * Verifies that the current token is of the given type
        * Returns the value of the current token
        * Reads in the next token
        """
        if self.cur_token.type == type:
            val = self.cur_token.val
            self._get_next_token()
            return val
        else:
            self._error('Unmatched %s' % type)
    # The toplevel rule of the parser.
    #
    # <stmt> : set <id> = <expr>
    # | <expr>
    #
    def _stmt(self):
        if self.cur_token.type is None:
            return ''
        elif self.cur_token.type == 'SET':
            self._match('SET')
            id_name = self._match('IDENTIFIER')
            self._match('=')
            expr_val = self._expr()
            self.var_table[id_name] = expr_val
            return expr_val
        else:
            return self._expr()
    # <expr> : <term> + <expr>
    # | <term> - <expr>
    # | <term>
    #
    def _expr(self):
        lval = self._term()
        if self.cur_token.type == '+':
            self._match('+')
            op = lambda a, b: a + b
        elif self.cur_token.type == '-':
            self._match('-')
            op = lambda a, b: a - b
        else:
            # debug trace left in by the author
            print 'returning lval = %s' % lval
            return lval
        rval = self._expr()
        # debug trace left in by the author
        print 'lval = %s, rval = %s, res = %s' % (
            lval, rval, op(lval, rval))
        return op(lval, rval)
    # <term> : <factor> * <term>
    # | <factor> / <term>
    # | <factor>
    #
    def _term(self):
        lval = self._factor()
        if self.cur_token.type == '*':
            self._match('*')
            op = lambda a, b: a * b
        elif self.cur_token.type == '/':
            self._match('/')
            op = lambda a, b: a / b
        else:
            return lval
        rval = self._term()
        return op(lval, rval)
    # <factor> : <id>
    # | <number>
    # | ( <expr> )
    #
    def _factor(self):
        if self.cur_token.type == '(':
            self._match('(')
            val = self._expr()
            self._match(')')
            return val
        elif self.cur_token.type == 'NUMBER':
            return int(self._match('NUMBER'))
        elif self.cur_token.type == 'IDENTIFIER':
            id_name = self._match('IDENTIFIER')
            try:
                val = self.var_table[id_name]
            except KeyError:
                self._error('Unknown identifier `%s`' % id_name)
            return val
        else:
            self._error('Invalid factor `%s`' % self.cur_token.val)
if __name__ == '__main__':
p = CalcParser()
print p.parse('5 - 1 - 2')
print p.parse('set x = 5')
print p.parse('set y = 2 * x')
print p.parse('(5+y)*3 + 3')
| {
"repo_name": "shaoguangleo/code-for-blog",
"path": "2009/py_rd_parser_example/rd_parser_bnf.py",
"copies": "12",
"size": "5481",
"license": "unlicense",
"hash": 8007292128277706000,
"line_mean": 27.2127659574,
"line_max": 67,
"alpha_frac": 0.4145228973,
"autogenerated": false,
"ratio": 3.86803105151729,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A simple regression problem using integer data."""
import random
import numpy as np
from pyshgp.gp.selection import Lexicase
from pyshgp.gp.estimators import PushEstimator
from pyshgp.gp.genome import GeneSpawner
from pyshgp.push.instruction_set import InstructionSet
def target_function(a, b):
    """Generate a training data point: the target value is 2*a*b + b**2."""
    return b * (2 * a + b)
# Training data: 25 rows of two integer features, labelled with the target.
X = np.arange(50).reshape(-1, 2)
y = np.array([[target_function(x[0], x[1])] for x in X])
# Restrict the Push instruction set to integer instructions only.
instruction_set = (
    InstructionSet()
    .register_core_by_stack({"int"}, exclude_stacks={"str", "exec", "code"})
)
spawner = GeneSpawner(
    n_inputs=2,
    instruction_set=instruction_set,
    literals=[],
    # Ephemeral random constants: integers in [0, 10].
    erc_generators=[
        lambda: random.randint(0, 10),
    ]
)
# Epsilon-lexicase selection for the regression problem.
ep_lex_sel = Lexicase(epsilon=True)
if __name__ == '__main__':
    est = PushEstimator(
        population_size=300,
        max_generations=50,
        simplification_steps=500,
        spawner=spawner,
        selector=ep_lex_sel,
        verbose=2
    )
    est.fit(X=X, y=y)
| {
"repo_name": "erp12/pyshgp",
"path": "examples/integer_regression.py",
"copies": "1",
"size": "1036",
"license": "mit",
"hash": 8999730877668130000,
"line_mean": 20.5833333333,
"line_max": 76,
"alpha_frac": 0.638996139,
"autogenerated": false,
"ratio": 3.158536585365854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9297532724365853,
"avg_score": 0,
"num_lines": 48
} |
""" A simple restful webservice to provide access to the wiki.db"""
import json
from bottle import Bottle, run, response, static_file, redirect
from dbfunctions import Wikidb
api = Bottle()
db = Wikidb()
@api.route('/static/<filepath:path>')
def static(filepath):
    """Serve files from the local ./static directory."""
    return static_file(filepath, root='./static')
@api.route('/api/search/<term>')
def search(term):
    """Return wiki entries matching *term* as a JSON document."""
    response.headers['Content-Type'] = 'application/json'
    response.headers['Cache-Control'] = 'no-cache'
    return json.dumps(db.search(term))
@api.route('/api/detail/<subject>')
def details(subject):
    """Return the stored detail record for *subject* as a JSON document."""
    response.headers['Content-Type'] = 'application/json'
    response.headers['Cache-Control'] = 'no-cache'
    return json.dumps(db.detail(subject))
if __name__ == '__main__':
    # Demonstrates the truly awesome awesomplete drawing data right from the search API above.
    @api.route('/search')
    def autocompletesearch():
        return redirect('/static/autocomplete.html')
    # Seed the database with a few demo articles before serving.
    db.put('this is an article', 'this is the body of the article.')
    db.put('this is another article', 'this is the body of the article.')
    db.put('this is a third article', 'this is the body of the article.')
    run(api, host='localhost',port=8080, debug=True)
| {
"repo_name": "mtik00/bottle-wiki",
"path": "wikiapi.py",
"copies": "1",
"size": "1234",
"license": "mit",
"hash": 6322702629511475000,
"line_mean": 28.380952381,
"line_max": 91,
"alpha_frac": 0.6896272285,
"autogenerated": false,
"ratio": 3.535816618911175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.958538034227096,
"avg_score": 0.028012701028042913,
"num_lines": 42
} |
""" A simple restful webservice to provide access to the wiki.db"""
import json
from bottle import run, response, static_file, redirect, request, route
from db.dbfunctions import Wikidb
db = Wikidb()
@route('/static/<filepath:path>')
def static(filepath):
    """Serve files from the local ./static directory."""
    return static_file(filepath, root='./static')
@route('/api/search/<term>')
def search(term):
    """Return wiki entries matching *term* as a JSON document."""
    response.headers['Content-Type'] = 'application/json'
    response.headers['Cache-Control'] = 'no-cache'
    return json.dumps(db.search(term))
@route('/api/detail/<subject>')
def details(subject):
    """Return the stored detail record for *subject* as a JSON document."""
    response.headers['Content-Type'] = 'application/json'
    response.headers['Cache-Control'] = 'no-cache'
    return json.dumps(db.detail(subject))
@route('/api/put/<subject>/<body>')
def post(subject, body):
    """ This is only a placeholder for a real post method."""
    # Stores the entry, then echoes the stored detail back as JSON.
    response.headers['Content-Type'] = 'application/json'
    response.headers['Cache-Control'] = 'no-cache'
    db.put(subject=subject, body=body)
    return json.dumps(db.detail(subject))
@route('/api/tag/<subject>/<tag>')
def addtag(subject, tag):
    """Add tag to given subject"""
    response.headers['Content-Type'] = 'application/json'
    response.headers['Cache-Control'] = 'no-cache'
    return json.dumps(db.tag(tag, subject))
@route('/api/putjson', method='POST')
def jsonget():
    """Create a wiki entry from a JSON POST body.

    Expected keys: 'subject', 'body', optional 'email' and 'tags' (a
    list of tag strings).  NOTE(review): like the original handler this
    returns no response body to the client.
    """
    data = dict(request.json)
    db.put(subject=data.get('subject'), body=data.get('body'), email=data.get('email', 'anonymous'))
    if 'tags' in data and 'subject' in data:
        # Membership was just checked, so index directly; the stray
        # debug print of the request payload has been removed.
        subject = data['subject']
        for t in data['tags']:
            db.tag(t, subject)
if __name__ == '__main__':
    # Demonstrates the truly awesome awesomplete drawing data right from the search API above.
    @route('/search')
    def autocompletesearch():
        return redirect('/static/autocomplete.html')
    # Seed the database with a few demo articles before serving.
    db.put('this is an article', 'this is the body of the article.')
    db.put('this is another article', 'this is the body of the article.')
    db.put('this is a third article', 'this is the body of the article.')
    run(host='localhost',port=8080, debug=True)
| {
"repo_name": "pysprings/bottle-wiki",
"path": "wikiapi.py",
"copies": "1",
"size": "2193",
"license": "mit",
"hash": 8602430130283323000,
"line_mean": 32.265625,
"line_max": 100,
"alpha_frac": 0.6484268126,
"autogenerated": false,
"ratio": 3.5658536585365854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4714280471136585,
"avg_score": null,
"num_lines": null
} |
"""A simpler interface to netCDF files.
"""
from pycdf import *
import numpy as N
import os.path
CREATE = NC.CREATE|NC.WRITE|NC.BIT64_OFFSET
def get_or_def_dim(ncf, name, length):
    """Return the id of dimension *name* in *ncf*, defining it (with the
    given length) first if it does not exist yet.

    NOTE(review): an existing dimension is reused even if its length
    differs from *length* -- confirm this is intended.
    """
    # The original bound the dict value to a local called `len`,
    # shadowing the builtin without ever using the value.
    if name in ncf.dimensions():
        return ncf.inq_dimid(name)
    return ncf.def_dim(name, length)
class Positions(object):
    """Abstract marker base class for position specifications."""
class Data(object):
    """Wraps an ndarray whose trailing *rank* axes form the per-position
    item (scalar/vector/matrix) and whose leading axes index positions."""
    _default_rank=None
    def __init__(self, data, rank=None):
        if rank is None:
            rank = self._default_rank
            pass
        assert rank is not None
        assert data.ndim > rank
        self.rank = rank
        # suffix used in the 'field' attribute written to the file
        self.rankstr = ['', ', vector', ', matrix'][rank]
        # Split data.shape into position axes (shape) and item axes
        # (itemshape): the last `rank` axes are the item.
        itemshape = []
        shape = list(data.shape)
        for i in range(rank):
            itemshape.insert(0, shape.pop())
            continue
        self.shape = tuple(shape)
        self.itemshape = tuple(itemshape)
        self.size = N.multiply.reduce(shape)
        self.ndim = len(shape)
        self.data = data
        return
    def define(self, ncf, name):
        # Define one netCDF dimension per axis, then the data variable.
        ncdims = []
        for i, n in enumerate(self.shape):
            ncdims.append(get_or_def_dim(ncf, name+'_values_n%d' % i, n))
            continue
        for i,n in enumerate(self.itemshape):
            ncdims.append(get_or_def_dim(ncf, name+'_values_r%d' % i, n))
            continue
        return ncf.def_var(name+'_values', NC.FLOAT, ncdims)
    def nameString(self, name):
        return name + self.rankstr
    def writeto(self, var):
        # data is stored as 32-bit floats
        var.put(self.data.astype(N.float32))
        return
    pass
class ScalarData(Data):
    # rank 0: one value per position
    _default_rank = 0
    pass
class VectorData(Data):
    # rank 1: one vector per position
    _default_rank = 1
    pass
class MatrixData(Data):
    # rank 2: one matrix per position
    _default_rank = 2
    pass
class IrregularPositions(Positions):
    """Explicit (n, dim) array of sample locations for scattered data."""
    def __init__(self, dim, positions):
        assert dim in [1,2,3]
        self.dim = dim
        positions = N.atleast_2d(positions)
        assert positions.ndim == 2
        assert positions.shape[1] == dim
        self.size = positions.shape[0]
        self.shape = (self.size,)
        self.ndim = 1
        self.data = self.positions = positions
        return
    def define(self, ncf, basename):
        # One dimension for the point count, one for the spatial dim.
        sizeDim = get_or_def_dim(ncf, basename+'_values_n0', self.size)
        dimDim = get_or_def_dim(ncf, basename+'_dim', self.dim)
        return ncf.def_var(self.positionString(basename),
                           NC.FLOAT, (sizeDim, dimDim))
    @staticmethod
    def positionString(basename):
        return basename+'_locations'
    def writeto(self, var):
        var.put(self.positions.astype(N.float32))
        return
    pass
class RegularPositions(Positions):
    """Positions given as the cartesian product of per-axis point lists."""
    def __init__(self, axes):
        dim = len(axes)
        assert dim in [1,2,3]
        self.dim = dim
        self.axes = axes
        # Tell each axis its overall dimensionality and its index.
        for i, ax in enumerate(axes):
            ax.dim = dim
            ax.aID = i
            continue
        return
    def define(self, ncf, basename):
        return [a.define(ncf, basename)
                for i, a in enumerate(self.axes)]
    def positionString(self, basename):
        # Build the 'var, product, compact; ...' positions description.
        ans = ''
        for i in range(self.dim):
            ans += self.axes[i].mkvarname(basename)
            if self.dim>1:
                ans += ', product'
                pass
            ans += self.axes[i].regular
            if i < (self.dim - 1):
                ans += '; '
            continue
        return ans
    def writeto(self, vars):
        for i in range(self.dim):
            self.axes[i].writeto(vars[i])
        return
    pass
class Axis(object):
    """Base class for one axis of a RegularPositions grid."""
    def mkvarname(self, basename):
        return basename+'_axis_%d' % self.aID
    def define(self, ncf, basename):
        # Shared dims: number of axes, and this axis's point count.
        dimDim = get_or_def_dim(ncf, basename+'_naxes', self.dim)
        sizeDim = get_or_def_dim(ncf, self.mkdimname(basename), self.n)
        return ncf.def_var(self.mkvarname(basename), NC.FLOAT, (sizeDim, dimDim))
    pass
class RegularAxis(Axis):
    """Evenly spaced axis, stored compactly as (origin, delta)."""
    regular = ', compact'
    def __init__(self, origin=0., delta=1., dim=None, aID=None):
        self.origin = origin
        self.delta = delta
        # Compact representation always uses two rows: origin and delta.
        self.n = 2
        self.dim = dim
        self.aID = aID
        return
    def mkdimname(self, basename):
        return basename+'compact_dim'
    def writeto(self, var):
        pos = N.zeros((self.n, self.dim), dtype=N.float32)
        pos[0, self.aID] = self.origin
        pos[1, self.aID] = self.delta
        var.put(pos)
        return
    pass
class IrregularAxis(Axis):
    """Axis with explicitly listed points."""
    regular = ''
    def __init__(self, points, dim=None, aID=None):
        # has to be floats not doubles
        points = N.array(points, dtype=N.float32).squeeze()
        points = N.atleast_1d(points)
        # NOTE(review): atleast_1d followed by this assert rejects 1-D
        # input; possibly atleast_2d was intended -- confirm with callers.
        assert points.ndim == 2
        assert len(points) >= 1
        self.n = points.shape[0]
        self.dim = points.shape[1]
        self.points = points
        self.aID = aID
        return
    def mkdimname(self, basename):
        return self.mkvarname(basename)+'_len'
    def writeto(self, var):
        var.put(self.points)
        return
    pass
class Field(object):
    """A named field: a Positions instance plus keyword Data values."""
    def __init__(self, posns, **kwargs):
        self.name = kwargs.pop('name', 'data')
        self.positions = posns
        assert self.checkdict(kwargs, lambda key, val: isinstance(val, Data)),\
            "All keyword args must be Data istances"
        self.dataDict = kwargs
        return
    def writeto(self, ncf):
        """Define and write positions and all data variables into *ncf*."""
        #ncf = CDF(filename, mode=CREATE)
        ncf.definemode()
        pos = self.positions.define(ncf, self.name)
        datavars = {}
        for dname, d in self.dataDict.iteritems():
            fullvarname = "%s_%s" % (self.name, dname)
            dat = d.define(ncf, fullvarname)
            # OpenDX-style attributes tying data to its positions.
            dat.field = d.nameString(fullvarname)
            dat.positions = self.positions.positionString(self.name)
            datavars[dname] = dat
            continue
        ncf.enddef()
        self.positions.writeto(pos)
        [d.writeto(datavars[dname]) for dname, d in self.dataDict.iteritems()]
        return
    @classmethod
    def checkdict(cls, d, condition):
        """Return True iff condition(key, value) holds for every item of d."""
        for key, val in d.iteritems():
            if condition(key, val):
                # we're ok here
                pass
            else:
                # test fails
                return False
            continue
        return True
    pass
class Unconnected(Field):
    """Field on scattered (IrregularPositions) sample points."""
    def __init__(self, posns, **kwargs):
        assert isinstance(posns, IrregularPositions)
        Field.__init__(self, posns, **kwargs)
        assert self.checkdict(self.dataDict,
                              lambda key,val: val.ndim == 1), \
                              "data must be a list of positions"
        assert self.checkdict(self.dataDict,
                              lambda key,val: val.size == posns.size), \
                              "data & posns must have same size"
        return
    pass
class Connected(Field):
    """Field on a regular grid (RegularPositions)."""
    def __init__(self, posns, **kwargs):
        assert isinstance(posns, RegularPositions)
        Field.__init__(self, posns, **kwargs)
        return
    pass
class LoadedData(object):
    """Plain record pairing a data-field name with its variable name and
    loaded values."""
    def __init__(self, dname, vname, v):
        """Store the data name, the netCDF variable name and the values."""
        self.dname = dname
        self.vname = vname
        self.v = v
class LoadedField(object):
    """A field reconstructed from file: positions plus named Data entries.
    Data entries are also reachable as attributes (f.<dataname>)."""
    def __init__(self, name):
        self.name = name
        self.positions = None
        self.dataDict = {}
        return
    def __getattribute__(self, name):
        # Fall back to a dataDict lookup for unknown attribute names.
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            dd = object.__getattribute__(self,'dataDict')
            if name in dd:
                return dd[name]
            raise
        return
    pass
class LoadedFields(object):
    """Reads an .ncdx file back into LoadedField objects, keyed by field
    name and reachable as attributes on this object."""
    def __init__(self, ncfName):
        self.ncf = CDF(ncfName)
        vars = self.ncf.variables()
        dims = self.ncf.dimensions()
        self.fields = {}
        for vname, vinfo in vars.iteritems():
            v = self.ncf.var(vname)
            attr = v.attributes()
            if 'field' in attr:
                # it's a data field
                splitFieldAttr = attr['field'].split(',')
                try:
                    rankStr = splitFieldAttr[1].strip()
                except IndexError:
                    rankStr = 'scalar'
                fname, dname = splitFieldAttr[0].split('_')
                try:
                    f = self.fields[fname]
                except KeyError:
                    f = self.fields[fname] = LoadedField(fname)
                    pass
                f.dataDict[dname] = Data(v.get(),
                                         rank={'scalar': 0,
                                               'vector': 1,
                                               'matrix': 2}[rankStr])
                if f.positions is not None:
                    continue
                # First data variable of this field: decode positions too.
                posdesc = attr['positions']
                if 'product' in posdesc:
                    # connected
                    axes = []
                    for dimdesc in posdesc.split(';'):
                        parts = dimdesc.split(',')
                        # NOTE(review): aID is a *string* here (split of
                        # 'name_axis_N') yet is used below as an array
                        # index -- looks like a latent bug; confirm.
                        aID = parts[0].split('_')[2]
                        dimvar = self.ncf.var(parts[0].strip())
                        dimdat = dimvar.get()
                        if 'compact' in parts:
                            origin = dimdat[0,aID]
                            delta = dimdat[1,aID]
                            axes.append(RegularAxis(origin,delta))
                        else:
                            axes.append(IrregularAxis(dimdat))
                        pass
                        continue
                    pos = RegularPositions(axes)
                else:
                    # unconnected
                    posvar = self.ncf.var(posdesc.strip())
                    pos = IrregularPositions(dims[posvar.dimensions()[1]],
                                             posvar.get())
                    pass
                f.positions = pos
                pass
            continue
        return
    def __getattribute__(self, name):
        # Unknown attribute names fall back to the loaded fields dict.
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            f = object.__getattribute__(self,'fields')
            if name in f:
                return f[name]
            raise
        return
    pass
class Series(object):
    """A series of .ncdx files in one directory, ordered by a series
    variable (e.g. time) and catalogued in an 'index' file of
    (filename, value) rows."""
    def __init__(self, dir, **kwargs):
        #mode='r', seriesvar='time', template='%.9d', clobber=False):
        try:
            func = {
                'r': self.openForRead,
                'w': self.create,
                'a': self.openForWrite
            }[kwargs.pop('mode', 'r')]
        except KeyError:
            raise ValueError('Invalid mode specified')
        func(dir, **kwargs)
        return
    def openForRead(self, dir, **kwargs):
        if not os.path.exists(dir):
            raise IOError('Series directory does not exist: %s' % dir)
        self.load(dir)
        return
    def openForWrite(self, dir, seriesvar='time', template='%.9d'):
        # Append mode: load the existing series, or create a fresh one.
        if not os.path.exists(dir):
            self.create(dir, seriesvar, template)
        else:
            self.load(dir)
        return
    def load(self, dir):
        """Read the index file; its '# path <var>' header names the
        series variable."""
        self.dir = dir
        self.indexfile = os.path.join(dir, 'index')
        if not os.path.exists(self.indexfile):
            raise IOError('Series index does not exist: %s' % self.indexfile)
        header = file(self.indexfile).readline()
        if header[0] == '#':
            _, self.seriesvar = header[1:].split()
        else:
            self.seriesvar = 'time'
            pass
        # NOTE(review): eval() on index values trusts the index file.
        self.index = [[el[0], eval(el[1])] for el in N.loadtxt(self.indexfile, object)]
        return
    def create(self, dir, seriesvar='time', template='%.9d'):
        if os.path.exists(dir):
            #clobber it
            import shutil
            shutil.rmtree(dir)
            pass
        self.dir = dir
        os.mkdir(dir)
        self.indexfile = os.path.join(dir, 'index')
        file(self.indexfile, 'w').write('# path %s\n' % seriesvar)
        self.seriesvar = seriesvar
        self.template = template
        self.index = []
        return
    def nextFile(self, var):
        """Create the next file in the series and record it in the index."""
        filebase = (self.template % var) + '.ncdx'
        filename = os.path.join(self.dir, filebase)
        self.index.append([filebase, var])
        file(self.indexfile, 'a').write('%s %s\n' % (filebase, repr(var)))
        ncf = CDF(filename, mode=CREATE)
        return ncf
    def append(self, var, fields):
        ncf = self.nextFile(var)
        for f in fields:
            f.writeto(ncf)
            continue
        return
    def __len__(self):
        return len(self.index)
    def __getitem__(self, key):
        # NOTE(review): if key sorts after every entry, bisect_left
        # returns len(index) and self.index[i] raises a bare IndexError
        # instead of the message below -- confirm intended behaviour.
        i = bisect_left(self.index, key)
        if self.index[i][1] == key:
            return LoadedFields(os.path.join(self.dir, self.index[i][0]))
        else:
            raise IndexError('Key "%s" not in index of "%s"' % (repr(key), self.dir))
        return
    def __iter__(self):
        for k in self.keys():
            yield self[k]
    def __contains__(self, key):
        # NOTE(review): same out-of-range issue as __getitem__ -- this
        # raises IndexError instead of returning False for large keys.
        i = bisect_left(self.index, key)
        if self.index[i][1] == key:
            return True
        else:
            return False
        return
    def keys(self):
        return [el[1] for el in self.index]
    pass
def bisect_left(a, x):
    """Leftmost insertion point for x among the second elements of the
    sorted (name, key) pairs in a."""
    low, high = 0, len(a)
    while low < high:
        middle = (low + high) // 2
        if x <= a[middle][1]:
            high = middle
        else:
            low = middle + 1
    return low
def bisect_right(a, x):
    """Rightmost insertion point for x among the second elements of the
    sorted (name, key) pairs in a.

    Bug fix: the original never initialised ``lo``, so any call raised
    NameError; ``lo = 0`` has been added (mirroring bisect_left above).
    """
    lo = 0
    hi = len(a)
    while lo < hi:
        mid = (lo+hi)//2
        if x < a[mid][1]: hi = mid
        else: lo = mid+1
    return lo
# def scalarScatter(filename, values, positions, dim=3, name='data'):
# Unconnected.write(filename, values, positions, dim=dim, rank=0, name=name)
# def vectorScatter(filename, values, positions, dim=3, name='data'):
# Unconnected.write(filename, values, positions, dim=dim, rank=1, name=name)
# def matrixScatter(filename, values, positions, dim=3, name='data'):
# Unconnected.write(filename, values, positions, dim=dim, rank=2, name=name)
| {
"repo_name": "joakimstenhammar/subgrid",
"path": "python/dqTools/ncdx.py",
"copies": "2",
"size": "14609",
"license": "mit",
"hash": -509490569945319500,
"line_mean": 27.533203125,
"line_max": 87,
"alpha_frac": 0.4984598535,
"autogenerated": false,
"ratio": 4.011257550796266,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5509717404296266,
"avg_score": null,
"num_lines": null
} |
"""A simple root of supervisor tree and a generic container."""
___all__ = [
'serve',
]
import logging
import os
import signal
import curio
from garage import asyncs
from garage.okay import OKAY, NOT_OKAY
LOG = logging.getLogger(__name__)
# The root node of the supervisor tree
async def serve(graceful_exit, grace_period, server_coros):
    """Spawn all server coroutines plus a default signal handler, then
    supervise them: when any task exits, shut the rest down (gracefully
    if the exit was clean).  Returns OKAY only if all servers exited
    without exceptions within grace_period seconds."""
    okay = NOT_OKAY
    async with asyncs.TaskStack() as servers:
        LOG.info('start servers: pid=%d', os.getpid())
        for server_coro in server_coros:
            await servers.spawn(server_coro)
        all_tasks = list(servers)
        # Also spawn default signal handler.
        signal_handler_task = await asyncs.spawn(
            signal_handler(graceful_exit, grace_period),
            daemon=True,
        )
        all_tasks.append(signal_handler_task)
        # When one server exits, normally or not, we bring down all
        # other servers. But if it exits normally, we initiate a
        # graceful exit.
        server = await asyncs.select(all_tasks)
        if server is signal_handler_task:
            pass
        elif server.exception:
            LOG.error('server crash: %r', server, exc_info=server.exception)
        else:
            if not graceful_exit.is_set():
                LOG.info('serve: notify graceful exit')
                graceful_exit.set()
        # Give the remaining servers grace_period seconds to wind down.
        async with curio.ignore_after(grace_period) as timeout:
            okay = await wait_servers(servers)
        if timeout.expired:
            LOG.warning('serve: exceed grace period %f', grace_period)
            for server in servers:
                if not server.terminated:
                    LOG.warning(
                        'serve: server is still running: %r', server)
        # When we leave this block, TaskStack will cancel all the
        # remaining tasks.
    LOG.info('exit')
    return okay
async def wait_servers(servers):
    """Wait for every server task to finish and return the accumulated
    OKAY status (falsy if any server raised)."""
    status = OKAY
    for task in servers:
        await task.wait()
        if task.exception:
            LOG.error('server crash: %r', task, exc_info=task.exception)
        else:
            LOG.info('server exit: %r', task)
        status &= not task.exception
    return status
async def signal_handler(graceful_exit, grace_period):
    """Default signal policy: SIGINT requests a graceful exit (a second
    signal within grace_period, or the period expiring, ends this task);
    SIGTERM ends it immediately.  Ending this task makes serve() tear
    down the remaining servers."""
    # Exploit the fact that when one of the server task exits, the init
    # task will bring down all other server tasks.
    async with curio.SignalQueue(signal.SIGINT, signal.SIGTERM) as sigqueue:
        sig = await sigqueue.get()
        LOG.info('receive signal: %s', sig)
        if sig == signal.SIGINT:
            LOG.info('signal_handler: notify graceful exit')
            graceful_exit.set()
        elif sig == signal.SIGTERM:
            return
        else:
            raise AssertionError('unknown signal: %s' % sig)
        # Wait for a second signal; if none arrives within the grace
        # period, fall through and return anyway.
        async with curio.ignore_after(grace_period):
            sig = await sigqueue.get()
            LOG.info('receive signal again: %s', sig)
            return
        LOG.warning('signal_handler: exceed grace period %f', grace_period)
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/asyncs/servers.py",
"copies": "1",
"size": "3066",
"license": "mit",
"hash": 5898986106729975000,
"line_mean": 30.9375,
"line_max": 76,
"alpha_frac": 0.5988258317,
"autogenerated": false,
"ratio": 4.082556591211718,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5181382422911718,
"avg_score": null,
"num_lines": null
} |
#A simple ROS node in python that listens for and responds to communication
#from the Julia node
import rospy
from geometry_msgs.msg import PoseStamped, Vector3
from std_srvs.srv import SetBool, SetBoolRequest, SetBoolResponse
from nav_msgs.srv import GetPlan, GetPlanRequest, GetPlanResponse
class Echo(object):
    """ROS node that echoes Vector3 messages as PoseStamped messages and
    exercises a service round-trip (callme -> getplan)."""
    def __init__(self):
        self._pub = rospy.Publisher("poses", PoseStamped, queue_size=10)
        self._sub = rospy.Subscriber("vectors", Vector3, self.msg_cb, queue_size=10)
        # count of received messages, mirrored to the parameter server
        self._nrecv = 0
        self._srvlisten = rospy.Service("callme", SetBool, self.srv_cb)
        self._srvcall = rospy.ServiceProxy("getplan", GetPlan)
        rospy.set_param("/received_service_call", False)
    #Translate a Vector3 message to a PoseStamped and republish
    def msg_cb(self, msg):
        pmsg = PoseStamped()
        pmsg.header.stamp = rospy.Time.now()
        pmsg.pose.position.x = msg.x
        pmsg.pose.position.y = msg.y
        pmsg.pose.position.z = msg.z
        self._pub.publish(pmsg)
        self._nrecv += 1
        rospy.set_param("/num_received_messages", self._nrecv)
    def srv_cb(self, req):
        # Service callback: when req.data is true, schedule callsrv()
        # once after 2 seconds, then acknowledge via the parameter server.
        if req.data:
            self._calltimer = rospy.Timer(rospy.Duration(2.0), self.callsrv, oneshot=True)
            rospy.set_param("/received_service_call", True)
            return SetBoolResponse(True, "")
    def callsrv(self, ev):
        # Timer callback: call the getplan service and republish its poses.
        req = GetPlanRequest()
        req.start.pose.position.x = 1.0
        req.goal.pose.position.y = 1.0
        rospy.wait_for_service("getplan")
        resp = self._srvcall(req)
        for pose in resp.plan.poses:
            self._pub.publish(pose)
def main():
    """Initialise the echo node and spin until shutdown."""
    rospy.init_node("echo", anonymous=True)
    n = Echo()
    rospy.spin()
if __name__ == "__main__":
main()
| {
"repo_name": "phobon/RobotOS.jl",
"path": "test/echonode.py",
"copies": "3",
"size": "1766",
"license": "mit",
"hash": 1980502211784039700,
"line_mean": 31.7037037037,
"line_max": 90,
"alpha_frac": 0.6347678369,
"autogenerated": false,
"ratio": 3.3574144486692017,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5492182285569202,
"avg_score": null,
"num_lines": null
} |
# A simple round off error estimation of polynomial expressions
# Alexey Solovyev, 2016
# https://github.com/monadius/poly_fp
# MIT License
from fractions import Fraction
from numbers import Rational
# This flag controls string conversion behavior of some internal objects.
# If True then additional information is printed.
verbose_flag = False
# If True then all variables are real-valued variables and hence
# all variables introduce rounding errors.
# If False then all variables are floating-point or fixed-point variables
# depending on their usage context.
real_vars_flag = False
# A template string for printing absolute values.
# Another possible value is "abs({0})".
abs_template = "|{0}|"
# A name of the relative error bound (machine epsilon).
eps_name = "eps"
# A name of the absolute error bound.
delta_name = "delta"
def set_verbose_flag(flag):
    """Toggle extra detail in string conversions of internal objects."""
    assert(type(flag) is bool)
    global verbose_flag
    verbose_flag = flag
def set_real_vars_flag(flag):
    """If True, treat every variable as real-valued (always rounded)."""
    assert(type(flag) is bool)
    global real_vars_flag
    real_vars_flag = flag
def set_abs_template(s):
    """Set the template string used when printing absolute values."""
    # NOTE: the basestring checks make this module Python 2 only.
    assert(isinstance(s, basestring))
    global abs_template
    abs_template = s
def set_eps_name(name):
    """Set the printed name of the relative error bound."""
    assert(isinstance(name, basestring))
    global eps_name
    eps_name = name
def set_delta_name(name):
    """Set the printed name of the absolute error bound."""
    assert(isinstance(name, basestring))
    global delta_name
    delta_name = name
class Variable:
    """Defines a variable """
    # the variable's name
    name = None
    def __init__(self, name):
        assert(isinstance(name, basestring))
        self.name = name
    def __repr__(self):
        return "Variable('{0}')".format(self.name)
    def __str__(self):
        # verbose_flag adds a 'var:' prefix for debugging output
        if verbose_flag:
            return "var:" + self.name
        else:
            return self.name
class Constant:
    """Defines a constant """
    # exact rational value of the constant
    value = None
    def __init__(self, val):
        if isinstance(val, basestring) or isinstance(val, Rational):
            self.value = Fraction(val)
        else:
            raise TypeError("argument should be a string "
                            "or a Rational instance")
    def __str__(self):
        # verbose_flag adds a 'const:' prefix for debugging output
        if verbose_flag:
            return "const:" + str(self.value)
        else:
            return str(self.value)
def convert_to_expr(val):
    """Converts a given value to an expression.
    Accepted values: Expr, Constant, Variable, string, Rational
    """
    if isinstance(val, Expr):
        return val
    elif isinstance(val, Constant):
        return ConstExpr(val)
    elif isinstance(val, Variable):
        return VarExpr(val)
    elif isinstance(val, basestring) or isinstance(val, Rational):
        # strings/numbers become constant expressions
        return ConstExpr(Constant(val))
    else:
        raise TypeError("argument should be an instance of: "
                        "Expr, Constant, Variable, basestring, Rational")
class Expr:
    """A base class of expressions.
    This class overloads '+', '-' (unary and binary), and '*'.
    """
    def __neg__(self):
        return NegExpr(self)
    def __add__(self, other):
        # non-Expr operands are coerced via convert_to_expr
        return AddExpr(self, convert_to_expr(other))
    def __radd__(self, other):
        return AddExpr(convert_to_expr(other), self)
    def __sub__(self, other):
        return SubExpr(self, convert_to_expr(other))
    def __rsub__(self, other):
        return SubExpr(convert_to_expr(other), self)
    def __mul__(self, other):
        return MulExpr(self, convert_to_expr(other))
    def __rmul__(self, other):
        return MulExpr(convert_to_expr(other), self)
class NegExpr(Expr):
"""Represents a negation of an expression"""
def __init__(self, expr):
self.expr = expr
def __str__(self):
return "-({0})".format(self.expr)
class AddExpr(Expr):
"""Represents a sum of two expressions """
left = None
right = None
def __init__(self, left, right):
assert(isinstance(left, Expr) and isinstance(right, Expr))
self.left = left
self.right = right
def __str__(self):
return "({0}) + ({1})".format(self.left, self.right)
class SubExpr(Expr):
    """Binary '-' node over two sub-expressions."""

    left = None
    right = None

    def __init__(self, left, right):
        # Both operands must already be expression nodes.
        assert isinstance(left, Expr) and isinstance(right, Expr)
        self.left, self.right = left, right

    def __str__(self):
        return "(%s) - (%s)" % (self.left, self.right)
class MulExpr(Expr):
    """Binary '*' node over two sub-expressions."""

    left = None
    right = None

    def __init__(self, left, right):
        # Both operands must already be expression nodes.
        assert isinstance(left, Expr) and isinstance(right, Expr)
        self.left, self.right = left, right

    def __str__(self):
        return "(%s) * (%s)" % (self.left, self.right)
class VarExpr(Expr):
    """Expression leaf wrapping a Variable."""

    var = None

    def __init__(self, var):
        assert isinstance(var, Variable)
        self.var = var

    def __str__(self):
        if verbose_flag:
            return "VarExpr(%s)" % self.var
        return str(self.var)
class ConstExpr(Expr):
    """Expression leaf wrapping a Constant."""

    const = None

    def __init__(self, const):
        assert isinstance(const, Constant)
        self.const = const

    def __str__(self):
        if verbose_flag:
            return "ConstExpr(%s)" % self.const
        return str(self.const)
def mk_var_expr(name):
    """Creates a VarExpr from a given name"""
    return VarExpr(Variable(name))
def mk_const_expr(c):
    """Creates a ConstExpr from a given constant (string or number)"""
    return ConstExpr(Constant(c))
class ErrorTerm:
    """Represents a rounding error term.

    Error terms appear due to absolute and relative rounding errors:
    the rounding model gives rnd(x) = x(1 + e) + d, where e is a
    relative error and d is an absolute error.  Terms are identified
    purely by index; equality and hashing ignore the relative flag.
    """

    # Class-wide counter used by get_error_term to mint unique indices.
    global_index = 0

    # Unique index of this term (int).
    index = None
    # True for relative errors (e), False for absolute errors (d).
    relative = None

    def __init__(self, index, relative):
        assert type(index) is int
        assert type(relative) is bool
        self.index = index
        self.relative = relative

    def __repr__(self):
        return "ErrorTerm({0}, {1})".format(self.index, self.relative)

    def __str__(self):
        if verbose_flag:
            return self.__repr__()
        prefix = "e_" if self.relative else "d_"
        return prefix + str(self.index)

    def __hash__(self):
        return self.index

    def __eq__(self, other):
        assert isinstance(other, ErrorTerm)
        return self.index == other.index
def get_error_term(e=None, rel=True):
    """Allocates a fresh ErrorTerm with a globally unique index.

    The *e* argument is accepted (callers pass the expression being
    rounded) but is not used here; *rel* selects a relative (True)
    or absolute (False) error term.
    """
    ErrorTerm.global_index += 1
    return ErrorTerm(ErrorTerm.global_index, rel)
class Monomial:
    """A monomial of the form c * (x * y * ...) * rel_error * abs_error.

    c is a Fraction, x, y, ... are variables,
    rel_error = (1 + e1)(1 + e2)... is the accumulated relative error,
    and abs_error = d1 * d2 * ... is the accumulated absolute error.
    """

    # Constant coefficient (Fraction).
    c = None
    # Variables of the monomial ([Variable]).
    vars = None
    # Relative error terms ([ErrorTerm]).
    rel_errs = None
    # Absolute error terms ([ErrorTerm]).
    abs_errs = None

    def __init__(self):
        self.c = Fraction(1)
        self.vars = []
        self.rel_errs = []
        self.abs_errs = []

    def copy(self):
        """Returns a copy with duplicated lists (list items are shared)."""
        dup = Monomial()
        dup.c = self.c
        dup.vars = list(self.vars)
        dup.rel_errs = list(self.rel_errs)
        dup.abs_errs = list(self.abs_errs)
        return dup

    def __repr__(self):
        return str(self)

    def __str__(self):
        pieces = [str(self.c),
                  "*".join(str(v) for v in self.vars),
                  "*".join("(1 + {0})".format(e) for e in self.rel_errs),
                  "*".join(str(e) for e in self.abs_errs)]
        # Empty factor groups are dropped from the printed product.
        return "*".join(p for p in pieces if p != "")
def var_expr_to_poly(expr):
    """Converts VarExpr to a polynomial (a list of monomials: [Monomial])"""
    assert isinstance(expr, VarExpr)
    mono = Monomial()
    mono.vars = [expr.var]
    return [mono]
def const_expr_to_poly(expr):
    """Converts ConstExpr to a polynomial (a list of monomials)"""
    assert isinstance(expr, ConstExpr)
    mono = Monomial()
    mono.c = expr.const.value
    return [mono]
def rnd_poly(poly, rel_error, abs_error):
    """Rounds a polynomial and returns a new polynomial for the result.

    Attaches *rel_error* to every monomial (if given) and appends one
    extra monomial carrying *abs_error* (if given); the input is not
    modified.
    """
    rounded = []
    for mono in poly:
        dup = mono.copy()
        if rel_error:
            dup.rel_errs.append(rel_error)
        rounded.append(dup)
    if abs_error:
        extra = Monomial()
        extra.abs_errs.append(abs_error)
        rounded.append(extra)
    return rounded
def neg_poly(p):
    """Returns a negation of a polynomial"""
    negated = []
    for mono in p:
        dup = mono.copy()
        dup.c = -dup.c
        negated.append(dup)
    return negated
def add_poly(p, g):
    """Returns a sum of two polynomials"""
    return [m.copy() for m in p] + [m.copy() for m in g]
def sub_poly(p, g):
    """Returns a difference of two polynomials"""
    negated = []
    for mono in g:
        dup = mono.copy()
        dup.c = -dup.c
        negated.append(dup)
    return [mono.copy() for mono in p] + negated
def mul_poly(p, g):
    """Returns a product of two polynomials"""
    product = []
    for a in p:
        for b in g:
            # Pairwise product: coefficients multiply, factor lists concatenate.
            term = Monomial()
            term.c = a.c * b.c
            term.vars = a.vars + b.vars
            term.rel_errs = a.rel_errs + b.rel_errs
            term.abs_errs = a.abs_errs + b.abs_errs
            product.append(term)
    return product
def float_poly(expr):
    """Converts an expression (Expr) to a polynomial (a list of monomials)
    which represents the corresponding floating-point expression.
    It is assumed that all computations are done with the same floating-point format.
    The standard floating-point rounding model is used:
    VarExpr(x) if real_vars_flag = True --> rnd(x) = x * (1 + e) + d
    VarExpr(x) if real_vars_flag = False --> rnd(x) = x
    ConstExpr(c) --> rnd(c) = (1 + e) * c (it is assumed that all constants are normal)
    AddExpr(e1, e2) --> rnd(e1 + e2) = (e1 + e2) * (1 + e) (subnormal results are exact and hence d = 0)
    SubExpr(e1, e2) --> rnd(e1 - e2) = (e1 - e2) * (1 + e) (subnormal results are exact and hence d = 0)
    MulExpr(e1, e2) --> rnd(e1 * e2) = (e1 * e2) * (1 + e) + d

    Raises TypeError for anything that is not a supported Expr node.
    """
    # NOTE: error terms are allocated in a fixed order (operands first,
    # then the node's own e/d terms) so printed indices stay stable.
    if isinstance(expr, VarExpr):
        v = var_expr_to_poly(expr)
        if real_vars_flag:
            e = get_error_term(expr)
            d = get_error_term(expr, rel=False)
            return rnd_poly(v, e, d)
        else:
            return v
    elif isinstance(expr, ConstExpr):
        # Constants are assumed normal: relative error only.
        e = get_error_term(expr)
        return rnd_poly(const_expr_to_poly(expr), e, None)
    elif isinstance(expr, NegExpr):
        # Negation is exact in floating point.
        p = float_poly(expr.expr)
        return neg_poly(p)
    elif isinstance(expr, AddExpr):
        p1 = float_poly(expr.left)
        p2 = float_poly(expr.right)
        e = get_error_term(expr)
        return rnd_poly(add_poly(p1, p2), e, None)
    elif isinstance(expr, SubExpr):
        p1 = float_poly(expr.left)
        p2 = float_poly(expr.right)
        e = get_error_term(expr)
        return rnd_poly(sub_poly(p1, p2), e, None)
    elif isinstance(expr, MulExpr):
        p1 = float_poly(expr.left)
        p2 = float_poly(expr.right)
        e = get_error_term(expr)
        d = get_error_term(expr, rel=False)
        return rnd_poly(mul_poly(p1, p2), e, d)
    else:
        # Bug fix: previously this function fell through and silently
        # returned None for unsupported nodes; raise instead, consistent
        # with fixed_poly.
        raise TypeError("argument should be an Expr instance")
def fixed_poly(expr):
    """Converts an expression (Expr) to a polynomial (a list of monomials)
    which represents the corresponding fixed-point expression.
    It is assumed that all computations are done with the same fixed-point format.
    The standard fixed-point rounding model is used:
    VarExpr(x) if real_vars_flag = True --> rnd(x) = x + d
    VarExpr(x) if real_vars_flag = False --> rnd(x) = x
    ConstExpr(c) --> rnd(c) = c + d
    AddExpr(e1, e2) --> rnd(e1 + e2) = e1 + e2 (exact)
    SubExpr(e1, e2) --> rnd(e1 - e2) = e1 - e2 (exact)
    MulExpr(e1, e2) --> rnd(e1 * e2) = (e1 * e2) + d
    """
    # Error terms are allocated operands-first so indices stay stable.
    if isinstance(expr, VarExpr):
        poly = var_expr_to_poly(expr)
        if not real_vars_flag:
            return poly
        d = get_error_term(expr, rel=False)
        return rnd_poly(poly, None, d)
    if isinstance(expr, ConstExpr):
        d = get_error_term(expr, rel=False)
        return rnd_poly(const_expr_to_poly(expr), None, d)
    if isinstance(expr, NegExpr):
        # Negation is exact in fixed point.
        return neg_poly(fixed_poly(expr.expr))
    if isinstance(expr, AddExpr):
        lhs = fixed_poly(expr.left)
        rhs = fixed_poly(expr.right)
        return add_poly(lhs, rhs)
    if isinstance(expr, SubExpr):
        lhs = fixed_poly(expr.left)
        rhs = fixed_poly(expr.right)
        return sub_poly(lhs, rhs)
    if isinstance(expr, MulExpr):
        lhs = fixed_poly(expr.left)
        rhs = fixed_poly(expr.right)
        d = get_error_term(expr, rel=False)
        return rnd_poly(mul_poly(lhs, rhs), None, d)
    raise TypeError("argument should be an Expr instance")
def get_real_part(poly):
    """Returns the real-valued part of a polynomial (the part
    corresponding to ideal real-valued computations without
    round-off errors), i.e. monomials with no absolute error terms,
    with their relative error terms stripped.
    """
    real = []
    for mono in poly:
        if mono.abs_errs:
            continue
        stripped = Monomial()
        stripped.c = mono.c
        stripped.vars = list(mono.vars)
        real.append(stripped)
    return real
def get_rel_part(poly):
    """Returns a part of a polynomial which contains relative errors only (no absolute errors)"""
    return [m.copy() for m in poly if m.rel_errs and not m.abs_errs]
def get_abs_part(poly):
    """Returns a part of a polynomial which contains absolute errors"""
    return [mono.copy() for mono in poly if mono.abs_errs]
def get_rel_error_bound(poly):
    """Returns a simple relative error bound of a polynomial.

    The result is in the form [(Monomial, n)] where n is the number of
    relative error terms corresponding to the Monomial, e.g.
    poly = [m1(x) * (1 + e1) * (1 + e2), m2(x) * (1 + e3)]
    yields [(m1(x), 2), (m2(x), 1)].
    """
    bounds = []
    for mono in get_rel_part(poly):
        stripped = Monomial()
        stripped.c = mono.c
        stripped.vars = mono.vars
        bounds.append((stripped, len(mono.rel_errs)))
    return bounds
def combine_rel_error(poly_rel_err):
    """Returns a simplified expression for a given relative error bound.

    The input is [(Monomial, n)] with integer n's.  Each monomial is
    scaled by its n; the maximum n is returned alongside.
    Result: ([Monomial], int).
    """
    scaled = []
    max_n = 0
    for mono, n in poly_rel_err:
        max_n = max(max_n, n)
        dup = mono.copy()
        dup.c *= n
        scaled.append(dup)
    return (scaled, max_n)
def get_lin_rel_error(poly):
    """Returns a linear part of the relative error.

    Monomials sharing the same error term are grouped together; the
    result is a collection of polynomials (one per error term).
    """
    by_term = {}
    for mono in get_rel_part(poly):
        stripped = Monomial()
        stripped.c = mono.c
        stripped.vars = mono.vars
        # One copy of the stripped monomial per error term it carries.
        for err in mono.rel_errs:
            by_term.setdefault(err, []).append(stripped.copy())
    return by_term.values()
def get_abs_error_bound(poly):
    """Returns a simple absolute error bound of a polynomial.

    The result is [(Monomial, k, n)] where k is the number of absolute
    error terms and n is the number of relative error terms of the
    Monomial, e.g.
    poly = [m1(x) * (1 + e1) * (1 + e2) * d4, m2(x) * (1 + e3) * d4 * d5 * d4]
    yields [(m1(x), 1, 2), (m2(x), 3, 1)].
    """
    bounds = []
    for mono in get_abs_part(poly):
        stripped = Monomial()
        stripped.c = mono.c
        stripped.vars = mono.vars
        bounds.append((stripped, len(mono.abs_errs), len(mono.rel_errs)))
    return bounds
def combine_abs_error(poly_abs_err):
    """Returns a simplified expression for a given absolute error bound.

    The input is [(Monomial, k, n)] with integer k's (all >= 1) and n's.
    Monomials are split into a first-order group (k == 1) and a
    higher-order group (k >= 2); each group is returned with the maximum
    n seen in it: (err1, max_n1, err2, max_n2).
    """
    first_order = []
    higher_order = []
    max_n1 = 0
    max_n2 = 0
    for mono, k, n in poly_abs_err:
        assert k >= 1
        if k == 1:
            first_order.append(mono.copy())
            max_n1 = max(max_n1, n)
        else:
            higher_order.append(mono.copy())
            max_n2 = max(max_n2, n)
    return (first_order, max_n1, higher_order, max_n2)
def poly_to_str(poly):
    """Converts a polynomial (a list of monomials) into a string"""
    return " + ".join(str(m) for m in poly) if poly else "0"
def poly_to_str_abs(poly):
    """Returns a string for a polynomial where every monomial is wrapped
    in the module-level abs_template (absolute value)."""
    if not poly:
        return "0"
    return " + ".join(abs_template.format(m) for m in poly)
def poly_err_to_str(poly_err, err_template):
    """Converts a polynomial error ([(Monomial, int)]) into a string"""
    if not poly_err:
        return "0"
    pieces = []
    for mono, n in poly_err:
        # |monomial| * err_template(n)
        pieces.append("{0} * {1}".format(abs_template.format(mono),
                                         err_template.format(n)))
    return " + ".join(pieces)
def analyze_float(expr):
    """Analyzes a given expression and prints out all floating-point error bounds"""
    # Build the floating-point polynomial and split its error parts.
    fp = float_poly(expr)
    err0_rel = get_rel_error_bound(fp)
    err0_rel_combined, max_rel_n = combine_rel_error(err0_rel)
    err1_rel = get_lin_rel_error(fp)
    # Second-order relative terms (two or more (1 + e) factors).
    err2_rel = [(m, n) for (m, n) in err0_rel if n >= 2]
    err2_rel2 = [(m, n * n) for (m, n) in err2_rel]
    err_abs = get_abs_error_bound(fp)
    err1_abs, max_abs1_n, err2_abs, max_abs2_n = combine_abs_error(err_abs)
    # Ideal (error-free) value of the expression.
    v0_str = poly_to_str(get_real_part(fp))
    # eps_name/delta_name are module-level display names for the machine
    # epsilon and the absolute rounding unit.
    err0_rel_str = poly_err_to_str(err0_rel,
                                   "((1 + e)^{0} - 1)".replace("e", eps_name))
    err0_rel_combined_str = poly_to_str_abs(err0_rel_combined)
    template0 = " * d^{0}".replace("d", delta_name)
    template1 = " * (1 + e)^{0}".replace("e", eps_name)
    err_abs_strs = []
    for (m, k, n) in err_abs:
        # |m| * delta^k, optionally times (1 + eps)^n.
        s = abs_template.format(m) + template0.format(k)
        if n > 0:
            s += template1.format(n)
        err_abs_strs.append(s)
    if err_abs_strs:
        err_abs_str = " + ".join(err_abs_strs)
    else:
        err_abs_str = "0"
    err12_abs_str = poly_to_str_abs(err1_abs + err2_abs)
    err1_rel_strs = [abs_template.format(poly_to_str(p)) for p in err1_rel]
    err2_rel_str = poly_err_to_str(err2_rel,
                                   "((1 + e)^{0} - 1 - {0}*e)".replace("e", eps_name))
    err2_rel_str_combined = poly_to_str_abs(combine_rel_error(err2_rel2)[0])
    # Print the full error decomposition report.
    print("float({0}) = v0 + error".format(expr))
    print("v0 = {0}\n".format(v0_str))
    print("error = err_rel + err_abs\n")
    print("|err_rel| <= {0}".format(err0_rel_str))
    print("|err_rel| <= ({0}) * eps / (1 - {1}*eps)\n"
          .replace("eps", eps_name)
          .format(err0_rel_combined_str, max_rel_n))
    print("|err_abs| <= {0}".format(err_abs_str))
    print("|err_abs| <= ({0}) * (1 + eps)^{1} * delta\n"
          .replace("eps", eps_name)
          .replace("delta", delta_name)
          .format(err12_abs_str, max(max_abs1_n, max_abs2_n)))
    if err1_rel:
        # Split the relative error into linear and higher-order parts.
        print("err_rel = err_rel1 + err_rel2\n")
        print("|err_rel1| <= ({0}) * eps"
              .replace("eps", eps_name)
              .format(" + ".join(err1_rel_strs)))
        print("|err_rel2| <= {0}".format(err2_rel_str))
        print("|err_rel2| <= ({0}) * eps^2 / (1 - {1}*eps)\n"
              .replace("eps", eps_name)
              .format(err2_rel_str_combined, max_rel_n))
def analyze_fixed(expr):
    """Analyzes a given expression and prints out all fixed-point error bounds"""
    # Fixed-point rounding introduces absolute errors only.
    fx = fixed_poly(expr)
    err_abs = get_abs_error_bound(fx)
    err1_abs, max_abs1_n, err2_abs, max_abs2_n = combine_abs_error(err_abs)
    # Ideal (error-free) value of the expression.
    v0_str = poly_to_str(get_real_part(fx))
    template0 = " * d^{0}".replace("d", delta_name)
    err_abs_strs = []
    for (m, k, n) in err_abs:
        # Fixed-point monomials never carry relative error terms.
        assert(n == 0)
        s = abs_template.format(m) + template0.format(k)
        err_abs_strs.append(s)
    if err_abs_strs:
        err_abs_str = " + ".join(err_abs_strs)
    else:
        err_abs_str = "0"
    err1_abs_str = poly_to_str_abs(err1_abs)
    err2_abs_str = poly_to_str_abs(err2_abs)
    # Print the error decomposition: first-order and higher-order parts.
    print("fixed({0}) = v0 + error".format(expr))
    print("v0 = {0}\n".format(v0_str))
    print("|error| <= {0}\n".format(err_abs_str))
    print("error = error1 + error2\n")
    print("|error1| <= ({0}) * delta"
          .replace("delta", delta_name)
          .format(err1_abs_str))
    print("|error2| <= ({0}) * delta^2\n"
          .replace("delta", delta_name)
          .format(err2_abs_str))
| {
"repo_name": "monadius/poly_fp",
"path": "poly_fp.py",
"copies": "1",
"size": "21765",
"license": "mit",
"hash": -5490345021547478000,
"line_mean": 27.7137203166,
"line_max": 104,
"alpha_frac": 0.5770273375,
"autogenerated": false,
"ratio": 3.3198596705308114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9394119979992915,
"avg_score": 0.000553405607579238,
"num_lines": 758
} |
"""A simple RPC client that shows how to do load balancing."""
#-----------------------------------------------------------------------------
# Copyright (C) 2012. Brian Granger, Min Ragan-Kelley
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
from zpyrpc import RPCServiceProxy, RemoteRPCError, JSONSerializer
if __name__ == '__main__':
    # Demonstrates an RPC client (Python 2 syntax throughout).
    # Custom serializer/deserializer functions can be passed in. The server
    # side ones must match.
    echo = RPCServiceProxy(serializer=JSONSerializer())
    echo.connect('tcp://127.0.0.1:5555')
    print "Echoing: ", echo.echo("Hi there")
    try:
        # The server's error() method raises remotely; the proxy surfaces
        # it as RemoteRPCError with the remote traceback attached.
        echo.error()
    except RemoteRPCError, e:
        print "Got a remote exception:"
        print e.ename
        print e.evalue
        print e.traceback
    math = RPCServiceProxy()
    # By connecting to two instances, requests are load balanced.
    math.connect('tcp://127.0.0.1:5556')
    math.connect('tcp://127.0.0.1:5557')
    for i in range(5):
        for j in range(5):
            print "Adding: ", i, j, math.add(i,j)
| {
"repo_name": "ellisonbg/zpyrpc",
"path": "examples/client.py",
"copies": "1",
"size": "1211",
"license": "bsd-3-clause",
"hash": 8125012656837247000,
"line_mean": 35.696969697,
"line_max": 78,
"alpha_frac": 0.5664739884,
"autogenerated": false,
"ratio": 4.023255813953488,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5089729802353489,
"avg_score": null,
"num_lines": null
} |
"""A simple RPC server that show how to run multiple RPC services."""
#-----------------------------------------------------------------------------
# Copyright (C) 2012. Brian Granger, Min Ragan-Kelley
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
import time
from zpyrpc import RPCService, rpc_method, JSONSerializer
from zmq.eventloop import ioloop
from zmq.utils import jsonapi
class Echo(RPCService):
    """RPC service exposing simple echo/sleep/error test methods."""
    @rpc_method
    def echo(self, s):
        """Logs the call and returns *s* unchanged."""
        print "%r echo %r" % (self.urls, s)
        return s
    @rpc_method
    def sleep(self, t):
        """Blocks the service for *t* seconds (returns None)."""
        time.sleep(t)
    @rpc_method
    def error(self):
        """Always raises; used to exercise remote error propagation."""
        raise ValueError('raising ValueError for fun!')
class Math(RPCService):
    """RPC service exposing basic arithmetic; each call logs the bound URLs."""
    @rpc_method
    def add(self, a, b):
        """Returns a + b."""
        print "%r add %r %r" % (self.urls, a, b)
        return a+b
    @rpc_method
    def subtract(self, a, b):
        """Returns a - b."""
        print "%r subtract %r %r" % (self.urls, a, b)
        return a-b
    @rpc_method
    def multiply(self, a, b):
        """Returns a * b."""
        print "%r multiply %r %r" % (self.urls, a, b)
        return a*b
    @rpc_method
    def divide(self, a, b):
        """Returns a / b.

        NOTE(review): under Python 2, integer operands truncate here;
        raises ZeroDivisionError when b is zero.
        """
        print "%r divide %r %r" % (self.urls, a, b)
        return a/b
if __name__ == '__main__':
    # Multiple RPCService instances can be run in a single process
    # Custom serializer/deserializer functions can be passed in. The server
    # side ones must match.
    echo = Echo(serializer=JSONSerializer())
    echo.bind('tcp://127.0.0.1:5555')
    # We create two Math services to simulate load balancing. A client can
    # connect to both of these services and requests will be load balanced.
    math1 = Math()
    math2 = Math()
    math1.bind('tcp://127.0.0.1:5556')
    math2.bind('tcp://127.0.0.1:5557')
    # Hand control to the ZMQ event loop; runs until interrupted.
    ioloop.IOLoop.instance().start()
| {
"repo_name": "ellisonbg/zpyrpc",
"path": "examples/server.py",
"copies": "1",
"size": "1926",
"license": "bsd-3-clause",
"hash": -4625399187713788000,
"line_mean": 26.5142857143,
"line_max": 78,
"alpha_frac": 0.5716510903,
"autogenerated": false,
"ratio": 3.647727272727273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9641912150026417,
"avg_score": 0.015493242600171315,
"num_lines": 70
} |
"""A simple rss parse and printer"""
import webapp2
from lib import feedparser
URL = 'https://www.youtube.com/rss/user/VEVO/feed.rss'
class MainPage(webapp2.RequestHandler):
    """Handler that renders the RSS feed at URL as a simple HTML link list."""

    def get(self):
        """Writes an HTML page with one linked list item per feed entry."""
        self.response.headers['Content-Type'] = 'text/html'
        self.response.headers['Charset'] = 'UTF-8'
        d = self.get_feed()
        self.response.write('<html><head></head><body>')
        # Print the channel name
        self.response.write('<h1>' + d['feed']['title'] + '</h1>')
        self.response.write('<ul>')
        for i in d['entries']:
            # We extract the url and title of the video
            url = i['links'][0]['href']
            title = i['title']
            self.response.write('<li><a href="' + url + '">')
            self.response.write(title)
            # Bug fix: the original emitted only '</li>', leaving every
            # <a> element unclosed in the generated markup.
            self.response.write('</a></li>')
        self.response.write('</ul>')
        self.response.write('</body></html>')

    def get_feed(self):
        """Downloads and parses the RSS feed; returns the feedparser result."""
        d = feedparser.parse(URL)
        return d
application = webapp2.WSGIApplication([
('/', MainPage),
], debug=True) | {
"repo_name": "robertoallende/feedparser-gae",
"path": "parserss.py",
"copies": "1",
"size": "1112",
"license": "mit",
"hash": -838971317141064400,
"line_mean": 26.825,
"line_max": 66,
"alpha_frac": 0.5395683453,
"autogenerated": false,
"ratio": 3.8344827586206898,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48740511039206896,
"avg_score": null,
"num_lines": null
} |
"""A simple script demonstrating the basic usage of the cb2_robot class"""
# The MIT License (MIT)
#
# Copyright (c) 2016 GTRC.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import cb2_robot
import time
import random
def main():
    """Drives a UR CB2 robot through a square of linear moves, then a
    series of random moves around the same center point."""
    HOST = "192.168.1.100"  # The remote host
    PORT = 30003  # The same port as used by the server
    with cb2_robot.URRobot(HOST, PORT) as robot:
        # Move to a known joint-space start pose (degrees -> radians).
        angleStart = [90, -95, 90, 0, 90, 90]
        angleStart = map(cb2_robot.cb2_send.deg_2_rad, angleStart)
        # Cartesian center pose: x, y, z in meters plus a rotation vector.
        center = [100.0/1000, -475.0/1000, 425.0/1000, 1.2, -1.2, 1.2]
        robot.add_goal(cb2_robot.Goal(angleStart, False, 'joint'))
        # Queue the four corners of a 0.4 m square in the x-z plane.
        thisMove = list(center)
        thisMove[0] = center[0] - .2
        thisMove[2] = center[2] - .2
        robot.add_goal(cb2_robot.Goal(thisMove, True, 'linear'))
        thisMove = list(center)
        thisMove[0] = center[0] + .2
        thisMove[2] = center[2] - .2
        robot.add_goal(cb2_robot.Goal(thisMove, True, 'linear'))
        thisMove = list(center)
        thisMove[0] = center[0] + .2
        thisMove[2] = center[2] + .2
        robot.add_goal(cb2_robot.Goal(thisMove, True, 'linear'))
        thisMove = list(center)
        thisMove[0] = center[0] - .2
        thisMove[2] = center[2] + .2
        robot.add_goal(cb2_robot.Goal(thisMove, True, 'linear'))
        # robot.move_now()
        # Execute queued goals one at a time, moving when the robot stops.
        while not robot.goals.empty():
            robot.move_on_stop()
        print 'complete loop 1'
        # Wait until the robot is stationary at its final goal.
        while not (robot.is_stopped() and robot.at_goal()):
            time.sleep(.01)
        # Queue 15 random linear moves within +/-0.2 m of center in x and z.
        for i in range(0, 15):
            thisMove = list(center)
            thisMove[0] = center[0] + random.uniform(-.2, .2)
            thisMove[2] = center[2] + random.uniform(-.2, .2)
            robot.add_goal(cb2_robot.Goal(thisMove, True, 'linear'))
        while not robot.goals.empty():
            robot.move_on_stop()
        print 'complete loop 2'
# Bug fix: the original compared __name__ against "main", which is never
# true, so main() was silently skipped when the script was run directly.
if __name__ == "__main__":
    main()
| {
"repo_name": "IRIM-Technology-Transition-Lab/ur_cb2",
"path": "ur_cb2/cb2_robot_example.py",
"copies": "1",
"size": "2970",
"license": "mit",
"hash": 6426528930396537000,
"line_mean": 36.125,
"line_max": 80,
"alpha_frac": 0.6400673401,
"autogenerated": false,
"ratio": 3.498233215547703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4638300555647703,
"avg_score": null,
"num_lines": null
} |
# A simple script for enabling and disabling per-vif and tunnel interface rules for explicitly
# allowing broadcast/multicast traffic from the tunnel ports and on the port where the VIF is attached
import copy
import os
import sys
import cloudstack_pluginlib as pluginlib
pluginlib.setup_logging("/var/log/cloud/ovstunnel.log")
def clear_flows(bridge, this_vif_ofport, vif_ofports):
    """Removes the flows of a detached port and re-installs the
    broadcast/multicast flooding rules for the remaining VIF ports."""
    # Flood action covering every remaining VIF ofport.
    action = ",".join("output:%s" % ofport for ofport in vif_ofports)
    # Remove flow entries originating from given ofport
    pluginlib.del_flows(bridge, in_port=this_vif_ofport)
    # The following will remove the port being deleted from the actions.
    pluginlib.add_flow(bridge, priority=1100,
                       dl_dst='ff:ff:ff:ff:ff:ff', actions=action)
    pluginlib.add_flow(bridge, priority=1100,
                       nw_dst='224.0.0.0/24', actions=action)
def apply_flows(bridge, this_vif_ofport, vif_ofports):
    """Installs broadcast/multicast rules for a newly attached VIF port."""
    # Flood action covering every known VIF ofport.
    action = ",".join("output:%s" % ofport for ofport in vif_ofports)
    # Ensure {b|m}casts sent from VIF ports are always allowed
    pluginlib.add_flow(bridge, priority=1200,
                       in_port=this_vif_ofport,
                       dl_dst='ff:ff:ff:ff:ff:ff',
                       actions='NORMAL')
    pluginlib.add_flow(bridge, priority=1200,
                       in_port=this_vif_ofport,
                       nw_dst='224.0.0.0/24',
                       actions='NORMAL')
    # Ensure {b|m}casts are always propagated to VIF ports
    pluginlib.add_flow(bridge, priority=1100,
                       dl_dst='ff:ff:ff:ff:ff:ff', actions=action)
    pluginlib.add_flow(bridge, priority=1100,
                       nw_dst='224.0.0.0/24', actions=action)
def clear_rules(vif):
    """Best-effort removal of every ebtables PREROUTING NAT rule that
    mentions *vif*.

    Failures are deliberately swallowed: a rule may already be gone or
    the listing may be empty, neither of which should abort the caller.
    """
    try:
        delcmd = "/sbin/ebtables -t nat -L PREROUTING | grep " + vif
        delcmds = pluginlib.do_cmd(['/bin/bash', '-c', delcmd]).split('\n')
        for cmd in delcmds:
            try:
                # Re-issue each listed rule as a delete (-D) command.
                cmd = '/sbin/ebtables -t nat -D PREROUTING ' + cmd
                pluginlib.do_cmd(['/bin/bash', '-c', cmd])
            except:
                pass
    except:
        pass
def main(command, vif_raw):
    """Handles a VIF plug/unplug event ('online'/'offline') by updating
    broadcast/multicast OpenFlow rules on the VIF's bridge.

    *vif_raw* has the form 'vifname-domid-index'.
    """
    if command not in ('online', 'offline'):
        return
    vif_name, dom_id, vif_index = vif_raw.split('-')
    # validate vif and dom-id
    this_vif = "%s%s.%s" % (vif_name, dom_id, vif_index)
    # Make sure the networking stack is not linux bridge!
    net_stack = pluginlib.do_cmd(['cat', '/etc/xensource/network.conf'])
    if net_stack.lower() == "bridge":
        if command == 'offline':
            clear_rules(this_vif)
        # Nothing to do here!
        return
    bridge = pluginlib.do_cmd([pluginlib.VSCTL_PATH, 'iface-to-br', this_vif])
    # find xs network for this bridge, verify is used for ovs tunnel network
    xs_nw_uuid = pluginlib.do_cmd([pluginlib.XE_PATH, "network-list",
                                   "bridge=%s" % bridge, "--minimal"])
    ovs_tunnel_network = pluginlib.is_regular_tunnel_network(xs_nw_uuid)
    # handle case where network is a regular tunnel network
    if ovs_tunnel_network == 'True':
        vlan = pluginlib.do_cmd([pluginlib.VSCTL_PATH, 'br-to-vlan', bridge])
        if vlan != '0':
            # We need the REAL bridge name
            bridge = pluginlib.do_cmd([pluginlib.VSCTL_PATH,
                                       'br-to-parent', bridge])
        vsctl_output = pluginlib.do_cmd([pluginlib.VSCTL_PATH,
                                         'list-ports', bridge])
        vifs = vsctl_output.split('\n')
        vif_ofports = []
        vif_other_ofports = []
        # Collect the ofport of every VIF on the bridge, remembering ours.
        for vif in vifs:
            vif_ofport = pluginlib.do_cmd([pluginlib.VSCTL_PATH, 'get',
                                           'Interface', vif, 'ofport'])
            if this_vif == vif:
                this_vif_ofport = vif_ofport
            if vif.startswith('vif'):
                vif_ofports.append(vif_ofport)
        if command == 'offline':
            # Re-flood to everyone except the port going away.
            vif_other_ofports = copy.copy(vif_ofports)
            vif_other_ofports.remove(this_vif_ofport)
            clear_flows(bridge, this_vif_ofport, vif_other_ofports)
        if command == 'online':
            apply_flows(bridge, this_vif_ofport, vif_ofports)
    # handle case where bridge is setup for VPC which is enabled for distributed routing
    ovs_vpc_distributed_vr_network = pluginlib.is_vpc_network_with_distributed_routing(xs_nw_uuid)
    if ovs_vpc_distributed_vr_network == 'True':
        vlan = pluginlib.do_cmd([pluginlib.VSCTL_PATH, 'br-to-vlan', bridge])
        if vlan != '0':
            # We need the REAL bridge name
            bridge = pluginlib.do_cmd([pluginlib.VSCTL_PATH,
                                       'br-to-parent', bridge])
        vif_network_id = pluginlib.get_network_id_for_vif(this_vif)
        pluginlib.update_flooding_rules_on_port_plug_unplug(bridge, this_vif, command, vif_network_id)
    return
if __name__ == "__main__":
    # Expects exactly two arguments: the event and the vif identifier.
    if len(sys.argv) != 3:
        print "usage: %s [online|offline] vif-domid-idx" % \
            os.path.basename(sys.argv[0])
        sys.exit(1)
    else:
        command, vif_raw = sys.argv[1:3]
        main(command, vif_raw)
| {
"repo_name": "MissionCriticalCloud/cosmic",
"path": "cosmic-core/scripts/src/main/resources/scripts/vm/hypervisor/xenserver/ovs-vif-flows.py",
"copies": "2",
"size": "5227",
"license": "apache-2.0",
"hash": 8195013757565932000,
"line_mean": 39.2076923077,
"line_max": 102,
"alpha_frac": 0.5758561316,
"autogenerated": false,
"ratio": 3.5727956254272044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5148651757027204,
"avg_score": null,
"num_lines": null
} |
#A simple script for testing pypair
from pypair import Tournament
import random
import os
home = os.path.expanduser("~")

to = Tournament()
to.loadPlayersCSV("playerlist.csv")


def report_random_result(tournament, table):
    """Reports a randomly drawn result for *table*.

    Distribution (out of 100 draws): 2-0 (24%), 2-1 (22%), 0-2 (13%),
    1-2 (37%), 0-0 draw (1%), 1-1 draw (3%) -- identical to the original
    copy-pasted branches.
    """
    per = random.randint(1, 100)
    if per < 25:
        tournament.reportMatch(table, [2, 0, 0])
    elif per < 47:
        tournament.reportMatch(table, [2, 1, 0])
    elif per < 60:
        tournament.reportMatch(table, [0, 2, 0])
    elif per < 97:
        tournament.reportMatch(table, [1, 2, 0])
    elif per < 98:
        tournament.reportMatch(table, [0, 0, 1])
    else:
        tournament.reportMatch(table, [1, 1, 1])


# The original script repeated the same pairing/reporting block five times;
# this loop produces the same sequence of calls and the same output.
for round_number in range(1, 6):
    pairings = to.pairRound()
    print(pairings)
    for table in pairings:
        # String entries in the pairings dict (e.g. byes) have no match.
        if not type(pairings[table]) is str:
            report_random_result(to, table)
    to.saveEventData("%s/datadump%d.txt" % (home, round_number))
    print("")
    if round_number < 5:
        print("")

print(to.playersDict[256])
print("")
| {
"repo_name": "JeffHoogland/pypair",
"path": "test.py",
"copies": "1",
"size": "3363",
"license": "bsd-3-clause",
"hash": 2284880252990313000,
"line_mean": 22.3541666667,
"line_max": 42,
"alpha_frac": 0.5504014273,
"autogenerated": false,
"ratio": 2.9474145486415426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39978159759415427,
"avg_score": null,
"num_lines": null
} |
"""A simple script that attempts to directly download a single blob from a given peer"""
import argparse
import logging
import sys
import tempfile
from twisted.internet import defer
from twisted.internet import reactor
from zope.interface import implements
from lbrynet import interfaces
from lbrynet import conf
from lbrynet.core import log_support
from lbrynet.core import BlobManager
from lbrynet.core import HashAnnouncer
from lbrynet.core import HashBlob
from lbrynet.core import RateLimiter
from lbrynet.core import Peer
from lbrynet.core import Wallet
from lbrynet.core.client import BlobRequester
from lbrynet.core.client import ConnectionManager
log = logging.getLogger()
SUCCESS = False
def main(args=None):
    """Download a single blob directly from one peer.

    Command line: PEER BLOB_HASH [--timeout SECONDS]. Exits 0 if the blob
    was downloaded (flag set by MyBlobManager.blob_completed), 1 otherwise.
    """
    conf.initialize_settings()
    parser = argparse.ArgumentParser()
    parser.add_argument('--timeout', type=int, default=30)
    parser.add_argument('peer')
    parser.add_argument('blob_hash')
    args = parser.parse_args(args)
    log_support.configure_console(level='DEBUG')
    announcer = HashAnnouncer.DummyHashAnnouncer()
    blob_manager = MyBlobManager(announcer)
    blob = HashBlob.TempBlob(args.blob_hash, False)
    download_manager = SingleBlobDownloadManager(blob)
    peer = Peer.Peer(*conf.server_port(args.peer))
    payment_rate_manager = DumbPaymentRateManager()
    wallet = getWallet()
    requester = SingleBlobRequester(
        peer, blob_manager, payment_rate_manager, wallet, download_manager)
    rate_limiter = RateLimiter.DummyRateLimiter()
    downloader = SingleBlobDownloader()
    connection_manager = ConnectionManager.ConnectionManager(
        downloader, rate_limiter, [requester], [wallet.get_info_exchanger()])
    # Hard stop after --timeout seconds even if the download never finishes.
    reactor.callLater(args.timeout, reactor.stop)
    d = connection_manager.start()
    d.addErrback(log_support.failure, 'Something bad happened: %s')
    reactor.run()
    # SUCCESS is flipped by MyBlobManager.blob_completed before reactor.stop().
    if SUCCESS:
        sys.exit(0)
    else:
        sys.exit(1)
class MyBlobManager(BlobManager.BlobManager):
    """BlobManager that records success and stops the reactor when the blob lands."""
    def blob_completed(self, blob):
        global SUCCESS
        log.info('Blob has been downloaded, we can stop')
        # this feels pretty hacky, but its as good of a stopping point as any
        SUCCESS = True
        reactor.stop()
def getWallet():
    """Build an in-memory LBRYum wallet from the global lbrynet settings."""
    wallet_dir = conf.settings['lbryum_wallet_dir']
    options = {'auto_connect': True}
    if wallet_dir:
        options['lbryum_path'] = wallet_dir
    return Wallet.LBRYumWallet(Wallet.InMemoryStorage(), options)
class SingleBlobDownloader(object):
    """Minimal downloader stub; silently ignores insufficient-funds errors."""
    def insufficientfunds(self, err):
        pass
class SingleBlobDownloadManager(object):
    """Download manager that tracks exactly one blob."""

    def __init__(self, blob):
        self.blob = blob

    def needed_blobs(self):
        """Return the blobs still to fetch: empty once the blob is verified."""
        return [] if self.blob.verified else [self.blob]
class NullStrategy(object):
    """No-op negotiation strategy; only exposes an empty offer map."""

    def __init__(self):
        self.pending_sent_offers = dict()
class DumbPaymentRateManager(object):
    """Payment-rate manager stub: everything is free, nothing is recorded."""

    def __init__(self):
        self.strategy = NullStrategy()

    def price_limit_reached(self, peer):
        # There is never a price cap.
        return False

    def get_rate_blob_data(self, *args):
        # Blob data always costs nothing.
        return 0.0

    def record_offer_reply(self, peer, offer):
        return None

    def record_points_paid(self, point_ammount):
        return None
class FreeDownload(BlobRequester.DownloadRequest):
    """DownloadRequest subclass that skips paying the peer entirely."""
    def _pay_peer(self, *args):
        # TODO: somewhere I missed the part that is supposed to get
        # and address from the remote server for where to send
        # data fees to so we can't make payments. Probably has
        # to do with the wallet_info_exchanger
        pass
class SingleBlobRequester(BlobRequester.BlobRequester):
    """BlobRequester that offers exactly one peer, once, and never pays."""
    implements(interfaces.IRequestCreator)
    # Use the non-paying request class defined above.
    DownloadRequest = FreeDownload
    def __init__(self, peer, blob_manager, payment_rate_manager, wallet, download_manager):
        self.peer = peer
        # Set to True once the single peer has been handed out.
        self.sent = False
        BlobRequester.BlobRequester.__init__(
            self, blob_manager, None, payment_rate_manager, wallet, download_manager)
    def __repr__(self):
        # NOTE: the 'Requestor' spelling is kept as-is (repr text only).
        return 'SingleBlobRequestor({!r})'.format(self.peer)
    def get_new_peers(self):
        # Hand out the one configured peer exactly once, then an empty list.
        if self.sent:
            return defer.succeed([])
        else:
            self.sent = True
            return defer.succeed([self.peer])
    def send_next_request(self, peer, protocol):
        return self._send_next_request(peer, protocol)
# Script entry point; main() normally exits via sys.exit itself, so the outer
# sys.exit(main()) only matters if main returns without exiting.
if __name__ == '__main__':
    sys.exit(main())
| {
"repo_name": "zestyr/lbry",
"path": "scripts/download_blob_from_peer.py",
"copies": "1",
"size": "4405",
"license": "mit",
"hash": 4201455234363465700,
"line_mean": 28.1721854305,
"line_max": 91,
"alpha_frac": 0.6819523269,
"autogenerated": false,
"ratio": 3.837108013937282,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5019060340837282,
"avg_score": null,
"num_lines": null
} |
"""A simple script that connects with a wii controller and starts experiment"""
import errno
import time
from select import poll, POLLIN
import threading
import Queue
from src.experiment import Experiment
from src.tasks_gui import TasksGui
class App(threading.Thread):
"""A application class puts accel data into a queue"""
def __init__(self):
try:
self.xwiimote = __import__("xwiimote")
except ImportError:
print "No xwiimote found"
exit(1)
else:
self.fd_value = None
self.dev = None
self.ini_xwii()
self.queue = Queue.Queue()
self.poll = poll()
self.poll.register(self.fd_value, POLLIN)
self.loop_active = True
gui = TasksGui()
experiment = Experiment(gui)
threading.Thread.__init__(self)
self.start()
output_file = None
while self.loop_active:
evt = self.queue.get()
if evt[0] == 1:
experiment.press_b_down(evt[1])
elif evt[0] == 2:
experiment.press_b_up(evt[1])
if experiment.is_finished():
output_file = open(
"data/record-"+str(time.time())+".txt", "w"
)
output_file.write(experiment.get_output())
output_file.close()
self.loop_active = False
elif evt[0] == 3:
experiment.accel(evt[1], evt[2], evt[3], evt[4])
elif evt[0] == 4:
self.loop_active = False
gui.quit()
def ini_xwii(self):
"""Find the WiiController"""
# display a constant
print "=== " + self.xwiimote.NAME_CORE + " ==="
# list wiimotes and remember the first one
try:
mon = self.xwiimote.monitor(True, True)
print "mon fd", mon.get_fd(False)
ent = mon.poll()
first_wiimote = ent
while ent is not None:
print "Found device: " + ent
ent = mon.poll()
except SystemError as ex:
print "ooops, cannot create monitor (", ex, ")"
# continue only if there is a wiimote
if first_wiimote is None:
print "No wiimote to read"
exit(0)
# create a new iface
try:
self.dev = self.xwiimote.iface(first_wiimote)
except IOError as ex:
print "ooops,", ex
exit(1)
# display some information and open the iface
try:
print "syspath:" + self.dev.get_syspath()
self.fd_value = self.dev.get_fd()
print "fd:", self.fd_value
print "opened mask:", self.dev.opened()
self.dev.open(
self.dev.available() | self.xwiimote.IFACE_WRITABLE
)
print "opened mask:", self.dev.opened()
print "capacity:", self.dev.get_battery(), "%"
except SystemError as ex:
print "ooops", ex
exit(1)
def run(self):
# read some values
evt = self.xwiimote.event()
local_loop_active = True
while local_loop_active and self.loop_active:
self.poll.poll()
try:
self.dev.dispatch(evt)
if evt.type == self.xwiimote.EVENT_KEY:
key, state = evt.get_key()
if key == self.xwiimote.KEY_B:
if state == 1:
self.queue.put([1, time.time()])
elif state == 0:
self.queue.put([2, time.time()])
elif evt.type == self.xwiimote.EVENT_ACCEL:
x_value, y_value, z_value = evt.get_abs(0)
self.queue.put([3, x_value, y_value, z_value, time.time()])
if evt.type == self.xwiimote.EVENT_GONE:
self.queue.put([4])
local_loop_active = False
except IOError as ex:
if ex.errno != errno.EAGAIN:
print "Bad"
exit(0)
# Importing/running this module starts the recorder immediately;
# App.__init__ blocks until the experiment finishes or the wiimote vanishes.
App()
exit(0)
| {
"repo_name": "GordonLesti/SlidingWindowFilter-experiment",
"path": "src/xwiimote_recorder.py",
"copies": "1",
"size": "4312",
"license": "mit",
"hash": -1697340315910570200,
"line_mean": 33.7741935484,
"line_max": 79,
"alpha_frac": 0.4782003711,
"autogenerated": false,
"ratio": 4.0910815939278935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001622172784575454,
"num_lines": 124
} |
# a simple script that will generate a forwarding DLL assembler listing
# for example, to create a forwarder for an ollydbg plugin to an immunity debugger plugin
# 1. edit DLLNAME and forwarders
# 2. run makefw.py <myfile.asm>
# 3. compile with yasm 'yasm -o <myfile.exe> <myfile.asm>"
# Name of the DLL that actually implements the forwarded exports.
DLLNAME = "multiasm"
# Exported symbol -> target symbol inside DLLNAME.
forwarders = {
    "IMMDBG_Pluginaction"  : "_ODBG_Pluginaction" ,
    "IMMDBG_Pluginclose"   : "_ODBG_Pluginclose" ,
    "IMMDBG_Plugindata"    : "_ODBG_Plugindata" ,
    "IMMDBG_Plugindestroy" : "_ODBG_Plugindestroy" ,
    "IMMDBG_Plugininit"    : "_ODBG_Plugininit" ,
    "IMMDBG_Pluginmenu"    : "_ODBG_Pluginmenu" ,
    "IMMDBG_Pluginshortcut": "_ODBG_Pluginshortcut",
    }
###############################################################################
import sys
TEMPLATE = """
%%include 'consts.inc'
IMAGEBASE equ 1000000h
org IMAGEBASE
bits 32
SECTIONALIGN equ 1000h
FILEALIGN equ 200h
istruc IMAGE_DOS_HEADER
at IMAGE_DOS_HEADER.e_magic, db 'MZ'
at IMAGE_DOS_HEADER.e_lfanew, dd NT_Signature - IMAGEBASE
iend
NT_Signature:
istruc IMAGE_NT_HEADERS
at IMAGE_NT_HEADERS.Signature, db 'PE', 0, 0
iend
istruc IMAGE_FILE_HEADER
at IMAGE_FILE_HEADER.Machine, dw IMAGE_FILE_MACHINE_I386
at IMAGE_FILE_HEADER.NumberOfSections, dw NUMBEROFSECTIONS
at IMAGE_FILE_HEADER.SizeOfOptionalHeader, dw SIZEOFOPTIONALHEADER
at IMAGE_FILE_HEADER.Characteristics, dw IMAGE_FILE_EXECUTABLE_IMAGE | IMAGE_FILE_32BIT_MACHINE | IMAGE_FILE_DLL
iend
OptionalHeader:
istruc IMAGE_OPTIONAL_HEADER32
at IMAGE_OPTIONAL_HEADER32.Magic, dw IMAGE_NT_OPTIONAL_HDR32_MAGIC
at IMAGE_OPTIONAL_HEADER32.AddressOfEntryPoint, dd EntryPoint - IMAGEBASE
at IMAGE_OPTIONAL_HEADER32.ImageBase, dd IMAGEBASE
at IMAGE_OPTIONAL_HEADER32.SectionAlignment, dd SECTIONALIGN
at IMAGE_OPTIONAL_HEADER32.FileAlignment, dd FILEALIGN
at IMAGE_OPTIONAL_HEADER32.MajorSubsystemVersion, dw 4
at IMAGE_OPTIONAL_HEADER32.SizeOfImage, dd 2 * SECTIONALIGN
at IMAGE_OPTIONAL_HEADER32.SizeOfHeaders, dd SIZEOFHEADERS
at IMAGE_OPTIONAL_HEADER32.Subsystem, dw IMAGE_SUBSYSTEM_WINDOWS_CUI
at IMAGE_OPTIONAL_HEADER32.NumberOfRvaAndSizes, dd 16
iend
DataDirectory:
istruc IMAGE_DATA_DIRECTORY_16
at IMAGE_DATA_DIRECTORY_16.ExportsVA, dd Exports_Directory - IMAGEBASE
at IMAGE_DATA_DIRECTORY_16.ExportsSize, dd EXPORTS_SIZE ; exports size is *REQUIRED* in this case
iend
SIZEOFOPTIONALHEADER equ $ - OptionalHeader
SectionHeader:
istruc IMAGE_SECTION_HEADER
at IMAGE_SECTION_HEADER.VirtualSize, dd 1 * SECTIONALIGN
at IMAGE_SECTION_HEADER.VirtualAddress, dd 1 * SECTIONALIGN
at IMAGE_SECTION_HEADER.SizeOfRawData, dd 1 * FILEALIGN
at IMAGE_SECTION_HEADER.PointerToRawData, dd 1 * FILEALIGN
at IMAGE_SECTION_HEADER.Characteristics, dd IMAGE_SCN_MEM_EXECUTE | IMAGE_SCN_MEM_WRITE
iend
NUMBEROFSECTIONS equ ($ - SectionHeader) / IMAGE_SECTION_HEADER_size
SIZEOFHEADERS equ $ - IMAGEBASE
section progbits vstart=IMAGEBASE + SECTIONALIGN align=FILEALIGN
EntryPoint:
push 1
pop eax
retn 3 * 4
_c
Exports_Directory:
Characteristics dd 0
TimeDateStamp dd 0
MajorVersion dw 0
MinorVersion dw 0
Name dd 0
Base dd 0
NumberOfFunctions dd NUMBER_OF_FUNCTIONS
NumberOfNames dd NUMBER_OF_NAMES
AddressOfFunctions dd address_of_functions - IMAGEBASE
AddressOfNames dd address_of_names - IMAGEBASE
AddressOfNameOrdinals dd address_of_name_ordinals - IMAGEBASE
_d
address_of_functions:
%(ADDRESS_OF_FUNCTIONS)s
NUMBER_OF_FUNCTIONS equ ($ - address_of_functions) / 4
_d
address_of_names:
%(ADDRESS_OF_NAMES)s
NUMBER_OF_NAMES equ ($ - address_of_names) / 4
_d
%(FORWARDER_NAMES)s
%(FORWARDED_EXPORTS)s
_d
address_of_name_ordinals:
dw %(ORDINALS)s
_d
a__exp__Export db 'ExitProcess', 0
_d
EXPORTS_SIZE equ $ - Exports_Directory
align FILEALIGN, db 0
"""
###############################################################################
# Assemble the five text fragments the assembler TEMPLATE expects: export
# ordinals, forwarder name labels, export labels, and the two RVA tables.
ordinal_list = []
name_label_lines = []
func_rva_lines = []
export_label_lines = []
name_rva_lines = []
for index, (exported, target) in enumerate(forwarders.items()):
    subst = {"forwarder": exported, "forwarded": target, "DLLNAME": DLLNAME}
    ordinal_list.append("%i" % index)
    name_label_lines.append("""%(forwarded)s db "%(DLLNAME)s.%(forwarded)s", 0""" % subst)
    export_label_lines.append("""%(forwarder)s db "%(forwarder)s", 0""" % subst)
    func_rva_lines.append("""    dd %(forwarded)s - IMAGEBASE""" % subst)
    name_rva_lines.append("""    dd %(forwarder)s - IMAGEBASE""" % subst)
# The TEMPLATE placeholders are filled from these names.
FORWARDER_NAMES = "\r\n".join(name_label_lines)
ADDRESS_OF_FUNCTIONS = "\r\n".join(func_rva_lines)
ADDRESS_OF_NAMES = "\r\n".join(name_rva_lines)
FORWARDED_EXPORTS = "\r\n".join(export_label_lines)
ORDINALS = ", ".join(ordinal_list)
listing = TEMPLATE % {
    "ADDRESS_OF_FUNCTIONS": ADDRESS_OF_FUNCTIONS,
    "ADDRESS_OF_NAMES": ADDRESS_OF_NAMES,
    "FORWARDER_NAMES": FORWARDER_NAMES,
    "FORWARDED_EXPORTS": FORWARDED_EXPORTS,
    "ORDINALS": ORDINALS,
}
with open(sys.argv[1], "wt") as out:
    out.write(listing)
| {
"repo_name": "angea/corkami",
"path": "misc/python/makefw.py",
"copies": "1",
"size": "5192",
"license": "bsd-2-clause",
"hash": 7285663245564081000,
"line_mean": 31.2820512821,
"line_max": 122,
"alpha_frac": 0.6515793529,
"autogenerated": false,
"ratio": 3.3027989821882953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.943399885851019,
"avg_score": 0.0040758953156211294,
"num_lines": 156
} |
"""A simple script to create, train, validate, and save the default model"""
from __future__ import division, print_function
import datetime as dt
import time
import os
from nflwin import model
def main():
    """Train, validate, save, and plot the default NFL win-probability model."""
    start = time.time()
    win_probability_model = model.WPModel()
    training_seasons = [2009, 2010, 2011, 2012, 2013, 2014]
    validation_seasons = [2015]
    season_types = ["Regular", "Postseason"]
    win_probability_model.train_model(training_seasons=training_seasons,
                                      training_season_types=season_types)
    print("Took {0:.2f}s to build model".format(time.time() - start))
    start = time.time()
    # Validation yields two quality metrics that are also shown on the plot.
    max_deviation, residual_area = win_probability_model.validate_model(validation_seasons=validation_seasons,
                                                                        validation_season_types=season_types)
    print("Took {0:.2f}s to validate model, with a max residual of {1:.2f} and a residual area of {2:.2f}"
          .format(time.time() - start, max_deviation, residual_area))
    win_probability_model.save_model()
    ax = win_probability_model.plot_validation(label="max deviation={0:.2f}, \n"
                                               "residual total area={1:.2f}"
                                               "".format(max_deviation, residual_area))
    curr_datetime = dt.datetime.now()
    ax.set_title("Model Generated At: " + curr_datetime.strftime("%Y-%m-%d %H:%M:%S"))
    ax.legend(loc="lower right", fontsize=10)
    # Annotate the plot with the data provenance in the upper-left corner.
    ax.text(0.02, 0.98, ("Data from: {0:s}\n"
                         "Training season(s): {1:s}\n"
                         "Validation season(s): {2:s}"
                         "".format(", ".join(season_types),
                                   ", ".join(str(year) for year in training_seasons),
                                   ", ".join(str(year) for year in validation_seasons))),
            ha="left", va="top", fontsize=10, transform=ax.transAxes)
    # Save the validation plot into the Sphinx docs static directory.
    this_filepath = os.path.dirname(os.path.abspath(__file__))
    save_filepath = os.path.join(this_filepath, "doc", "source", "_static", "validation_plot.png")
    ax.figure.savefig(save_filepath)
# Standard script entry point.
if __name__ == "__main__":
    main()
| {
"repo_name": "AndrewRook/PyWPA",
"path": "make_default_model.py",
"copies": "2",
"size": "2244",
"license": "mit",
"hash": -2067965776947239400,
"line_mean": 43.88,
"line_max": 110,
"alpha_frac": 0.5592691622,
"autogenerated": false,
"ratio": 3.849056603773585,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5408325765973585,
"avg_score": null,
"num_lines": null
} |
# A simple script to generate fake data
import datetime as dt
import json
import math
import random
import sys
USAGE = "Usage: python make_fake_fixtures.py [num_of_members] [num_of_games] [num_of_tournaments]"
# Fake Names: First and Last
GIVEN_NAMES = ["bruce", "malcolm", "kobe", "peter", "kaylee", "inara"]
LAST_NAMES = ["lee", "reynolds", "bryant", "parker", "frye", "serra"]
# Misc Chapter Codes (None means "no chapter"; lists are index-aligned).
CHAPTER_CODES = ["FFLY", "NBAG", "SJGC", "TPGC", None]
CHAPTER_NAMES = [
    "Fire Fly Go Club",
    "NBA Go Club",
    "San Jose Go Club",
    "Tampa Go Club",
    None,
]
# State Names and City Names
STATE_CODES = ["CA", "OR", "NY", "AZ", "AR", "FL", "KS", "KY", "IA"]
CITY_NAMES = [
    "Aurora",
    "Austin",
    "Boston",
    "Chandler",
    "Charlotte",
    "Dallas",
    "Dayton",
    "Eugene",
]
# Country Names and Codes
COUNTRY_NAMES = ["United States", "Canada", "Japan", "Korea", "China", "Tywain"]
COUNTRY_CODES = [
    "US",
    "CA",
    "JP",
    "KO",
    "CH",
    "TW",
]  # these, oddly, are not the FK in the member table.
# Membership Status Codes
STATUS_CODES = ["accepted"]
# Membership Types
MEMBERSHIP_TYPES = ["Full", "Sustainer", "Sponser", "Lifetime", "E-Journal"]
# Validate the three required integer arguments. BUG FIX: the original used
# quit(), which exits with status 0; usage errors should exit non-zero so
# calling scripts can detect the failure.
if len(sys.argv) != 4:
    print(USAGE)
    sys.exit(1)
try:
    member_count = int(sys.argv[1])
    game_count = int(sys.argv[2])
    tourney_count = int(sys.argv[3])
except ValueError:
    print(USAGE)
    sys.exit(1)
# Pre-generate the id pools used by all fixture sections below.
member_ids = [x for x in range(1, member_count + 1)]
tournament_ids = ["T%s" % x for x in range(1, tourney_count + 1)]
# One agagd_core.member and one agagd_core.players fixture per member id.
members = []
players = []
for member_id in member_ids:
    # Anchor all member dates around a random "last changed" date in the past.
    date = dt.date.today() - dt.timedelta(days=random.randint(2, 150))
    join_date = date - dt.timedelta(days=150)
    renewal_due = date + dt.timedelta(days=random.randint(2, 720))
    first_name = random.choice(GIVEN_NAMES)
    last_name = random.choice(LAST_NAMES)
    members.append(
        {
            "pk": member_id,
            "model": "agagd_core.member",
            "fields": {
                "member_id": member_id,
                # legacy_id is random and need not be unique (fake data only).
                "legacy_id": random.choice(range(1, member_count + 1)),
                "full_name": "%s %s" % (first_name, last_name),
                "given_names": first_name,
                "family_name": last_name,
                "join_date": join_date.strftime("%Y-%m-%d"),
                "renewal_due": renewal_due.strftime("%Y-%m-%d"),
                "city": "Seattle",
                "state": random.choice(STATE_CODES),
                "status": random.choice(STATUS_CODES),
                "region": "some region",
                "country": random.choice(COUNTRY_NAMES),
                "chapter": random.choice(CHAPTER_CODES),
                # NOTE(review): chapter_id is drawn independently of chapter,
                # so the two fields usually disagree -- fine for fake data.
                "chapter_id": random.choice(range(1, len(CHAPTER_CODES) + 1)),
                "occupation": "",
                # NOTE(review): range(0, 1) has only one element, so citizen is
                # always 0; possibly range(0, 2) was intended.
                "citizen": random.choice(range(0, 1)),
                "password": "hallo!",
                "type": random.choice(MEMBERSHIP_TYPES),
                "last_changed": date.strftime("%Y-%m-%d 00:00:00Z"),
            },
        }
    )
    players.append(
        {
            "pk": member_id,
            "model": "agagd_core.players",
            "fields": {
                "elab_date": date.strftime("%Y-%m-%d"),
                "name": first_name,
                "last_name": last_name,
                "rating": random.uniform(-15, 10),
                "sigma": random.random(),
            },
        }
    )
ratings = []
# 25 historical rating rows per player.
ratings_range = list(range(0, 25))
for member_id in member_ids:
    for rating_id in ratings_range:
        elab_date = dt.date.today() - dt.timedelta(days=random.randint(2, 20))
        player_rating = players[member_id - 1]["fields"]["rating"]
        # NOTE(review): both bounds subtract a random offset, so "high" can end
        # up below "low". random.uniform tolerates reversed bounds, but the
        # name suggests this was meant to be player_rating + random.randint(0, 3).
        player_low_rating = player_rating - random.randint(0, 3)
        player_high_rating = player_rating - random.randint(0, 3)
        ratings.append(
            {
                "pk": None,
                "model": "agagd_core.rating",
                "fields": {
                    "pin_player": member_id,
                    "elab_date": elab_date.strftime("%Y-%m-%d"),
                    "rating": random.uniform(player_low_rating, player_high_rating),
                    "tournament": random.choice(tournament_ids),
                    "sigma": random.random(),
                },
            }
        )
tournaments = []
for tourney_id in tournament_ids:
    # Tournament date is recent; elab (processing) date is a week later.
    date = dt.date.today() - dt.timedelta(days=random.randint(2, 20))
    elab_date = date + dt.timedelta(days=7)
    random_state = random.choice(STATE_CODES)
    tournaments.append(
        {
            "pk": tourney_id,
            "model": "agagd_core.tournament",
            "fields": {
                "total_players": random.randint(4, 20),
                "city": random.choice(CITY_NAMES),
                "elab_date": elab_date.strftime("%Y-%m-%d"),
                "description": random_state + tourney_id,
                # Canned wall list; not derived from the generated games.
                "wall_list": "1: Mal, Bruce 2d 2+/w0 3+/w0 4+/w0 3-0-0\n"
                "2: Lee, Parker 1d 1-/b2 4+/w0 3-/w0 1-2-0\n"
                "3: Lee, Matt 1k 4-/w0 1-/b6 2+/b4 1-2-0\n"
                "4: Frye, Sam 3k 3+/b2 2-/b6 1-/b8 1-2-0\n"
                "Note: This is not generated by the AGAGD.",
                "state": random_state,
                "rounds": random.randint(2, 5),
                "tournament_date": date.strftime("%Y-%m-%d"),
            },
        }
    )
games = []
for game_id in range(1, game_count + 1):
    # Pick two distinct players and give them complementary colors.
    p1 = random.choice(member_ids)
    p2 = random.choice([member_id for member_id in member_ids if member_id != p1])
    color_1 = random.choice(["B", "W"])
    color_2 = "B" if color_1 != "B" else "W"
    date = dt.date.today() - dt.timedelta(days=random.randint(2, 20))
    elab_date = date + dt.timedelta(days=7)
    games.append(
        {
            "pk": game_id,
            "model": "agagd_core.game",
            "fields": {
                "pin_player_2": p2,
                # Attach the game to a randomly chosen generated tournament.
                "tournament_code": random.choice(tournaments)["pk"],
                "rated": random.randint(0, 1),
                "elab_date": elab_date.strftime("%Y-%m-%d"),
                "handicap": random.randint(0, 9),
                "online": random.randint(0, 1),
                "color_2": color_2,
                "sgf_code": "",
                "komi": random.randint(0, 9),
                "pin_player_1": p1,
                "rank_1": "",
                # Winner's color; independent of ratings (fake data only).
                "result": random.choice(["B", "W"]),
                "rank_2": "",
                "game_date": date.strftime("%Y-%m-%d"),
                "exclude": random.randint(0, 1),
                "round": random.randint(2, 5),
                "color_1": color_1,
            },
        }
    )
chapters = []
# NOTE(review): despite its name, the loop variable is an index into the
# CHAPTER_CODES/CHAPTER_NAMES lists, not a member id.
for member_id in range(0, len(CHAPTER_CODES)):
    chapters.append(
        {
            "pk": member_id + 1,
            "model": "agagd_core.chapters",
            "fields": {
                "member_id": member_id + 1,
                "code": CHAPTER_CODES[member_id],
                "name": CHAPTER_NAMES[member_id],
                "contact_text": random.choice(["Some contact info would go here.", ""]),
                "contact": "Some guy",
                "meeting_city": "Seattle",
                "url": "www.localhost-is-best-host.com",
                "display": random.randint(0, 1),
            },
        }
    )
# One agagd_core.country fixture per country name; pk is the list index.
# The country_code is drawn at random rather than matched to the name
# (fake data only).
countries = [
    {
        "pk": i,
        "model": "agagd_core.country",
        "fields": {
            "country_code": random.choice(COUNTRY_CODES),
            "country_descr": count_name,
        },
    }
    for i, count_name in enumerate(COUNTRY_NAMES)
]
# Emit all fixtures as one JSON array on stdout (Django loaddata format).
print(
    json.dumps(
        members + players + ratings + tournaments + games + chapters + countries,
        indent=4,
    )
)
| {
"repo_name": "usgo/agagd",
"path": "scripts/make_fake_fixtures.py",
"copies": "1",
"size": "7869",
"license": "mit",
"hash": 1389344228129396000,
"line_mean": 32.0630252101,
"line_max": 103,
"alpha_frac": 0.4895158216,
"autogenerated": false,
"ratio": 3.3090832632464258,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9296240406645657,
"avg_score": 0.00047173564015382327,
"num_lines": 238
} |
#A simple script to read values from a delta inverter and post them to
#PVoutput.org
import time, subprocess,serial,argparse,sys
from deltaInv import DeltaInverter
from time import localtime, strftime
#PVOutput.org API Values - UPDATE THESE TO YOURS!
# Fill these in with your own PVOutput.org system id and API key before use.
SYSTEMID=""
APIKEY=""
parser = argparse.ArgumentParser()
if __name__ == '__main__':
    parser.add_argument('-v','--verbose',help='Enable verbose output to stderr',default=False,dest='verbose_mode',required=False,action='store_true')
    parser_results = parser.parse_args()
    #Edit your serial connection as required!!
    connection = serial.Serial('/dev/ttyUSB0',19200,timeout=0.2);
    # NOTE(review): this assignment shadows the `localtime` imported above and
    # the value is never used afterwards.
    localtime = time.localtime(time.time())
    t_date = 'd={0}'.format(strftime('%Y%m%d'))
    t_time = 't={0}'.format(strftime('%H:%M'))
    inv1 = DeltaInverter(1) #init Inverter 1
    #Get the Daily Energy thus far
    cmd = inv1.getCmdStringFor('Day Wh')
    connection.write(cmd)
    response = connection.read(100)
    #if no response the inverter is asleep
    if response:
        value = inv1.getValueFromResponse(response)
        # v1..v6 appear to map to energy/power/temperature/voltage per the
        # local variable names -- confirm against the PVOutput addstatus API.
        t_energy = 'v1={0}'.format(value)
        #instanteous power
        cmd = inv1.getCmdStringFor('AC Power')
        connection.write(cmd)
        response = connection.read(100)
        value = inv1.getValueFromResponse(response)
        t_power = 'v2={0}'.format(value)
        #AC Voltage
        cmd = inv1.getCmdStringFor('AC Volts')
        connection.write(cmd)
        response = connection.read(100)
        value = inv1.getValueFromResponse(response)
        t_volts = 'v6={0}'.format(value)
        #Temp - this appears to be onboard somewhere not the heatsink
        cmd = inv1.getCmdStringFor('DC Temp')
        connection.write(cmd)
        response = connection.read(100)
        value = inv1.getValueFromResponse(response)
        t_temp = 'v5={0}'.format(value)
        #if verbose mode
        if parser_results.verbose_mode==True:
            sys.stderr.write('Date: %s, Time: %s\n' %(t_date, t_time))
            sys.stderr.write('Energy Today: %sWh, Instantaneous Power: %sW\n' %(t_energy,t_power))
            sys.stderr.write('Volts: %s, Temp: %s oC\n' % (t_volts,t_temp))
            sys.stderr.flush()
        #Send it all off to PVOutput.org
        cmd = ['/usr/bin/curl',
            '-d', t_date,
            '-d', t_time,
            '-d', t_energy,
            '-d', t_power,
            '-d', t_volts,
            '-d', t_temp,
            '-H', 'X-Pvoutput-Apikey: ' + APIKEY,
            '-H', 'X-Pvoutput-SystemId: ' + SYSTEMID,
            'http://pvoutput.org/service/r1/addstatus.jsp']
        ret = subprocess.call (cmd)
    else:
        print "No response from inverter - shutdown? No Data sent to PVOutput.org"
    connection.close()
| {
"repo_name": "stik79/DeltaPVOutput",
"path": "DeltaPVOutput.py",
"copies": "1",
"size": "2775",
"license": "mit",
"hash": 2367953907977773000,
"line_mean": 33.5769230769,
"line_max": 149,
"alpha_frac": 0.6079279279,
"autogenerated": false,
"ratio": 3.3035714285714284,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.9265848387110893,
"avg_score": 0.02913019387210705,
"num_lines": 78
} |
#A simple script to read values from a delta inverter and post them to
#PVoutput.org
import time, subprocess,serial
from deltaInv import DeltaInverter
from time import localtime, strftime
#PVOutput.org API Values - UPDATE THESE TO YOURS!
#Array of SystemID's that correlate to the inverter number
#ie. the first systemID will be inverter 1, second inverter 2 etc
#the numebr of systemIDs must still be the same as the number of inverter- even if they
#are empty.
#an empty string means don't individually update this inverter on pvoutput.org - just do the totals
# Per-inverter PVOutput.org system ids; index 0 is inverter 1, and so on.
SYSTEMIDS=["",""]
#System ID of the total/avg values.
TOTALSYSTEMID=""
APIKEY=""
if __name__ == '__main__':
#Edit your serial connection as required!!
connection = serial.Serial('/dev/ttyUSB0',19200,timeout=0.2);
localtime = time.localtime(time.time())
t_date = 'd={0}'.format(strftime('%Y%m%d'))
t_time = 't={0}'.format(strftime('%H:%M'))
totalWh =0
totalPower =0
avgACVolts=0
avgTempDC=0
validInv =0
for index in range(len(SYSTEMIDS)):
inv = DeltaInverter(index+1) #init Inverter 1
#Get the Daily Energy thus far
cmd = inv.getCmdStringFor('Day Wh')
connection.write(cmd)
response = connection.read(100)
#if no response the inverter is asleep
if response:
validInv+=1
value = inv.getValueFromResponse(response)
totalWh+=int(value)
t_energy = 'v1={0}'.format(value)
#instanteous power
cmd = inv.getCmdStringFor('AC Power')
connection.write(cmd)
response = connection.read(100)
value = inv.getValueFromResponse(response)
totalPower+=int(value)
t_power = 'v2={0}'.format(value)
#AC Voltage
cmd = inv.getCmdStringFor('AC Volts')
connection.write(cmd)
response = connection.read(100)
value = inv.getValueFromResponse(response)
avgACVolts+=float(value)
t_volts = 'v6={0}'.format(value)
#Temp - this appears to be onboard somewhere not the heatsink
cmd = inv.getCmdStringFor('DC Temp')
connection.write(cmd)
response = connection.read(100)
value = inv.getValueFromResponse(response)
avgTempDC+=int(value)
t_temp = 'v5={0}'.format(value)
if not SYSTEMIDS[index]=="":
#Send it all off to PVOutput.org
cmd = ['/usr/bin/curl',
'-d', t_date,
'-d', t_time,
'-d', t_energy,
'-d', t_power,
'-d', t_volts,
'-d', t_temp,
'-H', 'X-Pvoutput-Apikey: ' + APIKEY,
'-H', 'X-Pvoutput-SystemId: ' + SYSTEMID[index],
'http://pvoutput.org/service/r1/addstatus.jsp']
ret = subprocess.call (cmd)
else:
print "No response from inverter %d - shutdown? No Data sent to PVOutput.org"% (index+1)
if validInv !=0:
print "%d awake Inverters" % validInv
avgACVolts=avgACVolts/validInv
avgTempDC=avgTempDC/validInv
t_energy = 'v1={0}'.format(totalWh)
t_power = 'v2={0}'.format(totalPower)
t_volts = 'v6={0}'.format(avgACVolts)
t_temp = 'v5={0}'.format(avgTempDC)
#Send it all off to PVOutput.org
cmd = ['/usr/bin/curl',
'-d', t_date,
'-d', t_time,
'-d', t_energy,
'-d', t_power,
'-d', t_volts,
'-d', t_temp,
'-H', 'X-Pvoutput-Apikey: ' + APIKEY,
'-H', 'X-Pvoutput-SystemId: ' + TOTALSYSTEMID,
'http://pvoutput.org/service/r1/addstatus.jsp']
ret = subprocess.call (cmd)
else:
print "No response from any inverter - shutdown? No Data sent to PVOutput.org"
connection.close()
| {
"repo_name": "stik79/DeltaPVOutput",
"path": "MultipleDeltaPVOutput.py",
"copies": "1",
"size": "3846",
"license": "mit",
"hash": -2255734265482382800,
"line_mean": 31.1551724138,
"line_max": 99,
"alpha_frac": 0.5813832553,
"autogenerated": false,
"ratio": 3.218410041841004,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42997932971410036,
"avg_score": null,
"num_lines": null
} |
"""A simple script to recourse over a directory and rewrite files to an ACM repo.
ACM repos are structured a certain way. In particular cluster and namespace
resources need to be in different directories. This script takes as
an input a directory and rewrites all the resources into the target
directory.
"""
import argparse
import logging
import os
import re
import yaml
if __name__ == "__main__":
    # Configure structured console logging before doing any work.
    logging.basicConfig(
        level=logging.INFO,
        format=('%(levelname)s|%(asctime)s'
                '|%(pathname)s|%(lineno)d| %(message)s'),
        datefmt='%Y-%m-%dT%H:%M:%S',
    )
    logging.getLogger().setLevel(logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--source", default=os.getcwd(), type=str,
        help=("The path to recourse over looking for YAML files"))
    parser.add_argument(
        "--dest", default=os.getcwd(), type=str,
        help=("Root of the ACM repo to write to."))
    args = parser.parse_args()
    for root, _, files in os.walk(args.source):
        for f in files:
            _, ext = os.path.splitext(f)
            if not ext.lower() in [".yaml", ".yml"]:
                continue
            path = os.path.join(root, f)
            # Ensure annotations is a map. This is another error we are seeing
            with open(path) as hf:
                # BUG FIX: yaml.load_all without an explicit Loader is unsafe
                # on untrusted input and raises TypeError on PyYAML >= 6.0;
                # safe_load_all suffices for plain Kubernetes manifests.
                contents = yaml.safe_load_all(hf)
                for o in contents:
                    if not o:
                        continue
                    api_version = o.get("apiVersion")
                    # NOTE(review): assumes every document carries metadata
                    # with a name; a manifest missing either raises here --
                    # confirm whether such inputs must be tolerated.
                    metadata = o.get("metadata")
                    namespace = metadata.get("namespace")
                    kind = o.get("kind")
                    name = metadata.get("name")
                    # e.g. "apps_v1_Deployment_my-app.yaml"
                    filename = "_".join([api_version.replace("/", "_"),
                                         kind,
                                         name]) + ".yaml"
                    if kind.lower() == "namespace":
                        # Namespaces live at namespaces/<name>/namespace.yaml.
                        filename = "namespace.yaml"
                        outdir = os.path.join(args.dest, "namespaces", name)
                    elif not namespace:
                        # Cluster-scoped resources go under cluster/.
                        outdir = os.path.join(args.dest, "cluster",)
                    else:
                        outdir = os.path.join(args.dest, "namespaces", namespace)
                    if not os.path.exists(outdir):
                        os.makedirs(outdir)
                        logging.info("Creating directory %s", outdir)
                    # Use a distinct handle so the input handle (hf) is not
                    # shadowed while still inside its with-block.
                    with open(os.path.join(outdir, filename), "w") as out:
                        yaml.dump(o, out)
| {
"repo_name": "kubeflow/gcp-blueprints",
"path": "kubeflow/hack/to_acm_structure.py",
"copies": "1",
"size": "2259",
"license": "apache-2.0",
"hash": -189903350661568700,
"line_mean": 29.527027027,
"line_max": 81,
"alpha_frac": 0.5816733068,
"autogenerated": false,
"ratio": 3.921875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9885086768603174,
"avg_score": 0.023692307639365178,
"num_lines": 74
} |
# A simple script to snag deprecated proto fields and add them to runtime_features.cc
from __future__ import print_function
import re
import subprocess
import fileinput
from six.moves import input
# Sorts out the list of deprecated proto fields which should be disallowed and returns a tuple of
# email and code changes.
def deprecate_proto():
  """Find proto fields marked deprecated that are not yet runtime-guarded.

  Greps api/ for "deprecated = true", drops entries already listed in
  runtime_features.cc, and returns (email_text, code_to_insert).
  """
  grep_output = subprocess.check_output('grep -r "deprecated = true" api/*', shell=True)
  filenames_and_fields = set()
  # Compile the set of deprecated fields and the files they're in, deduping via set.
  deprecated_regex = re.compile(r'.*\/([^\/]*.proto):[^=]* ([^= ]+) =.*')
  for byte_line in grep_output.splitlines():
    # check_output returns bytes on Python 3; str() yields "b'...'" but the
    # regex still matches inside that representation.
    line = str(byte_line)
    match = deprecated_regex.match(line)
    if match:
      filenames_and_fields.add(tuple([match.group(1), match.group(2)]))
    else:
      print('no match in ' + line + ' please address manually!')
  # Now discard any deprecated features already listed in runtime_features
  exiting_deprecated_regex = re.compile(r'.*"envoy.deprecated_features.(.*):(.*)",.*')
  with open('source/common/runtime/runtime_features.cc', 'r') as features:
    for line in features.readlines():
      match = exiting_deprecated_regex.match(line)
      if match:
        filenames_and_fields.discard(tuple([match.group(1), match.group(2)]))
  # Finally sort out the code to add to runtime_features.cc and a canned email for envoy-announce.
  code_snippets = []
  email_snippets = []
  for (filename, field) in filenames_and_fields:
    code_snippets.append('    "envoy.deprecated_features.' + filename + ':' + field + '",\n')
    email_snippets.append(field + ' from ' + filename + '\n')
  code = ''.join(code_snippets)
  email = ''
  if email_snippets:
    email = ('\nThe following deprecated configuration fields will be disallowed by default:\n' +
             ''.join(email_snippets))
  return email, code
# Gather code and suggested email changes.
deprecate_email, deprecate_code = deprecate_proto()
email = ('The Envoy maintainer team is cutting the next Envoy release. In the new release ' +
         deprecate_email)
print('\n\nSuggested envoy-announce email: \n')
print(email)
if not input('Apply relevant runtime changes? [yN] ').strip().lower() in ('y', 'yes'):
  exit(1)
# Rewrite runtime_features.cc in place, inserting the new entries right after
# the anchor line below.
for line in fileinput.FileInput('source/common/runtime/runtime_features.cc', inplace=1):
  if 'envoy.deprecated_features.deprecated.proto:is_deprecated_fatal' in line:
    # Equivalent to appending deprecate_code after the anchor line.
    line = line.replace(line, line + deprecate_code)
  print(line, end='')
print('\nChanges applied. Please send the email above to envoy-announce.\n')
| {
"repo_name": "eklitzke/envoy",
"path": "tools/deprecate_features/deprecate_features.py",
"copies": "6",
"size": "2596",
"license": "apache-2.0",
"hash": -4001395691198682000,
"line_mean": 37.7462686567,
"line_max": 98,
"alpha_frac": 0.6926040062,
"autogenerated": false,
"ratio": 3.620641562064156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012629406802863107,
"num_lines": 67
} |
# A simple script to snag deprecated proto fields and add them to runtime_features.h
import re
import subprocess
import fileinput
# Sorts out the list of deprecated proto fields which should be disallowed and returns a tuple of
# email and code changes.
def deprecate_proto():
  """Find newly deprecated proto fields and return (email, code) snippets.

  Greps the api/ tree for fields marked "deprecated = true", drops any that
  are already listed in runtime_features.cc, and returns a human-readable
  email body plus the C++ string-literal lines to append to the runtime
  deprecated-features list.
  """
  grep_output = subprocess.check_output('grep -r "deprecated = true" api/*', shell=True)
  filenames_and_fields = set()
  # Compile the set of deprecated fields and the files they're in, deduping via set.
  # Captures: group(1) = proto file basename, group(2) = field name.
  deprecated_regex = re.compile(r'.*\/([^\/]*.proto):[^=]* ([^= ]+) =.*')
  for line in grep_output.splitlines():
    match = deprecated_regex.match(line)
    if match:
      filenames_and_fields.add(tuple([match.group(1), match.group(2)]))
    else:
      print 'no match in ' + line + ' please address manually!'
  # Now discard any deprecated features already listed in runtime_features
  exiting_deprecated_regex = re.compile(r'.*"envoy.deprecated_features.(.*):(.*)",.*')
  with open('source/common/runtime/runtime_features.cc', 'r') as features:
    for line in features.readlines():
      match = exiting_deprecated_regex.match(line)
      if match:
        filenames_and_fields.discard(tuple([match.group(1), match.group(2)]))
  # Finally sort out the code to add to runtime_features.cc and a canned email for envoy-announce.
  code_snippets = []
  email_snippets = []
  for (filename, field) in filenames_and_fields:
    code_snippets.append(' "envoy.deprecated_features.' + filename + ':' + field + '",\n')
    email_snippets.append(field + ' from ' + filename + '\n')
  code = ''.join(code_snippets)
  email = ''
  if email_snippets:
    email = ('\nThe following deprecated configuration fields will be disallowed by default:\n' +
             ''.join(email_snippets))
  return email, code
# Sorts out the list of features which should be default enabled and returns a tuple of
# email and code changes.
def flip_runtime_features():
  """Find runtime-guarded features to default-enable; return (email, code).

  Greps source/ for envoy.reloadable_features.* guard strings, removes the
  two test-only flags, and returns an email body plus the C++ lines to add
  to the enabled-by-default list in runtime_features.cc.
  """
  grep_output = subprocess.check_output(
      'grep -r "envoy.reloadable_features\." source/*', shell=True)
  features_to_flip = set()
  # Compile the set of features to flip, deduping via set.
  deprecated_regex = re.compile(r'.*"(envoy.reloadable_features\.[^"]+)".*')
  for line in grep_output.splitlines():
    match = deprecated_regex.match(line)
    if match:
      features_to_flip.add(match.group(1))
    else:
      print 'no match in ' + line + ' please address manually!'
  # Exempt the two test flags.
  features_to_flip.remove('envoy.reloadable_features.my_feature_name')
  features_to_flip.remove('envoy.reloadable_features.test_feature_true')
  code_snippets = []
  email_snippets = []
  for (feature) in features_to_flip:
    code_snippets.append(' "' + feature + '",\n')
    email_snippets.append(feature + '\n')
  code = ''.join(code_snippets)
  email = ''
  if email_snippets:
    email = 'the following features will be defaulted to true:\n' + ''.join(email_snippets)
  return email, code
# Gather code and suggested email changes.
runtime_email, runtime_features_code = flip_runtime_features()
deprecate_email, deprecate_code = deprecate_proto()
# Canned announcement for the envoy-announce mailing list.
email = ('The Envoy maintainer team is cutting the next Envoy release. In the new release ' +
         runtime_email + deprecate_email)
print '\n\nSuggested envoy-announce email: \n'
print email
# Require explicit confirmation before rewriting source files in place.
if not raw_input('Apply relevant runtime changes? [yN] ').strip().lower() in ('y', 'yes'):
  exit(1)
# inplace=1 redirects stdout into the file, so the trailing print rewrites each line.
for line in fileinput.FileInput('source/common/runtime/runtime_features.cc', inplace=1):
  # Insert the new entries directly after their respective marker lines.
  if 'envoy.reloadable_features.test_feature_true' in line:
    line = line.replace(line, line + runtime_features_code)
  if 'envoy.deprecated_features.deprecated.proto:is_deprecated_fatal' in line:
    line = line.replace(line, line + deprecate_code)
  print line,
print '\nChanges applied. Please send the email above to envoy-announce.\n'
| {
"repo_name": "dnoe/envoy",
"path": "tools/deprecate_features/deprecate_features.py",
"copies": "1",
"size": "3852",
"license": "apache-2.0",
"hash": 2288945230812145700,
"line_mean": 37.1386138614,
"line_max": 98,
"alpha_frac": 0.6913291796,
"autogenerated": false,
"ratio": 3.556786703601108,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4748115883201108,
"avg_score": null,
"num_lines": null
} |
### Designed to be run inside of Pythonista on an iOS device.
### This limits some of the libraries we can use as Pythonista
### has a pretty limited subset up Python (hence we get to re-invent
### the wheel here). On the other hand, PROGRAMING! On iOS! So that's
### useful.
import json
import sys
import urllib2
import os
import plistlib
import webbrowser
from urllib import quote_plus
from hashlib import md5
# You can get an API key and shared secret here at
# https://www.rememberthemilk.com/services/api/keys.rtm
API_KEY = 'YOUR-API-KEY-HERE'
SHARED_SECRET = 'YOUR-SHARED-SECRET-HERE'
AUTH_URL='http://www.rememberthemilk.com/services/auth/'
SERVICE_URL = 'https://api.rememberthemilk.com/services/rest/'
AUTH_FILENAME = 'rtm_auth.plist'
class RTM(object):
    """
    Handles all interaction with Remember The Milk
    """

    def __init__(self, api_key, shared_secret):
        """
        Parameters:
        api_key - The RTM API key
        shared_secret - The RTM shared secret
        """
        self.api_key = api_key
        self.shared_secret = shared_secret
        self.frob = None    # set by getAuthURL() during the auth handshake
        self.token = None   # set after successful auth; sent with every call

    def _sign(self, args):
        """
        Take the arguments to an API call and sign them.
        See https://www.rememberthemilk.com/services/api/authentication.rtm for more info
        """
        # Signature = md5(shared_secret + k1 + v1 + k2 + v2 ...) with keys sorted.
        elements = [self.shared_secret]
        for key in sorted(args.keys()):
            elements.append(key)
            elements.append(args[key].encode('utf-8'))
        return md5(''.join(elements)).hexdigest()

    # NOTE(review): mutable default argument; safe only as long as call_args
    # is never mutated here (it is copied immediately below).
    def call(self, method, call_args={}):
        """
        Calls an API method and decodes the result
        Arguments:
        method - The method we're calling (e.g., "rtm.tasks.add")
        call_args - Any named arguments required by that method
        """
        args = dict(call_args)
        args['method'] = method
        args['format'] = 'json'
        args['api_key'] = self.api_key
        if self.token is not None:
            args['auth_token'] = self.token
        sig = self._sign(args)
        url = "%s?%s&api_sig=%s" % (SERVICE_URL, '&'.join([k + '=' + quote_plus(args[k]) for k in args.keys()]), sig)
        response = urllib2.urlopen(url)
        return json.loads(response.read())

    def getFrob(self):
        """
        Get's a frob so we can authenticate against RTM. The frob is used to keep track of the login process.
        https://www.rememberthemilk.com/services/api/methods/rtm.auth.getFrob.rtm
        """
        results = self.call('rtm.auth.getFrob')
        frob = results['rsp']['frob']
        return frob

    def getAuthURL(self, perms='delete'):
        """
        Get's a URL to allow us to continue with auth.
        See the "Desktop" section on https://www.rememberthemilk.com/services/api/authentication.rtm
        Parameters:
        perms - The level of access for the login (one of "read", "write" or "delete")
        """
        frob = self.getFrob()
        args = {'api_key': self.api_key,
                'perms': perms,
                'frob': frob}
        sig = self._sign(args)
        url = "%s?%s&api_sig=%s" % (AUTH_URL, '&'.join([k + '=' + quote_plus(args[k]) for k in args.keys()]), sig)
        # Remember the frob; finishAuth() exchanges it for a token later.
        self.frob = frob
        return url

    def finishAuth(self):
        """
        Finishes the Auth process started by getAuthURL().
        """
        results = self.call('rtm.auth.getToken', {'frob': self.frob})
        self.token = results['rsp']['auth']['token']
        # Persist the auth response so future runs can skip the browser flow.
        plistlib.writePlist(results['rsp'], AUTH_FILENAME)
        return self.token

    def addTask(self, name, timeline=None):
        """
        Adds a task to RTM. If no timeline is specified, one is created. We use RTM's advanced parsing (so
        that saying something like "Give up all hope tomorrow !2" yields a task ("give up all hope") due
        tomorrow with a priority of 2.
        If the parser doesn't give it a due date, set it to today (which works well with the RTM's Gmail
        integration).
        Parameters:
        name - The name of the task (to be parsed by RTM).
        timeline - The timeline to add the task to. Optional, but if you have several tasks to add,
        it's faster to only create one.
        """
        if timeline is None:
            timeline = self.createTimeline()
        result = self.call('rtm.tasks.add',
                           {'timeline': timeline,
                            'name': name,
                            'parse': '1'})
        lst = result['rsp']['list']
        taskseries = lst['taskseries']
        task = taskseries['task']
        # If there's no due date, set it as today
        if task['due'] == '':
            self.call('rtm.tasks.setDueDate',
                      {'timeline': timeline,
                       'task_id': task['id'],
                       'taskseries_id': taskseries['id'],
                       'list_id': lst['id'],
                       'parse': '1',
                       'due': 'today'})

    def createTimeline(self):
        """
        All calls to create tasks require a timeline. This creates one.
        """
        resp = self.call('rtm.timelines.create')
        return resp['rsp']['timeline']

    def login(self, perms='delete'):
        """
        This performs the login to RTM. It first checks to see if it has credentials
        from a previous run. If so, it checks to see if the token is still good.
        If there is no previous token, or RTM isn't accepting it for whatever reason,
        it launches a modal webbrowser to handle auth and then completes the process.
        Parameters:
        perms - The level of access we're requesting (one of "read", "write" or "delete")
        """
        if os.path.exists(AUTH_FILENAME):
            auth = plistlib.readPlist(AUTH_FILENAME)
            if auth is not None:
                self.token = auth['auth']['token']
                rsp = self.call('rtm.auth.checkToken' )
                try:
                    if rsp['rsp']['stat'] == 'ok':
                        return self.token
                except:
                    pass # fall through, something blew up
            # If that doesn't work for any reason, delete those files and log in
            self.token = None
            os.remove(AUTH_FILENAME)
        # NOTE(review): perms parameter is ignored here; 'write' is hard-coded.
        url = self.getAuthURL(perms='write')
        webbrowser.open(url, modal=True)
        self.finishAuth()
# Our main body, log in, create the tasks, and head back to drafts.
# Expects sys.argv[1] to hold newline-separated task descriptions
# (as passed in by the Drafts app URL scheme).
if __name__ == "__main__":
    rtm = RTM(API_KEY, SHARED_SECRET)
    rtm.login(perms='write')
    # I find a little feedback about this point in the process is nice
    print "Creating task(s)"
    # One shared timeline makes adding multiple tasks faster.
    timeline = rtm.createTimeline()
    for task in [s.strip() for s in sys.argv[1].split("\n")]:
        if len(task) > 0:
            rtm.addTask(task, timeline)
    # We're done, go back to Drafts
    webbrowser.open("drafts://", modal=False, stop_when_done=True)
| {
"repo_name": "lvaughn/py-rtm-insert",
"path": "py_rtm_insert.py",
"copies": "1",
"size": "7401",
"license": "bsd-3-clause",
"hash": -2532028572394056700,
"line_mean": 36.1909547739,
"line_max": 117,
"alpha_frac": 0.5741116065,
"autogenerated": false,
"ratio": 3.8426791277258565,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49167907342258566,
"avg_score": null,
"num_lines": null
} |
# a simple script to trim MNGs and corresponding WAVs without recompression
# Ange Albertini, BSD Licence 2013
# only MHRD-(IHDR/[PLTE/]IDAT/IEND)*-MEND are supported
# tEXt and FRAM are dropped
# complex framerate via FRAM not supported
# 1- run with -d <STARTFRAME> <RANGE> to get xxx frames
# 2- run with -t <FRAMENUMBER> to trim until the chosen is the 1st one
FRAMES_PER_PNG = 3 # FRAM chunks are ignored, PLTE are not expected
import struct
import sys
import wave
PNGSIG = "\x89PNG\x0d\x0a\x1a\x0a"
MNGSIG = "\x8AMNG\x0d\x0a\x1a\x0a"
def ReadNG(fn):
    """Read a PNG/MNG file and return its chunks.

    Returns a list of [offset, data+crc, type] records, skipping tEXt and
    FRAM chunks. Exits the process if the signature is not PNG or MNG.
    """
    with open(fn, "rb") as f:
        r = f.read()
    chunks = []
    cursor = 0
    sig = r[cursor:cursor + 8]
    cursor += 8
    if sig not in [PNGSIG, MNGSIG]:
        print "wrong sig"
        sys.exit()
    # Each chunk: 4-byte big-endian length, 4-byte type, data, 4-byte CRC.
    while cursor < len(r):
        offset = cursor
        size = struct.unpack(">I", r[cursor:cursor + 4])[0]
        cursor += 4
        type_ = r[cursor: cursor + 4]
        cursor += 4
        # Keep data and CRC together; WriteChunk re-emits them verbatim.
        datacrc = r[cursor: cursor + size + 4]
        cursor += size + 4
        if type_ in ["tEXt", "FRAM"]:
            continue
        chunks += [[offset, datacrc, type_]] # ugly, lazy :p
    return chunks
def WriteChunk(t, c):
    """Serialize one chunk record (as produced by ReadNG) to stream t.

    The length field is len(data+crc) minus the 4 trailing CRC bytes,
    followed by the chunk type and the data+crc payload.
    """
    _, datacrc, chunk_type = c
    length_field = struct.pack(">I", len(datacrc) - 4)
    for piece in (length_field, chunk_type, datacrc):
        t.write(piece)
def parseMHDR(c):
    """Extract (width, height, ticks-per-second) from an MHDR chunk record."""
    assert c[2] == "MHDR"
    header = c[1][:12]  # first three big-endian uint32 fields of the MHDR data
    width, height, ticks = struct.unpack(">III", header)
    return width, height, ticks
def WritePNGFrame(i):
    """Dump frame group i of the loaded MNG as a standalone PNG file.

    Relies on the module-level `fnt` (output name prefix) and `chunks`
    (chunk list from ReadNG; index 0 is the MHDR, hence the +1 below).
    """
    out_name = "%s%08i.png" % (fnt, i)
    start = i * FRAMES_PER_PNG + 1  # +1 skips the MHDR chunk
    with open(out_name, "wb") as t:
        t.write(PNGSIG)
        for step in xrange(FRAMES_PER_PNG):
            WriteChunk(t, chunks[start + step])
def TrimMNG(fn, frames):
    """Write trimmed-<fn>.mng with the first `frames` frames removed.

    Uses the module-level `chunks` list; chunks[0] is the MHDR header.
    """
    kept = chunks[1 + frames * FRAMES_PER_PNG:]
    with open("trimmed-%s.mng" % fn, "wb") as t:
        t.write(MNGSIG)
        WriteChunk(t, chunks[0])  # re-emit the MHDR header
        for record in kept:
            WriteChunk(t, record)
def TrimWav(fn, frames, fps):
    """Write trimmed-<fn>.wav, dropping the audio for the first `frames`
    animation frames (at `fps` frames per second)."""
    src = wave.open("%s.wav" % fn, 'r')
    # Audio samples to skip; integer division under Python 2.
    skip = frames * src.getframerate() / fps
    remaining = src.getnframes() - skip
    src.setpos(skip)
    data = src.readframes(remaining)
    dst = wave.open("trimmed-%s.wav" % fn, 'w')
    dst.setparams(src.getparams())
    dst.writeframes(data)
    dst.close()
    src.close()
# Command line: trimmer.py -d <STARTFRAME> <RANGE> <file.mng>  (dump frames)
#               trimmer.py -t <FRAMENUMBER> <file.mng>         (trim mng+wav)
fn = sys.argv[-1]
fnt = fn.replace(".mng", "")  # base name shared by the .mng and .wav
# ugly :p
args = sys.argv[1:]
DUMP_START, DUMP_RANGE, SKIP = 0, 0, 0
if args[0] == "-d": # dump mode
    DUMP_START, DUMP_RANGE = int(args[1]), int(args[2])
elif args[0] == "-t": # trim mode
    SKIP = int(args[1])
chunks = ReadNG(fn)
x, y, ticks = parseMHDR(chunks[0]) # ticks will be not enough if FRAM chunks are used
print "MNG %i x %i, %i f/s" % (x, y, ticks),
print "(%i frames)" % ((len(chunks) - 2) / FRAMES_PER_PNG) # 1 MHDR + 1 MEND to be skipped
# dump PNG frames
for i in xrange(DUMP_RANGE):
    WritePNGFrame(DUMP_START + i)
#trim MNG and WAV
if SKIP != 0:
    TrimMNG(fnt, SKIP)
    TrimWav(fnt, SKIP, ticks)
| {
"repo_name": "angea/corkami",
"path": "misc/python/trimmer.py",
"copies": "1",
"size": "2969",
"license": "bsd-2-clause",
"hash": -3952667479980866000,
"line_mean": 23.5948275862,
"line_max": 90,
"alpha_frac": 0.5611316942,
"autogenerated": false,
"ratio": 2.8062381852551983,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3867369879455198,
"avg_score": null,
"num_lines": null
} |
"""A simple server for central log processing from multiple threads or
processes.
"""
import logging
import logging.handlers
import multiprocessing
import warnings

from .server import LogServer, LogServerProcess, LogServerThread
from ._constants import DEFAULT_FORMAT
__version__ = "0.3.1"
def run_server(handlers=None, host=None, port=None, level=logging.INFO,
               done=None, ready=None):
    """Creates a new :class:`LogServer` and starts it. This is intended as a
    target function for a thread or process and is included for backwards
    compatibility. For more flexibility, it is recommended to use
    :class:`LogServerThread` or :class:`LogServerProcess` directly.

    :param list handlers: List of log handlers to use on the server
        (defaults to an empty list).
    :param str host: Host to bind socket to.
    :param int port: Port number to bind socket to.
    :param int level: Log level threshold.
    :param done: :class:`threading.Event` or :class:`multiprocessing.Event` to
        signal the server to stop.
    :param ready: :class:`threading.Event` or :class:`multiprocessing.Event` to
        indicate to the parent process that the server is ready.
    """
    # Avoid a mutable default argument; a shared list default would be
    # visible across calls.
    if handlers is None:
        handlers = []
    server = LogServer(handlers, host, port, level)
    # Setting this to use the multiprocessing versions for most flexibility.
    server.done = done if done is not None else multiprocessing.Event()
    server.ready = ready if ready is not None else multiprocessing.Event()
    server.run()
def get_logger(name, host="127.0.0.1", port=9123, level=logging.INFO,
               stream_handler=True, stream_fmt=None):
    """Get or create a logger and setup appropriately. For loggers
    running outside of the main process, this must be called after the
    process has been started (i.e., in the :func:`run` method of a
    :class:`multiprocessing.Process` instance).

    :param str name: Name of the logger.
    :param str host: Host address.
    :param int port: Port.
    :param int level: Minimum log level.
    :param bool stream_handler: Add a :class:`logging.StreamHandler` to the
        logger.
    :param stream_fmt: Format to use when ``stream_handler`` is set.
    """
    logger = logging.getLogger(name)
    logger.setLevel(level)
    if logger.handlers:
        return logger  # logger already configured

    # Requires ``import logging.handlers``; ``import logging`` alone does not
    # make the ``logging.handlers`` submodule available.
    logger.addHandler(logging.handlers.DatagramHandler(host, port))

    if stream_handler:
        if stream_fmt is None:
            stream_fmt = DEFAULT_FORMAT
        handler = logging.StreamHandler()
        handler.setLevel(level)
        handler.setFormatter(logging.Formatter(stream_fmt))
        logger.addHandler(handler)

    return logger
def create_logger(*args, **kwargs):
    """Deprecated alias for :func:`get_logger`; emits a DeprecationWarning."""
    message = ("Using logserver.create_logger is deprecated. "
               "Please use logserver.get_logger instead.")
    warnings.warn(message, DeprecationWarning)
    return get_logger(*args, **kwargs)
| {
"repo_name": "mivade/logserver",
"path": "logserver/__init__.py",
"copies": "1",
"size": "2877",
"license": "mit",
"hash": -3612927125690753000,
"line_mean": 34.5185185185,
"line_max": 79,
"alpha_frac": 0.6906499826,
"autogenerated": false,
"ratio": 4.187772925764192,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 81
} |
"""A simple set of tools for building accented glyphs.
# Hey look! A demonstration:
from robofab.accentBuilder import AccentTools, buildRelatedAccentList
font = CurrentFont
# a list of accented glyphs that you want to build
myList=['Aacute', 'aacute']
# search for glyphs related to glyphs in myList and add them to myList
myList=buildRelatedAccentList(font, myList)+myList
# start the class
at=AccentTools(font, myList)
# clear away any anchors that exist (this is optional)
at.clearAnchors()
# add necessary anchors if you want to
at.buildAnchors(ucXOffset=20, ucYOffset=40, lcXOffset=15, lcYOffset=30)
# print a report of any errors that occured
at.printAnchorErrors()
# build the accented glyphs if you want to
at.buildAccents()
# print a report of any errors that occured
at.printAccentErrors()
"""
#XXX! This is *very* experimental! I think it works, but you never know.
from robofab.gString import lowercase_plain, accents, uppercase_plain, splitAccent, findAccentBase
from robofab.tools.toolsAll import readGlyphConstructions
import robofab
from robofab.interface.all.dialogs import ProgressBar
from robofab.world import RFWorld
inFontLab = RFWorld().inFontLab
anchorColor=125
accentColor=75
def stripSuffix(glyphName):
    """strip away unnecessary suffixes from a glyph name"""
    # Names without a dot have no suffix to strip.
    if '.' not in glyphName:
        return glyphName
    base = glyphName.split('.')[0]
    # The small-caps suffix is meaningful, so keep it attached.
    if '.sc' in glyphName:
        base = '.'.join([base, 'sc'])
    return base
def buildRelatedAccentList(font, list):
    """build a list of related glyphs suitable for use with AccentTools"""
    # NOTE(review): the parameter is named `list`, shadowing the builtin; the
    # name is kept for backward compatibility with keyword callers. Because of
    # the shadowing, the builtin list() must NOT be called in this function --
    # the previous `list(baseGlyphs.keys())` calls raised TypeError at runtime.
    searchList = []
    baseGlyphs = {}
    foundList = []
    for glyphName in list:
        splitNames = splitAccent(glyphName)
        baseName = splitNames[0]
        accentNames = splitNames[1]
        if baseName not in searchList:
            searchList.append(baseName)
        # Plain dict membership instead of calling the shadowed builtin.
        if baseName not in baseGlyphs:
            baseGlyphs[baseName] = [accentNames]
        else:
            baseGlyphs[baseName].append(accentNames)
    foundGlyphs = findRelatedGlyphs(font, searchList, doAccents=0)
    # Combine every related base glyph with every accent group requested for it.
    for baseGlyph in foundGlyphs:
        for foundGlyph in foundGlyphs[baseGlyph]:
            for accentNames in baseGlyphs[baseGlyph]:
                foundList.append(makeAccentName(foundGlyph, accentNames))
    return foundList
def findRelatedGlyphs(font, searchItem, doAccents=True):
    """Gather up a bunch of related glyph names. Send it either a
    single glyph name 'a', or a list of glyph names ['a', 'x'] and it
    returns a dict like: {'a': ['atilde', 'a.alt', 'a.swash']}. if doAccents
    is False it will skip accented glyph names.
    This is a relatively slow operation!"""
    relatedGlyphs = {}
    for name in font.keys():
        base = name.split('.')[0]
        # O(1) dict membership instead of rebuilding list(keys()) per glyph,
        # which made this loop accidentally quadratic.
        if name not in relatedGlyphs:
            relatedGlyphs[name] = []
        if base not in relatedGlyphs:
            relatedGlyphs[base] = []
        if doAccents:
            accentBase = findAccentBase(name)
            if accentBase not in relatedGlyphs:
                relatedGlyphs[accentBase] = []
            baseAccentBase = findAccentBase(base)
            if baseAccentBase not in relatedGlyphs:
                relatedGlyphs[baseAccentBase] = []
        # Record suffixed/accented variants under their base entries.
        if base != name and name not in relatedGlyphs[base]:
            relatedGlyphs[base].append(name)
        if doAccents:
            if accentBase != name and name not in relatedGlyphs[accentBase]:
                relatedGlyphs[accentBase].append(name)
            if baseAccentBase != name and name not in relatedGlyphs[baseAccentBase]:
                relatedGlyphs[baseAccentBase].append(name)
    foundGlyphs = {}
    if isinstance(searchItem, str):
        searchList = [searchItem]
    else:
        searchList = searchItem
    for glyph in searchList:
        foundGlyphs[glyph] = relatedGlyphs[glyph]
    return foundGlyphs
def makeAccentName(baseName, accentNames):
    """make an accented glyph name

    baseName may carry a suffix ('a.alt'); accentNames is an accent name or
    a list of accent names appended to the base ('a' + 'acute' -> 'aacute'),
    with any suffix re-attached at the end ('aacute.alt').
    """
    if isinstance(accentNames, str):
        accentNames = [accentNames]
    build = []
    if baseName.find('.') != -1:
        base = baseName.split('.')[0]
        suffix = baseName.split('.')[1]
    else:
        base = baseName
        suffix = ''
    build.append(base)
    for accent in accentNames:
        build.append(accent)
    buildJoin = ''.join(build)
    # Only re-attach the suffix when there is one; unconditionally joining
    # produced names with a trailing '.' for suffix-less glyphs.
    if suffix:
        return '.'.join([buildJoin, suffix])
    return buildJoin
def nameBuster(glyphName, glyphConstruct):
    """Resolve an accented glyph name against the construction database.

    Returns (baseName, stripedSuffixName, accentNames, errors) where
    baseName is the base glyph (with any suffix re-attached), accentNames
    is the list of (accent, position) pairs from the database, and errors
    holds a message if the name is not in the database.
    """
    stripedSuffixName = stripSuffix(glyphName)
    suffix = None
    errors = []
    accentNames = []
    baseName = glyphName
    if glyphName.find('.') != -1:
        suffix = glyphName.split('.')[1]
    # Small-caps names override the generic split; for 'x.sc' this yields ''.
    if glyphName.find('.sc') != -1:
        suffix = glyphName.split('.sc')[1]
    if stripedSuffixName not in list(glyphConstruct.keys()):
        errors.append('%s: %s not in glyph construction database'%(glyphName, stripedSuffixName))
    else:
        if suffix is None:
            baseName = glyphConstruct[stripedSuffixName][0]
        else:
            # .sc suffixes are appended directly; other suffixes with a dot.
            if glyphName.find('.sc') != -1:
                baseName = ''.join([glyphConstruct[stripedSuffixName][0], suffix])
            else:
                baseName = '.'.join([glyphConstruct[stripedSuffixName][0], suffix])
        accentNames = glyphConstruct[stripedSuffixName][1:]
    return (baseName, stripedSuffixName, accentNames, errors)
class AccentTools:

    def __init__(self, font, accentList):
        """several tools for working with anchors and building accents"""
        # Construction database: accented name -> [base, (accent, position), ...]
        self.glyphConstructions = readGlyphConstructions()
        self.accentList = accentList
        # Error logs start with a header line; "no errors" == length 1.
        self.anchorErrors = ['ANCHOR ERRORS:']
        self.accentErrors = ['ACCENT ERRORS:']
        self.font = font

    def clearAnchors(self, doProgress=True):
        """clear all anchors in the font"""
        tickCount = len(self.font)
        if doProgress:
            bar = ProgressBar("Cleaning all anchors...", tickCount)
        tick = 0
        for glyphName in self.accentList:
            if doProgress:
                bar.label(glyphName)
            baseName, stripedSuffixName, accentNames, errors = nameBuster(glyphName, self.glyphConstructions)
            existError = False
            if len(errors) > 0:
                existError = True
            if not existError:
                # Clear anchors on the base glyph and every accent it uses.
                toClear = [baseName]
                for accent, position in accentNames:
                    toClear.append(accent)
                for glyphName in toClear:
                    try:
                        self.font[glyphName].clearAnchors()
                    except IndexError: pass
            if doProgress:
                bar.tick(tick)
            tick = tick+1
        if doProgress:
            bar.close()

    def buildAnchors(self, ucXOffset=0, ucYOffset=0, lcXOffset=0, lcYOffset=0, markGlyph=True, doProgress=True):
        """add the necessary anchors to the glyphs if they don't exist
        some flag definitions:
        uc/lc/X/YOffset=20 offset values for the anchors
        markGlyph=1 mark the glyph that is created
        doProgress=1 show a progress bar"""
        accentOffset = 10
        tickCount = len(self.accentList)
        if doProgress:
            bar = ProgressBar('Adding anchors...', tickCount)
        tick = 0
        for glyphName in self.accentList:
            if doProgress:
                bar.label(glyphName)
            # Tracks the last accent placed per position so stacked accents
            # chain their anchors instead of all attaching to the base.
            previousPositions = {}
            baseName, stripedSuffixName, accentNames, errors = nameBuster(glyphName, self.glyphConstructions)
            existError = False
            if len(errors) > 0:
                existError = True
                for anchorError in errors:
                    self.anchorErrors.append(anchorError)
            if not existError:
                existError = False
                # Indexing the font raises IndexError for missing glyphs.
                try:
                    self.font[baseName]
                except IndexError:
                    self.anchorErrors.append(' '.join([glyphName, ':', baseName, 'does not exist.']))
                    existError = True
                for accentName, accentPosition in accentNames:
                    try:
                        self.font[accentName]
                    except IndexError:
                        self.anchorErrors.append(' '.join([glyphName, ':', accentName, 'does not exist.']))
                        existError = True
                if not existError:
                    #glyph = self.font.newGlyph(glyphName, clear=True)
                    for accentName, accentPosition in accentNames:
                        # Lowercase glyphs get the lowercase offsets.
                        if baseName.split('.')[0] in lowercase_plain:
                            xOffset = lcXOffset-accentOffset
                            yOffset = lcYOffset-accentOffset
                        else:
                            xOffset = ucXOffset-accentOffset
                            yOffset = ucYOffset-accentOffset
                        # should I add a cedilla and ogonek yoffset override here?
                        if accentPosition not in list(previousPositions.keys()):
                            self._dropAnchor(self.font[baseName], accentPosition, xOffset, yOffset)
                            if markGlyph:
                                self.font[baseName].mark = anchorColor
                            if inFontLab:
                                self.font[baseName].update()
                        else:
                            # Chain onto the previous accent at this position.
                            self._dropAnchor(self.font[previousPositions[accentPosition]], accentPosition, xOffset, yOffset)
                        self._dropAnchor(self.font[accentName], accentPosition, accentOffset, accentOffset, doAccentPosition=1)
                        previousPositions[accentPosition] = accentName
                        if markGlyph:
                            self.font[accentName].mark = anchorColor
                        if inFontLab:
                            self.font[accentName].update()
            if inFontLab:
                self.font.update()
            if doProgress:
                bar.tick(tick)
            tick = tick+1
        if doProgress:
            bar.close()

    def printAnchorErrors(self):
        """print errors encounted during buildAnchors"""
        if len(self.anchorErrors) == 1:
            print('No anchor errors encountered')
        else:
            for i in self.anchorErrors:
                print(i)

    def _dropAnchor(self, glyph, positionName, xOffset=0, yOffset=0, doAccentPosition=False):
        """anchor adding method. for internal use only.

        doAccentPosition=True prefixes the anchor name with '_' (the
        mark-attachment counterpart placed on the accent glyph itself).
        """
        existingAnchorNames = []
        for anchor in glyph.getAnchors():
            existingAnchorNames.append(anchor.name)
        if doAccentPosition:
            positionName = ''.join(['_', positionName])
        if positionName not in existingAnchorNames:
            glyphLeft, glyphBottom, glyphRight, glyphTop = glyph.box
            glyphXCenter = glyph.width/2
            # Place the anchor relative to the glyph box per position name;
            # '_'-prefixed anchors mirror their plain counterparts.
            if positionName == 'top':
                glyph.appendAnchor(positionName, (glyphXCenter, glyphTop+yOffset))
            elif positionName == 'bottom':
                glyph.appendAnchor(positionName, (glyphXCenter, glyphBottom-yOffset))
            elif positionName == 'left':
                glyph.appendAnchor(positionName, (glyphLeft-xOffset, glyphTop))
            elif positionName == 'right':
                glyph.appendAnchor(positionName, (glyphRight+xOffset, glyphTop))
            elif positionName == '_top':
                glyph.appendAnchor(positionName, (glyphXCenter, glyphBottom-yOffset))
            elif positionName == '_bottom':
                glyph.appendAnchor(positionName, (glyphXCenter, glyphTop+yOffset))
            elif positionName == '_left':
                glyph.appendAnchor(positionName, (glyphRight+xOffset, glyphTop))
            elif positionName == '_right':
                glyph.appendAnchor(positionName, (glyphLeft-xOffset, glyphTop))
            if inFontLab:
                glyph.update()

    def buildAccents(self, clear=True, adjustWidths=True, markGlyph=True, doProgress=True):
        """build accented glyphs. some flag definitions:
        clear=1 clear the glyphs if they already exist
        markGlyph=1 mark the glyph that is created
        doProgress=1 show a progress bar
        adjustWidths=1 will fix right and left margins when left or right accents are added"""
        tickCount = len(self.accentList)
        if doProgress:
            bar = ProgressBar('Building accented glyphs...', tickCount)
        tick = 0
        for glyphName in self.accentList:
            if doProgress:
                bar.label(glyphName)
            existError = False
            anchorError = False
            baseName, stripedSuffixName, accentNames, errors = nameBuster(glyphName, self.glyphConstructions)
            if len(errors) > 0:
                existError = True
                for accentError in errors:
                    self.accentErrors.append(accentError)
            if not existError:
                baseAnchors = []
                try:
                    self.font[baseName]
                except IndexError:
                    self.accentErrors.append('%s: %s does not exist.'%(glyphName, baseName))
                    existError = True
                else:
                    for anchor in self.font[baseName].anchors:
                        baseAnchors.append(anchor.name)
                for accentName, accentPosition in accentNames:
                    accentAnchors = []
                    try:
                        self.font[accentName]
                    except IndexError:
                        self.accentErrors.append('%s: %s does not exist.'%(glyphName, accentName))
                        existError = True
                    else:
                        for anchor in self.font[accentName].getAnchors():
                            accentAnchors.append(anchor.name)
                        # Both sides of the attachment must exist: the position
                        # anchor on the base and its '_' mate on the accent.
                        if accentPosition not in baseAnchors:
                            self.accentErrors.append('%s: %s not in %s anchors.'%(glyphName, accentPosition, baseName))
                            anchorError = True
                        if ''.join(['_', accentPosition]) not in accentAnchors:
                            self.accentErrors.append('%s: %s not in %s anchors.'%(glyphName, ''.join(['_', accentPosition]), accentName))
                            anchorError = True
                if not existError and not anchorError:
                    destination = self.font.compileGlyph(glyphName, baseName, self.glyphConstructions[stripedSuffixName][1:], adjustWidths)
                    if markGlyph:
                        destination.mark = accentColor
            if doProgress:
                bar.tick(tick)
            tick = tick+1
        if doProgress:
            bar.close()

    def printAccentErrors(self):
        """print errors encounted during buildAccents"""
        if len(self.accentErrors) == 1:
            print('No accent errors encountered')
        else:
            for i in self.accentErrors:
                print(i)
| {
"repo_name": "adrientetar/robofab",
"path": "Lib/robofab/tools/accentBuilder.py",
"copies": "1",
"size": "12412",
"license": "bsd-3-clause",
"hash": 6301245850819760000,
"line_mean": 34.6666666667,
"line_max": 124,
"alpha_frac": 0.7106832098,
"autogenerated": false,
"ratio": 3.28794701986755,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9198318599510211,
"avg_score": 0.06006232603146751,
"num_lines": 348
} |
"""A simple setup module for MkDocs-Merge, based on the pip setup.py file.
See:
https://github.com/pypa/pip/blob/1.5.6/setup.py
"""
import codecs
import os
import re
from setuptools import setup, find_packages
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
    """Return the contents of the file at HERE/<parts...>."""
    # intentionally *not* adding an encoding option to open
    # see: https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
    path = os.path.join(HERE, *parts)
    return codecs.open(path, 'r').read()
def find_version(*file_paths):
    """Extract the __version__ string from the file at HERE/<file_paths...>.

    Raises RuntimeError if no assignment is found.
    """
    contents = read(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if match:
        return match.group(1)
    raise RuntimeError("Unable to find version string.")
# Runtime dependencies.
REQUIREMENTS = ['click>=5.0',
                'mkdocs>=0.16',
                'ruamel.yaml>=0.15']
# Development/test extras, installed via `pip install .[dev]`.
EXTRA_REQUIREMENTS = ['tox>=2.0',
                      'nose',
                      'coverage',
                      'codacy-coverage']
# Package description
setup(
    name='mkdocs-merge',
    version=find_version('mkdocsmerge', '__init__.py'),
    description='Tool to merge multiple MkDocs sites into a single directory',
    url='https://github.com/ovasquez/mkdocs-merge',
    download_url='https://github.com/ovasquez/mkdocs-merge/archive/master.zip',
    license='MIT',
    author='Oscar Vasquez',
    author_email='oscar@vasquezcr.com',
    keywords=['mkdocs', 'documentation', 'merge', 'multiple'],
    packages=find_packages(),
    include_package_data=True,
    install_requires=REQUIREMENTS,
    extras_require={
        'dev': EXTRA_REQUIREMENTS
    },
    # Exposes the `mkdocs-merge` command line tool.
    entry_points={
        "console_scripts": [
            "mkdocs-merge = mkdocsmerge.__main__:cli"
        ]
    },
    classifiers=[
        # 3 - Alpha
        # 4 - Beta
        # 5 - Production/Stable
        'Development Status :: 4 - Beta',
        'Topic :: Documentation',
        'Topic :: Text Processing',
        'Environment :: Console',
        'Environment :: Web Environment',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    zip_safe=False
)
| {
"repo_name": "ovasquez/mkdocs-merge",
"path": "setup.py",
"copies": "1",
"size": "2430",
"license": "mit",
"hash": 7493832025103100000,
"line_mean": 30.1538461538,
"line_max": 79,
"alpha_frac": 0.5880658436,
"autogenerated": false,
"ratio": 3.7442218798151004,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9832287723415101,
"avg_score": 0,
"num_lines": 78
} |
# A simple setup script to create an executable using PyQt4. This also
# demonstrates the method for creating a Windows executable that does not have
# an associated console.
#
# PyQt4app.py is a very simple type of PyQt4 application
#
# Run the build process by running the command 'python setup.py build'
#
# If everything works well you should find a subdirectory in the build
# subdirectory that contains the files needed to run the application
import sys
from cx_Freeze import setup, Executable
import mcl
import requests.certs
# On Windows, "Win32GUI" suppresses the console window for GUI apps.
base = None
if sys.platform == "win32":
    base = "Win32GUI"
setup(
    name = mcl.NAME,
    version = mcl.VERSION,
    description = mcl.DESCRIPTION,
    options = {
        "build_exe": {
            # Bundle the requests CA bundle so HTTPS works in the frozen app.
            "include_files": [
                (requests.certs.where(), 'cacert.pem')
            ]
        }
    },
    executables = [
        Executable(
            "bin/mcl.py",
            base=base,
            icon='mcl/gui/icons/MCL.ico'
        )
    ],
)
"repo_name": "narthollis/eve-mcl",
"path": "setup.py",
"copies": "1",
"size": "1062",
"license": "bsd-2-clause",
"hash": -2808741308415549400,
"line_mean": 24.9268292683,
"line_max": 78,
"alpha_frac": 0.5951035782,
"autogenerated": false,
"ratio": 4.100386100386101,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.51954896785861,
"avg_score": null,
"num_lines": null
} |
# A simple setup script to create an executable using PyQt4. This also
# demonstrates the method for creating a Windows executable that does not have
# an associated console.
#
# PyQt4app.py is a very simple type of PyQt4 application
#
# Run the build process by running the command 'python setup.py build'
#
# If everything works well you should find a subdirectory in the build
# subdirectory that contains the files needed to run the application
application_title = "AMV Tracker" # what you want your application to be called
main_python_file = "amv_tracker.py" # the name of the python file you use to run the program
import sys
from cx_Freeze import setup, Executable
# On Windows, "Win32GUI" suppresses the console window for GUI apps.
base = None
if sys.platform == "win32":
    base = "Win32GUI"
# Modules cx_Freeze must include explicitly in the frozen build.
includes = ["atexit","re"]
setup(
    name = application_title,
    version = "0.1",
    description = "Sample cx_Freeze PyQt4 script",
    options = {"build_exe" : {"includes" : includes }},
    executables = [Executable(main_python_file, base = base)])
| {
"repo_name": "bsobotka/amv_tracker",
"path": "setup.py",
"copies": "1",
"size": "1042",
"license": "mit",
"hash": 7800845134199603000,
"line_mean": 32.7333333333,
"line_max": 91,
"alpha_frac": 0.6938579655,
"autogenerated": false,
"ratio": 3.789090909090909,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9897630888688584,
"avg_score": 0.017063597180465042,
"num_lines": 30
} |
# A simple setup script to create various executables with
# different User Access Control flags in the manifest.
# Run the build process by entering 'setup.py py2exe' or
# 'python setup.py py2exe' in a console prompt.
#
# If everything works well, you should find a subdirectory named 'dist'
# containing lots of executables
from distutils.core import setup
import py2exe
# The targets to build.  Each dict maps to one executable: 'script' is the
# source file, 'dest_base' the output executable's base name, and
# 'uac_info' the requestedExecutionLevel written into its manifest.
# create a target that says nothing about UAC - On Python 2.6+, this
# should be identical to "asInvoker" below. However, for 2.5 and
# earlier it will force the app into compatibility mode (as no
# manifest will exist at all in the target.)
t1 = dict(script="hello.py",
          dest_base="not_specified")
# targets with different values for requestedExecutionLevel
t2 = dict(script="hello.py",
          dest_base="as_invoker",
          uac_info="asInvoker")
t3 = dict(script="hello.py",
          dest_base="highest_available",
          uac_info="highestAvailable")
t4 = dict(script="hello.py",
          dest_base="require_admin",
          uac_info="requireAdministrator")
console = [t1, t2, t3, t4]
# hack to make windows copies of them all too, but
# with '_w' on the tail of the executable.
windows = [t1.copy(), t2.copy(), t3.copy(), t4.copy()]
for t in windows:
    t['dest_base'] += "_w"
setup(
    version = "0.5.0",
    description = "py2exe user-access-control sample script",
    name = "py2exe samples",
    # targets to build
    windows = windows,
    console = console,
    )
| {
"repo_name": "pupboss/xndian",
"path": "deploy/site-packages/py2exe/samples/user_access_control/setup.py",
"copies": "1",
"size": "1548",
"license": "mit",
"hash": -8942405317529518000,
"line_mean": 31.652173913,
"line_max": 71,
"alpha_frac": 0.6595607235,
"autogenerated": false,
"ratio": 3.4630872483221475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.958299513040523,
"avg_score": 0.007930568283383678,
"num_lines": 46
} |
""" A simple setuptools file """
from setuptools import setup
setup(
name="PyPi_py3",
version='0.1 dev',
description='Ensure proper python3 adherence among pypi package maintainers',
long_description="""Inspired by Guido's talk at PyCon2015, many packages on the pypi repository are still
not Python3 supported. This project intends to inform these maintainers and provide
a mechanism for newer, fresher developer blood to contribute and push forward. """,
url="https://github.com/SunPowered/pypi_py3",
author="Tim van Boxtel",
author_email="tim@vanboxtel.ca",
license="MIT",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Environment :: Console",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Utilities"
],
keywords=["python3", "pypi"],
packages=["pypi_py3"]
)
| {
"repo_name": "SunPowered/pypi_py3",
"path": "setup.py",
"copies": "1",
"size": "1085",
"license": "mit",
"hash": -8999620628796216000,
"line_mean": 37.75,
"line_max": 109,
"alpha_frac": 0.6285714286,
"autogenerated": false,
"ratio": 4.305555555555555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5434126984155555,
"avg_score": null,
"num_lines": null
} |
"""A simple simulator for the OPC LED panels.
Usage: python simulator.py
To make the LED-bot talk to this server, change the server address to
`localhost:7890` in your configuration.
"""
from struct import unpack
import SocketServer
# Panel geometry: 64x32 LEDs, drawn as 3px squares with 3px gaps.
W = 64
H = 32
led_num = W * H
led_width = led_height = led_spacing = 3
screen_width = W * (led_width + led_spacing) + led_spacing
screen_height = H * (led_height + led_spacing) + led_spacing
# OPC frame layout: a 4-byte header followed by 3 colour bytes (R, G, B)
# per LED.  Building the struct format with plain string repetition is
# equivalent to the old ''.join('B' for _ in xrange(...)) but clearer,
# cheaper, and not tied to the Python 2-only `xrange` builtin.
header = 'B' * 4
udp_format = header + 'B' * (led_num * 3)
class OPCHandler(SocketServer.BaseRequestHandler):
    """
    The RequestHandler class for our server.
    It is instantiated once per connection to the server, and must
    override the handle() method to implement communication to the
    client.
    """
    def handle(self):
        """Read one OPC frame from the socket and paint it on the canvas."""
        # self.request is the TCP socket connected to the client
        # NOTE(review): a single recv() on a TCP stream is not guaranteed to
        # return the full frame, and strip() may discard colour bytes that
        # happen to be whitespace values — confirm against the client.
        data = self.request.recv(4096*32).strip()
        values = unpack(udp_format, data)
        # reverse the data, since we do the craziness in our client.
        # After reversing, the last 4 entries are the OPC header; drop them.
        values = values[::-1][:-4]
        screen = self.server.screen
        # Draw one rectangle per LED; each pixel's 3 bytes become a Tk
        # '#rrggbb' fill colour.
        for h in range(H):
            for w in range(W):
                i = (h*W + w) * 3
                fill = '#%02x%02x%02x' % values[i:i+3]
                left = led_spacing + w * (led_width + led_spacing)
                top = led_spacing + h * (led_height + led_spacing)
                screen.create_rectangle(
                    left, top, left+led_width, top+led_height, fill=fill
                )
        screen.update()
class OPCServer(SocketServer.TCPServer):
    """TCP server that owns the Tkinter canvas the handler draws on."""
    # Allow quick restarts without waiting for the TIME_WAIT socket state.
    allow_reuse_address = True
    def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
        """Bind the TCP server and create the shared simulator canvas."""
        SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)
        self.screen = get_canvas()
        self.screen.update()
def get_canvas():
    """ Creates a Tkinter canvas. """
    # Imported lazily so the module can be loaded without a display.
    from Tkinter import Tk, Canvas, BOTH
    root = Tk()
    root.title('LED bot simulator')
    root.geometry("%sx%s" % (screen_width, screen_height))
    canvas = Canvas(root)
    canvas.pack(fill=BOTH, expand=1)
    # Paint a black background the size of the whole panel.
    canvas.create_rectangle(
        0, 0, screen_width, screen_height, outline="#000", fill="#000"
    )
    return canvas
if __name__ == "__main__":
HOST, PORT = "localhost", 7890
# Create the server
server = OPCServer((HOST, PORT), OPCHandler)
# Start the server; Interrupt with Ctrl-C
server.serve_forever()
| {
"repo_name": "marqsm/LED-bot",
"path": "LEDBot/simulator.py",
"copies": "1",
"size": "2476",
"license": "mit",
"hash": 2031069806492160500,
"line_mean": 25.3404255319,
"line_max": 101,
"alpha_frac": 0.6175282714,
"autogenerated": false,
"ratio": 3.63049853372434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.474802680512434,
"avg_score": null,
"num_lines": null
} |
"""A simple Slack integration via webhook
Inspired by slackweb http://qiita.com/satoshi03/items/14495bf431b1932cb90b
"""
import json
import urllib3
import certifi
class Slack:
    """Minimal client that posts JSON payloads to a Slack webhook URL."""

    def __init__(self, url):
        """Remember the webhook *url* and set up an HTTPS connection pool."""
        self.url = url
        # urllib3 opens connections lazily, so nothing touches the
        # network until the first send().
        self.pool = urllib3.PoolManager(
            # Always check the HTTPS certificate
            cert_reqs='CERT_REQUIRED',
            ca_certs=certifi.where(),
        )

    def notify(self, **kwargs):
        """Package the keyword arguments as the JSON payload and send it."""
        return self.send(payload=kwargs)

    def send(self, payload):
        """POST *payload* to the Slack webhook API.

        Ref: https://api.slack.com/incoming-webhooks

        :param: payload:
            A dict-like object passed as JSON content
        :returns: ``(status_code, decoded_response_body)``
        """
        encoded = json.dumps(payload)
        resp = self.pool.urlopen(
            "POST",
            self.url,
            headers={'Content-Type': "application/json"},
            body=encoded,
        )
        return resp.status, resp.data.decode('utf8')
| {
"repo_name": "pycontw/pycontw2016",
"path": "src/proposals/management/commands/slack.py",
"copies": "1",
"size": "1109",
"license": "mit",
"hash": -5768779436617011000,
"line_mean": 26.725,
"line_max": 74,
"alpha_frac": 0.5960324617,
"autogenerated": false,
"ratio": 4.032727272727273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5128759734427273,
"avg_score": null,
"num_lines": null
} |
"""A simple socket interface."""
import socket
from multiprocessing import Process
from botbot.debug import debug
class Bot(object):
    """Socket bot"""
    def __init__(self, host, port, debug=False):
        """Initialize the bot with host and port. Debug is an optional
        flag that enables all reads and writes to be displayed to the
        terminal.
        """
        self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._s.connect((host, port))
        self.debug = debug
    def close(self):
        """Close the socket"""
        self._s.close()
    def read(self, size=4096):
        """Read maximum size bytes from socket"""
        # errors='replace' keeps malformed UTF-8 from raising mid-read.
        msg = str(self._s.recv(size), 'utf-8', errors='replace')
        if self.debug:
            # Debug output runs in a separate process — presumably so the
            # bot's read loop is never blocked by terminal I/O (verify).
            Process(target=debug, args=(msg,)).start()
        return msg
    def write(self, msg):
        """Write all of message to socket"""
        self._s.sendall(bytes(msg, 'utf-8'))
        if self.debug:
            Process(target=debug, args=('>>> {}'.format(msg),)).start()
| {
"repo_name": "khile/botbot",
"path": "botbot/bot.py",
"copies": "1",
"size": "1045",
"license": "mit",
"hash": 7522091326867453000,
"line_mean": 27.2432432432,
"line_max": 71,
"alpha_frac": 0.5837320574,
"autogenerated": false,
"ratio": 3.973384030418251,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 37
} |
"""A simple source that allows one to view a suitably shaped numpy
array as ImageData. This supports both scalar and vector data.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
import numpy
from vtk.util import vtkConstants
# Enthought library imports
from traits.api import (Instance, Trait, Str, Bool, Button, DelegatesTo, List,
Int)
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
from tvtk import array_handler
from tvtk.common import is_old_pipeline
# Local imports
from mayavi.core.source import Source
from mayavi.core.pipeline_info import PipelineInfo
def _check_scalar_array(obj, name, value):
"""Validates a scalar array passed to the object."""
if value is None:
return None
arr = numpy.asarray(value)
assert len(arr.shape) in [2,3], "Scalar array must be 2 or 3 dimensional"
vd = obj.vector_data
if vd is not None:
assert vd.shape[:-1] == arr.shape, \
"Scalar array must match already set vector data.\n"\
"vector_data.shape = %s, given array shape = %s"%(vd.shape,
arr.shape)
return arr
_check_scalar_array.info = 'a 2D or 3D numpy array'
def _check_vector_array(obj, name, value):
"""Validates a vector array passed to the object."""
if value is None:
return None
arr = numpy.asarray(value)
assert len(arr.shape) in [3,4], "Vector array must be 3 or 4 dimensional"
assert arr.shape[-1] == 3, \
"The vectors must be three dimensional with `array.shape[-1] == 3`"
sd = obj.scalar_data
if sd is not None:
assert arr.shape[:-1] == sd.shape, \
"Vector array must match already set scalar data.\n"\
"scalar_data.shape = %s, given array shape = %s"%(sd.shape,
arr.shape)
return arr
_check_vector_array.info = 'a 3D or 4D numpy array with shape[-1] = 3'
######################################################################
# 'ArraySource' class.
######################################################################
class ArraySource(Source):
    """A simple source that allows one to view a suitably shaped numpy
    array as ImageData. This supports both scalar and vector data.
    """
    # The scalar array data we manage.
    scalar_data = Trait(None, _check_scalar_array, rich_compare=False)
    # The name of our scalar array.
    scalar_name = Str('scalar')
    # The vector array data we manage.
    vector_data = Trait(None, _check_vector_array, rich_compare=False)
    # The name of our vector array.
    vector_name = Str('vector')
    # The spacing of the points in the array.
    spacing = DelegatesTo('change_information_filter', 'output_spacing',
                          desc='the spacing between points in array')
    # The origin of the points in the array.
    origin = DelegatesTo('change_information_filter', 'output_origin',
                         desc='the origin of the points in array')
    # Fire an event to update the spacing and origin. This
    # is here for backwards compatibility. Firing this is no
    # longer needed.
    update_image_data = Button('Update spacing and origin')
    # The image data stored by this instance.
    image_data = Instance(tvtk.ImageData, (), allow_none=False)
    # Use an ImageChangeInformation filter to reliably set the
    # spacing and origin on the output
    change_information_filter = Instance(tvtk.ImageChangeInformation, args=(),
                                kw={'output_spacing' : (1.0, 1.0, 1.0),
                                    'output_origin' : (0.0, 0.0, 0.0)})
    # Should we transpose the input data or not. Transposing is
    # necessary to make the numpy array compatible with the way VTK
    # needs it. However, transposing numpy arrays makes them
    # non-contiguous where the data is copied by VTK. Thus, when the
    # user explicitly requests that transpose_input_array is false
    # then we assume that the array has already been suitably
    # formatted by the user.
    transpose_input_array = Bool(True, desc='if input array should be transposed (if on VTK will copy the input data)')
    # Information about what this object can produce.
    output_info = PipelineInfo(datasets=['image_data'])
    # Specify the order of dimensions. The default is: [0, 1, 2]
    dimensions_order = List(Int, [0, 1, 2])
    # Our view.
    view = View(Group(Item(name='transpose_input_array'),
                      Item(name='scalar_name'),
                      Item(name='vector_name'),
                      Item(name='spacing'),
                      Item(name='origin'),
                      show_labels=True)
                )
    ######################################################################
    # `object` interface.
    ######################################################################
    def __init__(self, **traits):
        """Create the source; 'scalar_data' and 'vector_data' may be
        passed as traits and are applied after everything else."""
        # Set the scalar and vector data at the end so we pop it here.
        sd = traits.pop('scalar_data', None)
        vd = traits.pop('vector_data', None)
        # Now set the other traits.
        super(ArraySource, self).__init__(**traits)
        self.configure_input_data(self.change_information_filter,
                                  self.image_data)
        # And finally set the scalar and vector data.
        if sd is not None:
            self.scalar_data = sd
        if vd is not None:
            self.vector_data = vd
        self.outputs = [ self.change_information_filter.output ]
        self.on_trait_change(self._information_changed, 'spacing,origin')
    def __get_pure_state__(self):
        """Exclude the VTK image_data object from the pickled state; it is
        rebuilt from scalar_data/vector_data on restore."""
        d = super(ArraySource, self).__get_pure_state__()
        d.pop('image_data', None)
        return d
    ######################################################################
    # ArraySource interface.
    ######################################################################
    def update(self):
        """Call this function when you change the array data
        in-place."""
        d = self.image_data
        d.modified()
        pd = d.point_data
        if self.scalar_data is not None:
            pd.scalars.modified()
        if self.vector_data is not None:
            pd.vectors.modified()
        self.change_information_filter.update()
        self.data_changed = True
    ######################################################################
    # Non-public interface.
    ######################################################################
    def _image_data_changed(self, value):
        """Re-wire the change-information filter when image_data is replaced."""
        self.configure_input_data(self.change_information_filter, value)
    def _scalar_data_changed(self, data):
        """Push new scalar array data into the VTK image and flush the
        pipeline; clearing the data when *data* is None."""
        img_data = self.image_data
        if data is None:
            img_data.point_data.scalars = None
            self.data_changed = True
            return
        dims = list(data.shape)
        # Treat a 2D array as a single-slice 3D volume.
        if len(dims) == 2:
            dims.append(1)
        # set the dimension indices
        dim0, dim1, dim2 = self.dimensions_order
        img_data.origin = tuple(self.origin)
        img_data.dimensions = tuple(dims)
        img_data.extent = 0, dims[dim0]-1, 0, dims[dim1]-1, 0, dims[dim2]-1
        if is_old_pipeline():
            img_data.update_extent = 0, dims[dim0]-1, 0, dims[dim1]-1, 0, dims[dim2]-1
        else:
            update_extent = [0, dims[dim0]-1, 0, dims[dim1]-1, 0, dims[dim2]-1]
            self.change_information_filter.set_update_extent(update_extent)
        # Flatten (transposing first if requested) since VTK point data
        # expects a 1D array.
        if self.transpose_input_array:
            img_data.point_data.scalars = numpy.ravel(numpy.transpose(data))
        else:
            img_data.point_data.scalars = numpy.ravel(data)
        img_data.point_data.scalars.name = self.scalar_name
        # This is very important and if not done can lead to a segfault!
        typecode = data.dtype
        if is_old_pipeline():
            img_data.scalar_type = array_handler.get_vtk_array_type(typecode)
            img_data.update() # This sets up the extents correctly.
        else:
            filter_out_info = self.change_information_filter.get_output_information(0)
            img_data.set_point_data_active_scalar_info(filter_out_info,
                    array_handler.get_vtk_array_type(typecode), -1)
            img_data.modified()
        img_data.update_traits()
        self.change_information_filter.update()
        # Now flush the mayavi pipeline.
        self.data_changed = True
    def _vector_data_changed(self, data):
        """Push new vector array data into the VTK image and flush the
        pipeline; clearing the data when *data* is None."""
        img_data = self.image_data
        if data is None:
            img_data.point_data.vectors = None
            self.data_changed = True
            return
        dims = list(data.shape)
        # A 3D array is a single-slice volume of 3-vectors; insert the
        # missing z dimension.
        if len(dims) == 3:
            dims.insert(2, 1)
            data = numpy.reshape(data, dims)
        img_data.origin = tuple(self.origin)
        img_data.dimensions = tuple(dims[:-1])
        img_data.extent = 0, dims[0]-1, 0, dims[1]-1, 0, dims[2]-1
        if is_old_pipeline():
            img_data.update_extent = 0, dims[0]-1, 0, dims[1]-1, 0, dims[2]-1
        else:
            self.change_information_filter.update_information()
            update_extent = [0, dims[0]-1, 0, dims[1]-1, 0, dims[2]-1]
            self.change_information_filter.set_update_extent(update_extent)
        sz = numpy.size(data)
        if self.transpose_input_array:
            data_t = numpy.transpose(data, (2, 1, 0, 3))
        else:
            data_t = data
        # NOTE(review): sz/3 is integer division under Python 2 (this
        # file's target); under Python 3 it would be a float — verify
        # before porting.
        img_data.point_data.vectors = numpy.reshape(data_t, (sz/3, 3))
        img_data.point_data.vectors.name = self.vector_name
        if is_old_pipeline():
            img_data.update() # This sets up the extents correctly.
        else:
            img_data.modified()
        img_data.update_traits()
        self.change_information_filter.update()
        # Now flush the mayavi pipeline.
        self.data_changed = True
    def _scalar_name_changed(self, value):
        """Rename the active scalar array (if any) and flush the pipeline."""
        if self.scalar_data is not None:
            self.image_data.point_data.scalars.name = value
            self.data_changed = True
    def _vector_name_changed(self, value):
        """Rename the active vector array (if any) and flush the pipeline."""
        if self.vector_data is not None:
            self.image_data.point_data.vectors.name = value
            self.data_changed = True
    def _transpose_input_array_changed(self, value):
        """Re-upload any existing data with the new transpose setting."""
        if self.scalar_data is not None:
            self._scalar_data_changed(self.scalar_data)
        if self.vector_data is not None:
            self._vector_data_changed(self.vector_data)
    def _information_changed(self):
        """React to spacing/origin edits by updating the filter output."""
        self.change_information_filter.update()
        self.data_changed = True
| {
"repo_name": "dmsurti/mayavi",
"path": "mayavi/sources/array_source.py",
"copies": "3",
"size": "10789",
"license": "bsd-3-clause",
"hash": 8465167727326842000,
"line_mean": 38.6654411765,
"line_max": 119,
"alpha_frac": 0.5693762165,
"autogenerated": false,
"ratio": 3.979712283290299,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010578162847264285,
"num_lines": 272
} |
# A simple sphinx plugin which creates HTML redirections from old names
# to new names. It does this by looking for files named "redirect" in
# the documentation source and using the contents to create simple HTML
# redirection pages for changed filenames.
#
# Redirect file should contain one redirect per line, where each line
# has the from and to paths separated by a space. The from path is relative
# to the redirect file, and the to path is relative to the from file.
#
# From:
# https://github.com/openstack/nova-specs/blob/master/doc/source/redirect.py
import os.path
from sphinx.application import ENV_PICKLE_FILENAME
from sphinx.util.console import bold
def setup(app):
    """Sphinx extension hook: register the redirect emitter so it runs
    once the HTML build has finished."""
    from sphinx.application import Sphinx
    # Only hook a real Sphinx application object; anything else (e.g. a
    # stand-in passed by tooling) is silently ignored.
    if isinstance(app, Sphinx):
        app.connect('build-finished', emit_redirects)
def process_redirect_file(app, path, ent):
    """Create one HTML redirect stub per entry in a redirect file.

    Each non-blank line of the file ``path/ent`` holds two
    space-separated paths: the old location (relative to the redirect
    file) and the new location (relative to the old file).  For each
    entry an HTML page that meta-refreshes to the new location is
    written into the corresponding place in the build output tree.
    """
    # Mirror the source-relative location into the output directory.
    parent_path = path.replace(app.builder.srcdir, app.builder.outdir)
    with open(os.path.join(path, ent)) as redirects:
        for line in redirects.readlines():
            # Skip blank lines (e.g. a trailing newline) — previously a
            # blank line crashed the build on tuple unpacking.
            if not line.strip():
                continue
            from_path, to_path = line.rstrip().split(' ')
            from_path = from_path.replace('.rst', '.html')
            to_path = to_path.replace('.rst', '.html')
            redirected_filename = os.path.join(parent_path, from_path)
            redirected_directory = os.path.dirname(redirected_filename)
            if not os.path.exists(redirected_directory):
                os.makedirs(redirected_directory)
            with open(redirected_filename, 'w') as f:
                f.write('<html><head><meta http-equiv="refresh" content="0; '
                        'url=%s" /></head></html>'
                        % to_path)
def emit_redirects(app, exc):
    """build-finished handler: walk the doc source tree and process every
    file named 'redirects' that it finds."""
    app.builder.info(bold('scanning %s for redirects...') % app.builder.srcdir)
    def process_directory(path):
        # Recurse depth-first through the source tree.
        for ent in os.listdir(path):
            p = os.path.join(path, ent)
            if os.path.isdir(p):
                process_directory(p)
            elif ent == 'redirects':
                app.builder.info(' found redirects at %s' % p)
                process_redirect_file(app, path, ent)
    process_directory(app.builder.srcdir)
    app.builder.info('...done') | {
"repo_name": "mrgaaron/docs",
"path": "source/sphinxext/redirect.py",
"copies": "1",
"size": "2204",
"license": "mit",
"hash": 5962539522616888000,
"line_mean": 37.6842105263,
"line_max": 79,
"alpha_frac": 0.6415607985,
"autogenerated": false,
"ratio": 3.9078014184397163,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5049362216939717,
"avg_score": null,
"num_lines": null
} |
'''A simple standalone target for the scheme interpreter.'''
from __future__ import absolute_import
import os
import sys
# Make the bundled 'lang-scheme' checkout importable regardless of the
# current working directory.
here = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(here, 'lang-scheme'))
from rpython.rlib.parsing.makepackrat import BacktrackException
from rpython.rlib.streamio import open_file_as_stream
from scheme.execution import ExecutionContext
from scheme.object import SchemeQuit, ContinuationReturn
from scheme.ssparser import parse
# Name of the executable produced by RPython translation.
_EXE_NAME = 'scheme-c'
def entry_point(argv):
    """Load, parse and evaluate the scheme file named on the command line.

    Returns a process exit status: 0 on success (or an explicit quit),
    1 on load or parse errors, with a diagnostic written to fd 2.
    """
    # NOTE(review): with no file argument, argv[0] (conventionally the
    # executable's own name) is used as the path — confirm this is the
    # intended fallback.
    path = argv[0] if len(argv) == 1 else argv[-1]
    try:
        f = open_file_as_stream(path, buffering=0)
    except OSError as exc:
        os.write(
            2,
            '%s -- %s (LoadError)\n' % (os.strerror(exc.errno), path))
        return 1
    try:
        code = f.readall()
    finally:
        f.close()
    try:
        t = parse(code)
    except BacktrackException as exc:
        (line, col) = exc.error.get_line_column(code)
        # expected = ' '.join(exc.error.expected)
        os.write(
            2,
            'parse error in line %d, column %d' % (line, col))
        return 1
    ctx = ExecutionContext()
    try:
        # Evaluate each top-level s-expression in order, printing results;
        # a continuation escape still prints its carried result.
        for sexpr in t:
            try:
                print(sexpr.eval(ctx).to_string())
            except ContinuationReturn as exc:
                print(exc.result.to_string())
    except SchemeQuit:
        return 0
    else:
        return 0
def target(driver, args):
    """RPython translation hook: name the executable, return the entry point."""
    driver.exe_name = _EXE_NAME
    return entry_point, None
if __name__ == '__main__':
    # Allow running untranslated under a plain Python interpreter.
    entry_point(sys.argv)
| {
"repo_name": "jptomo/rpython-lang-scheme",
"path": "targetscheme.py",
"copies": "1",
"size": "1581",
"license": "mit",
"hash": -168653382044187550,
"line_mean": 23.3230769231,
"line_max": 70,
"alpha_frac": 0.6034155598,
"autogenerated": false,
"ratio": 3.5369127516778525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46403283114778526,
"avg_score": null,
"num_lines": null
} |
'''A simple storage interface for feature collections.
.. This software is released under an MIT/X11 open source license.
Copyright 2012-2014 Diffeo, Inc.
Compatible interfaces on both ElasticSearch and :mod:`kvlayer`
are provided.
ElasticSearch based storage
===========================
:mod:`dossier.store` provides an interface to storing feature
collections in ElasticSearch.
.. autoclass:: ElasticStore
.. autoclass:: ElasticStoreSync
kvlayer based storage
=====================
:mod:`dossier.store` also provides a convenient interface to a
:mod:`kvlayer` table for storing :class:`dossier.fc.FeatureCollection`.
The interface consists of methods to query, search, add and remove
feature collections from the store. It also provides functions for
defining and searching indexes.
Using a storage backend in your code requires a working ``kvlayer``
configuration, which is usually written in a YAML file like so:
.. code-block:: yaml
kvlayer:
app_name: store
namespace: dossier
storage_type: redis
storage_addresses: ["redis.example.com:6379"]
And here's a full working example that uses local memory to store
feature collections:
.. code-block:: python
from dossier.fc import FeatureCollection
from dossier.store import Store
import kvlayer
import yakonfig
yaml = """
kvlayer:
app_name: store
namespace: dossier
storage_type: local
"""
with yakonfig.defaulted_config([kvlayer], yaml=yaml):
store = Store(kvlayer.client())
fc = FeatureCollection({u'NAME': {'Foo': 1, 'Bar': 2}})
store.put([('1', fc)])
print store.get('1')
See the documentation for :mod:`yakonfig` for more details on the
configuration setup.
Another example showing how to store, retrieve and delete a feature
collection:
.. code-block:: python
fc = dossier.fc.FeatureCollection()
fc[u'NAME'][u'foo'] += 1
fc[u'NAME'][u'bar'] = 42
kvl = kvlayer.client()
store = dossier.store.Store(kvl)
store.put('{yourid}', fc)
assert store.get('{yourid}')[u'NAME'][u'bar'] == 42
store.delete('{yourid}')
assert store.get('{yourid}') is None
Here is another example that demonstrates use of indexing to enable a
poor man's case insensitive search:
.. code-block:: python
fc = dossier.fc.FeatureCollection()
fc[u'NAME'][u'foo'] += 1
fc[u'NAME'][u'bar'] = 42
kvl = kvlayer.client()
store = dossier.store.Store(kvl)
# Index transforms must be defined on every instance of `Store`.
# (The index data is persisted; the transforms themselves are
# ephemeral.)
store.define_index(u'name_casei',
create=feature_index(u'NAME'),
transform=lambda s: s.lower().encode('utf-8'))
store.put('{yourid}', fc) # `put` automatically updates indexes.
assert list(store.index_scan(u'name_casei', 'FoO'))[0] == '{yourid}'
.. autoclass:: Store
.. autofunction:: feature_index
'''
from dossier.store.elastic import ElasticStore, ElasticStoreSync
from dossier.store.store import Store, feature_index
__all__ = ['ElasticStore', 'ElasticStoreSync', 'Store', 'feature_index']
| {
"repo_name": "dossier/dossier.store",
"path": "dossier/store/__init__.py",
"copies": "1",
"size": "3174",
"license": "mit",
"hash": 9137373948712843000,
"line_mean": 28.119266055,
"line_max": 72,
"alpha_frac": 0.6764335224,
"autogenerated": false,
"ratio": 3.720984759671747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4897418282071747,
"avg_score": null,
"num_lines": null
} |
'''A simple storage interface for labels (ground truth data).
.. This software is released under an MIT/X11 open source license.
Copyright 2012-2014 Diffeo, Inc.
:mod:`dossier.label` provides a convenient interface to a
:mod:`kvlayer` table for storing ground truth data, otherwise known as
"labels." Each label, at the highest level, maps two things (addressed
by content identifiers) to a coreferent value. This coreferent value is
an indication by a human that these two things are "the same", "not the
same" or "I don't know if they are the same." *Sameness* in this case
is determined by the human doing the annotation.
Each label also contains an ``annotator_id``, which identifies the
human that created the label. A timestamp (in milliseconds since the
Unix epoch) is also included on every label.
Example
-------
Using a storage backend in your code requires a working ``kvlayer``
configuration, which is usually written in a YAML file like so:
.. code-block:: yaml
kvlayer:
app_name: store
namespace: dossier
storage_type: redis
storage_addresses: ["redis.example.com:6379"]
And here's a full working example that uses local memory to store
labels:
.. code-block:: python
from dossier.label import Label, LabelStore, CorefValue
import kvlayer
import yakonfig
yaml = """
kvlayer:
app_name: store
namespace: dossier
storage_type: local
"""
with yakonfig.defaulted_config([kvlayer], yaml=yaml):
label_store = LabelStore(kvlayer.client())
lab = Label('a', 'b', 'annotator', CorefValue.Positive)
label_store.put(lab)
assert lab == label_store.get('a', 'b', 'annotator')
See the documentation for :mod:`yakonfig` for more details on the
configuration setup.
.. autoclass:: LabelStore
.. autoclass:: Label
.. autoclass:: CorefValue
:command:`dossier.label` command-line tool
==========================================
.. automodule:: dossier.label.run
'''
from __future__ import absolute_import, division, print_function
from dossier.label.label import Label, LabelStore, CorefValue, expand_labels
from dossier.label.relation_label import RelationLabel, RelationStrength, \
RelationLabelStore
__all__ = ['Label', 'LabelStore', 'CorefValue', 'expand_labels']
| {
"repo_name": "dossier/dossier.label",
"path": "dossier/label/__init__.py",
"copies": "1",
"size": "2292",
"license": "mit",
"hash": -4056349066417656300,
"line_mean": 30.8333333333,
"line_max": 76,
"alpha_frac": 0.7059336824,
"autogenerated": false,
"ratio": 3.794701986754967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5000635669154967,
"avg_score": null,
"num_lines": null
} |
"""A simple store using only in-process memory."""
from __future__ import unicode_literals
import copy
import time
import six
from openid.store import nonce
class ServerAssocs(object):
    """All associations issued by a single OpenID server, keyed by handle."""

    def __init__(self):
        self.assocs = {}

    def set(self, assoc):
        """Store *assoc*, replacing any previous one with the same handle."""
        self.assocs[assoc.handle] = assoc

    def get(self, handle):
        """Return the association for *handle*, or None if unknown."""
        return self.assocs.get(handle)

    def remove(self, handle):
        """Delete the association for *handle*; True when one existed."""
        if handle in self.assocs:
            del self.assocs[handle]
            return True
        return False

    def best(self):
        """Return the association with the most recent issued time,
        or None if there are none.  (The freshest association is the
        preferred one to use for signing.)
        """
        winner = None
        for candidate in self.assocs.values():
            if winner is None or winner.issued < candidate.issued:
                winner = candidate
        return winner

    def cleanup(self):
        """Remove expired associations.
        @return: tuple of (removed associations, remaining associations)
        """
        expired = [handle for handle, assoc in six.iteritems(self.assocs)
                   if assoc.getExpiresIn() == 0]
        for handle in expired:
            del self.assocs[handle]
        return len(expired), len(self.assocs)
class MemoryStore(object):
    """In-process memory store.
    Use for single long-running processes. No persistence supplied.
    """

    def __init__(self):
        # server_url -> ServerAssocs; nonce tuple -> None (dict used as a set).
        self.server_assocs = {}
        self.nonces = {}

    def _getServerAssocs(self, server_url):
        """Return (creating on demand) the ServerAssocs for *server_url*."""
        assocs = self.server_assocs.get(server_url)
        if assocs is None:
            assocs = self.server_assocs[server_url] = ServerAssocs()
        return assocs

    def storeAssociation(self, server_url, assoc):
        """Store a deep copy of *assoc* so later caller mutation cannot leak in."""
        self._getServerAssocs(server_url).set(copy.deepcopy(assoc))

    def getAssociation(self, server_url, handle=None):
        """Fetch by handle, or the freshest association when handle is None."""
        assocs = self._getServerAssocs(server_url)
        if handle is None:
            return assocs.best()
        return assocs.get(handle)

    def removeAssociation(self, server_url, handle):
        """Remove one association; True when something was removed."""
        return self._getServerAssocs(server_url).remove(handle)

    def useNonce(self, server_url, timestamp, salt):
        """Consume a nonce; False when outside the clock skew or already seen."""
        if abs(timestamp - time.time()) > nonce.SKEW:
            return False
        anonce = (six.text_type(server_url), int(timestamp),
                  six.text_type(salt))
        if anonce in self.nonces:
            return False
        self.nonces[anonce] = None
        return True

    def cleanupNonces(self):
        """Drop nonces outside the allowed clock skew; return count removed."""
        now = time.time()
        # Collect first: deleting while iterating over the dict is unsafe.
        stale = [n for n in self.nonces if abs(n[1] - now) > nonce.SKEW]
        for n in stale:
            del self.nonces[n]
        return len(stale)

    def cleanupAssociations(self):
        """Expire associations for every server; drop servers left empty."""
        removed_total = 0
        empty_urls = []
        for server_url, assocs in six.iteritems(self.server_assocs):
            removed, remaining = assocs.cleanup()
            removed_total += removed
            if not remaining:
                empty_urls.append(server_url)
        # Remove entries from server_assocs that had none remaining.
        for server_url in empty_urls:
            del self.server_assocs[server_url]
        return removed_total

    def __eq__(self, other):
        return (self.server_assocs == other.server_assocs and
                self.nonces == other.nonces)

    def __ne__(self, other):
        return not (self == other)
| {
"repo_name": "openid/python-openid",
"path": "openid/store/memstore.py",
"copies": "1",
"size": "3649",
"license": "apache-2.0",
"hash": -3276404655914737000,
"line_mean": 27.5078125,
"line_max": 94,
"alpha_frac": 0.5861879967,
"autogenerated": false,
"ratio": 4.058954393770857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001775112323491656,
"num_lines": 128
} |
#A simple substitution encryption using alphabet strings and dictionaries
"""
Good programming practice tells us to make as few heavy system calls as possible. And where possible, we should front-load the heavy work if it will save us significant time later. crypt-1 called shift() for each character in the phrase. That's a lot of work! Instead, let's make a dictionary that allows us easy lookup of shifted characters right away, without having to do the same operation over and over.
"""
import string
import sys #Hey, a new module!
# --- Command-line parsing -------------------------------------------------
args = sys.argv
# Shift amount is argv[1]; default to 0 when missing or not an integer.
# A bare ``except:`` would also swallow KeyboardInterrupt/SystemExit, so
# catch only the two errors int(sys.argv[1]) can actually raise.
try:
    shift_count = int(sys.argv[1])
except (IndexError, ValueError):
    shift_count = 0
try:
    phrase_to_encrypt = sys.argv[2]
except IndexError:
    print("No phrase given!")
    exit()
def build_shift_library(shift):
    """Build a {plain_char: shifted_char} map for ASCII letters.

    Both lower- and upper-case alphabets are covered; non-letter
    characters are intentionally absent so callers can fall back to the
    original character on a missing key.

    Bug fix / generalization: the original wrapped an out-of-range index
    with a single IndexError retry, which crashed for shifts outside
    [-26, 26].  Taking the shifted index modulo the alphabet length
    handles any integer shift, including negative ones.
    """
    shift_library = {}
    for lib in (string.ascii_lowercase, string.ascii_uppercase):
        size = len(lib)
        for index, char in enumerate(lib):
            shift_library[char] = lib[(index + shift) % size]
    return shift_library
def encrypt(phrase):
    """Encrypt PHRASE by substituting each letter using SHIFT_COUNT."""
    table = build_shift_library(shift_count)
    # Characters with no mapping (spaces, punctuation) pass through
    # unchanged, matching the original KeyError fallback.
    return "".join(table.get(symbol, symbol) for symbol in phrase)

print(encrypt(phrase_to_encrypt))
| {
"repo_name": "mttaggart/python-cs",
"path": "crypt/crypt-2.py",
"copies": "1",
"size": "1524",
"license": "mit",
"hash": 5558027791181760000,
"line_mean": 33.6363636364,
"line_max": 408,
"alpha_frac": 0.6738845144,
"autogenerated": false,
"ratio": 4.118918918918919,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5292803433318918,
"avg_score": null,
"num_lines": null
} |
#A simple substitution encryption using alphabet strings
import string
import sys #Hey, a new module!
# --- Command-line parsing -------------------------------------------------
args = sys.argv
# Shift amount is argv[1]; default to 0 when missing or non-numeric.
# Catch only what indexing/int() can raise instead of a bare except,
# which would also hide SystemExit and KeyboardInterrupt.
try:
    shift_count = int(sys.argv[1])
except (IndexError, ValueError):
    shift_count = 0
try:
    phrase_to_encrypt = sys.argv[2]
except IndexError:
    print("No phrase given!")
    exit()
# NOTE(review): these two aliases are unused below -- shift() reads
# string.ascii_* directly -- but they are kept for compatibility.
lowers = string.ascii_lowercase
uppers = string.ascii_uppercase
def shift(char, shift):
    """
    Takes string CHAR and returns the character at shifted index of the alphabet.
    shift('e',1) -> 'f'

    Non-letter characters (spaces, punctuation) are returned unchanged.

    Bug fix / generalization: the shifted index is now taken modulo the
    alphabet length, so any integer shift (including negative ones)
    works; the previous single IndexError retry only covered shifts up
    to one full alphabet and crashed beyond that.
    """
    # Pick the alphabet this character belongs to, if any.
    if char in string.ascii_lowercase:
        library = string.ascii_lowercase
    elif char in string.ascii_uppercase:
        library = string.ascii_uppercase
    else:
        # Not a letter; leave it untouched.
        return char
    return library[(library.index(char) + shift) % len(library)]
def encrypt(phrase):
    """Encrypt PHRASE by shifting every character by SHIFT_COUNT."""
    # shift() returns non-letters unchanged, so the whole phrase maps cleanly.
    return "".join(shift(symbol, shift_count) for symbol in phrase)

print(encrypt(phrase_to_encrypt))
| {
"repo_name": "mttaggart/python-cs",
"path": "crypt/crypt-1.py",
"copies": "1",
"size": "1574",
"license": "mit",
"hash": 2577147471215328000,
"line_mean": 31.1224489796,
"line_max": 265,
"alpha_frac": 0.6880559085,
"autogenerated": false,
"ratio": 4.098958333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5287014241833333,
"avg_score": null,
"num_lines": null
} |
"""A simple tasks manager using the Dropbox Datastore API.
This example requires Python 2.7 (for OrderedDict).
This uses the same data model as the JavaScript tutorial example.
The code is somewhat uncharacteristic for Flask apps (e.g. it doesn't
use template files). The advantage of this style is that the entire
app is one file, and it's easier to follow what is going on.
"""
from collections import OrderedDict # Requires Python 2.7.
from threading import Lock
from weakref import WeakValueDictionary
from flask import Flask, request, session, redirect, url_for, abort, render_template_string
from dropbox.client import DropboxClient, DropboxOAuth2Flow, ErrorResponse
from dropbox.datastore import DatastoreManager, Date, DatastoreError
# Fill these in! See https://www.dropbox.com/developers/apps
DROPBOX_APP_KEY = 'CHANGEME'
DROPBOX_APP_SECRET = 'CHANGEME'
# Flask options. These are read by Flask.
DEBUG = True
SECRET_KEY = 'development key'  # signs the session cookie; change for production
# Create application object.
app = Flask(__name__)
app.config.from_object(__name__)  # picks up the UPPER_CASE settings above
app.config.from_envvar('TASKS_SETTINGS', silent=True)  # optional override file
# Application routes and helper.
@app.route('/')
def home():
    """Render the task list, or a link to the OAuth flow if not linked."""
    # Serialize per-user work: concurrent requests using the same access
    # token must not refresh/mutate the same datastore at once.
    with get_access_token_lock():
        datastore = open_datastore(refresh=True)
        if not datastore:
            # No valid access token yet: offer the Dropbox link flow.
            return '<a href="%s">Link to Dropbox</a>' % url_for('dropbox_auth_start')
        return render_template_string(
            '''
            <h1>My Tasks</h1>
            <ul>
              {% for task in tasks %}
                <li>
                  {% if task.get('completed') %}
                    [✓] {{ task.get('taskname') }}
                    (<a href="{{ url_for('uncomplete', id=task.get_id())}}">mark incomplete</a>)
                    (<a href="{{ url_for('delete', id=task.get_id()) }}">delete</a>)
                  {% else %}
                    [&nbsp;] {{ task.get('taskname') }}
                    (<a href="{{ url_for('complete', id=task.get_id())}}">mark complete</a>)
                    (<a href="{{ url_for('delete', id=task.get_id()) }}">delete</a>)
                  {% endif %}
                </li>
              {% endfor %}
            </ul>
            <form method="post" action="{{ url_for('add') }}">
              <input type="text" name="name" />
              <input type="submit" name="Add" />
            </form>
            <a href="{{ url_for('dropbox_logout') }}">Log out</a>
            ''',
            # Oldest tasks first, via the 'created' Date field set in add().
            tasks=sorted(datastore.get_table('tasks').query(),
                         key=lambda record: record.get('created')))
@app.route('/add', methods=['POST'])
def add():
    """Create a new task from the submitted form; blank names are ignored."""
    name = request.form.get('name')
    if name:
        with get_access_token_lock():
            datastore = open_datastore()
            if datastore:
                table = datastore.get_table('tasks')
                def insert_task():
                    table.insert(completed=False, taskname=name, created=Date())
                try:
                    # Retried automatically on conflicts with other clients.
                    datastore.transaction(insert_task, max_tries=4)
                except DatastoreError:
                    return 'Sorry, something went wrong. Please hit back or reload.'
    return redirect(url_for('home'))
@app.route('/delete')
def delete():
    """Remove the task identified by the 'id' query parameter."""
    record_id = request.args.get('id')
    if record_id:
        with get_access_token_lock():
            datastore = open_datastore()
            if datastore:
                table = datastore.get_table('tasks')
                def remove_task():
                    record = table.get(record_id)
                    if record:
                        record.delete_record()
                try:
                    # Retried automatically on conflicts with other clients.
                    datastore.transaction(remove_task, max_tries=4)
                except DatastoreError:
                    return 'Sorry, something went wrong. Please hit back or reload.'
    return redirect(url_for('home'))
@app.route('/complete')
def complete():
    """Mark the task given by ?id=... as completed."""
    return change_completed(True)
@app.route('/uncomplete')
def uncomplete():
    """Mark the task given by ?id=... as not completed."""
    return change_completed(False)
def change_completed(completed):
    """Set the 'completed' flag of the task named by ?id=..., then go home."""
    record_id = request.args.get('id')
    if record_id:
        with get_access_token_lock():
            datastore = open_datastore()
            if datastore:
                table = datastore.get_table('tasks')
                def update_flag():
                    record = table.get(record_id)
                    if record:
                        record.update(completed=completed)
                try:
                    # Retried automatically on conflicts with other clients.
                    datastore.transaction(update_flag, max_tries=4)
                except DatastoreError:
                    return 'Sorry, something went wrong. Please hit back or reload.'
    return redirect(url_for('home'))
# Locking per access token.
locks = WeakValueDictionary()
meta_lock = Lock()
def get_access_token_lock():
    """Return a per-access-token Lock (a throwaway one when logged out).

    Locks are held in a WeakValueDictionary, so an entry disappears as
    soon as no request thread keeps a reference to its lock.
    """
    token = session.get('access_token')
    if not token:
        return Lock()  # Dummy lock.
    with meta_lock:
        token_lock = locks.get(token)
        if token_lock is None:
            locks[token] = token_lock = Lock()
        return token_lock
# LRU cache used by open_datastore.
cache = OrderedDict()  # access_token -> datastore, oldest entry first
def open_datastore(refresh=False):
    """Return the logged-in user's default datastore, or None.

    Keeps up to 32 open datastores in an LRU cache keyed by access
    token.  With refresh=True, a cached datastore is brought up to date
    via load_deltas() instead of being reopened from scratch.
    """
    access_token = session.get('access_token')
    if not access_token:
        return None
    datastore = cache.get(access_token)
    try:
        if datastore is not None:
            # Delete the cache entry now, so that if the refresh fails we
            # don't cache the probably invalid datastore.
            del cache[access_token]
            if refresh:
                datastore.load_deltas()
        else:
            client = DropboxClient(access_token)
            manager = DatastoreManager(client)
            datastore = manager.open_default_datastore()
            if len(cache) >= 32:
                # Cache full: evict the oldest entry (OrderedDict keeps
                # insertion order; re-insertion below moves fresh
                # entries to the end).
                to_delete = next(iter(cache))
                del cache[to_delete]
    except (ErrorResponse, DatastoreError):
        app.logger.exception('An exception occurred opening a datastore')
        return None
    # (Re-)insert at the most-recently-used end.
    cache[access_token] = datastore
    return datastore
# Dropbox auth routes and helper. Same as ../flask_app/.
@app.route('/dropbox-auth-finish')
def dropbox_auth_finish():
    """OAuth redirect target: validate the callback and store the token.

    Bad or forged callbacks are rejected with 400/403; a user who
    declined access is simply sent back to the home page.
    """
    try:
        access_token, user_id, url_state = get_auth_flow().finish(request.args)
    except DropboxOAuth2Flow.BadRequestException:
        abort(400)
    except DropboxOAuth2Flow.BadStateException:
        abort(400)
    except DropboxOAuth2Flow.CsrfException:
        abort(403)
    except DropboxOAuth2Flow.NotApprovedException:
        return redirect(url_for('home'))
    except DropboxOAuth2Flow.ProviderException as e:
        # Bug fix: the original ``'Auth error' + e`` raised a TypeError
        # (str + exception object).  Use lazy %-formatting instead;
        # logger.exception also records the active traceback.
        app.logger.exception('Auth error: %s', e)
        abort(403)
    session['access_token'] = access_token
    return redirect(url_for('home'))
@app.route('/dropbox-auth-start')
def dropbox_auth_start():
    """Send the user to Dropbox to authorize this app."""
    return redirect(get_auth_flow().start())
@app.route('/dropbox-logout')
def dropbox_logout():
    """Forget the stored access token and return to the home page."""
    # pop() with a default is the idiomatic "delete if present" and
    # replaces the check-then-delete pair.
    session.pop('access_token', None)
    return redirect(url_for('home'))
def get_auth_flow():
    """Build the OAuth2 flow; the CSRF token lives in the Flask session."""
    redirect_uri = url_for('dropbox_auth_finish', _external=True)
    return DropboxOAuth2Flow(DROPBOX_APP_KEY, DROPBOX_APP_SECRET, redirect_uri,
                             session, 'dropbox-auth-csrf-token')
# Main boilerplate.
def main():
    # Serve requests in threads; the code above synchronizes work per
    # access token, so concurrent requests are safe.
    app.run(threaded=True)
if __name__ == '__main__':
    main()
| {
"repo_name": "bhunter/stopdroproll",
"path": "src/dropbox-python-sdk-2.1.0/example/datastore_app/tasks.py",
"copies": "6",
"size": "7334",
"license": "isc",
"hash": 956610087651548000,
"line_mean": 31.5955555556,
"line_max": 100,
"alpha_frac": 0.5784019635,
"autogenerated": false,
"ratio": 4.047461368653422,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7625863332153422,
"avg_score": null,
"num_lines": null
} |
# A simple test for a vtkTkRenderWidget. Run it like so:
# python TestTkRenderWidget.py -B $VTK_DATA_ROOT/Baseline/Rendering
import os
import vtk
from vtk.test import Testing
import Tkinter
from vtk.tk.vtkTkRenderWidget import vtkTkRenderWidget
class TestTkRenderWidget(Testing.vtkTest):
    """Render a cone through a vtkTkRenderWidget, compare against the
    baseline image, and run blackbox checks on the actor."""
    # Stick your VTK pipeline here if you want to create the pipeline
    # only once. If you put it in the constructor or in the function
    # the pipeline will be created afresh for each and every test.
    # create a dummy Tkinter root window.
    root = Tkinter.Tk()
    # create a rendering window and renderer
    ren = vtk.vtkRenderer()
    tkrw = vtkTkRenderWidget(root, width=300, height=300)
    tkrw.pack()
    rw = tkrw.GetRenderWindow()
    rw.AddRenderer(ren)
    # create an actor and give it cone geometry
    cs = vtk.vtkConeSource()
    cs.SetResolution(8)
    # NOTE(review): ``map`` shadows the builtin of the same name inside
    # the class body; harmless here, but worth renaming if this grows.
    map = vtk.vtkPolyDataMapper()
    map.SetInputConnection(cs.GetOutputPort())
    act = vtk.vtkActor()
    act.SetMapper(map)
    # assign our actor to the renderer
    ren.AddActor(act)
    def testvtkTkRenderWidget(self):
        "Test if vtkTkRenderWidget works."
        self.rw.Render()
        self.root.update()
        img_file = "TestTkRenderWidget.png"
        Testing.compareImage(self.rw, Testing.getAbsImagePath(img_file))
        Testing.interact()
    # Dummy tests to demonstrate how the blackbox tests can be done.
    def testParse(self):
        "Test if vtkActor is parseable"
        self._testParse(self.act)
    def testGetSet(self):
        "Testing Get/Set methods"
        self._testGetSet(self.act)
    def testBoolean(self):
        "Testing Boolean methods"
        self._testBoolean(self.act)
if __name__ == "__main__":
    cases = [(TestTkRenderWidget, 'test')]
    # NOTE(review): the class is deleted so only the explicit ``cases``
    # list is run -- presumably to keep the test harness from picking
    # the class up a second time; confirm against vtk.test.Testing.
    del TestTkRenderWidget
    Testing.main(cases)
| {
"repo_name": "sgh/vtk",
"path": "Rendering/Testing/Python/TestTkRenderWidget.py",
"copies": "2",
"size": "1845",
"license": "bsd-3-clause",
"hash": 1545545865803008000,
"line_mean": 27.3846153846,
"line_max": 72,
"alpha_frac": 0.6764227642,
"autogenerated": false,
"ratio": 3.742393509127789,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00982248520710059,
"num_lines": 65
} |
# a simple text encoder using the Huffman coding algorithm
class TreeNode:
    """Internal Huffman-tree node; its count is the sum of its children's."""
    # Class-level defaults; __init__ overwrites child0/child1 per instance.
    parent, child0, child1 = None, None, None
    is_leaf = False
    code = ''
    def __init__(self, child0, child1):
        self.count = child0.count + child1.count
        self.child0, self.child1 = child0, child1
    def set_children_codes(self):
        """Recursively assign prefix codes: '0' left subtree, '1' right."""
        for child, bit in ((self.child0, '0'), (self.child1, '1')):
            child.code = self.code + bit
            if not child.is_leaf:
                child.set_children_codes()
    def get_children_codes(self):
        """Collect [char, count, code] triples for every leaf below here."""
        codes = []
        for child in (self.child0, self.child1):
            if child.is_leaf:
                codes.append([child.char, child.count, child.code])
            else:
                codes.extend(child.get_children_codes())
        return codes
class TreeLeaf:
    """Huffman-tree leaf holding one character and its frequency."""
    parent = None
    is_leaf = True
    code = ''
    def __init__(self, item):
        # item is a (character, frequency) pair, e.g. from dict.items().
        self.char = item[0]
        self.count = item[1]
class TreeRoots:
    """Mutable pool of tree roots; pop_min extracts the lowest-count root."""
    def __init__(self, roots):
        self.roots = roots
        self.count = len(roots)
    def pop_min(self):
        """Remove and return the root with the smallest count.

        Uses the builtin min() with a key instead of a hand-rolled
        index scan; ties still resolve to the earliest such root,
        matching the original strict-less-than comparison.
        """
        min_i = min(range(len(self.roots)), key=lambda i: self.roots[i].count)
        min_root = self.roots.pop(min_i)
        self.count -= 1
        return min_root
    def add(self, root):
        """Append a root to the pool."""
        self.roots.append(root)
        self.count += 1
def compress_huffman(text):
    """Build Huffman codes for *text*.

    Returns a list of [char, count, code] triples in first-occurrence
    order of the characters.

    Fixes over the original:
    - collections.Counter counts frequencies in one pass instead of
      calling text.count() once per character (which was O(n^2));
      Counter preserves first-occurrence order, like the old dict.
    - A text with a single distinct character previously crashed (the
      lone root is a TreeLeaf with no set_children_codes); such a
      character now gets the one-bit code '0'.
    - Empty input returns an empty list instead of raising IndexError.
    """
    from collections import Counter
    char_frequency = Counter(text)
    if not char_frequency:
        return []
    tree_leafs = [TreeLeaf(item) for item in char_frequency.items()]
    if len(tree_leafs) == 1:
        only = tree_leafs[0]
        return [[only.char, only.count, '0']]
    tree_roots = TreeRoots(tree_leafs)
    # Classic Huffman construction: repeatedly merge the two cheapest roots.
    while tree_roots.count > 1:
        child0 = tree_roots.pop_min()
        child1 = tree_roots.pop_min()
        parent = TreeNode(child0, child1)
        child0.parent = child1.parent = parent
        tree_roots.add(parent)
    tree_root = tree_roots.roots[0]
    tree_root.set_children_codes()
    return tree_root.get_children_codes()
# --- Demo: encode a sample string and report per-character codes. ---
text = 'test text'
char_codes = compress_huffman(text)
print('char\tfreq\tcode')
# Uncompressed size assumes 8 bits per character.
original_size = len(text)*8
compressed_size = 0
for char_code in char_codes:
    # Each character contributes (frequency * code length) bits.
    compressed_size += char_code[1]*len(char_code[2])
    print('{}\t{}\t{}'.format(char_code[0], char_code[1], char_code[2]))
print('compression ratio: {:.3}'.format(original_size/compressed_size)) | {
"repo_name": "m-elagdar/Assignments",
"path": "huffman_encoder.py",
"copies": "1",
"size": "2541",
"license": "apache-2.0",
"hash": 5948893015218084000,
"line_mean": 35.3142857143,
"line_max": 109,
"alpha_frac": 0.6253443526,
"autogenerated": false,
"ratio": 3.2164556962025315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9252129164425591,
"avg_score": 0.017934176875388147,
"num_lines": 70
} |
"""A simple text window used to display either Python code with some
simple syntax highlighting, or some other document which will be formatted as
though it was an html file or a simple text file.
The syntax highlighter for Python code is really inadequate; HELP!! :-)
"""
try:
from PyQt4 import QtGui, QtCore
qt_widgets = QtGui
except ImportError:
from PyQt5 import QtCore, QtGui
from PyQt5 import QtWidgets as qt_widgets
import keyword
import sys
if sys.version_info < (3,):
from cStringIO import StringIO
else:
from io import StringIO
class TextWindow(qt_widgets.QMainWindow):
    """Main window that displays a string or file as plain text, HTML,
    generic code, or syntax-highlighted Python."""
    def __init__(self, file_name=None, title="Title", text_type='text',
                 text='Default text'):
        """Simple text window whose input comes from a file, if a file_name
        is specified, or from a supplied string.

        text_type can be one of 4 values: 'text', 'code', 'html', 'python'.
        If 'python' is specified, some basic syntax highlighting is added.
        """
        super(TextWindow, self).__init__(None)
        self.setWindowTitle(title)
        self.resize(900, 600)
        self.editor = qt_widgets.QTextEdit(self)
        self.setCentralWidget(self.editor)
        self.editor.setFocus()
        # A file, when given, overrides the text argument.
        if file_name is not None:
            text = self.load(file_name)
        if text_type == 'text' or text_type == 'html':
            self.set_text_font()
        elif text_type == 'code':
            self.set_monospace_font()
        elif text_type == 'python':
            self.set_monospace_font()
            # Keep a reference so the highlighter is not garbage collected.
            self.highlighter = Highlighter(self.editor.document())
        else:
            self.set_text_font()
            text = "Unknown text_type: {}".format(text_type)
        if text_type == 'html':
            self.editor.setHtml(text)
        else:
            self.editor.setPlainText(text)
    def set_text_font(self):
        """Use a proportional font, suitable for prose."""
        font = QtGui.QFont()
        font.setFamily('Arial')
        font.setFixedPitch(False)
        font.setPointSize(12)
        self.editor.setFont(font)
    def set_monospace_font(self):
        """Use a fixed-pitch font, suitable for code."""
        font = QtGui.QFont()
        font.setFamily('Courier')
        font.setFixedPitch(True)
        font.setPointSize(12)
        self.editor.setFont(font)
    def load(self, f):
        """Return the contents of file *f*, or an error-message string."""
        if not QtCore.QFile.exists(f):
            self.text_type = 'text'
            return "File %s could not be found." % f
        try:
            file_handle = QtCore.QFile(f)
            file_handle.open(QtCore.QFile.ReadOnly)
            data = file_handle.readAll()
            codec = QtCore.QTextCodec.codecForHtml(data)
            return codec.toUnicode(data)
        except Exception:
            # Bug fix: narrowed from a bare ``except:``, which would also
            # swallow KeyboardInterrupt and SystemExit.
            self.text_type = 'text'
            return 'Problem reading file %s' % f
class Highlighter(QtGui.QSyntaxHighlighter):
    """Adapted from example included with PyQt distribution"""
    def __init__(self, parent=None):
        super(Highlighter, self).__init__(parent)
        # Python keywords: bold blue.
        keywordFormat = QtGui.QTextCharFormat()
        keywordFormat.setForeground(QtCore.Qt.blue)
        keywordFormat.setFontWeight(QtGui.QFont.Bold)
        keywordPatterns = ["\\b{}\\b".format(k) for k in keyword.kwlist]
        self.highlightingRules = [(QtCore.QRegExp(pattern), keywordFormat)
                for pattern in keywordPatterns]
        # Qt-style class names (Q...): bold.
        classFormat = QtGui.QTextCharFormat()
        classFormat.setFontWeight(QtGui.QFont.Bold)
        self.highlightingRules.append((QtCore.QRegExp("\\bQ[A-Za-z]+\\b"),
                classFormat))
        # '#' comments to end of line: gray.
        singleLineCommentFormat = QtGui.QTextCharFormat()
        singleLineCommentFormat.setForeground(QtCore.Qt.gray)
        self.highlightingRules.append((QtCore.QRegExp("#[^\n]*"),
                singleLineCommentFormat))
        # Quoted strings: dark green.
        # NOTE(review): these simple patterns do not handle escapes or
        # triple-quoted strings -- a known limitation of this highlighter.
        quotationFormat = QtGui.QTextCharFormat()
        quotationFormat.setForeground(QtCore.Qt.darkGreen)
        self.highlightingRules.append((QtCore.QRegExp("\".*\""),
                quotationFormat))
        self.highlightingRules.append((QtCore.QRegExp("'.*'"),
                quotationFormat))
    def highlightBlock(self, text):
        """Apply every rule to one block (line) of text; called by Qt."""
        for pattern, format in self.highlightingRules:
            expression = QtCore.QRegExp(pattern)
            index = expression.indexIn(text)
            while index >= 0:
                length = expression.matchedLength()
                self.setFormat(index, length, format)
                index = expression.indexIn(text, index + length)
        self.setCurrentBlockState(0)
if __name__ == '__main__':
    # Demo: open four windows, one per supported text_type mode.
    app = qt_widgets.QApplication([])
    editor1 = TextWindow(file_name="../README.rst",
                         title="Demo of text file",
                         text_type = 'text')
    editor1.move(10, 10)
    editor1.show()
    editor2 = TextWindow(file_name = "readme.html",
                         title="Demo of html file",
                         text_type="html")
    editor2.move(840, 10)
    editor2.show()
    # This script displays its own source with syntax highlighting.
    editor3 = TextWindow(title="Demo of Python file", file_name=__file__, text_type='python')
    editor3.move(440, 410)
    editor3.show()
    # Exercises the fallback path for an unrecognized text_type.
    # NOTE(review): "test_type" in the title looks like a typo for "text_type".
    editor4 = TextWindow(file_name="../README.rst",
                         title="Demo of unknown test_type",
                         text_type = 'unknown')
    editor4.show()
    sys.exit(app.exec_())
| {
"repo_name": "aroberge/easygui_qt",
"path": "easygui_qt/show_text_window.py",
"copies": "1",
"size": "5272",
"license": "bsd-3-clause",
"hash": -8414295623541997000,
"line_mean": 32.3670886076,
"line_max": 93,
"alpha_frac": 0.5992033384,
"autogenerated": false,
"ratio": 3.984882842025699,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002868585301189815,
"num_lines": 158
} |
"""A simple, threaded HTTPS server."""
from http.server import HTTPServer, SimpleHTTPRequestHandler
from socketserver import ThreadingMixIn
from ssl import wrap_socket
from typing import Mapping
__version__ = '0.1'
__author__ = 'Chris Calderon'
__email__ = 'pythonwiz@protonmail.com'
__license__ = 'MIT'
class ThreadedHTTPSServer(ThreadingMixIn, HTTPServer):
    """An HTTPS server class which creates a thread for each client.

    TLS is configured from a certificate/private-key file pair.  The
    handshake is deferred to get_request() so a slow or broken client
    performs it inside its own handler thread rather than blocking the
    accept loop.
    """
    def __init__(self, certfile, keyfile, *args, **kwds):
        self.certfile = certfile
        self.keyfile = keyfile
        super().__init__(*args, **kwds)
    def server_bind(self):
        super().server_bind()
        # Bug fix / modernization: ssl.wrap_socket() was deprecated in
        # Python 3.7 and removed in 3.12.  SSLContext is the supported
        # replacement and also applies modern protocol/cipher defaults.
        import ssl
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        context.load_cert_chain(certfile=self.certfile, keyfile=self.keyfile)
        self.socket = context.wrap_socket(self.socket,
                                          server_side=True,
                                          do_handshake_on_connect=False)
    def get_request(self):
        # Complete the deferred TLS handshake for this connection.
        socket, addr = super().get_request()
        socket.do_handshake()
        return socket, addr
class PrettyURLRequestHandler(SimpleHTTPRequestHandler):
    """A handler class for pretty routing."""
    routes = None  # type: Mapping[str, str]
    def _reroute(self):
        """Rewrite self.path when it matches a configured pretty URL."""
        if self.path in self.routes:
            print('rerouting:', self.path, '->', self.routes[self.path])
            self.path = self.routes[self.path]
    def do_GET(self):
        self._reroute()
        super().do_GET()
    def do_HEAD(self):
        self._reroute()
        super().do_HEAD()
def make_server(host: str,
                port: int,
                certificate: str,
                private_key: str,
                routes: Mapping[str, str]) -> ThreadedHTTPSServer:
    """Creates an HTTPS server.

    Arguments:
    host -- The IPv4 address or hostname to bind to.
    port -- The integer port to bind to.
    certificate -- The path to the certificate file.
    private_key -- The path to the private_key file.
    routes -- A mapping of pretty urls to file names.
    """
    # Routes are stored on the handler class because the http.server
    # machinery instantiates the handler itself, once per request.
    PrettyURLRequestHandler.routes = routes
    address = (host, port)
    return ThreadedHTTPSServer(certificate, private_key, address,
                               PrettyURLRequestHandler)
| {
"repo_name": "ChrisCalderon/webserver",
"path": "webserver/__init__.py",
"copies": "1",
"size": "2368",
"license": "mit",
"hash": -4233666685929304000,
"line_mean": 32.8285714286,
"line_max": 72,
"alpha_frac": 0.5760135135,
"autogenerated": false,
"ratio": 4.369003690036901,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.54450172035369,
"avg_score": null,
"num_lines": null
} |
"""A simple thread subclass for making ev3 function calls asynchronous.
EXAMPLE USAGE:
import time
from ev3 import *
finished = False
def keep_alive_finished(result):
global finished
print 'The keep_alive() function returned: ', result
finished = True
if ("__main__" == __name__):
try:
async_thread = async.AsyncThread()
with ev3.EV3() as brick:
async_thread.put(brick.keep_alive, keep_alive_finished)
while (not finished):
print 'Waiting...'
time.sleep(0.1)
except ev3.EV3Error as ex:
print 'An error occurred: ', ex
async_thread.stop()
"""
import threading
import Queue
class AsyncThread(threading.Thread):
    """A simple thread subclass maintains a queue of functions to call."""
    # Sentinel queue item that tells the worker loop to exit.
    _STOP_QUEUE_ITEM = 'STOP'
    def __init__(self):
        """Creates and starts a new thread."""
        super(AsyncThread, self).__init__()
        # Bug fix: assigning self._daemon had no effect -- the public
        # ``daemon`` property is what actually marks the thread as a
        # daemon so it will not block interpreter shutdown.
        self.daemon = True
        self._queue = Queue.Queue()
        self.start()
    def run(self):
        """This function is called automatically by the Thread class."""
        try:
            while True:
                item = self._queue.get(block=True)
                if self._STOP_QUEUE_ITEM == item:
                    break
                ev3_func, cb, args, kwargs = item
                # Invoke the callback with the function's result.
                cb(ev3_func(*args, **kwargs))
        except KeyboardInterrupt:
            pass
    def stop(self):
        """Instructs the thread to exit after the current function is
        finished.
        """
        # Drop all pending work, then enqueue the stop sentinel.  put()
        # happens outside the mutex: Queue.put acquires it internally.
        with self._queue.mutex:
            self._queue.queue.clear()
        self._queue.put(self._STOP_QUEUE_ITEM)
    def put(self, ev3_func, cb, *args, **kwargs):
        """Adds a new function to the queue. The cb (callback) parameter should
        be a function that accepts the result as its only parameter.
        """
        self._queue.put((ev3_func, cb, args, kwargs))
| {
"repo_name": "inductivekickback/ev3",
"path": "ev3/async.py",
"copies": "1",
"size": "2034",
"license": "mit",
"hash": -7988026229714290000,
"line_mean": 21.3516483516,
"line_max": 79,
"alpha_frac": 0.5486725664,
"autogenerated": false,
"ratio": 4.355460385438972,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5404132951838972,
"avg_score": null,
"num_lines": null
} |
"""A simple three-species chemical kinetics system known as "Robertson's
example", as presented in:
H. H. Robertson, The solution of a set of reaction rate equations, in Numerical
Analysis: An Introduction, J. Walsh, ed., Academic Press, 1966, pp. 178-182.
"""
# exported from PySB model 'robertson'
import numpy
import scipy.integrate
import collections
import itertools
import distutils.errors
_use_cython = False
# try to inline a C statement to see if Cython is functional
try:
    import Cython
except ImportError:
    Cython = None
if Cython:
    from Cython.Compiler.Errors import CompileError
    try:
        # A trivial inline compile proves a working C toolchain, not
        # merely that the Cython package is importable.
        Cython.inline('x = 1', force=True, quiet=True)
        _use_cython = True
    except (CompileError,
            distutils.errors.CompileError,
            ValueError):
        # Any failure falls back to the pure-Python exec path below.
        pass
# Lightweight records describing the generated model's metadata.
Parameter = collections.namedtuple('Parameter', 'name value')
Observable = collections.namedtuple('Observable', 'name species coefficients')
Initial = collections.namedtuple('Initial', 'param_index species_index')
class Model(object):
    """Standalone ODE model of Robertson's three-species kinetics.

    Species order is [A, B, C]; parameter order is
    [k1, k2, k3, A_0, B_0, C_0].
    """
    def __init__(self):
        # Simulation outputs (filled in by simulate()).
        self.y = None
        self.yobs = None
        # Pre-allocated work arrays, reused across simulate() calls.
        self.y0 = numpy.empty(3)
        self.ydot = numpy.empty(3)
        self.sim_param_values = numpy.empty(6)
        self.parameters = [None] * 6
        self.observables = [None] * 3
        self.initials = [None] * 3
        # Rate constants followed by initial-condition parameters.
        self.parameters[0] = Parameter('k1', 0.040000000000000001)
        self.parameters[1] = Parameter('k2', 30000000)
        self.parameters[2] = Parameter('k3', 10000)
        self.parameters[3] = Parameter('A_0', 1)
        self.parameters[4] = Parameter('B_0', 0)
        self.parameters[5] = Parameter('C_0', 0)
        # Each observable sums the listed species with the given coefficients.
        self.observables[0] = Observable('A_total', [0], [1])
        self.observables[1] = Observable('B_total', [1], [1])
        self.observables[2] = Observable('C_total', [2], [1])
        # Map initial-condition parameter indices onto species indices.
        self.initials[0] = Initial(3, 0)
        self.initials[1] = Initial(4, 1)
        self.initials[2] = Initial(5, 2)
        # Right-hand side of the ODE system.  The same source runs either
        # through Cython.inline (compiled) or plain exec; both mutate the
        # pre-allocated ydot array in place via the local names y, p, ydot.
        code_eqs = '''
ydot[0] = (y[0]*p[0])*(-1.0) + (y[1]*y[2]*p[2])*1.0
ydot[1] = (y[0]*p[0])*1.0 + (pow(y[1], 2)*p[1])*(-1.0) + (y[1]*y[2]*p[2])*(-1.0)
ydot[2] = (pow(y[1], 2)*p[1])*1.0
'''
        if _use_cython:
            def ode_rhs(t, y, p):
                ydot = self.ydot
                Cython.inline(code_eqs, quiet=True)
                return ydot
        else:
            def ode_rhs(t, y, p):
                # exec updates ydot item-by-item, so the shared array is
                # modified even though exec gets its own locals mapping.
                ydot = self.ydot
                exec(code_eqs)
                return ydot
        self.integrator = scipy.integrate.ode(ode_rhs)
        # BDF via 'vode' is the integrator configured for this system.
        self.integrator.set_integrator('vode', method='bdf', with_jacobian=True)
    def simulate(self, tspan, param_values=None, view=False):
        """Integrate the model over the time points in *tspan*.

        tspan -- sequence of times; tspan[0] is the initial time.
        param_values -- optional length-6 override of the model's
            parameter values.
        view -- if True, return read-only views of the internal arrays
            instead of copies.

        Returns (y, yobs): species trajectories and the observables
        record array.
        """
        if param_values is not None:
            # accept vector of parameter values as an argument
            if len(param_values) != len(self.parameters):
                raise Exception("param_values must have length %d" %
                                len(self.parameters))
            self.sim_param_values[:] = param_values
        else:
            # create parameter vector from the values in the model
            self.sim_param_values[:] = [p.value for p in self.parameters]
        self.y0.fill(0)
        for ic in self.initials:
            self.y0[ic.species_index] = self.sim_param_values[ic.param_index]
        if self.y is None or len(tspan) != len(self.y):
            # (Re-)allocate output arrays only when the time grid changes.
            self.y = numpy.empty((len(tspan), len(self.y0)))
            if len(self.observables):
                self.yobs = numpy.ndarray(len(tspan),
                                          list(zip((obs.name for obs in self.observables),
                                                   itertools.repeat(float))))
            else:
                self.yobs = numpy.ndarray((len(tspan), 0))
            self.yobs_view = self.yobs.view(float).reshape(len(self.yobs),
                                                           -1)
        # perform the actual integration
        self.integrator.set_initial_value(self.y0, tspan[0])
        self.integrator.set_f_params(self.sim_param_values)
        self.y[0] = self.y0
        t = 1
        while self.integrator.successful() and self.integrator.t < tspan[-1]:
            self.y[t] = self.integrator.integrate(tspan[t])
            t += 1
        # Project species trajectories onto the observables.
        for i, obs in enumerate(self.observables):
            self.yobs_view[:, i] = \
                (self.y[:, obs.species] * obs.coefficients).sum(1)
        if view:
            # Hand out read-only views to avoid copying large arrays.
            y_out = self.y.view()
            yobs_out = self.yobs.view()
            for a in y_out, yobs_out:
                a.flags.writeable = False
        else:
            y_out = self.y.copy()
            yobs_out = self.yobs.copy()
        return (y_out, yobs_out)
| {
"repo_name": "LoLab-VU/pysb",
"path": "doc/examples/robertson_standalone.py",
"copies": "5",
"size": "4685",
"license": "bsd-2-clause",
"hash": -6646888092559642000,
"line_mean": 35.0384615385,
"line_max": 80,
"alpha_frac": 0.5592315902,
"autogenerated": false,
"ratio": 3.4072727272727272,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009732736399403065,
"num_lines": 130
} |
# A simple tool to connect to the Ensembl server and retrieve feature
# information using the Ensembl REST API.
from __future__ import print_function
import json
import optparse
from itertools import islice
import requests
from six.moves.urllib.parse import urljoin
# --- Command-line interface ---
parser = optparse.OptionParser()
parser.add_option('-i', '--input', help='List of Ensembl IDs')
parser.add_option('-e', '--expand', type='choice', choices=['0', '1'],
                  default='0',
                  help='Expands the search to include any connected features. e.g. If the object is a gene, its transcripts, translations and exons will be returned as well.')
parser.add_option('-f', '--format', type='choice',
                  choices=['full', 'condensed'], default='full',
                  help='Specify the formats to emit from this endpoint')
options, args = parser.parse_args()
if options.input is None:
    raise Exception('-i option must be specified')
# --- REST request setup: Ensembl bulk ID lookup endpoint ---
server = 'http://rest.ensembl.org'
ext = 'lookup/id'
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
params = dict((k, getattr(options, k)) for k in ['format', 'expand'])
# Stream the input file in batches and stitch the per-batch JSON
# responses into a single JSON object on stdout.
first = True
print('{')
with open(options.input) as f:
    while True:
        # Read up to 50 IDs per POST request.
        ids = [line.strip() for line in islice(f, 50)]
        if not ids:
            break
        if not first:
            print(",")
        data = {'ids': ids}
        r = requests.post(urljoin(server, ext), params=params, headers=headers,
                          data=json.dumps(data))
        if not r.ok:
            r.raise_for_status()
        # Strip the surrounding braces of each batch response so all
        # batches concatenate under the single object opened/closed here.
        print(r.text[1:-1])
        first = False
print('}')
| {
"repo_name": "anilthanki/tgac-galaxytools",
"path": "tools/Ensembl-REST/get_feature_info.py",
"copies": "2",
"size": "1637",
"license": "mit",
"hash": -4122030656891136500,
"line_mean": 29.3148148148,
"line_max": 175,
"alpha_frac": 0.6188149053,
"autogenerated": false,
"ratio": 3.8069767441860467,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5425791649486047,
"avg_score": null,
"num_lines": null
} |
# A simple tool to connect to the Ensembl server and retrieve feature
# information using the Ensembl REST API.
from __future__ import print_function
import json
import optparse
import requests
from six.moves.urllib.parse import urljoin
# --- Command-line interface ---
parser = optparse.OptionParser()
parser.add_option('-i', '--input', help='List of Ensembl IDs')
parser.add_option('-e', '--expand', type='choice', choices=['0', '1'],
                  default='0',
                  help='Expands the search to include any connected features. e.g. If the object is a gene, its transcripts, translations and exons will be returned as well.')
parser.add_option('-s', '--species', type='choice',
                  choices=['ensembl', 'ensemblgenomes'], default='ensembl',
                  help='Specify the genome databases for vertebrates and other eukaryotic species')
parser.add_option('-f', '--format', type='choice',
                  choices=['full', 'condensed'], default='full',
                  help='Specify the formats to emit from this endpoint')
options, args = parser.parse_args()
if options.input is None:
    raise Exception('-i option must be specified')
# The host depends on the division: rest.ensembl.org vs rest.ensemblgenomes.org.
server = 'http://rest.%s.org' % options.species
ext = 'lookup/id'
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
params = dict((k, getattr(options, k)) for k in ['format', 'expand'])
# Read all IDs (one per line) and POST them in a single bulk request;
# the raw JSON response is printed verbatim.
with open(options.input) as f:
    ids = [line.strip() for line in f]
data = {'ids': ids}
r = requests.post(urljoin(server, ext), params=params, headers=headers,
                  data=json.dumps(data))
if not r.ok:
    r.raise_for_status()
print(r.text)
| {
"repo_name": "PerlaTroncosoRey/tgac-galaxytools",
"path": "tools/Ensembl-REST/get_feature_info.py",
"copies": "2",
"size": "1619",
"license": "mit",
"hash": -6053193215913318000,
"line_mean": 36.6511627907,
"line_max": 175,
"alpha_frac": 0.6565781347,
"autogenerated": false,
"ratio": 3.6879271070615034,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00036469344608879494,
"num_lines": 43
} |
# A simple tool to connect to the Ensembl server and retrieve genetree using
# the Ensembl REST API.
from __future__ import print_function
import optparse
import requests
from six.moves.urllib.parse import urljoin
parser = optparse.OptionParser()
parser.add_option('--id_type', type='choice', default='gene_id',
choices=['gene_id', 'gene_tree_id'], help='Input type')
parser.add_option('-i', '--input', help='Ensembl ID')
parser.add_option('--format', type='choice',
choices=['json', 'orthoxml', 'phyloxml', 'nh'],
default='json', help='Output format')
parser.add_option('-s', '--sequence', type='choice',
choices=['protein', 'cdna', 'none'], default='protein',
help='The type of sequence to bring back. Setting it to none results in no sequence being returned')
parser.add_option('-a', '--aligned', type='choice', choices=['0', '1'],
default='0', help='Return the aligned string if true. Otherwise, return the original sequence (no insertions)')
parser.add_option('-c', '--cigar_line', type='choice', choices=['0', '1'],
default='0',
help='Return the aligned sequence encoded in CIGAR format')
parser.add_option('--nh_format', type='choice',
choices=['full', 'display_label_composite', 'simple', 'species', 'species_short_name', 'ncbi_taxon', 'ncbi_name', 'njtree', 'phylip'],
default='simple',
help='The format of a NH (New Hampshire) request')
options, args = parser.parse_args()
if options.input is None:
raise Exception('-i option must be specified')
server = 'https://rest.ensembl.org'
if options.id_type == 'gene_id':
ext = 'genetree/member/id'
elif options.id_type == 'gene_tree_id':
ext = 'genetree/id'
if options.format == 'json':
content_type = 'application/json'
elif options.format == 'orthoxml':
content_type = 'text/x-orthoxml+xml'
elif options.format == 'phyloxml':
content_type = 'text/x-phyloxml+xml'
elif options.format == 'nh':
content_type = 'text/x-nh'
headers = {'Content-Type': content_type}
params = dict((k, getattr(options, k)) for k in ['sequence', 'aligned', 'cigar_line', 'nh_format'])
r = requests.get(urljoin(server, '/'.join([ext, options.input])), params=params, headers=headers)
if not r.ok:
r.raise_for_status()
print(r.text)
| {
"repo_name": "TGAC/earlham-galaxytools",
"path": "tools/Ensembl-REST/get_genetree.py",
"copies": "1",
"size": "2401",
"license": "mit",
"hash": 6423734922536142000,
"line_mean": 41.875,
"line_max": 152,
"alpha_frac": 0.6322365681,
"autogenerated": false,
"ratio": 3.49490538573508,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9623317337908124,
"avg_score": 0.0007649231853913728,
"num_lines": 56
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.