text stringlengths 0 1.05M | meta dict |
|---|---|
# Aravind Kumar - aravind@arvizard.com
import maya.cmds as cmds
def main():
    """
    Switch the active viewport between the two cameras of a stereo pair.

    The pair is found either by sharing a parent group with the active
    camera, or by a naming convention containing 'left', 'right', '_lt'
    or '_rt'.  The 2D pan/zoom state of the active camera is replicated
    onto the camera being switched to.
    """
    panel = cmds.getPanel(wf=True)
    # (transform, shape) of the camera currently looked through.
    camera = (cmds.modelPanel(panel, q=True, camera=True),
              cmds.listRelatives(cmds.modelPanel(panel, q=True, camera=True), s=True)[0])
    secondary_camera = ""
    # Only consider non-startup (user-created) cameras.
    all_cameras = [cam for cam in cmds.listCameras() if not cmds.camera(cam, sc=True, q=True)]
    parent_group = cmds.listRelatives(camera[0], parent=True)
    found = False
    # First strategy: look for a sibling camera under the same parent group.
    if parent_group:
        objs_in_grp = cmds.listRelatives(parent_group, c=True)
        for obj in objs_in_grp:
            if obj in all_cameras and obj != camera[0]:
                secondary_camera = (obj, cmds.listRelatives(obj, s=True)[0])
                found = True
                break
    # Second strategy: fall back to the left/right naming convention.
    if not found:
        for obj in all_cameras:
            # BUG FIX: the original compared obj against the (transform, shape)
            # tuple `camera`, which is never equal to a name string, so the
            # active camera could be selected as its own "secondary".
            if ("left" in obj or "right" in obj or "_lt" in obj or "_rt" in obj) and obj != camera[0]:
                secondary_camera = (obj, cmds.listRelatives(obj, s=True)[0])
                found = True
                break  # stop at the first match, mirroring the group search
    if found:
        cmds.lookThru(secondary_camera[0])
        # Replicate pan/zoom only where the attribute is settable
        # (not locked or driven by a connection).
        cmds.setAttr(secondary_camera[1] + ".panZoomEnabled",
                     cmds.getAttr(camera[0] + ".panZoomEnabled"))
        if cmds.getAttr(secondary_camera[0] + ".horizontalPan", se=True):
            cmds.setAttr(secondary_camera[0] + ".horizontalPan",
                         cmds.getAttr(camera[0] + ".horizontalPan"))
        if cmds.getAttr(secondary_camera[0] + ".verticalPan", se=True):
            cmds.setAttr(secondary_camera[0] + ".verticalPan",
                         cmds.getAttr(camera[0] + ".verticalPan"))
        if cmds.getAttr(secondary_camera[0] + ".zoom", se=True):
            cmds.setAttr(secondary_camera[0] + ".zoom",
                         cmds.getAttr(camera[0] + ".zoom"))
    else:
        cmds.warning("Stereo camera not found.")
if __name__ == '__main__':
main() | {
"repo_name": "arvizard1/vfxTools",
"path": "vizTools/stereoSwitch.py",
"copies": "1",
"size": "2227",
"license": "mit",
"hash": 7034216715824322000,
"line_mean": 46.4042553191,
"line_max": 159,
"alpha_frac": 0.620116749,
"autogenerated": false,
"ratio": 3.603559870550162,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4723676619550162,
"avg_score": null,
"num_lines": null
} |
# Aravind Kumar - aravind@arvizard.com
import maya.cmds as cmds
def main(change_by=0):
    """
    Shift the animation of the currently selected objects in time.

    :param change_by: The number of frames to offset the animation by
    :return: None
    """
    selection = cmds.ls(selection=True)
    # Guard clause: nothing selected means nothing to shift.
    if not selection:
        return
    cmds.keyframe(selection, edit=True, relative=True, timeChange=change_by)
if __name__ == '__main__':
result = cmds.promptDialog(title="Offset keys",
message="Enter offset frames",
button=['Ok','Cancel'],
defaultButton = 'Ok',
cancelButton ='Cancel',
dismissString ='Cancel')
if result =='Ok':
try:
value = int(cmds.promptDialog(q=True, text=True))
except ValueError:
cmds.warning("Enter a valid number. Example: 1 "
"for a positive offset / -2 for a negative offset")
raise(ValueError)
main(int(cmds.promptDialog(q=True, text=True))) | {
"repo_name": "arvizard1/vfxTools",
"path": "vizTools/shiftAnimation.py",
"copies": "1",
"size": "1118",
"license": "mit",
"hash": -2322583077349957000,
"line_mean": 30.9714285714,
"line_max": 85,
"alpha_frac": 0.5805008945,
"autogenerated": false,
"ratio": 4.125461254612546,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5205962149112545,
"avg_score": null,
"num_lines": null
} |
"""
Command to train red/blue classifiers from the command line.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
from datetime import datetime
from arbiter.models import Estimator, Score
from django.contrib.auth.models import User
from corpus.reader import TranscriptCorpusReader
from corpus.models import Corpus, Document, LabeledDocument
from corpus.reader import QueryCorpusReader, CorpusModelReader
from corpus.learn import CorpusLoader, build_model
from django.core.management.base import BaseCommand, CommandError
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import LogisticRegression
##########################################################################
## Training Command
##########################################################################
class Command(BaseCommand):
    """Management command that trains red/blue classifiers and saves them."""

    help = "Trains red/blue classifiers and stores them in the database."

    # CLI model name -> (estimator class, constructor keyword arguments).
    estimators = {
        'maxent': (LogisticRegression, {}),
        'svm': (SGDClassifier, {'loss': 'hinge', 'penalty': 'l2', 'alpha': 1e-3}),
        'nbayes': (MultinomialNB, {}),
    }

    # A corpus smaller than this is not worth fitting a model to.
    min_docs = 12

    def add_arguments(self, parser):
        """
        Register the command line arguments with argparse.
        """
        # Which estimator form to fit.
        parser.add_argument(
            '-m', '--model', choices=self.estimators, default='maxent',
            help='specify the model form to fit on the given corpus',
        )
        # Cross-validation folds.
        parser.add_argument(
            '-f', '--folds', type=int, default=12,
            help='number of folds to use in cross-validation',
        )
        # Optional owner of the resulting model.
        parser.add_argument(
            '-u', '--username', default=None, metavar='NAME',
            help='specify a user to build the model for or to assign ownership',
        )
        # Read raw transcripts from disk instead of the database.
        parser.add_argument(
            '-t', '--transcripts', default=None, type=str, metavar='PATH',
            help='specify a path on disk to the directory containing transcripts',
        )
        # Train against a specific, already-existing corpus.
        parser.add_argument(
            '-c', '--corpus', type=int, default=None, metavar='ID',
            help='specify the id of a corpus to build the model for',
        )

    def handle(self, *args, **options):
        """
        Entry point. The corpus source is resolved in priority order:

        1. --transcripts: read from disk (owner optional, other args ignored)
        2. --corpus: read the identified corpus from the database
        3. --username only: construct a user-specific corpus
        4. nothing: construct a global corpus from the whole database

        Cases 1 and 2 use an existing source; 3 and 4 create a new corpus.
        """
        owner = self.get_user(options['username'])
        if options['transcripts']:
            # Disk-based training: no database corpus involved.
            reader = TranscriptCorpusReader(options['transcripts'])
            corpus = None
            description = "transcripts located at {}".format(options['transcripts'])
        else:
            reader, corpus = self.get_corpus(owner=owner, **options)
            if corpus:
                description = str(reader.corpus)
            else:
                description = "Corpus read by {}".format(
                    reader.__class__.__name__
                )
        estimator = self.build_model(reader, owner, description, **options)
        # Record which corpus produced the estimator, when there is one.
        if corpus:
            estimator.corpus = corpus
            estimator.save()

    def build_model(self, reader, owner, description, **options):
        """
        Fit the selected estimator on the corpus exposed by ``reader`` and
        persist the model plus its cross-validation scores. Returns the
        saved Estimator instance.
        """
        model, kwargs = self.estimators[options['model']]
        loader = CorpusLoader(reader, options['folds'])
        # Let the operator know training has started; it can be slow.
        self.stdout.write((
            "Starting training of {} {} models on {}\n"
            "This may take quite a bit of time, please be patient!\n"
        ).format(
            loader.n_folds + 1, model.__name__, description
        ))
        # Fit one model per fold plus the final model.
        (clf, scores), total_time = build_model(loader, model, **kwargs)
        estimator = Estimator.objects.create(
            model_type=Estimator.TYPES.classifier,
            model_class=model.__name__,
            model_form=repr(clf),
            estimator=clf,
            build_time=total_time,
            owner=owner,
        )
        for metric, values in scores.items():
            if metric == 'times':
                # Timing values are timedeltas; store them as seconds.
                Score.objects.create(
                    metric=Score.METRICS.time,
                    score=values['final'].total_seconds(),
                    folds=[td.total_seconds() for td in values['folds']],
                    estimator=estimator,
                )
                continue
            for label, folds in values.items():
                if metric == 'support' and label == 'average':
                    # This will be an array of None values, so skip.
                    continue
                Score.objects.create(
                    metric=metric,
                    score=np.asarray(folds).mean(),
                    label=label,
                    folds=folds,
                    estimator=estimator,
                )
        self.stdout.write(
            "Training complete in {}! Estimator saved to the database\n".format(total_time)
        )
        return estimator

    def get_user(self, username):
        """
        Look up a user by name; a None username stays None. Raises
        CommandError when the username does not exist.
        """
        if username is None:
            return None
        try:
            return User.objects.get(username=username)
        except User.DoesNotExist:
            raise CommandError(
                "No user with username '{}' in the database".format(username)
            )

    def get_corpus(self, owner=None, **options):
        """
        Fetch or create the corpus selected by the options, returning a
        (reader, corpus) pair. Raises CommandError for an unknown corpus id
        or for a created corpus with too few documents.
        """
        # An explicit id takes precedence: fetch it from the database.
        if options['corpus']:
            try:
                corpus = Corpus.objects.get(id=options['corpus'])
                reader = CorpusModelReader(corpus)
                return reader, corpus
            except Corpus.DoesNotExist:
                raise CommandError(
                    "No corpus with id {} in the database".format(options['corpus'])
                )
        if owner is not None:
            # User-specific corpus.
            corpus = Corpus.objects.create_for_user(
                owner, title="{} user corpus created on {}".format(
                    owner.username, datetime.now().strftime("%Y-%m-%d")
                )
            )
        else:
            # Global corpus built from every document with annotator agreement.
            corpus = Corpus.objects.create(
                labeled=True, title="global corpus created on {}".format(
                    datetime.now().strftime("%Y-%m-%d")
                )
            )
            for document in Document.objects.all():
                label = document.label()
                if label is not None:
                    LabeledDocument.objects.create(
                        corpus=corpus, document=document, label=label,
                    )
        # Refuse to train on a corpus that is too small.
        if corpus.documents.count() < self.min_docs:
            corpus.delete()  # don't leave a useless tiny corpus behind
            raise CommandError(
                "Could not create a corpus with less than {} documents".format(self.min_docs)
            )
        return CorpusModelReader(corpus), corpus
| {
"repo_name": "DistrictDataLabs/partisan-discourse",
"path": "arbiter/management/commands/train.py",
"copies": "1",
"size": "9715",
"license": "apache-2.0",
"hash": -6369423229458772000,
"line_mean": 36.0801526718,
"line_max": 93,
"alpha_frac": 0.5615028307,
"autogenerated": false,
"ratio": 4.8868209255533195,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00494831551151249,
"num_lines": 262
} |
"""
Model definitions for the arbiter app.
"""
##########################################################################
## Imports
##########################################################################
from django.db import models
from model_utils import Choices
from partisan.utils import nullable
from model_utils.models import TimeStampedModel
from picklefield.fields import PickledObjectField
from django.contrib.postgres.fields import ArrayField
##########################################################################
## Estimator Model
##########################################################################
class Estimator(TimeStampedModel):
    """
    Stores a Scikit-Learn Estimator object as a pickle in the database.
    """

    # The model family, used to decide on evaluation criteria.
    TYPES = Choices('classifier', 'regression', 'clusters', 'decomposition')

    model_type = models.CharField(choices=TYPES, max_length=32)    # estimator family (see TYPES)
    model_class = models.CharField(max_length=255, **nullable)     # sklearn class name
    model_form = models.CharField(max_length=512, **nullable)      # repr() of the estimator
    estimator = PickledObjectField(**nullable)                     # the pickled model object
    build_time = models.DurationField(**nullable)                  # wall time taken to build
    owner = models.ForeignKey('auth.User', **nullable)             # owning user, if any
    corpus = models.ForeignKey('corpus.Corpus', **nullable)        # training corpus, if recorded

    class Meta:
        db_table = "estimators"
        get_latest_by = "created"

    def __str__(self):
        # e.g. "LogisticRegression Classifier (2016-07-01) for alice"
        s = "{} {} ({})".format(
            self.model_class, self.model_type.title(), self.created.strftime('%Y-%m-%d')
        )
        if self.owner:
            s += " for {}".format(self.owner)
        return s
class Score(TimeStampedModel):
    """
    Stores an evaluation metric for an estimator.
    """

    # The metrics that can be recorded against an estimator, covering
    # classification, regression, clustering and timing.
    METRICS = Choices(
        'accuracy', 'auc', 'brier', 'f1', 'fbeta', 'hamming', 'hinge',
        'jaccard', 'logloss', 'mcc', 'precision', 'recall', 'roc', 'support',
        'mae', 'mse', 'mdae', 'r2',
        'rand', 'completeness', 'homogeneity', 'mutual', 'silhouette', 'v',
        'time',
    )

    metric = models.CharField(choices=METRICS, max_length=32)         # which metric (see METRICS)
    score = models.FloatField(**nullable)                             # aggregate score value
    label = models.CharField(max_length=32, **nullable)               # class label, if per-label
    folds = ArrayField(models.FloatField(), **nullable)               # per-fold cross-validation scores
    estimator = models.ForeignKey(Estimator, related_name='scores')   # the estimator being evaluated

    class Meta:
        db_table = "evaluations"
        get_latest_by = "created"

    def __str__(self):
        # e.g. "Spam Precision score for <estimator> = 0.912"
        s = "{} score for {} = {:0.3f}".format(
            self.metric.title(), self.estimator, self.score
        )
        if self.label:
            s = "{} ".format(self.label.title()) + s
        return s
| {
"repo_name": "DistrictDataLabs/partisan-discourse",
"path": "arbiter/models.py",
"copies": "1",
"size": "3479",
"license": "apache-2.0",
"hash": -3879845800791368000,
"line_mean": 35.2395833333,
"line_max": 106,
"alpha_frac": 0.5763150331,
"autogenerated": false,
"ratio": 4.0642523364485985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5140567369548599,
"avg_score": null,
"num_lines": null
} |
""" Arbitrage tools
Tools for determining the
"""
import os
from decimal import Decimal
import pandas as pd
# Removed as it complicates the bot on server deploys (?)
# import seaborn as sns
# sns.set_context(font_scale=1.1)
# API credentials are read from the environment so secrets stay out of
# version control; unset variables resolve to None.
KRAKEN_API_KEY = os.environ.get('KRAKEN_API_KEY')
KRAKEN_PRIVATE_KEY = os.environ.get('KRAKEN_PRIVATE_KEY')
BITX_KEY = os.environ.get('BITX_KEY')
BITX_SECRET = os.environ.get('BITX_SECRET')
ICE3X_KEY = os.getenv('ICE3X_KEY')
ICE3X_PUBLIC = os.getenv('ICE3X_PUBLIC')

# Per-exchange lookup of the coins supported there, keyed first by exchange
# then by coin name. Each entry carries the kwargs that arbitrage() expects.
COIN_MAP = {
    'ice3x': {
        'bitcoin': {
            'coin_code': 'XBT',
            'coin_name': 'Bitcoin',
            'exchange_name': 'Ice3x'},
        'litecoin': {
            'coin_code': 'LTC',
            'coin_name': 'Litecoin',
            'exchange_name': 'Ice3x'},
        'ethereum': {
            'coin_code': 'ETH',
            'coin_name': 'Ethereum',
            'exchange_name': 'Ice3x'},
    },
    'luno': {
        'bitcoin': {
            'coin_code': 'XBT',
            'coin_name': 'Bitcoin',
            'exchange_name': 'Luno'},
    },
    'kraken': {
        'bitcoin': {
            'coin_code': 'XBT',
            'coin_name': 'Bitcoin',
            'exchange_name': 'Kraken'},
        'litecoin': {
            'coin_code': 'LTC',
            'coin_name': 'Litecoin',
            'exchange_name': 'Kraken'},
        'ethereum': {
            'coin_code': 'ETH',
            'coin_name': 'Ethereum',
            'exchange_name': 'Kraken'},
    },
}
def get_forex_buy_quote(currency_code: str = 'EUR', source: str = 'FNB', order_type: str = 'buy'):
    """Get the latest ZAR forex rate from the FNB website.

    :param currency_code: ISO currency code to look up, e.g. 'EUR'
    :param source: rate provider; only 'FNB' is supported
    :param order_type: 'buy' uses the bank selling rate, 'sell' the buying rate
    :return: the rate as a Decimal quantized to 4 places
    :raises ValueError: for an unsupported source (the original silently
        fell through and returned None, crashing callers later)
    """
    if source != 'FNB':
        raise ValueError(f'Unsupported forex source: {source}')
    # Scrape the published rates table; match narrows to the wanted row.
    tables = pd.read_html(
        'https://www.fnb.co.za/Controller?nav=rates.forex.list.ForexRatesList',
        index_col=1, header=0, match=currency_code)
    df = tables[0]
    # The bank's "selling" rate is what we pay to buy foreign currency.
    types = {
        'buy': 'Bank Selling Rate',
        'sell': 'Bank Buying Rate',
    }
    exchange_rate = df.loc[currency_code, types[order_type]]  # fixed typo: was 'exhange_rate'
    return Decimal("%.4f" % float(exchange_rate))
def kraken_order_book(book_type: str, currency_code: str = 'EUR', coin_code: str = 'XBT'):
    """Fetch one side of the Kraken depth (order book) as a DataFrame.

    :param book_type: 'asks' or 'bids'
    :param currency_code: quote currency, default 'EUR'
    :param coin_code: base coin, default 'XBT'
    """
    import krakenex
    kraken_api = krakenex.API(
        key=KRAKEN_API_KEY, secret=KRAKEN_PRIVATE_KEY, conn=krakenex.Connection())
    # Kraken pair names prefix the base with X and the quote with Z.
    pair = f'X{coin_code}Z{currency_code}'
    depth = kraken_api.query_public('Depth', {'pair': pair})
    return pd.DataFrame(
        depth['result'][pair][book_type],
        columns=['price', 'volume', 'timestamp'])
def luno_order_book(book_type: str, currency_code: str = 'ZAR'):
    """Fetch one side of the Luno (BitX) order book.

    Args:
        book_type: 'asks' or 'bids'
        currency_code: Default = 'ZAR'.

    Returns: Dataframe with order book.
    """
    from bitrader import bitx
    client = bitx.BitX(BITX_KEY, BITX_SECRET)
    book = client.get_order_book_frame()
    # The frame holds both sides; select the requested one.
    return book[book_type]
def ice3x_order_book(book_type: str, coin_code: str = 'BTC', currency_code: str = 'ZAR'):
    """Ice3X specific orderbook retrieval.

    :param book_type: 'ask' or 'bid'
    :param coin_code: 'BTC'/'XBT', 'LTC' or 'ETH'
    :param currency_code: quote currency (informational; pairs are ZAR)
    :return: DataFrame of the requested order book entries
    """
    from bitrader.api_tools import Ice3xAPI
    ice = Ice3xAPI(cache=False, future=False)
    # Map coin codes to Ice3X pair ids. BUG FIX: 'BTC' is accepted as an
    # alias for 'XBT' -- the original map lacked it, so calling with the
    # default coin_code='BTC' raised KeyError.
    pair_map = {
        'XBT': 3,
        'BTC': 3,
        'LTC': 6,
        'ETH': 11,
    }
    pair_id = pair_map[coin_code]
    r = ice.get_resource(
        'generic',
        api_method='orderbook',
        api_action='info',
        api_params=f'type={book_type}&pair_id={pair_id}',
        data_format='raw')
    # Returns whichever side was requested (the original local name 'bids'
    # was misleading for book_type='ask').
    entries = pd.DataFrame(r['response'].json()['response']['entities'])
    return entries
def prepare_order_book(order_book, book_type: str, bitcoin_column: str = 'volume', currency_column: str = 'price'):
    """Normalise an exchange order book into the standard frame layout.

    Bids are sorted best-price-first (descending) and asks cheapest-first
    (ascending), then per-row value and cumulative volume/value columns
    are appended.

    :param order_book: raw order book frame
    :param book_type: 'bids' or 'asks'
        asks is what I'll have to pay if I want to buy
        bids is what I'll get if I want to sell
    """
    sort_ascending = {'bids': False, 'asks': True}
    frame = order_book.copy().astype(float)
    frame = frame.sort_values(
        by=currency_column,
        ascending=sort_ascending[book_type]).reset_index(drop=True)
    frame['value'] = frame[currency_column] * frame[bitcoin_column]
    frame['cumulative_volume'] = frame[bitcoin_column].cumsum()
    frame['cumulative_value'] = frame['value'].cumsum()
    return frame
def coin_exchange(df, limit, order_type: str, bitcoin_column: str = 'volume', currency_column: str = 'value'):
    """Walk the prepared order book and convert between currency and coins.

    :param df: a frame produced by prepare_order_book (needs 'price' and
        cumulative columns)
    :param limit: the amount to convert, in the 'from' unit
    :param order_type: 'buy' exchanges currency for coins,
        'sell' exchanges coins for currency
    :return: Decimal amount received in the 'to' unit
    """
    directions = {
        'buy': (currency_column, bitcoin_column),
        'sell': (bitcoin_column, currency_column),
    }
    from_column, to_column = directions[order_type]
    # Rows fully consumed by the limit; the next row is only partially used.
    within_limit = df.loc[df['cumulative_%s' % from_column] < float(limit), :]
    boundary = within_limit.shape[0]
    # How far the boundary row's cumulative total overshoots the limit,
    # converted into the 'to' unit at that row's price.
    overshoot = Decimal(df.loc[boundary, 'cumulative_%s' % from_column]) - limit
    price = Decimal(df.loc[boundary, 'price'])
    overshoot_converted = overshoot / price if order_type == 'buy' else overshoot * price
    return Decimal(df.loc[boundary, 'cumulative_%s' % to_column]) - overshoot_converted
def get_books(coin_code: str = 'XBT', exchange_name: str = 'Luno'):
    """Fetch the Kraken ask book and the local ZAR bid book, prepared.

    :param coin_code: BTC, LTC, or ETH
    :param exchange_name: Luno or Ice3x
    :return: (eur_asks, zar_bids) pair of prepared frames
    """
    eur_asks = prepare_order_book(
        kraken_order_book('asks', coin_code=coin_code), 'asks')
    exchange = exchange_name.lower()
    if exchange == 'luno':
        zar_bids = prepare_order_book(luno_order_book('bids'), 'bids')
    elif exchange == 'ice3x':
        # Ice3X reports volume in an 'amount' column.
        zar_bids = prepare_order_book(
            ice3x_order_book('bid', coin_code=coin_code), 'bids',
            bitcoin_column='amount')
    else:
        raise KeyError(f'{exchange_name} is not a valid exchange_name')
    return eur_asks, zar_bids
def arbitrage(amount, coin_code='XBT', coin_name='bitcoin', exchange_name='Luno',
              exchange_rate=None, transfer_fees: bool = True, verbose: bool = False,
              books=None, trade_fees: bool = True):
    """Simulate a ZAR -> EUR -> coin -> ZAR arbitrage round trip.

    :param amount: The amount in ZAR (TODO: also allow reverse)
    :param coin_code: Default = XBT. LTC and ETH also supported.
    :param coin_name: Default = bitcoin. litecoin and ethereum also supported
    :param exchange_name: Luno or Ice3x.
    :param exchange_rate: The ZAR / EURO exchange rate; fetched when omitted.
    :param transfer_fees: Include FOREX/transfer fees. Set False to simulate
        money already in Europe.
    :param verbose: Default = False. Print the summary to the command line.
    :param trade_fees: Include the local exchange's trading fee.
    :param books: optional pre-fetched (eur_asks, zar_bids) pair.
    :return: dict with 'roi' and 'summary', or an error string.

    TODO:
        Make coin_code, coin_name, exchange_name a NamedTuple or something.
        Even better, make Exchange, Bank, Coin classes and build in stuff
        like exchange rates.
    """
    if not books:
        try:
            eur_asks, zar_bids = get_books(coin_code=coin_code, exchange_name=exchange_name)
        except KeyError:
            return 'Error processing order books. Check if the exchanges are working and that there are open orders.'
    else:
        eur_asks, zar_bids = books

    try:
        transfer_amount = Decimal(amount)
    except (ValueError, AttributeError):
        return 'Sorry, could not read reply.'

    try:
        if not exchange_rate:
            exchange_rate = get_forex_buy_quote('EUR')

        # Outbound transfer fees (Fees: https://www.kraken.com/en-us/help/faq)
        _swift_fee = Decimal(110) if transfer_fees else Decimal(0)
        _fnb_comission = (
            min(max(transfer_amount * Decimal(0.0055), Decimal(140)), Decimal(650))
            if transfer_fees else Decimal(0))
        _kraken_deposit_fee = Decimal(15) if transfer_fees else Decimal(0)

        # Total ZAR outlay, and the EUR that lands on Kraken.
        capital = transfer_amount + _fnb_comission + _swift_fee
        euros = transfer_amount / exchange_rate - _kraken_deposit_fee

        # TODO: Allow to specify lower tier, e.g. over $50k = 0.0024
        _kraken_fee = euros * Decimal(0.0026)
        _kraken_withdrawal_fee = Decimal(0.001)
        _luno_deposit_fee = Decimal(0.0002)

        # Buy coins on Kraken, then move them to the local exchange.
        bitcoins = (coin_exchange(eur_asks, euros - _kraken_fee, 'buy')
                    - _kraken_withdrawal_fee - _luno_deposit_fee)

        # TODO: Allow to specify lower tier, e.g. over 10 BTC = 0.0075
        _luno_fees = bitcoins * Decimal(0.01) if trade_fees else Decimal(0)
        # TODO: Check Ice3x fees
        _luno_withdrawel_fee = Decimal(8.5) if transfer_fees else Decimal(0)

        # Sell locally and tally the ZAR coming back.
        rands = coin_exchange(zar_bids, bitcoins - _luno_fees, 'sell')
        btc_zar_exchange_rate = rands / (bitcoins - _luno_fees)
        return_value = rands - _luno_withdrawel_fee

        total_fees = (
            _swift_fee +
            _fnb_comission +
            _kraken_fee * exchange_rate +
            _kraken_deposit_fee * exchange_rate +
            _kraken_withdrawal_fee * btc_zar_exchange_rate +
            _luno_deposit_fee * btc_zar_exchange_rate +
            _luno_fees * btc_zar_exchange_rate +
            _luno_withdrawel_fee)

        summary = [
            f'Rands out: {capital:.2f}',
            f'# forex conversion: R{_swift_fee + _fnb_comission:.2f}',
            f'Euro: {euros:.2f}',
            f'# kraken deposit and withdraw fee: R{(_kraken_deposit_fee * exchange_rate) + (_kraken_withdrawal_fee * btc_zar_exchange_rate):.2f}',
            f'# kraken trade fee: R{(_kraken_fee * exchange_rate):.2f}',
            f'{coin_name}: {bitcoins:.8f}',
            f'# {exchange_name} deposit and withdraw fee: R{_luno_withdrawel_fee + (_luno_deposit_fee * btc_zar_exchange_rate):.2f}',
            f'# {exchange_name} trade fee: R{(_luno_fees * btc_zar_exchange_rate):.2f}',
            f'Rands in: {rands:.2f}',
            '--------------------',
            f'Profit: {return_value - capital:.2f}',
            f'ROI: {((return_value - capital) / capital) * 100:.2f}',
            '--------------------',
            f'ZAR/EUR: {exchange_rate:.2f}',
            f'EUR/{coin_code}: {(euros - _kraken_fee) / bitcoins:.2f}',
            f'{coin_code}/ZAR: {btc_zar_exchange_rate:.2f}',
            '--------------------',
            f'Total fees: R{total_fees:.2f}',
        ]
        if verbose:
            print('\n'.join(summary))
        return {'roi': ((return_value - capital) / capital) * 100, 'summary': '\n'.join(summary)}
    except KeyError:
        # Raised when the requested amount exhausts the order book.
        return "Don't be greedy, that's too much!"
def optimal(max_invest: int = 1000000, coin: str = 'bitcoin', exchange='luno', return_format: str = 'text',
            exchange_rate: Decimal = None):
    """Sweep investment amounts in R5000 steps to find the best-ROI amount.

    Args:
        max_invest: upper bound (exclusive) of the sweep, in ZAR
        coin: bitcoin, litecoin, ethereum
        exchange: luno, ice3x or kraken
        return_format: text or picture ('text', 'values', 'raw', 'png')
        exchange_rate: ZAR/EUR rate; fetched from FNB when omitted
    Raises:
        KeyError: for an unknown return_format
    """
    if not exchange_rate:
        exchange_rate = get_forex_buy_quote('EUR')
    # Fetch the books once and reuse them across the whole sweep.
    books = get_books(
        coin_code=COIN_MAP[exchange][coin]['coin_code'],
        exchange_name=COIN_MAP[exchange][coin]['exchange_name']
    )
    results = []
    for amount in range(5000, max_invest, 5000):
        try:
            results.append(
                dict(
                    amount=amount, roi=arbitrage(
                        amount=amount,
                        exchange_rate=exchange_rate,
                        books=books,
                        transfer_fees=True,
                        **COIN_MAP[exchange][coin],
                    )['roi']))
        except Exception as e:
            # arbitrage() returns an error string when the books run dry, so
            # the ['roi'] lookup raises TypeError; stop sweeping there.
            # (Narrowed from a bare except that also swallowed KeyboardInterrupt.)
            print(e)
            break
    df = pd.DataFrame(results)
    df.amount = df.amount.astype(float)
    df = df.set_index('amount')
    df.roi = df.roi.astype(float)
    # BUG FIX: df.max() returns a Series indexed by column name; comparing it
    # against df.roi raised, so the except branch always fired and the
    # function returned the raw frame. Use the scalar column maximum.
    max_roi = df.roi.max()
    try:
        # Accept anything within 0.1% of the best ROI; take the smallest
        # qualifying investment amount.
        near_optimal = df.loc[df.roi > max_roi * (1 - 0.001)].reset_index()
        invest_amount = near_optimal.iloc[0].amount
        invest_roi = near_optimal.iloc[0].roi
    except Exception:
        # Empty/failed sweep: fall back to the raw frame for inspection.
        return df
    if return_format == 'text':
        return f'Ideal invest amount: {invest_amount} with ROI of {invest_roi:.2f}'
    elif return_format == 'values':
        return invest_amount, near_optimal
    elif return_format == 'raw':
        return df
    elif return_format == 'png':
        raise NotImplementedError('Not yet implemented')
    else:
        raise KeyError(f'Invalid return_format selection {return_format}')
def reverse_arb(amount, coin='litecoin', exchange_buy='ice3x', exchange_sell='kraken'):
    """Estimate the reverse arbitrage: buy coins locally for ZAR, sell them
    on Kraken for EUR, and value the proceeds back in ZAR.

    :param amount: ZAR amount to spend on the local exchange
    :param coin: bitcoin, litecoin or ethereum
    :return: summary string 'R<in>, R<out>, <roi>%'
    """
    if coin in ['litecoin', 'ethereum']:
        zar_asks = prepare_order_book(
            ice3x_order_book(
                'ask', coin_code=COIN_MAP[exchange_buy][coin]['coin_code']), 'asks', bitcoin_column='amount')
    else:
        zar_asks = prepare_order_book(luno_order_book('asks'), 'asks')
    # NOTE(review): the Kraken bids are prepared with book_type 'asks'
    # (ascending price order) -- confirm whether 'bids' was intended here.
    eur_bids = prepare_order_book(
        kraken_order_book('bids', coin_code=COIN_MAP[exchange_sell][coin]['coin_code']), 'asks')
    coins = coin_exchange(zar_asks, amount, 'buy')
    euro = coin_exchange(eur_bids, coins, 'sell')
    # BUG FIX: removed a dead reassignment of zar_asks from the Luno bids
    # book here -- its result was never used and cost an extra network call.
    exchange_rate = get_forex_buy_quote('EUR')
    rands = euro * exchange_rate
    return f'R{amount:.0f}, R{rands:.0f}, {(rands - amount)/amount * 100:.2f}%'
| {
"repo_name": "jr-minnaar/bitrader",
"path": "bitrader/arbitrage_tools.py",
"copies": "1",
"size": "13425",
"license": "mit",
"hash": 6002935298190810000,
"line_mean": 31.1172248804,
"line_max": 146,
"alpha_frac": 0.5777281192,
"autogenerated": false,
"ratio": 3.2537566650508967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43314847842508963,
"avg_score": null,
"num_lines": null
} |
# Arbitrary allocations in target memory.
# This module should be included by the module that 'owns' the allocation only.
# Global scratchpad memory. This is the address of the biggest safest area of
# read-write-execute RAM we can guesstimate about. This is provided as a
# default location for the backdoor commands here to hastily bludgeon data
# into. How did we find it? Guesswork! Also, staring at memsquares!
#
# This is halfway through DRAM, in a spot that seems to exhibit uninitialized
# memory patterns even after quite a lot of time running memsquare.py
# Hopefully our code won't get trashed by a SCSI packet!
# Scratchpad base: halfway through DRAM, chosen by guesswork and memsquare
# observation as a lightly-used read-write-execute region.
pad = 0x1e00000

# Buffer used for code, e.g. %ec, %ecc, %ea, %tea
shell_code = 0x1e40000
# Slightly longer-lived code for %hook
hook_code = 0x1e44000

# Backdoor stubs
bitbang_backdoor = 0x1e48000
cpu8051_backdoor = 0x1e49000

# Bounce buffer for getting data to/from other CPUs via the ARM
bounce_buffer = 0x1e4f000
bounce_buffer_size = 0x1000

# 64 KiB ring buffer for console output; the default location is 1MB above
# the default pad, still in an area of DRAM that seems very lightly used.
console_address = 0x1e50000
| {
"repo_name": "scanlime/coastermelt",
"path": "backdoor/target_memory.py",
"copies": "1",
"size": "1196",
"license": "mit",
"hash": 1923228530781052200,
"line_mean": 30.4736842105,
"line_max": 79,
"alpha_frac": 0.7617056856,
"autogenerated": false,
"ratio": 3.1556728232189974,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9407413202847195,
"avg_score": 0.001993061194360375,
"num_lines": 38
} |
"""Arbitrary expressions evaluated as an AST."""
from operator import and_
from operator import not_
from operator import or_
from operator import xor
# Abstract base classes for nodes.
class BaseNode(object):
    """Abstract base class for permission nodes.

    Nodes compose into expression trees via the bitwise operators
    (&, |, ^, ~); the boolean operators `and`/`or` are deliberately made
    unusable (see __bool__).
    """

    # Unary operators.
    def __invert__(self):
        return InvertNode(self)

    # Binary operators (reflected variants delegate to the normal ones).
    def __and__(self, other):
        return AndNode(self, other)

    def __rand__(self, other):
        return self & other

    def __or__(self, other):
        return OrNode(self, other)

    def __ror__(self, other):
        return self | other

    def __xor__(self, other):
        return XorNode(self, other)

    def __rxor__(self, other):
        return self ^ other

    # Comparisons: concrete subclasses must define equality.
    def __eq__(self, other):
        raise NotImplementedError()

    def __ne__(self, other):
        return not (self == other)

    def __bool__(self):
        # Guard against `node1 and node2`, which would silently return only
        # one operand due to Python's short-circuit semantics.
        raise TypeError(
            "You probably tried to do 'group1 or/and group2' which, due to "
            "python's semantics, only returns one group. Use a bitwise "
            "operator like &, |, or ^ instead.")

    __nonzero__ = __bool__  # Python 2 spelling of __bool__
class ValueNode(BaseNode):
    """A leaf node that always evaluates to a fixed value."""

    def __init__(self, value):
        self.value = value

    def __eq__(self, other):
        # Equal only to another ValueNode wrapping an equal value.
        return self.__class__ is other.__class__ and self.value == other.value

    def __hash__(self):
        return hash((type(self), self.value))

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return "ValueNode(%s)" % repr(self.value)
class RolesNode(BaseNode):
    """A leaf node holding a static set of role names (stored lower-cased)."""

    def __init__(self, *roles):
        self._roles_set = frozenset(role.lower() for role in roles)

    def __hash__(self):
        return hash((type(self), self._roles_set))

    def get_roles_set(self, **kwargs):
        # kwargs accepted for signature compatibility with DynamicRolesNode.
        return self._roles_set

    def __and__(self, other):
        # Two static role sets AND-ed together collapse into a single node,
        # reducing tree complexity; mixed types fall back to an AndNode.
        if type(other) is type(self):
            return self.__class__(*(self._roles_set | other._roles_set))
        return super(RolesNode, self).__and__(other)

    def __eq__(self, other):
        return (self.__class__ is other.__class__ and
                self._roles_set == other._roles_set)

    def __str__(self):
        return '{%s}' % (', '.join(sorted(self._roles_set)))

    def __repr__(self):
        return ('RolesNode(%s)' %
                (', '.join('"%s"' % role for role in sorted(self._roles_set))))
class DynamicRolesNode(RolesNode):
    """A roles node whose group set is computed by callables at check time.

    The primary use case is when the required LDAP groups are not known
    until runtime. For example, given BlogPost models categorised into
    {'Programming', 'Food', 'Travel'} where each user is authorized for a
    single category, posting/editing can be gated on the post's category:

        def _blog_post_category(request):
            # Note that this callable must return a set!
            return {Category.objects.get(
                id=request.POST['category_id']).name}

        @requires(post=DynamicRolesNode(_blog_post_category))
        def create_post(request):
            # Group membership is checked on POST against the <category>
            # LDAP group.
            ...

    Each callable must accept **kwargs and return a set of group names.
    For Django requests the kwarg `request` carries the current request.

    Note that DynamicRolesNodes are not compatible with Django admin panels
    because the request is not available to the auth backend when checking
    permissions.
    """

    def __init__(self, *roles_callables):
        # Reuses the parent's attribute name, but stores callables.
        self._roles_set = frozenset(roles_callables)

    def get_roles_set(self, **kwargs):
        roles = set()
        for get_roles in self._roles_set:
            result = get_roles(**kwargs)
            if not isinstance(result, set):
                raise RuntimeError("The callable must return a set.")
            roles |= result
        return roles

    def __str__(self):
        return '{%s}' % self._roles_set

    def __repr__(self):
        return 'DynamicRolesNode(%s)' % self._roles_set
# Operator nodes.
class OperatorNode(BaseNode):
    """Abstract base for operator nodes that combine BaseNode operands.

    Subclasses must define:
      display_name -- string form of the operator (e.g. '&')
      arity        -- number of operands the operator takes
      operator     -- callable of that arity implementing the operation
    """
    # Children must define a string that represents this operator.
    display_name = None
    # Children must define an integer arity attribute.
    arity = None
    # Children must define an operator callable with the same arity as
    # specified above.
    operator = None

    def __init__(self, *operands):
        """Validate and store *operands*; each must be a BaseNode.

        Raises ValueError on wrong operand count and TypeError on a
        non-BaseNode operand.
        """
        if len(operands) != self.arity:
            raise ValueError('Incorrect number of operands for %s: %d. '
                             'Expected %d.' % (self.__class__.__name__,
                                               len(operands), self.arity))
        for operand in operands:
            if not isinstance(operand, BaseNode):
                raise TypeError('%r is not a child of BaseNode.' %
                                operand)
            # TODO: Check if operand is an instance of basestring and cast to
            # a BaseNode?
        self._operands = tuple(operands)

    def __hash__(self):
        # Hash on concrete type + operands so equal nodes hash equally.
        return hash((type(self), self._operands))

    def __eq__(self, other):
        return (self.__class__ is other.__class__ and
                self._operands == other._operands)

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__ (this module targets
        # py2 -- see the basestring TODO above); without this, `!=` fell
        # back to identity comparison and disagreed with `==`.
        return not self.__eq__(other)

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__,
                           ', '.join(map(repr, self._operands)))
class InvertNode(OperatorNode):
    """Logical NOT of a single operand node."""
    display_name = '~'
    arity = 1
    operator = not_
class AndNode(OperatorNode):
    """Logical AND of two operand nodes."""
    display_name = '&'
    arity = 2
    operator = and_
class OrNode(OperatorNode):
    """Logical OR of two operand nodes."""
    display_name = '|'
    arity = 2
    operator = or_
class XorNode(OperatorNode):
    """Logical XOR of two operand nodes."""
    display_name = '^'
    arity = 2
    operator = xor
| {
"repo_name": "counsyl/baya",
"path": "baya/membership.py",
"copies": "1",
"size": "6826",
"license": "mit",
"hash": 6787221984090379000,
"line_mean": 29.6098654709,
"line_max": 79,
"alpha_frac": 0.5890711984,
"autogenerated": false,
"ratio": 4.205791743684535,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 223
} |
'''arbitrary recursions copy registry
'''
from __future__ import with_statement
from __future__ import absolute_import
from collections import defaultdict
from .func import identity, noop, compose
from .ctxsingleton import CtxSingleton
from .multimethod import MultiMethod, defmethod
from .atypes import as_optimized_type, anytype, union, intersection, typep
from .atypes.etypes import recursions_type
from .atypes.ptypes import atomic_type
from .collections import OrderedDict, OrderedDefaultDict, OrderedSet
__all__ = ['make_copy','copy','state','copy_obj','recursions_type','get_copy', 'set_copy']

class CopyState(CtxSingleton):
    """Context-local state of a copy pass: the id->copy memo table and the
    remaining recursion budget (None = unlimited)."""
    def _cxs_setup_top(self):
        # Defaults for the outermost (inactive) context.
        self.memo = None
        self.recursions = None

# Singleton holding the currently active copy state.
state = CopyState()
def make_copy(op, recursions=None):
    """Copy *op* with a fresh memo, recursing at most *recursions* levels
    (None = unlimited)."""
    assert typep(recursions, recursions_type)
    with state(recursions=recursions, memo={}):
        return copy(op)

# Sentinel marking an in-progress copy in the memo; encountering it again
# means the object graph contains a cycle this copier cannot resolve.
_holder = object()
def copy(op):
    """Copy *op* within the active context, honouring the memo table and
    the remaining recursion budget."""
    recursions = state.recursions
    if recursions is not None and recursions==0:
        # Budget exhausted: hand back the original object uncopied.
        return op
    memo = state.memo
    i = id(op)
    try:
        cp = memo[i]
    except KeyError:
        # Mark this object as in-progress so a cycle becomes detectable.
        memo[i] = _holder
        if recursions is None:
            cp = copy_obj(op)
        else:
            with state(recursions=recursions-1, memo=memo):
                cp = copy_obj(op)
        memo[i] = cp
    if cp is _holder:
        # Re-entered an object whose copy has not finished: a cycle.
        cyclic_error(op)
    return cp
def get_copy(op):
    """Return the copy already registered for *op* in the current pass.

    Raises ValueError if no copy of *op* has been started, and the cyclic
    error if *op*'s copy is still in progress (placeholder in the memo).
    """
    try:
        cp = state.memo[id(op)]
    except KeyError:
        raise ValueError("copy not yet in progress for %r" % (op,))
    # Bug fix: report the original object. The previous code rebound `op`
    # to the memo value, so on a cycle cyclic_error() was handed the
    # _holder sentinel instead of the object being copied.
    if cp is _holder:
        cyclic_error(op)
    return cp
def set_copy(op, cp):
    """Register *cp* as the copy of *op*, returning *cp*.

    Overwriting the in-progress placeholder (or re-registering the same
    copy) is allowed; replacing a different finished copy is an error.
    """
    memo = state.memo
    # Missing entries and in-progress entries are treated identically,
    # so default the lookup to the placeholder sentinel.
    existing = memo.get(id(op), _holder)
    if existing is not _holder and existing is not cp:
        raise RuntimeError("replacing copy")
    memo[id(op)] = cp
    return cp
def cyclic_error(op):
    """Raise for a cycle detected while copying *op*."""
    message = "cyclic copy for %r id=%d" % (op, id(op))
    raise RuntimeError(message)
copy_obj = MultiMethod('copy_obj',
                       doc='arbitrary recursions copying registry')
# Default: objects are returned uncopied (identity).
defmethod(copy_obj, [anytype])(identity)
# Atomic (scalar/immutable) types never need copying.
defmethod(copy_obj, [atomic_type])(identity)
@defmethod(copy_obj, [list])
def meth(l):
    # Element-wise copy. NOTE: relies on Python 2 map() returning a list
    # (this module targets py2 -- see iteritems below); under py3 this
    # would return an iterator.
    return map(copy, l)
@defmethod(copy_obj, [tuple])
def meth(t):
    # Rebuild the tuple from copies of its elements.
    return tuple(map(copy, t))
@defmethod(copy_obj, [(dict, OrderedDict)])
def meth(d):
    # Copy keys and values; type(d) preserves the concrete mapping class.
    return type(d)([[copy(k),copy(v)] for k,v in d.iteritems()])
@defmethod(copy_obj, [(set, frozenset, OrderedSet)])
def meth(s):
    # Element-wise copy, preserving the concrete set class.
    return type(s)([copy(x) for x in s])
@defmethod(copy_obj, [(defaultdict, OrderedDefaultDict)])
def meth(d):
    # The default_factory is carried over by reference, not copied.
    return type(d)(d.default_factory, [[copy(k),copy(v)] for k,v in d.iteritems()])
| {
"repo_name": "matthagy/Jamenson",
"path": "jamenson/runtime/copy.py",
"copies": "1",
"size": "2756",
"license": "apache-2.0",
"hash": 612613344360404900,
"line_mean": 24.5185185185,
"line_max": 90,
"alpha_frac": 0.6407837446,
"autogenerated": false,
"ratio": 3.3982737361282367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9500479936239893,
"avg_score": 0.007715508897668811,
"num_lines": 108
} |
'''arbitrary recursions data structure tear down
'''
from __future__ import with_statement
from __future__ import absolute_import
from collections import defaultdict
from .func import identity, noop, compose
from .ctxsingleton import CtxSingleton
from .multimethod import MultiMethod, defmethod
from .atypes import anytype, typep
from .atypes.etypes import recursions_type
from .atypes.ptypes import atomic_type
from .collections import OrderedDict, OrderedDefaultDict, OrderedSet
__all__ = ['do_deletion','delete','state','delete_obj','recursions_type']

class DeleteState(CtxSingleton):
    """Context-local state of a tear-down pass: visited-id memo, recursion
    budget, and whether immutable containers may be traversed."""
    def _cxs_setup_top(self):
        self.memo = set()
        self.recursions = None
        self.delete_immutable = True

# Singleton holding the currently active deletion state.
state = DeleteState()
def do_deletion(op, recursions=None, delete_immutable=True):
    """Tear down *op* in a fresh context, recursing at most *recursions*
    levels (None = unlimited)."""
    assert typep(recursions, recursions_type)
    with state(recursions=recursions, delete_immutable=delete_immutable):
        delete(op)
def delete(op):
    """Delete *op* within the active context, honouring the visited memo
    and the remaining recursion budget."""
    recursions = state.recursions
    if recursions is not None and recursions==0:
        return
    memo = state.memo
    i = id(op)
    if i in memo:
        # Already visited: avoids double-deletion and cycles.
        return
    memo.add(i)
    if recursions is None:
        delete_obj(op)
    else:
        # NOTE(review): the sibling copy.py uses state(...) here rather
        # than state.top(...); confirm that re-entering from the top
        # context (dropping delete_immutable overrides?) is intended.
        with state.top(recursions=recursions-1):
            delete_obj(op)
delete_obj = MultiMethod('delete_obj',
                         doc='objection destruction registry')
# Default: objects are left untouched (identity no-op).
# NOTE(review): type specs here are strings ('anytype') while copy.py
# passes the actual type objects -- presumably MultiMethod resolves both;
# verify.
defmethod(delete_obj, 'anytype')(identity)
# Atomic (scalar) types need no tear-down.
defmethod(delete_obj, 'atomic_type')(identity)
# Sequence types.
@defmethod(delete_obj, '(tuple,frozenset)')
def meth(op, delete=delete):
    # Immutable containers cannot be emptied in place; we can only recurse
    # into their elements, and only when the context allows it.
    # (delete=delete binds the global once at definition time.)
    if not state.delete_immutable:
        raise TypeError("can't delete immutable objects %r" % (op,))
    for el in op:
        delete(el)
@defmethod(delete_obj, 'list')
def meth(op, delete=delete):
    # Delete each element, then empty the list in place
    # (del op[::] clears the full slice, like del op[:]).
    for el in op:
        delete(el)
    del op[::]
@defmethod(delete_obj, '(set,OrderedSet)')
def meth(op, delete=delete):
    # Delete each element first (delete() does not mutate op), then empty
    # the set in place.
    for el in op:
        delete(el)
    op.clear()
# Mappings.
@defmethod(delete_obj, '(dict, defaultdict, OrderedDefaultDict, OrderedDict)')
def meth(op, delete=delete):
    # Delete keys and values, then empty the mapping in place.
    for k,v in op.iteritems():
        delete(k)
        delete(v)
    op.clear()
| {
"repo_name": "matthagy/Jamenson",
"path": "jamenson/runtime/delete.py",
"copies": "1",
"size": "2184",
"license": "apache-2.0",
"hash": -1533946602976344800,
"line_mean": 24.6941176471,
"line_max": 78,
"alpha_frac": 0.6785714286,
"autogenerated": false,
"ratio": 3.562805872756933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9704718247669036,
"avg_score": 0.007331810737579301,
"num_lines": 85
} |
"""Arcam component."""
import asyncio
import logging
from arcam.fmj import ConnectionFailed
from arcam.fmj.client import Client
import async_timeout
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_PORT, EVENT_HOMEASSISTANT_STOP
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .const import (
DEFAULT_SCAN_INTERVAL,
DOMAIN,
DOMAIN_DATA_ENTRIES,
DOMAIN_DATA_TASKS,
SIGNAL_CLIENT_DATA,
SIGNAL_CLIENT_STARTED,
SIGNAL_CLIENT_STOPPED,
)
_LOGGER = logging.getLogger(__name__)

# YAML configuration for this integration is deprecated; warn until 0.115.
CONFIG_SCHEMA = cv.deprecated(DOMAIN, invalidation_version="0.115")
async def _await_cancel(task):
    """Cancel *task* and wait until the cancellation has completed."""
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        # Expected: the awaited task was just cancelled by us.
        pass
async def async_setup(hass: HomeAssistantType, config: ConfigType):
    """Set up the component."""
    hass.data[DOMAIN_DATA_ENTRIES] = {}
    hass.data[DOMAIN_DATA_TASKS] = {}

    async def _stop(_):
        # Bug fix: the gather must be awaited. Without `await`, the
        # cancellations were merely scheduled and Home Assistant could
        # finish shutting down before the client tasks were actually
        # cancelled and awaited.
        await asyncio.gather(
            *[_await_cancel(task) for task in hass.data[DOMAIN_DATA_TASKS].values()]
        )

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _stop)
    return True
async def async_setup_entry(hass: HomeAssistantType, entry: config_entries.ConfigEntry):
    """Set up config entry."""
    entries = hass.data[DOMAIN_DATA_ENTRIES]
    tasks = hass.data[DOMAIN_DATA_TASKS]

    client = Client(entry.data[CONF_HOST], entry.data[CONF_PORT])
    entries[entry.entry_id] = client

    # Keep a handle on the connection task so it can be cancelled on
    # unload (async_unload_entry) and at shutdown (_stop).
    task = asyncio.create_task(_run_client(hass, client, DEFAULT_SCAN_INTERVAL))
    tasks[entry.entry_id] = task

    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(entry, "media_player")
    )
    return True
async def async_unload_entry(hass, entry):
    """Cleanup before removing config entry."""
    await hass.config_entries.async_forward_entry_unload(entry, "media_player")
    # Stop the connection task before dropping the client reference.
    task = hass.data[DOMAIN_DATA_TASKS].pop(entry.entry_id)
    await _await_cancel(task)
    hass.data[DOMAIN_DATA_ENTRIES].pop(entry.entry_id)
    return True
async def _run_client(hass, client, interval):
    """Keep *client* connected, retrying every *interval* seconds, and
    relay client lifecycle events onto the dispatcher."""
    def _listen(_):
        hass.helpers.dispatcher.async_dispatcher_send(SIGNAL_CLIENT_DATA, client.host)

    while True:
        try:
            # Bound only the connection attempt; the processing loop below
            # may run indefinitely.
            with async_timeout.timeout(interval):
                await client.start()

            _LOGGER.debug("Client connected %s", client.host)
            hass.helpers.dispatcher.async_dispatcher_send(
                SIGNAL_CLIENT_STARTED, client.host
            )

            try:
                with client.listen(_listen):
                    await client.process()
            finally:
                # Always stop the client and announce the disconnect, even
                # if process() raised or this task was cancelled.
                await client.stop()

                _LOGGER.debug("Client disconnected %s", client.host)
                hass.helpers.dispatcher.async_dispatcher_send(
                    SIGNAL_CLIENT_STOPPED, client.host
                )
        except ConnectionFailed:
            await asyncio.sleep(interval)
        except asyncio.TimeoutError:
            # Connection attempt timed out: retry immediately.
            continue
        except asyncio.CancelledError:
            # Propagate cancellation (entry unload / shutdown).
            raise
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Unexpected exception, aborting arcam client")
            return
| {
"repo_name": "balloob/home-assistant",
"path": "homeassistant/components/arcam_fmj/__init__.py",
"copies": "12",
"size": "3292",
"license": "apache-2.0",
"hash": 3082602372841238000,
"line_mean": 27.6260869565,
"line_max": 88,
"alpha_frac": 0.6534021871,
"autogenerated": false,
"ratio": 3.9903030303030302,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004073095028212314,
"num_lines": 115
} |
"""Arcam component."""
import logging
import asyncio
import voluptuous as vol
import async_timeout
from arcam.fmj.client import Client
from arcam.fmj import ConnectionFailed
from homeassistant import config_entries
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType, ConfigType
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP,
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_ZONE,
SERVICE_TURN_ON,
)
from .const import (
DOMAIN,
DOMAIN_DATA_ENTRIES,
DOMAIN_DATA_CONFIG,
DEFAULT_NAME,
DEFAULT_PORT,
DEFAULT_SCAN_INTERVAL,
SIGNAL_CLIENT_DATA,
SIGNAL_CLIENT_STARTED,
SIGNAL_CLIENT_STOPPED,
)
_LOGGER = logging.getLogger(__name__)
def _optional_zone(value):
    """Validate a per-zone config mapping, defaulting empty/missing to {}."""
    # Any falsy value (None or {}) validates identically against an empty
    # mapping, so collapse the original if/else into one expression.
    return ZONE_SCHEMA(value or {})
def _zone_name_validator(config):
    """Fill in a default friendly name for zones lacking an explicit one."""
    name_pattern = "{} ({}:{}) - {}"
    for zone_number, zone_conf in config[CONF_ZONE].items():
        if CONF_NAME in zone_conf:
            continue
        zone_conf[CONF_NAME] = name_pattern.format(
            DEFAULT_NAME,
            config[CONF_HOST],
            config[CONF_PORT],
            zone_number,
        )
    return config
# Per-zone options: an optional friendly name and an optional service to
# call when turning the zone on.
ZONE_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(SERVICE_TURN_ON): cv.SERVICE_SCHEMA,
    }
)

# One configured device; zone 1 is always present by default, and missing
# zone names are filled in by _zone_name_validator.
DEVICE_SCHEMA = vol.Schema(
    vol.All({
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_PORT, default=DEFAULT_PORT): cv.positive_int,
        vol.Optional(
            CONF_ZONE, default={1: _optional_zone(None)}
        ): {vol.In([1, 2]): _optional_zone},
        vol.Optional(
            CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
        ): cv.positive_int,
    }, _zone_name_validator)
)

CONFIG_SCHEMA = vol.Schema(
    {DOMAIN: vol.All(cv.ensure_list, [DEVICE_SCHEMA])}, extra=vol.ALLOW_EXTRA
)
async def async_setup(hass: HomeAssistantType, config: ConfigType):
    """Set up the component."""
    hass.data[DOMAIN_DATA_ENTRIES] = {}
    hass.data[DOMAIN_DATA_CONFIG] = {}

    for device in config[DOMAIN]:
        # Remember the YAML config keyed by (host, port) so
        # async_setup_entry can pick it up after the import flow runs.
        hass.data[DOMAIN_DATA_CONFIG][
            (device[CONF_HOST], device[CONF_PORT])
        ] = device

        # Import each YAML-configured device into a config entry.
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN,
                context={"source": config_entries.SOURCE_IMPORT},
                data={
                    CONF_HOST: device[CONF_HOST],
                    CONF_PORT: device[CONF_PORT],
                },
            )
        )
    return True
async def async_setup_entry(
    hass: HomeAssistantType, entry: config_entries.ConfigEntry
):
    """Set up an access point from a config entry."""
    client = Client(entry.data[CONF_HOST], entry.data[CONF_PORT])

    # Prefer the imported YAML config for this host/port; otherwise
    # synthesise a default config through the device schema.
    config = hass.data[DOMAIN_DATA_CONFIG].get(
        (entry.data[CONF_HOST], entry.data[CONF_PORT]),
        DEVICE_SCHEMA(
            {
                CONF_HOST: entry.data[CONF_HOST],
                CONF_PORT: entry.data[CONF_PORT],
            }
        ),
    )

    hass.data[DOMAIN_DATA_ENTRIES][entry.entry_id] = {
        "client": client,
        "config": config,
    }

    # Background task that keeps the client connected; it stops itself on
    # EVENT_HOMEASSISTANT_STOP (see _run_client).
    asyncio.ensure_future(
        _run_client(hass, client, config[CONF_SCAN_INTERVAL])
    )

    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(entry, "media_player")
    )
    return True
async def _run_client(hass, client, interval):
    """Keep *client* connected until Home Assistant stops, relaying client
    lifecycle events onto the dispatcher."""
    # NOTE(review): asyncio.Task.current_task() is deprecated on newer
    # Python versions (asyncio.current_task() from 3.7).
    task = asyncio.Task.current_task()
    run = True

    async def _stop(_):
        # Stop the loop and wait for this task to finish its cleanup.
        nonlocal run
        run = False
        task.cancel()
        await task

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _stop)

    def _listen(_):
        hass.helpers.dispatcher.async_dispatcher_send(
            SIGNAL_CLIENT_DATA, client.host
        )

    while run:
        try:
            # Bound only the connection attempt; processing may run
            # indefinitely.
            with async_timeout.timeout(interval):
                await client.start()

            _LOGGER.debug("Client connected %s", client.host)
            hass.helpers.dispatcher.async_dispatcher_send(
                SIGNAL_CLIENT_STARTED, client.host
            )

            try:
                with client.listen(_listen):
                    await client.process()
            finally:
                # Always stop the client and announce the disconnect.
                await client.stop()

                _LOGGER.debug("Client disconnected %s", client.host)
                hass.helpers.dispatcher.async_dispatcher_send(
                    SIGNAL_CLIENT_STOPPED, client.host
                )
        except ConnectionFailed:
            await asyncio.sleep(interval)
        except asyncio.TimeoutError:
            # Connection attempt timed out: retry immediately.
            continue
| {
"repo_name": "jabesq/home-assistant",
"path": "homeassistant/components/arcam_fmj/__init__.py",
"copies": "1",
"size": "4619",
"license": "apache-2.0",
"hash": -8597610681772405000,
"line_mean": 25.2443181818,
"line_max": 77,
"alpha_frac": 0.5875730678,
"autogenerated": false,
"ratio": 3.927721088435374,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5015294156235374,
"avg_score": null,
"num_lines": null
} |
"""Arcam media player."""
import logging
from typing import Optional
from arcam.fmj import DecodeMode2CH, DecodeModeMCH, IncomingAudioFormat, SourceCodes
from arcam.fmj.state import State
from homeassistant import config_entries
from homeassistant.components.media_player import MediaPlayerDevice
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_SELECT_SOUND_MODE,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
CONF_NAME,
CONF_ZONE,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import callback
from homeassistant.helpers.service import async_call_from_config
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .const import (
DOMAIN,
DOMAIN_DATA_ENTRIES,
SIGNAL_CLIENT_DATA,
SIGNAL_CLIENT_STARTED,
SIGNAL_CLIENT_STOPPED,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistantType,
    config_entry: config_entries.ConfigEntry,
    async_add_entities,
):
    """Set up the configuration entry."""
    data = hass.data[DOMAIN_DATA_ENTRIES][config_entry.entry_id]
    client = data["client"]
    config = data["config"]

    # One media-player entity per configured zone.
    async_add_entities(
        [
            ArcamFmj(
                State(client, zone),
                zone_config[CONF_NAME],
                zone_config.get(SERVICE_TURN_ON),
            )
            for zone, zone_config in config[CONF_ZONE].items()
        ]
    )

    return True
class ArcamFmj(MediaPlayerDevice):
    """Representation of a media device.

    State changes are pushed from the client through dispatcher signals
    (see async_added_to_hass), so polling is disabled; async_update is
    only invoked for forced refreshes.
    """

    def __init__(self, state: State, name: str, turn_on: Optional[ConfigType]):
        """Initialize device."""
        self._state = state
        self._name = name
        # Optional service-call config used to power on devices that
        # cannot be woken over the data connection.
        self._turn_on = turn_on
        self._support = (
            SUPPORT_SELECT_SOURCE
            | SUPPORT_VOLUME_SET
            | SUPPORT_VOLUME_MUTE
            | SUPPORT_VOLUME_STEP
            | SUPPORT_TURN_OFF
        )
        if state.zn == 1:
            # Sound-mode selection only exists on the main zone.
            self._support |= SUPPORT_SELECT_SOUND_MODE

    def _get_2ch(self):
        """Return if source is 2 channel or not."""
        audio_format, _ = self._state.get_incoming_audio_format()
        # None (unknown format) is treated as 2-channel.
        return bool(
            audio_format
            in (IncomingAudioFormat.PCM, IncomingAudioFormat.ANALOGUE_DIRECT, None)
        )

    @property
    def device_info(self):
        """Return a device description for device registry."""
        return {
            "identifiers": {(DOMAIN, self._state.client.host, self._state.client.port)},
            "model": "FMJ",
            "manufacturer": "Arcam",
        }

    @property
    def should_poll(self) -> bool:
        """No need to poll."""
        return False

    @property
    def name(self):
        """Return the name of the controlled device."""
        return self._name

    @property
    def state(self):
        """Return the state of the device."""
        if self._state.get_power():
            return STATE_ON
        return STATE_OFF

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        support = self._support
        # Turn-on is only offered when the device is reachable (power
        # state known) or an explicit turn-on service was configured.
        if self._state.get_power() is not None or self._turn_on:
            support |= SUPPORT_TURN_ON
        return support

    async def async_added_to_hass(self):
        """Once registered, add listener for events."""
        await self._state.start()

        @callback
        def _data(host):
            if host == self._state.client.host:
                self.async_schedule_update_ha_state()

        @callback
        def _started(host):
            if host == self._state.client.host:
                self.async_schedule_update_ha_state(force_refresh=True)

        @callback
        def _stopped(host):
            if host == self._state.client.host:
                self.async_schedule_update_ha_state(force_refresh=True)

        self.hass.helpers.dispatcher.async_dispatcher_connect(SIGNAL_CLIENT_DATA, _data)
        self.hass.helpers.dispatcher.async_dispatcher_connect(
            SIGNAL_CLIENT_STARTED, _started
        )
        self.hass.helpers.dispatcher.async_dispatcher_connect(
            SIGNAL_CLIENT_STOPPED, _stopped
        )

    async def async_update(self):
        """Force update of state."""
        _LOGGER.debug("Update state %s", self.name)
        await self._state.update()

    async def async_mute_volume(self, mute):
        """Send mute command."""
        await self._state.set_mute(mute)
        self.async_schedule_update_ha_state()

    async def async_select_source(self, source):
        """Select a specific source."""
        try:
            value = SourceCodes[source]
        except KeyError:
            _LOGGER.error("Unsupported source %s", source)
            return

        await self._state.set_source(value)
        self.async_schedule_update_ha_state()

    async def async_select_sound_mode(self, sound_mode):
        """Select a specific source."""
        try:
            # Decode-mode enums differ between 2ch and multichannel input.
            if self._get_2ch():
                await self._state.set_decode_mode_2ch(DecodeMode2CH[sound_mode])
            else:
                await self._state.set_decode_mode_mch(DecodeModeMCH[sound_mode])
        except KeyError:
            _LOGGER.error("Unsupported sound_mode %s", sound_mode)
            return

        self.async_schedule_update_ha_state()

    async def async_set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        # Device volume is an integer 0..99.
        await self._state.set_volume(round(volume * 99.0))
        self.async_schedule_update_ha_state()

    async def async_volume_up(self):
        """Turn volume up for media player."""
        await self._state.inc_volume()
        self.async_schedule_update_ha_state()

    async def async_volume_down(self):
        """Turn volume up for media player."""
        await self._state.dec_volume()
        self.async_schedule_update_ha_state()

    async def async_turn_on(self):
        """Turn the media player on."""
        if self._state.get_power() is not None:
            _LOGGER.debug("Turning on device using connection")
            await self._state.set_power(True)
        elif self._turn_on:
            _LOGGER.debug("Turning on device using service call")
            await async_call_from_config(
                self.hass,
                self._turn_on,
                variables=None,
                blocking=True,
                validate_config=False,
            )
        else:
            _LOGGER.error("Unable to turn on")

    async def async_turn_off(self):
        """Turn the media player off."""
        await self._state.set_power(False)

    @property
    def source(self):
        """Return the current input source."""
        value = self._state.get_source()
        if value is None:
            return None
        return value.name

    @property
    def source_list(self):
        """List of available input sources."""
        return [x.name for x in self._state.get_source_list()]

    @property
    def sound_mode(self):
        """Name of the current sound mode."""
        if self._state.zn != 1:
            return None
        if self._get_2ch():
            value = self._state.get_decode_mode_2ch()
        else:
            value = self._state.get_decode_mode_mch()
        if value:
            return value.name
        return None

    @property
    def sound_mode_list(self):
        """List of available sound modes."""
        if self._state.zn != 1:
            return None
        if self._get_2ch():
            return [x.name for x in DecodeMode2CH]
        return [x.name for x in DecodeModeMCH]

    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        value = self._state.get_mute()
        if value is None:
            return None
        return value

    @property
    def volume_level(self):
        """Volume level of device."""
        value = self._state.get_volume()
        if value is None:
            return None
        return value / 99.0

    @property
    def media_content_type(self):
        """Content type of current playing media."""
        source = self._state.get_source()
        if source == SourceCodes.DAB:
            value = MEDIA_TYPE_MUSIC
        elif source == SourceCodes.FM:
            value = MEDIA_TYPE_MUSIC
        else:
            value = None
        return value

    @property
    def media_channel(self):
        """Channel currently playing."""
        source = self._state.get_source()
        if source == SourceCodes.DAB:
            value = self._state.get_dab_station()
        elif source == SourceCodes.FM:
            value = self._state.get_rds_information()
        else:
            value = None
        return value

    @property
    def media_artist(self):
        """Artist of current playing media, music track only."""
        source = self._state.get_source()
        if source == SourceCodes.DAB:
            value = self._state.get_dls_pdt()
        else:
            value = None
        return value

    @property
    def media_title(self):
        """Title of current playing media."""
        source = self._state.get_source()
        if source is None:
            return None

        channel = self.media_channel

        if channel:
            value = f"{source.name} - {channel}"
        else:
            value = source.name
        return value
| {
"repo_name": "postlund/home-assistant",
"path": "homeassistant/components/arcam_fmj/media_player.py",
"copies": "3",
"size": "9529",
"license": "apache-2.0",
"hash": 6663209923765150000,
"line_mean": 28.32,
"line_max": 88,
"alpha_frac": 0.5797040613,
"autogenerated": false,
"ratio": 4.121539792387543,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6201243853687544,
"avg_score": null,
"num_lines": null
} |
"""Arcam media player."""
import logging
from arcam.fmj import DecodeMode2CH, DecodeModeMCH, IncomingAudioFormat, SourceCodes
from arcam.fmj.state import State
from homeassistant import config_entries
from homeassistant.components.media_player import BrowseMedia, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_CLASS_DIRECTORY,
MEDIA_CLASS_MUSIC,
MEDIA_TYPE_MUSIC,
SUPPORT_BROWSE_MEDIA,
SUPPORT_PLAY_MEDIA,
SUPPORT_SELECT_SOUND_MODE,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.components.media_player.errors import BrowseError
from homeassistant.const import ATTR_ENTITY_ID, STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant, callback
from .config_flow import get_entry_client
from .const import (
DOMAIN,
EVENT_TURN_ON,
SIGNAL_CLIENT_DATA,
SIGNAL_CLIENT_STARTED,
SIGNAL_CLIENT_STOPPED,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: config_entries.ConfigEntry,
    async_add_entities,
):
    """Set up the configuration entry."""
    client = get_entry_client(hass, config_entry)

    # Always create both zones; zone 2 is disabled by default via
    # entity_registry_enabled_default on the entity.
    async_add_entities(
        [
            ArcamFmj(
                config_entry.title,
                State(client, zone),
                config_entry.unique_id or config_entry.entry_id,
            )
            for zone in [1, 2]
        ],
        True,
    )

    return True
class ArcamFmj(MediaPlayerEntity):
    """Representation of a media device.

    State changes are pushed from the client through dispatcher signals
    (see async_added_to_hass), so polling is disabled; async_update is
    only invoked for forced refreshes.
    """

    def __init__(
        self,
        device_name,
        state: State,
        uuid: str,
    ):
        """Initialize device."""
        self._state = state
        self._device_name = device_name
        self._name = f"{device_name} - Zone: {state.zn}"
        self._uuid = uuid
        self._support = (
            SUPPORT_SELECT_SOURCE
            | SUPPORT_PLAY_MEDIA
            | SUPPORT_BROWSE_MEDIA
            | SUPPORT_VOLUME_SET
            | SUPPORT_VOLUME_MUTE
            | SUPPORT_VOLUME_STEP
            | SUPPORT_TURN_OFF
            | SUPPORT_TURN_ON
        )
        if state.zn == 1:
            # Sound-mode selection only exists on the main zone.
            self._support |= SUPPORT_SELECT_SOUND_MODE

    def _get_2ch(self):
        """Return if source is 2 channel or not."""
        audio_format, _ = self._state.get_incoming_audio_format()
        # Unknown (None) and undetected formats are treated as 2-channel.
        return bool(
            audio_format
            in (
                IncomingAudioFormat.PCM,
                IncomingAudioFormat.ANALOGUE_DIRECT,
                IncomingAudioFormat.UNDETECTED,
                None,
            )
        )

    @property
    def entity_registry_enabled_default(self) -> bool:
        """Return if the entity should be enabled when first added to the entity registry."""
        # Only the main zone is enabled by default.
        return self._state.zn == 1

    @property
    def unique_id(self):
        """Return unique identifier if known."""
        return f"{self._uuid}-{self._state.zn}"

    @property
    def device_info(self):
        """Return a device description for device registry."""
        return {
            "name": self._device_name,
            "identifiers": {
                (DOMAIN, self._uuid),
                (DOMAIN, self._state.client.host, self._state.client.port),
            },
            "model": "Arcam FMJ AVR",
            "manufacturer": "Arcam",
        }

    @property
    def should_poll(self) -> bool:
        """No need to poll."""
        return False

    @property
    def name(self):
        """Return the name of the controlled device."""
        return self._name

    @property
    def state(self):
        """Return the state of the device."""
        if self._state.get_power():
            return STATE_ON
        return STATE_OFF

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return self._support

    async def async_added_to_hass(self):
        """Once registered, add listener for events."""
        await self._state.start()

        @callback
        def _data(host):
            if host == self._state.client.host:
                self.async_write_ha_state()

        @callback
        def _started(host):
            if host == self._state.client.host:
                self.async_schedule_update_ha_state(force_refresh=True)

        @callback
        def _stopped(host):
            if host == self._state.client.host:
                self.async_schedule_update_ha_state(force_refresh=True)

        # async_on_remove ensures the dispatcher subscriptions are dropped
        # when the entity is removed.
        self.async_on_remove(
            self.hass.helpers.dispatcher.async_dispatcher_connect(
                SIGNAL_CLIENT_DATA, _data
            )
        )

        self.async_on_remove(
            self.hass.helpers.dispatcher.async_dispatcher_connect(
                SIGNAL_CLIENT_STARTED, _started
            )
        )

        self.async_on_remove(
            self.hass.helpers.dispatcher.async_dispatcher_connect(
                SIGNAL_CLIENT_STOPPED, _stopped
            )
        )

    async def async_update(self):
        """Force update of state."""
        _LOGGER.debug("Update state %s", self.name)
        await self._state.update()

    async def async_mute_volume(self, mute):
        """Send mute command."""
        await self._state.set_mute(mute)
        self.async_write_ha_state()

    async def async_select_source(self, source):
        """Select a specific source."""
        try:
            value = SourceCodes[source]
        except KeyError:
            _LOGGER.error("Unsupported source %s", source)
            return

        await self._state.set_source(value)
        self.async_write_ha_state()

    async def async_select_sound_mode(self, sound_mode):
        """Select a specific source."""
        try:
            # Decode-mode enums differ between 2ch and multichannel input.
            if self._get_2ch():
                await self._state.set_decode_mode_2ch(DecodeMode2CH[sound_mode])
            else:
                await self._state.set_decode_mode_mch(DecodeModeMCH[sound_mode])
        except KeyError:
            _LOGGER.error("Unsupported sound_mode %s", sound_mode)
            return

        self.async_write_ha_state()

    async def async_set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        # Device volume is an integer 0..99.
        await self._state.set_volume(round(volume * 99.0))
        self.async_write_ha_state()

    async def async_volume_up(self):
        """Turn volume up for media player."""
        await self._state.inc_volume()
        self.async_write_ha_state()

    async def async_volume_down(self):
        """Turn volume up for media player."""
        await self._state.dec_volume()
        self.async_write_ha_state()

    async def async_turn_on(self):
        """Turn the media player on."""
        if self._state.get_power() is not None:
            _LOGGER.debug("Turning on device using connection")
            await self._state.set_power(True)
        else:
            # Device unreachable: fire an event so automations can power
            # it on by other means (e.g. IR or a smart plug).
            _LOGGER.debug("Firing event to turn on device")
            self.hass.bus.async_fire(EVENT_TURN_ON, {ATTR_ENTITY_ID: self.entity_id})

    async def async_turn_off(self):
        """Turn the media player off."""
        await self._state.set_power(False)

    async def async_browse_media(self, media_content_type=None, media_content_id=None):
        """Implement the websocket media browsing helper."""
        # Only a single flat level is browsable: the tuner presets.
        if media_content_id not in (None, "root"):
            raise BrowseError(
                f"Media not found: {media_content_type} / {media_content_id}"
            )

        presets = self._state.get_preset_details()

        radio = [
            BrowseMedia(
                title=preset.name,
                media_class=MEDIA_CLASS_MUSIC,
                media_content_id=f"preset:{preset.index}",
                media_content_type=MEDIA_TYPE_MUSIC,
                can_play=True,
                can_expand=False,
            )
            for preset in presets.values()
        ]

        root = BrowseMedia(
            title="Root",
            media_class=MEDIA_CLASS_DIRECTORY,
            media_content_id="root",
            media_content_type="library",
            can_play=False,
            can_expand=True,
            children=radio,
        )

        return root

    async def async_play_media(self, media_type: str, media_id: str, **kwargs) -> None:
        """Play media."""
        # Only "preset:<n>" ids (from async_browse_media) are supported.
        if media_id.startswith("preset:"):
            preset = int(media_id[7:])
            await self._state.set_tuner_preset(preset)
        else:
            _LOGGER.error("Media %s is not supported", media_id)
            return

    @property
    def source(self):
        """Return the current input source."""
        value = self._state.get_source()
        if value is None:
            return None
        return value.name

    @property
    def source_list(self):
        """List of available input sources."""
        return [x.name for x in self._state.get_source_list()]

    @property
    def sound_mode(self):
        """Name of the current sound mode."""
        if self._state.zn != 1:
            return None
        if self._get_2ch():
            value = self._state.get_decode_mode_2ch()
        else:
            value = self._state.get_decode_mode_mch()
        if value:
            return value.name
        return None

    @property
    def sound_mode_list(self):
        """List of available sound modes."""
        if self._state.zn != 1:
            return None
        if self._get_2ch():
            return [x.name for x in DecodeMode2CH]
        return [x.name for x in DecodeModeMCH]

    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        value = self._state.get_mute()
        if value is None:
            return None
        return value

    @property
    def volume_level(self):
        """Volume level of device."""
        value = self._state.get_volume()
        if value is None:
            return None
        return value / 99.0

    @property
    def media_content_type(self):
        """Content type of current playing media."""
        source = self._state.get_source()
        if source == SourceCodes.DAB:
            value = MEDIA_TYPE_MUSIC
        elif source == SourceCodes.FM:
            value = MEDIA_TYPE_MUSIC
        else:
            value = None
        return value

    @property
    def media_content_id(self):
        """Content type of current playing media."""
        source = self._state.get_source()
        if source in (SourceCodes.DAB, SourceCodes.FM):
            preset = self._state.get_tuner_preset()
            if preset:
                value = f"preset:{preset}"
            else:
                value = None
        else:
            value = None

        return value

    @property
    def media_channel(self):
        """Channel currently playing."""
        source = self._state.get_source()
        if source == SourceCodes.DAB:
            value = self._state.get_dab_station()
        elif source == SourceCodes.FM:
            value = self._state.get_rds_information()
        else:
            value = None
        return value

    @property
    def media_artist(self):
        """Artist of current playing media, music track only."""
        source = self._state.get_source()
        if source == SourceCodes.DAB:
            value = self._state.get_dls_pdt()
        else:
            value = None
        return value

    @property
    def media_title(self):
        """Title of current playing media."""
        source = self._state.get_source()
        if source is None:
            return None

        channel = self.media_channel

        if channel:
            value = f"{source.name} - {channel}"
        else:
            value = source.name
        return value
| {
"repo_name": "home-assistant/home-assistant",
"path": "homeassistant/components/arcam_fmj/media_player.py",
"copies": "2",
"size": "11804",
"license": "apache-2.0",
"hash": -5845491601942128000,
"line_mean": 28.1456790123,
"line_max": 93,
"alpha_frac": 0.560149102,
"autogenerated": false,
"ratio": 4.128716334382651,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002315931370265366,
"num_lines": 405
} |
import sys, os, re
import numpy as np
import hashlib
import datetime as dt
try: from collections import OrderedDict
except ImportError:
try: from ordereddict import OrderedDict
except ImportError:
print "Require OrderedDict, https://pypi.python.org/pypi/ordereddict"
raise
import netcdf_builder as nb
import json_handler as jh
import numpy_routines as nr
def split_asciigrid(fname):
    """
    Split an Arc ASCII grid file into its header, data and (possibly) tail
    components.

    End-of-line characters and surrounding whitespace are stripped from each
    line. The head and tail components are returned as lists of lines; the
    data component as a list of lists of (string) values, one inner list per
    grid row. 'ncols' and 'nrows' are read from the header as they pass by.

    Raises ValueError if a data line does not have exactly `ncols` elements
    while fewer than `nrows` data lines have been collected.
    """
    head_lines = []
    data_lines = []
    tail_lines = []
    ncols = -1
    nrows = -1
    in_header = 1
    with open(fname, 'r') as fh:
        for raw in fh:
            text = raw.strip()
            if not text:
                continue
            tokens = text.split()
            # Header lines are assumed to hold exactly 2 whitespace-split tokens
            if len(tokens) == 2 and in_header:
                head_lines.append(text)
                if tokens[0] == 'ncols':
                    ncols = float(tokens[1])
                elif tokens[0] == 'nrows':
                    nrows = float(tokens[1])
            # A line with ncols elements is (probably) a data row
            elif len(tokens) == ncols:
                in_header = 0
                data_lines.append(tokens)
            # Wrong-width line before all data rows were seen: malformed grid
            elif len(data_lines) != nrows:
                raise ValueError("Line contains data but not of length "+str(ncols))
            # Anything after the data block is tail metadata
            else:
                tail_lines.append(text)
    return head_lines, data_lines, tail_lines
def arcasciihdr_to_dict(header):
    """
    Parse an Arc ASCII header list of strings (from split_asciigrid) into an
    OrderedDict. Keys are lowercased; values stay as strings.
    """
    meta = OrderedDict()
    pattern = re.compile(r"^(\S+)\s+(.+)$")
    for entry in header:
        found = pattern.match(entry.strip())
        if found:
            key, value = found.groups()
            meta[key.lower()] = value
    return meta
def set_latlon(meta, datadict=None):
    """
    Build latitude and longitude centre-coordinate vectors from the values in
    the Arc header dictionary `meta`. Returns (latitude, longitude, datadict).
    """
    # Derive cell-centre origins from lower-left corner coordinates if given
    if 'xllcorner' in meta:
        meta['xllcenter'] = float(meta['xllcorner'])+float(meta['cellsize'])/2
    if 'yllcorner' in meta:
        meta['yllcenter'] = float(meta['yllcorner'])+float(meta['cellsize'])/2
    cell = float(meta['cellsize'])
    lonvec = nr.create_vector(float(meta['xllcenter']), float(meta['ncols']), cell)
    # Latitude runs north-to-south in the grid, so reverse the vector
    latvec = nr.create_vector(float(meta['yllcenter']), float(meta['nrows']), cell)[::-1]
    # Standardised extent labels for downstream routines
    datadict = dict() if datadict is None else datadict
    datadict.update({
        'xmin': min(lonvec),
        'xmax': max(lonvec),
        'xstep': cell,
        'xnum': int(meta['ncols']),
        'xunits': 'degrees_east',
        'ymin': min(latvec),
        'ymax': max(latvec),
        'ystep': cell,
        'ynum': int(meta['nrows']),
        'yunits': 'degrees_north'
    })
    return latvec, lonvec, datadict
def asciigrid_to_numpy(datarows, meta, datadict=None):
    """
    Convert a list of lists of (string) values into a 3D float32 NumPy array
    shaped (time, latitude, longitude). `meta` is unused but kept for a
    consistent call signature. Returns (data, datadict).
    """
    grid = np.array(datarows, dtype=np.float32, ndmin=3)
    if datadict is None:
        datadict = dict()
    datadict['datatype'] = 'f4'  # float32
    return grid, datadict
def resample_array(input_data, input_lat, input_lon, input_dict=None):
    """
    Code snippets for resampling the input array on to a new grid, changing
    the missing value or reducing the data type or floating point precision
    of the array.

    Edit the components of this routine for your particular needs. The
    snippets below are chained: each step reads and writes output_data so no
    intermediate result is silently discarded. Requires a 'missing' key in
    input_dict for the output-grid fill and missing-value steps.
    """
    import copy  # local import: this module does not import copy at top level
    if input_dict is None:
        output_dict = dict()
    else:
        output_dict = copy.deepcopy(input_dict)
    # Output dimensions
    (xs, xn, xc) = (112, 841, 0.05)  # start, number, cellsize
    (ys, yn, yc) = (-44, 681, 0.05)  # start, number, cellsize
    output_lon = nr.create_vector(xs, xn, xc)
    output_lat = nr.create_vector(ys, yn, yc)
    output_lat = output_lat[::-1]  # reverse elements (north-to-south)
    # Create output array pre-filled with the missing value
    output_data = np.zeros((output_lat.size, output_lon.size)) \
                + output_dict['missing']
    # Copy data onto output grid
    output_data = nr.copy_grids(input_data, input_lon, input_lat,
                                output_data, output_lon, output_lat)
    output_dict.update({'xmin': min(output_lon),
                        'xmax': max(output_lon),
                        'xstep': xc,
                        'xnum': xn,
                        'ymin': min(output_lat),
                        'ymax': max(output_lat),
                        'ystep': yc,
                        'ynum': yn,})
    # Reduce precision of values to 1 decimal place and convert to f32.
    # BUG FIX: these steps previously operated on input_data, which discarded
    # the resampled output_data grid computed above.
    output_data = output_data.round(decimals=1)
    output_data = np.float32(output_data)
    output_dict.update({'datatype': 'f4'})
    # Change missing value
    miss = -999
    output_data = nr.replace_values(output_data, output_dict['missing'], miss)
    output_dict.update({'missing': miss})
    return (output_data, output_lat, output_lon, output_dict)
def set_attributes(datadict):
    """
    Build an Arc Float (.hdr) metadata dictionary from the standardised
    datadict labels.
    See http://webhelp.esri.com/arcgisdesktop/9.3/index.cfm?TopicName=raster_to_float_%28conversion%29
    """
    hdr = OrderedDict()
    hdr['ncols'] = "{0:d}".format(datadict['xnum'])
    hdr['nrows'] = "{0:d}".format(datadict['ynum'])
    # Header stores the lower-left CORNER, datadict stores cell centres
    hdr['xllcorner'] = "{0:f}".format(datadict['xmin']-datadict['xstep']/2)
    hdr['yllcorner'] = "{0:f}".format(datadict['ymin']-datadict['ystep']/2)
    hdr['cellsize'] = "{0:f}".format(datadict['ystep'])
    hdr['nodata_value'] = "{0:f}".format(datadict['missing'])
    hdr['byteorder'] = 'LSBFIRST' if sys.byteorder == "little" else 'MSBFIRST'
    return hdr
def asciigrid_to_flt(arcfilename,fileroot):
"""
The main routine that calls the calls other routines to prepare the data
and metadata and create the Arc Float file.
"""
# Read ascii grid file
asciihead,asciidata,asciitail = split_asciigrid(arcfilename)
meta = arcasciihdr_to_dict(asciihead)
# Create numpy components
latvec,lonvec,datadict = set_latlon(meta)
data,datadict = asciigrid_to_numpy(asciidata,meta,datadict)
# Resample or edit array
# Add a default no_data value if required.
miss = -999.0
if 'nodata_value' in meta: miss = float(meta['nodata_value'])
datadict['missing'] = miss
#data,latvec,lonvec,datadict = resample_array(data,latvec,lonvec,datadict)
# Prepare metadata
attributes = set_attributes(datadict)
# Write Arc Float files
fltfile = fileroot+'.flt'
hdrfile = fileroot+'.hdr'
if os.path.exists(fltfile): os.remove(fltfile)
if os.path.exists(hdrfile): os.remove(hdrfile)
data.tofile(fltfile)
f = open(hdrfile,'w')
for k in attributes:
f.write(k+" "+attributes[k]+'\n')
f.close
print 'Wrote:',fltfile
print 'Wrote:',hdrfile
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Usage:"
print " ", sys.argv[0], "path/to/arc_ascii_grid_file"
print "Notes:"
print " You may want to you add or amend some of the array manipulation"
print " routines given in the subroutine resample_array. This subroutine is not"
print " invoked by default."
exit()
else:
fileroot = re.sub('.\w+$','',sys.argv[1])
asciigrid_to_flt(sys.argv[1],fileroot)
| {
"repo_name": "KimberleyOpie/common-tools",
"path": "formats_to_nc/arcasciigrid_to_flt.py",
"copies": "1",
"size": "9782",
"license": "apache-2.0",
"hash": -3643298465307606500,
"line_mean": 34.8315018315,
"line_max": 102,
"alpha_frac": 0.6287057861,
"autogenerated": false,
"ratio": 3.5506352087114337,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9617725992190023,
"avg_score": 0.012323000524282004,
"num_lines": 273
} |
#arc consistency implementation with backtracking
from sys import stdout
from util import Queue as queue
import copy
def solve(string):
    """Parse a puzzle string, run backtracking search and print the solution."""
    variables = stringToVariables(string)
    # Cells whose domain already has a single value start out assigned
    assignment = {cell: domain[0] for cell, domain in variables.items() if len(domain) == 1}
    prettyPrint(backtrackingSearch(variables, assignment))
def initializeArcQueue(variables):
    """Build a queue holding every (variable, neighbour) arc in the puzzle."""
    arcs = queue()
    for node in variables:
        for neighbour in connections(node, variables).keys():
            arcs.push((node, neighbour))
    return arcs
def enforceArcConsistency(variables, assignment, arcs):
    """Propagate assigned values along queued arcs until the queue drains."""
    while not arcs.isEmpty():
        head, tail = arcs.pop()
        # Only propagate from assigned cells into still-open domains
        if head in assignment.keys() and len(variables[tail]) > 1:
            variables, assignment, newly_assigned = removeInconsistentValues(head, tail, variables, assignment)
            if newly_assigned:
                # tail just became assigned: its neighbours must be rechecked
                arcs = queueArcs(tail, variables, arcs)
    return (assignment, arcs)
def queueArcs(var, variables, arcs):
    """Push every arc leaving `var` onto the queue and return the queue."""
    for neighbour in connections(var, variables).keys():
        arcs.push((var, neighbour))
    return arcs
def removeInconsistentValues(varA, varB, variables, assignment):
    """
    Remove varA's assigned value from varB's domain; if that leaves a single
    value, record it in the assignment.

    Returns (variables, assignment, assigned) where `assigned` is True when
    varB became newly assigned.
    """
    assigned = False
    target = assignment[varA]
    # BUG FIX: the original removed from variables[varB] while iterating it,
    # which skips the element after the removed one. Domain values are unique,
    # so a membership test plus a single remove is equivalent and safe.
    if target in variables[varB]:
        variables[varB].remove(target)
        if len(variables[varB]) == 1:
            assignment[varB] = variables[varB][0]
            assigned = True
    return (variables, assignment, assigned)
def completed(assignment):
    """A board is complete when all 81 cells are assigned and sum to 405 (nine rows of 1+..+9)."""
    return len(assignment) == 81 and sum(assignment.values()) == 405
def selectUnassignedVariable(variables, assignment):
    """Return the first variable with no value in the current assignment."""
    unassigned = [name for name in variables.keys() if name not in assignment]
    return unassigned[0]
def connections(var, nodes):
    """
    Return the sub-dict of nodes that share a row, column or 3x3 box with
    `var`. Cells are indexed 0..80 in row-major order.
    """
    # BUG FIX: use floor division. The original Python-2 `/` truncates ints,
    # but under Python 3 it yields floats, so same-row/box cells would no
    # longer compare equal.
    row = lambda n: n // 9
    col = lambda n: n % 9
    box = lambda n: 3 * (row(n) // 3) + (col(n) // 3)
    return {k: v for k, v in nodes.items()
            if row(var) == row(k) or col(var) == col(k) or box(var) == box(k)}
def consistent(var, value, assignment):
    """True when no connected, already-assigned cell holds `value`."""
    return all(v != value for v in connections(var, assignment).values())
def backtrackingSearch(variables, assignment):
    """Run one round of arc-consistency propagation, then backtrack recursively."""
    arcs = initializeArcQueue(variables)
    enforceArcConsistency(variables, assignment, arcs)
    # The branch copy lets propagation speculate without corrupting `assignment`
    return recursiveBacktracking(variables, copy.deepcopy(assignment), assignment, arcs)
def recursiveBacktracking(variables, branchAssignment, assignment, arcs):
    """Depth-first search over cell values with arc-consistency pruning.

    `assignment` is the authoritative (shared, mutated) assignment;
    `branchAssignment` is a speculative copy that arc propagation may extend.
    Returns the completed assignment dict, or False when this branch fails.
    """
    if completed(assignment): return assignment
    var = selectUnassignedVariable(variables, assignment)
    for value in variables[var]:
        if consistent(var, value, assignment):
            branchAssignment[var] = value
            assignment[var] = value
            # Re-queue arcs from the newly assigned cell before propagating
            arcs = queueArcs(var, variables, arcs)
            # Propagate on a deep copy of the domains so failure here does not
            # corrupt the domains used by sibling branches
            branchAssignment, arcs = enforceArcConsistency(copy.deepcopy(variables), branchAssignment, arcs)
            result = recursiveBacktracking(variables, branchAssignment, assignment, arcs)
            if result: return result
            # Branch failed: undo this cell's assignment and try the next value
            del assignment[var]
    return False
#end algorithm - start interface
def prettyPrint(assignment):
i = 0
for w in range (1,4):
for x in range(1,4):
for y in range(1,4):
stdout.write("|")
for z in range(1,4):
stdout.write("%d|" % assignment[i])
i+=1
stdout.write(" ")
stdout.write("\n")
stdout.write("-----------------------\n")
def stringToVariables(string):
    """Map cell index -> domain list; '-' marks a blank cell with the full 1-9 domain."""
    digits = string.replace("-", "0")
    nodes = {pos: [int(ch)] for pos, ch in enumerate(digits)}
    for pos, domain in nodes.items():
        if domain == [0]:
            nodes[pos] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    return nodes
""" alternative implementation for selectUnassignedVariable method using MRV and LCV. The minimum remaining value and the least constraining value heuristics seems to select similar unassigned variables on a sudoku problem. Nevertheless, the tests showed on complex boards this heuristic increases the computational time, hence it was abandoned:
def selectUnassignedVariable(variables, assignment):
unassigned = {k:v for k,v in variables.items() if k not in assignment}
minimumRemainingValues = max(map(lambda x:len(x), unassigned.values()))
return [k for k,v in unassigned.items() if len(v)==minimumRemainingValues][0]
note: still need to try min-conflicts algorithm
""" | {
"repo_name": "lucasosouza/berkeleyAI",
"path": "project1b/sudokuALG3.py",
"copies": "1",
"size": "4071",
"license": "mit",
"hash": -1677333620628000500,
"line_mean": 36.0654205607,
"line_max": 345,
"alpha_frac": 0.7121100467,
"autogenerated": false,
"ratio": 3.4210084033613444,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4633118450061344,
"avg_score": null,
"num_lines": null
} |
import sys, os, re
import numpy as np
import hashlib
import datetime as dt
try: from collections import OrderedDict
except ImportError:
try: from ordereddict import OrderedDict
except ImportError:
print "Require OrderedDict, https://pypi.python.org/pypi/ordereddict"
raise
import netcdf_builder as nb
import json_handler as jh
import numpy_routines as nr
def arcflthdr_to_dict(header):
    """
    Read an Arc float header file into an OrderedDict. Keys are lowercased;
    values stay as strings.
    """
    meta = OrderedDict()
    pattern = re.compile(r"^(\S+)\s+(.+)$")
    with open(header, 'r') as fh:
        for line in fh:
            found = pattern.match(line.strip())
            if found:
                meta[found.group(1).lower()] = found.group(2)
    return meta
def set_latlon(meta, datadict=None):
    """
    Create latitude/longitude centre-coordinate vectors from the Arc header
    dictionary `meta`. Returns the tuple (latitude, longitude, datadict).
    """
    # Lower-left corners in the header become cell centres here
    if 'xllcorner' in meta:
        meta['xllcenter'] = float(meta['xllcorner'])+float(meta['cellsize'])/2
    if 'yllcorner' in meta:
        meta['yllcenter'] = float(meta['yllcorner'])+float(meta['cellsize'])/2
    step = float(meta['cellsize'])
    lonvec = nr.create_vector(float(meta['xllcenter']), float(meta['ncols']), step)
    latvec = nr.create_vector(float(meta['yllcenter']), float(meta['nrows']), step)
    latvec = latvec[::-1]  # grid rows run north-to-south
    # Standardised extent labels for downstream routines
    if datadict is None:
        datadict = dict()
    datadict.update({
        'xmin': min(lonvec),
        'xmax': max(lonvec),
        'xstep': step,
        'xnum': int(meta['ncols']),
        'xunits': 'degrees_east',
        'ymin': min(latvec),
        'ymax': max(latvec),
        'ystep': step,
        'ynum': int(meta['nrows']),
        'yunits': 'degrees_north'
    })
    return latvec, lonvec, datadict
def floatgrid_to_numpy(fltfile, meta, datadict=None):
    """
    Read an Arc float binary file into a 3D NumPy array shaped
    (time, latitude, longitude). Returns (data, datadict).
    """
    # Arc flt grids default to little-endian float32. Header 'byteorder'
    # values: lsbfirst -> little-endian, msbfirst -> big-endian.
    dtype = '<f4'
    if 'byteorder' in meta and re.match('[mM]', meta['byteorder']):
        dtype = '>f4'
    grid = np.fromfile(fltfile, dtype=dtype)
    grid.shape = (1, int(meta['nrows']), int(meta['ncols']))
    if datadict is None:
        datadict = dict()
    # Float32; endian-ness is managed by the netCDF library downstream
    datadict.update({'datatype': 'f4'})
    return grid, datadict
def resample_array(input_data, input_lat, input_lon, input_dict=None):
    """
    Code snippets for resampling the input array on to a new grid, changing
    the missing value or reducing the data type or floating point precision
    of the array.
    Edit the components of this routine for your particular needs. Just check
    that input_data and output_data arrays are being used and referenced as
    required.
    """
    # Intentionally a no-op placeholder; floatgrid_to_nc leaves its call
    # commented out. See arcasciigrid_to_nc.py for code snippet examples
    pass
def set_datetime(fname, datadict=None):
    """
    Derive the monthly date/time coverage from a YYYYMMDD stamp embedded in
    the filename (e.g. ..../arcfltgrid_WRel2_20130113.flt). Returns the tuple
    (start_datetime, end_datetime, datadict).
    """
    stamp = re.search(r'(\d{4})(\d\d)(\d\d)', fname)
    year = int(stamp.group(1))
    month = int(stamp.group(2))
    day = int(stamp.group(3))
    # Monthly coverage: end = start + 1 month - 1 day
    end_year, end_month = year, month + 1
    if end_month == 13:
        end_month = 1
        end_year += 1
    d1 = dt.datetime(year, month, day)
    # NOTE(review): assumes `day` exists in the following month (e.g. day=31
    # would raise for a shorter month) — confirm filenames use early days.
    d2 = dt.datetime(end_year, end_month, day) - dt.timedelta(days=1)
    if datadict is None:
        datadict = dict()
    # Date strings follow ISO 8601 (http://en.wikipedia.org/wiki/ISO_8601)
    datadict.update({
        'dcreate': "unknown",
        'dmodify': dt.datetime.utcnow().strftime("%Y%m%dT%H%M%S"),
        'tmin': d1.strftime("%Y-%m-%d"),
        'tmax': d2.strftime("%Y-%m-%d"),
        'tduration': "P1M",
        'tresolution': "P1M"
    })
    return d1, d2, datadict
def set_varname(fname, datadict=None):
    """
    Create the variable name/long-name/units for the netCDF output and record
    a SHA1 digest of the input file. Generally the variable name comes from
    the filename since it is not in the standard header information — edit
    the hardcoded values below for your particular data.
    """
    varname = "varname1"
    varlong = "long variable name"
    varunit = "unit"
    if datadict is None:
        datadict = dict()
    datadict.update({'varname': varname,
                     'varlong': varlong,
                     'varunits': varunit})
    # BUG FIX: read in binary mode. hashlib requires bytes under Python 3,
    # and text mode could also newline-translate the data before hashing.
    with open(fname, 'rb') as fh:
        datadict['sha1'] = hashlib.sha1(fh.read()).hexdigest()
    return datadict
def set_attributes(fltfile, meta, datadict=None):
    """
    Build the dictionary of netCDF attributes (keyed by their final attribute
    names) from the sanitised datadict. The date*, geospatial* and time*
    attributes follow the Attribute Convention for Dataset Discovery (ACDD);
    see http://www.unidata.ucar.edu/software/netcdf/conventions.html
    The result can be passed directly to netcdf_builder.set_attributes().
    """
    vname = datadict['varname']
    # History text assembled as a list so the join character is easy to change
    history = [datadict['dmodify']+": Reformatted to NetCDF."]
    history.extend(["Input file: "+fltfile])
    # Add some details about any modifications to the data
    # history.extend("Reduced precision of values to 1 decimal place.")
    # history.extend("Created a no-data value of {0}.".format(datadict['missing']))
    history.extend(["If present, the \"sha1_arcfltgrid\" attribute of a variable is the SHA1 hex digest of the input Arc Float Grid. This allows the Arc Float Grid and netCDF files to be uniquely linked irrespective of filename changes."])
    # OrderedDict built from pairs so the attribute order is explicit
    pairs = [
        ('history', ' '.join(history)),
        ('date_created', datadict['dcreate']),
        ('date_modified', datadict['dmodify']),
        ('geospatial_lat_min', "{0:.2f}".format(datadict['ymin'])),
        ('geospatial_lat_max', "{0:.2f}".format(datadict['ymax'])),
        ('geospatial_lat_units', datadict['yunits']),
        ('geospatial_lat_resolution', "{0:.2f}".format(datadict['ystep'])),
        ('geospatial_lon_min', "{0:.2f}".format(datadict['xmin'])),
        ('geospatial_lon_max', "{0:.2f}".format(datadict['xmax'])),
        ('geospatial_lon_units', datadict['xunits']),
        ('geospatial_lon_resolution', "{0:.2f}".format(datadict['xstep'])),
        ('time_coverage_start', datadict['tmin']),
        ('time_coverage_end', datadict['tmax']),
        ('time_coverage_duration', datadict['tduration']),
        ('time_coverage_resolution', datadict['tresolution']),
        (vname+':long_name', datadict['varlong']),
        (vname+':units', datadict['varunits']),
        (vname+':grid_mapping', 'crs'),
        (vname+':sha1_arcfloatgrid', datadict['sha1']),
        ('latitude:units', datadict['yunits']),
        ('longitude:units', datadict['xunits']),
        ('crs:grid_mapping_name', "latitude_longitude"),
        ('crs:longitude_of_prime_meridian', 0.0),
        ('crs:semi_major_axis', 6378137.0),
        ('crs:inverse_flattening', 298.257223563),
    ]
    ncmeta = OrderedDict()
    for key, value in pairs:
        ncmeta[key] = value
    return ncmeta
def floatgrid_to_nc(arcfilename,fileroot):
    """
    Main routine: read an Arc Float grid pair (.flt/.hdr), prepare the data
    and metadata, and write fileroot.nc plus a fileroot.json metadata dump.
    """
    # Set input filenames (swap whatever extension was given for .flt/.hdr)
    fltfile = re.sub('\.\w+$','.flt',arcfilename)
    hdrfile = re.sub('\.\w+$','.hdr',arcfilename)
    # Read metadata data
    meta = arcflthdr_to_dict(hdrfile)
    # Create numpy components
    latvec,lonvec,datadict = set_latlon(meta)
    data,datadict = floatgrid_to_numpy(fltfile,meta,datadict)
    # Resample or edit array
    # Add a default no_data value if required.
    miss = -999.0
    if 'nodata_value' in meta: miss = float(meta['nodata_value'])
    datadict['missing'] = miss
    #data,latvec,lonvec,datadict = resample_array(data,latvec,lonvec,datadict)
    # Prepare time, variable name and metadata
    d1,d2,datadict = set_datetime(fltfile,datadict)
    datadict = set_varname(fltfile,datadict)
    attributes = set_attributes(fltfile,meta,datadict)
    # Netcdf options
    # http://netcdf4-python.googlecode.com/svn/trunk/docs/netCDF4-module.html
    nc_format = 'NETCDF4_CLASSIC'
    nc_compress = True
    debug = False
    # Write netcdf file, replacing any existing one
    if os.path.exists(fileroot+'.nc'): os.remove(fileroot+'.nc')
    varname = datadict['varname']
    vartype = datadict['datatype']
    fillval = datadict['missing']
    timevec = [d1]
    ncobj = nb.ncopen(fileroot+'.nc','w',format=nc_format)
    nb.set_timelatlon(ncobj,None,len(latvec),len(lonvec)) # unlimited time
    nb.set_variable(ncobj,varname,dtype=vartype,fill=fillval,zlib=nc_compress)
    nb.set_variable(ncobj,'crs',dims=(),dtype="i4") # Grid mapping container
    nb.add_time(ncobj,timevec)
    nb.add_data(ncobj,'latitude',latvec)
    nb.add_data(ncobj,'longitude',lonvec)
    if debug:
        print varname,data.shape
        nb.show_dimensions(ncobj)
    # nb.add_data should work but is presently broken. Use direct method
    #nb.add_data(ncobj,varname,data)
    #ncobj.variables[varname][0,:,:] = data # 2D numpy array
    ncobj.variables[varname][:] = data # 3D numpy array
    nb.set_attributes(ncobj,attributes)
    nb.ncclose(ncobj)
    print 'Wrote:',fileroot+'.nc'
    # Write metadata to json alongside the netCDF file
    if os.path.exists(fileroot+'.json'): os.remove(fileroot+'.json')
    jh.json_dump(attributes,fileroot+'.json')
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Usage:"
print " ", sys.argv[0], "path/to/arc_float_grid_file"
print "Notes:"
print " At least two subroutines need to be edited for each type of data, namely:"
print " set_datetime - Create a time coordinate value, possibly as a regular"
print " expression match on the input filename or hardcoded."
print " set_varname - Create the netcdf variable name, possibly as a regular"
print " expression match on the input filename or harcoded."
print " Additionally, you may want to you add or amend some of the array manipulation"
print " routines given in the subroutine resample_array. This subroutine is not"
print " invoked by default."
exit()
else:
fileroot = re.sub('.\w+$','',sys.argv[1])
floatgrid_to_nc(sys.argv[1],fileroot)
| {
"repo_name": "KimberleyOpie/common-tools",
"path": "formats_to_nc/arcfltgrid_to_nc.py",
"copies": "1",
"size": "13347",
"license": "apache-2.0",
"hash": -5666187221671238000,
"line_mean": 37.5751445087,
"line_max": 239,
"alpha_frac": 0.657600959,
"autogenerated": false,
"ratio": 3.5068313189700473,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9607844644779273,
"avg_score": 0.011317526638154924,
"num_lines": 346
} |
ARCGIS=False
try:
import arcpy
ARCGIS=True
except:
ARCGIS=False
import os
from osgeo import ogr
from osgeo import osr
import shutil
import tempfile
import logging
class Toolbox(object):
    """ArcGIS Python toolbox wrapper; the toolbox name is the .pyt file name."""

    def __init__(self):
        """Register the toolbox label, alias and the tools it exposes."""
        self.alias = ""
        self.label = "Service Downloader"
        # Tool classes published by this toolbox
        self.tools = [Edown]
def _get_first_source_dataset(folder):
    """
    Open the first file in `folder` as an OGR data source.

    :return: osgeo.ogr.DataSource
    """
    # NOTE(review): os.listdir order is filesystem-dependent and the folder is
    # assumed non-empty; join_files() relies on seeing the same ordering when
    # it skips element [0] — confirm before changing either function.
    first_source_file = os.listdir(folder)[0]
    first_source_ds = ogr.Open(os.path.join(folder, first_source_file))
    return first_source_ds
def transform(infile, output, insrs, format_name):
    """Transform input file to output target file, reprojecting from the
    input coordinate reference system to WGS84 (EPSG:4326).

    :param infile: name of the input file
    :param output: name of the output file
    :param insrs: epsg code of input file
    :param format_name: ogr format name
    """
    logging.info('Transforming %s from %s to %s' % (infile, insrs, output))
    in_srs = osr.SpatialReference()
    in_srs.ImportFromEPSG(insrs)
    out_srs = osr.SpatialReference()
    # BUG FIX: was ImportFromEPSG(4324), which is WGS 72BE; WGS84 — the CRS
    # this function documents and intends — is EPSG:4326.
    out_srs.ImportFromEPSG(4326)
    coordTrans = osr.CoordinateTransformation(in_srs, out_srs)
    in_dsn = ogr.Open(infile)
    in_layer = in_dsn.GetLayer()
    in_feature_definition = in_layer.GetLayerDefn()
    out_driver = ogr.GetDriverByName(format_name)
    out_dsn = out_driver.CreateDataSource(output)
    out_layer = out_dsn.CreateLayer(in_layer.GetName(),
                                    geom_type=in_layer.GetGeomType())
    # Replicate the input schema on the output layer
    for i in range(0, in_feature_definition.GetFieldCount()):
        fieldDefn = in_feature_definition.GetFieldDefn(i)
        out_layer.CreateField(fieldDefn)
    # get the output layer's feature definition
    out_feature_definition = out_layer.GetLayerDefn()
    # Reproject and copy features one by one
    inFeature = in_layer.GetNextFeature()
    while inFeature:
        # get the input geometry
        geom = inFeature.GetGeometryRef().Clone()
        # reproject the geometry
        geom.Transform(coordTrans)
        # create a new feature with the reprojected geometry + attributes
        outFeature = ogr.Feature(out_feature_definition)
        outFeature.SetGeometry(geom)
        for i in range(0, out_feature_definition.GetFieldCount()):
            outFeature.SetField(out_feature_definition.GetFieldDefn(i).GetNameRef(), inFeature.GetField(i))
        out_layer.CreateFeature(outFeature)
        # destroy the features and get the next input feature
        outFeature.Destroy()
        inFeature.Destroy()
        inFeature = in_layer.GetNextFeature()
    # close the datasources (flushes the output to disk)
    in_dsn.Destroy()
    out_dsn.Destroy()
def join_files(folder):
    """
    Merge all files in given folder into one datasource
    :param folder: name of temporary folder to be joined
    :return: resulting joined files file name
    """
    # NOTE(review): assumes os.listdir returns the same ordering here and in
    # _get_first_source_dataset (which consumed element [0]) — confirm;
    # listdir order is filesystem-dependent.
    first_ds = _get_first_source_dataset(folder)
    first_layer = first_ds.GetLayer(0)
    drv = ogr.GetDriverByName('GeoJSON')
    tmpfile = tempfile.mktemp(suffix=".json")
    out_dsn = drv.CreateDataSource(tmpfile)
    # Seed the output with a copy of the first source layer
    out_layer = out_dsn.CopyLayer(first_layer, new_name=first_layer.GetName())
    for source in os.listdir(folder)[1:]:
        logging.info('Joining file %s to %s' % (source, tmpfile))
        dsn = ogr.Open(os.path.join(folder, source))
        layer = dsn.GetLayer()
        nfeatures = layer.GetFeatureCount()
        for i in range(nfeatures):
            feature = layer.GetNextFeature()
            out_layer.CreateFeature(feature.Clone())
    out_dsn.Destroy()  # flushes the merged features to disk
    return tmpfile
def download_data(url, encoding):
    """
    Download the service's features page by page into a scratch folder.

    :param url: url of the service
    :param encoding: input data encoding
    :return: folder with downloaded data
    """
    import urllib2
    import json
    import time
    folder = tempfile.mkdtemp('arcgis-scratch')
    if url.endswith('/'):
        url = url.rstrip('/')
    start = 0
    # Page through the endpoint; a negative start ends the loop
    while start >= 0:
        time.sleep(0.5)  # throttle requests
        scratchurl = url + '/query?where=OBJECTID+>+' + str(start) + '&f=pjson&outFields=*'
        content = urllib2.urlopen(scratchurl).read().decode('utf-8')
        output_name = os.path.join(folder, str(start) + '.json')
        logging.info('Downloading scratch %s' % output_name)
        with open(output_name, 'wb') as out:
            out.write(content.encode(encoding))
        jsn = json.load(open(output_name, "r"), encoding=encoding)
        try:
            # exceededTransferLimit signals that more pages remain
            if jsn['exceededTransferLimit']:
                start += 1000
        except:
            start = -1  # last page reached
    return folder
def download(url, output, encoding, insrs, format_name):
    """Download and store given data

    :param url: url of the service
    :param output: name of output file
    :param encoding: encoding of input data
    :param insrs: input source reference system
    :param format_name: OGR format name
    """
    folder = download_data(url, encoding)
    joined_file = join_files(folder)
    transform(joined_file, output, insrs, format_name)
    # Clean up the scratch folder and the intermediate joined file
    shutil.rmtree(folder)
    os.remove(joined_file)
    if not os.path.isfile(output):
        # BUG FIX: `Error` was undefined, so the failure path raised a
        # NameError instead of the intended error; raise IOError as main() does.
        raise IOError("Output file not created, the whole process failed")
    else:
        logging.info("File %s successfuly created" % output)
def main():
    """Command-line entry point: parse arguments and run the download."""
    import argparse
    parser = argparse.ArgumentParser(description='Download data via ESRI ArcGIS Rest API')
    # (flags, keyword arguments) for each supported option
    options = [
        (('--url',), dict(help='URL of the service', required=True)),
        (('--output',), dict(help='output file name', required=True)),
        (('--encoding',), dict(default='utf-8',
                               help='encoding of input data, default "utf-8"')),
        (('--srs',), dict(default='5514',
                          help='coordinate reference system code of input data, default "5514"')),
        (('--overwrite',), dict(action='store_true', help='output file name')),
        (('--verbose',), dict(action='store_true', help='verbose output')),
        (('--format',), dict(default="GeoJSON",
                             help='OGR data format name, default GeoJSON')),
    ]
    for flags, kwargs in options:
        parser.add_argument(*flags, **kwargs)
    args = parser.parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    if os.path.exists(args.output):
        logging.warning('Output file %s already exists and will be overwritten' % args.output)
        if args.overwrite:
            os.remove(args.output)
            logging.info('Output file %s removed' % args.output)
        else:
            raise IOError("File already exists, try --overwrite")
    download(args.url, args.output, args.encoding, int(args.srs), args.format)
# Script entry point. NOTE(review): this guard sits ABOVE the Edown class, so
# command-line use never touches arcpy; Edown below only runs inside ArcGIS.
if __name__ == '__main__':
    main()
class Edown(object):
    """ArcGIS toolbox tool: downloads features from a queryable ArcGIS Server
    endpoint into a geodatabase feature class and adds them to the map."""

    def __init__(self):
        """Define the tool (tool name is the name of the class)."""
        self.label = "Endpoint Downloader"
        self.description = "This tool downloads geometry from queryable ArcGis Server endpoint."
        self.canRunInBackground = False

    def getParameterInfo(self):
        """Define parameter definitions"""
        # Endpoint URL of the queryable layer
        param0 = arcpy.Parameter(
            displayName="ArcGis Server Endpoint URL",
            name="url",
            datatype="GPString",
            parameterType="Required",
            direction="Input")
        # Folder used for the temporary per-page JSON downloads
        param1 = arcpy.Parameter(
            displayName="Scratch Folder",
            name="scratch",
            datatype="DEFolder",
            parameterType="Required",
            direction="Input")
        param2 = arcpy.Parameter(
            displayName="Output Geodatabase",
            name="outDB",
            datatype="DEWorkspace",
            parameterType="Required",
            direction="Input")
        param3 = arcpy.Parameter(
            displayName="Output Feature Class",
            name="outFe",
            datatype="GPString",
            parameterType="Required",
            direction="Output")
        params = [param0, param1, param2, param3]
        return params

    def isLicensed(self):
        """Set whether tool is licensed to execute."""
        return True

    def updateParameters(self, parameters):
        """Modify the values and properties of parameters before internal
        validation is performed. This method is called whenever a parameter
        has been changed."""
        return

    def updateMessages(self, parameters):
        """Modify the messages created by internal validation for each tool
        parameter. This method is called after internal validation."""
        return

    def execute(self, parameters, messages):
        """The source code of the tool."""
        import urllib2
        import json
        import time
        import os
        esriServer = parameters[0].valueAsText
        scratch = parameters[1].valueAsText
        outDB = parameters[2].valueAsText
        outFe = parameters[3].valueAsText
        arcpy.env.workspace = outDB
        arcpy.env.overwriteOutput = True
        if (esriServer.endswith('/')):
            esriServer = esriServer.rstrip('/')
        start = 0
        # Page through the endpoint; each page is written to the scratch
        # folder as feDownTemp_<start>.json
        while (start >= 0):
            time.sleep(0.5)  # throttle requests
            url = esriServer + '/query?where=OBJECTID+>+' + str(start) + '&f=pjson&outFields=*'
            f = urllib2.urlopen(url)
            content = f.read().decode('utf-8')
            out = open(scratch + '\\feDownTemp_' + str(start) + '.json', 'wb')
            out.write(content.encode('windows-1250'))
            out.close()
            jsn = json.load(open(scratch + '\\feDownTemp_' + str(start) + '.json'), encoding="windows-1250")
            try:
                # exceededTransferLimit signals that more pages remain
                if (jsn['exceededTransferLimit']):
                    start += 1000
            except:
                start = -1  # last page reached
        # Convert every scratch JSON to a feature class, merge, then clean up
        jsons = []
        for fle in os.listdir(scratch + '\\'):
            if (fle.endswith('.json')):
                jsons.append(fle.split('.')[0])
        for jsn in jsons:
            arcpy.JSONToFeatures_conversion(scratch + '\\' + jsn + '.json', jsn)
        arcpy.Merge_management(jsons, outFe)
        for jsn in jsons:
            arcpy.Delete_management(jsn, '')
        for jsn in jsons:
            os.remove(scratch + '\\' + jsn + '.json')
        # Add the merged result to the current map document
        mxd = arcpy.mapping.MapDocument('CURRENT')
        df = arcpy.mapping.ListDataFrames(mxd)[0]
        layer = arcpy.mapping.Layer(outFe)
        arcpy.mapping.AddLayer(df, layer, 'TOP')
        del mxd
        return
| {
"repo_name": "datastory/Data-Tools-for-ArcGIS",
"path": "Service Geometry Downloader.py",
"copies": "2",
"size": "10287",
"license": "mit",
"hash": 1727112618717945300,
"line_mean": 28.3076923077,
"line_max": 107,
"alpha_frac": 0.6360454943,
"autogenerated": false,
"ratio": 3.744812522752093,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01774517966689622,
"num_lines": 351
} |
"""ArcGIS Geoprocessing module for Scanweb data
"""
from __future__ import print_function, unicode_literals, absolute_import, division
import os.path
import re
import arcpy
from .. import get_scanweb
from ... import _DEFAULT_ACCESS_CODE
from ...gp import TABLE_DEFS_DICT_DICT
# Names of the output tables/feature classes created and populated by
# this module; they double as keys into TABLE_DEFS_DICT_DICT.
WEATHER_READINGS_TABLE_NAME = "ScanwebWeatherReadings"
SURFACE_TABLE_NAME = "ScanwebSurfaceMeasurements"
SUBSURFACE_TABLE_NAME = "ScanwebSubSurfaceMeasurements"
def create_tables(workspace, force_overwrite=False, template_gdb=None):
    """Create the Scanweb weather, surface, and subsurface tables.

    Parameters
    ----------
    workspace : str
        Geodatabase (or workspace) in which the tables are created.
    force_overwrite : bool, optional
        If True, existing tables are deleted and recreated; otherwise
        existing tables are left untouched.
    template_gdb : str, optional
        Geodatabase holding template tables/feature classes with the
        same names; used instead of adding fields one by one.

    Returns
    -------
    None
    """
    if not arcpy.Exists(workspace):
        # TODO: Use AddIDMessage
        arcpy.AddError("Workspace not found: %s" % workspace)
        # AddError does not abort execution on its own; bail out
        # explicitly instead of trying to create tables in a
        # nonexistent workspace.
        return
    tables_created = 0
    if template_gdb and not arcpy.Exists(template_gdb):
        arcpy.AddWarning("Template geodatabase not found: %s" % template_gdb)
        template_gdb = None
    for name in (WEATHER_READINGS_TABLE_NAME, SURFACE_TABLE_NAME, SUBSURFACE_TABLE_NAME):
        table_path = os.path.join(workspace, name)
        template = None
        if template_gdb:
            template = os.path.join(template_gdb, name)
            if not arcpy.Exists(template):
                arcpy.AddWarning("Template Feature Class or table does not exist: %s" % template)
                template = None
        # Skip table creation if table already exists.
        if arcpy.Exists(table_path):
            if force_overwrite:
                arcpy.management.Delete(table_path)
            else:
                continue
        if name == WEATHER_READINGS_TABLE_NAME:
            # Weather readings are point features in WGS84 (EPSG:4326).
            sr = arcpy.SpatialReference(4326)
            arcpy.management.CreateFeatureclass(
                workspace, name, "POINT", template, "No", "Yes", sr)
        else:
            arcpy.management.CreateTable(workspace, name, template)
        arcpy.AddMessage(arcpy.GetMessages(1))
        fields_dict = TABLE_DEFS_DICT_DICT[name]["fields"]
        if not template:
            # AddFields sets the default value for strings to '#'
            # instead of Null, so add each field individually with
            # AddField to work around that issue.
            for field_name, field_type in fields_dict.items():
                arcpy.management.AddField(table_path, field_name, field_type)
        tables_created += 1
    if not tables_created:
        return
    # Relationship classes require a Standard (ArcEditor) or higher
    # license; skip them (with a warning) otherwise.
    if re.match(r"((AlreadyInitialized)|(Available))", arcpy.CheckProduct("arceditor"), re.IGNORECASE):
        try:
            # Create relationships: arcpy.management.AddRelate()
            for table_name in (SURFACE_TABLE_NAME, SUBSURFACE_TABLE_NAME):
                arcpy.management.CreateRelationshipClass(
                    origin_table=os.path.join(
                        workspace, WEATHER_READINGS_TABLE_NAME),
                    destination_table=os.path.join(workspace, table_name),
                    relationship_type="COMPOSITE",
                    forward_label="%sTo%s" % (
                        WEATHER_READINGS_TABLE_NAME, table_name),
                    backward_label="%sFrom%s" % (
                        table_name, WEATHER_READINGS_TABLE_NAME),
                    message_direction="FORWARD",
                    cardinality="ONE_TO_MANY",
                    attributed="NONE",
                    origin_primary_key="StationName",
                    origin_foreign_key="StationName"
                )
        except arcpy.ExecuteError as err:
            print("Could not create relationship classes\n%s" % err)
    else:
        arcpy.AddWarning(
            "Could not create relationship classes because required license was not available")
def populate_feature_classes(workspace, accesscode=_DEFAULT_ACCESS_CODE):
    """Create or update the ScanWeb feature classes and tables.

    Ensures the output tables exist, clears any existing rows, then
    inserts the current ScanWeb readings: one point feature per
    station plus related surface and subsurface measurement rows.

    Parameters
    ----------
    workspace : str
        Geodatabase containing (or to contain) the output tables.
    accesscode : str, optional
        WSDOT traveler API access code passed to ``get_scanweb``.
    """
    create_tables(workspace)
    scanweb_data = get_scanweb(accesscode)
    # Delete the data from the existing tables.
    arcpy.AddMessage("Deleting existing data from tables...")
    for table in (SURFACE_TABLE_NAME, SUBSURFACE_TABLE_NAME, WEATHER_READINGS_TABLE_NAME):
        arcpy.management.DeleteRows("%s/%s" % (workspace, table))
    delete_msgs = arcpy.GetMessages(2)
    if delete_msgs:
        arcpy.AddMessage(delete_msgs)
    # Field lists for each cursor; the feature class additionally
    # carries geometry tokens for the station location and elevation.
    fc_fields = list(TABLE_DEFS_DICT_DICT[WEATHER_READINGS_TABLE_NAME]["fields"].keys(
    )) + ["SHAPE@XY", "SHAPE@Z"]
    surface_fields = list(
        TABLE_DEFS_DICT_DICT[SURFACE_TABLE_NAME]["fields"].keys())
    subsurface_fields = list(
        TABLE_DEFS_DICT_DICT[SUBSURFACE_TABLE_NAME]["fields"].keys())
    surface_data = []
    subsurface_data = []
    fc_cursor = arcpy.da.InsertCursor(os.path.join(
        workspace, WEATHER_READINGS_TABLE_NAME), fc_fields)
    with fc_cursor:
        for item in scanweb_data:
            # (0, 0) coordinates mean "no location"; insert a Null shape.
            point = None
            if item.Longitude != 0 and item.Latitude != 0:
                point = (item.Longitude, item.Latitude)
            row = list(
                map(item.__dict__.get, fc_fields[:-2])) + [point, item.Elevation]
            try:
                fc_cursor.insertRow(row)
            except RuntimeError as ex:
                arcpy.AddWarning("Error inserting row into %s: %s\n%s" % (WEATHER_READINGS_TABLE_NAME, row, ex))
            # row[1] holds the StationName used as the foreign key in
            # the measurement tables.
            station_name = row[1]
            if item.SurfaceMeasurements:
                for m in item.SurfaceMeasurements:
                    mrow = list(map(m.__dict__.get, surface_fields))
                    mrow[0] = station_name
                    surface_data.append(mrow)
            if item.SubSurfaceMeasurements:
                # BUG FIX: this loop previously iterated
                # item.SurfaceMeasurements, so the subsurface table was
                # filled with surface readings.
                for m in item.SubSurfaceMeasurements:
                    mrow = list(map(m.__dict__.get, subsurface_fields))
                    mrow[0] = station_name
                    subsurface_data.append(mrow)
    surf_cursor = arcpy.da.InsertCursor(os.path.join(
        workspace, SURFACE_TABLE_NAME), surface_fields)
    with surf_cursor:
        for item in surface_data:
            try:
                surf_cursor.insertRow(item)
            except TypeError:
                print(item)
                raise
    sub_cursor = arcpy.da.InsertCursor(os.path.join(
        workspace, SUBSURFACE_TABLE_NAME), subsurface_fields)
    with sub_cursor:
        for item in subsurface_data:
            sub_cursor.insertRow(item)
| {
"repo_name": "WSDOT-GIS/wsdot-traffic-gp",
"path": "wsdottraffic/scanweb/gp/__init__.py",
"copies": "1",
"size": "6946",
"license": "unlicense",
"hash": 8132495767387268000,
"line_mean": 39.8588235294,
"line_max": 112,
"alpha_frac": 0.5948747481,
"autogenerated": false,
"ratio": 4.005767012687428,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5100641760787428,
"avg_score": null,
"num_lines": null
} |
""" ArcGIS python toolboxes for ``propagator``.
This contains Classes compatible with ArcGIS python toolbox
infrastructure.
(c) Geosyntec Consultants, 2015.
Released under the BSD 3-clause license (see LICENSE file for more info)
Written by Paul Hobson (phobson@geosyntec.com)
"""
from functools import partial
from textwrap import dedent
import numpy
import arcpy
from propagator import analysis
from propagator import validate
from propagator import utils
from propagator import base_tbx
def propagate(subcatchments=None, id_col=None, ds_col=None,
              monitoring_locations=None, ml_filter=None,
              ml_filter_cols=None, value_columns=None, streams=None,
              output_path=None, verbose=False, asMessage=False):
    """
    Propagate water quality scores upstream from the subcatchments of
    a watershed.

    Parameters
    ----------
    subcatchments : str
        Path to the feature class containing the subcatchments.
        Attribute table must contain fields for the subcatchment ID
        and the ID of the downstream subcatchment.
    id_col, ds_col : str
        Names of the fields in the ``subcatchments`` feature class that
        specifies the subcatchment ID and the ID of the downstream
        subcatchment, respectively.
    monitoring_locations : str
        Path to the feature class containing the monitoring locations
        and water quality scores.
    value_columns : list of str
        List of the fields in ``monitoring_locations`` that contains
        water quality score that should be propagated.
    ml_filter : callable, optional
        Function used to exclude (remove) monitoring locations from
        from aggregation/propagation.
    ml_filter_cols : str, optional
        Name of any additional columns in ``monitoring_locations`` that
        are required to use ``ml_filter``.
    streams : str
        Path to the feature class containing the streams.
    output_path : str
        Path to where the the new subcatchments feature class with the
        propagated water quality scores should be saved.

    Returns
    -------
    output_path : str

    Examples
    --------
    >>> import propagator
    >>> from propagator import utils
    >>> with utils.WorkSpace('C:/gis/SOC.gdb'):
    ...     propagator.propagate(
    ...         subcatchments='subbasins',
    ...         id_col='Catch_ID',
    ...         ds_col='DS_ID',
    ...         monitoring_locations='wq_data',
    ...         value_columns=['Dry_Metals', 'Wet_Metals', 'Wet_TSS'],
    ...         ml_filter=lambda row: row['StationType'] != 'Coastal',
    ...         ml_filter_cols=['StationType'],
    ...         streams='SOC_streams',
    ...         output_path='propagated_metals'
    ...     )

    See also
    --------
    propagator.analysis.preprocess_wq
    propagator.analysis.mark_edges
    propagator.analysis.propagate_scores
    propagator.analysis.aggregate_streams_by_subcatchment
    propagator.utils.update_attribute_table
    """
    # Two outputs are derived from `output_path`: a subcatchment layer
    # and a stream layer, distinguished by suffix.
    subcatchment_output = utils.add_suffix_to_filename(output_path, 'subcatchments')
    stream_output = utils.add_suffix_to_filename(output_path, 'streams')
    # Step 1: aggregate monitoring-location scores within each
    # subcatchment; also writes the subcatchment output layer.
    wq, result_columns = analysis.preprocess_wq(
        monitoring_locations=monitoring_locations,
        ml_filter=ml_filter,
        ml_filter_cols=ml_filter_cols,
        subcatchments=subcatchments,
        value_columns=value_columns,
        id_col=id_col,
        ds_col=ds_col,
        output_path=subcatchment_output,
        verbose=verbose,
        asMessage=asMessage,
        msg="Aggregating water quality data in subcatchments"
    )
    # Step 2: flag subcatchments that drain out of the watershed.
    wq = analysis.mark_edges(
        wq,
        id_col=id_col,
        ds_col=ds_col,
        edge_ID='EDGE',
        verbose=verbose,
        asMessage=asMessage,
        msg="Marking all subcatchments that flow out of the watershed"
    )
    # Step 3: propagate each score column upstream, one at a time.
    for n, res_col in enumerate(result_columns, 1):
        wq = analysis.propagate_scores(
            subcatchment_array=wq,
            id_col=id_col,
            ds_col=ds_col,
            value_column=res_col,
            edge_ID='EDGE',
            verbose=verbose,
            asMessage=asMessage,
            msg="{} of {}: Propagating {} scores".format(n, len(result_columns), res_col)
        )
    # Step 4: write the propagated scores back to the output layer.
    utils.update_attribute_table(subcatchment_output, wq, id_col, result_columns)
    # Step 5: split/aggregate streams per subcatchment and attach the
    # (now uniform within each subcatchment) scores to them.
    stream_output = analysis.aggregate_streams_by_subcatchment(
        stream_layer=streams,
        subcatchment_layer=subcatchment_output,
        id_col=id_col,
        ds_col=ds_col,
        other_cols=result_columns,
        agg_method='first',
        output_layer=stream_output,
        verbose=verbose,
        asMessage=asMessage,
        msg='Aggregating and associating scores with streams.',
    )
    return subcatchment_output, stream_output
def accumulate(subcatchments_layer=None, id_col=None, ds_col=None,
               value_columns=None, streams_layer=None,
               output_layer=None, default_aggfxn='sum',
               ignored_value=None, verbose=False, asMessage=False):
    """
    Accumulate upstream subcatchment properties in each stream segment.

    Parameters
    ----------
    subcatchments_layer, streams_layer : str
        Names of the feature classes containing subcatchments and
        streams, respectively.
    id_col, ds_col : str
        Names of the fields in ``subcatchments_layer`` that contain the
        subcatchment ID and downstream subcatchment ID, respectively.
    value_columns : list of str
        List of the fields in ``subcatchments_layer`` that contain the
        water quality scores and watershed properties that should be
        accumulated. Each entry may also carry an aggregation method
        and a weighting-factor column (see
        ``validate.value_column_stats``).
    output_layer : str, optional
        Name of the new layer where the results should be saved.
    default_aggfxn : str, optional (default = 'sum')
        Aggregation method applied to columns that do not specify one.
    ignored_value : optional
        Sentinel value to exclude from the statistics.

    Returns
    -------
    output_layer : str
        Names of the new layer where the results were successfully
        saved.

    See also
    --------
    propagator.analysis.aggregate_streams_by_subcatchment
    propagator.analysis.collect_upstream_attributes
    propagator.utils.rec_groupby
    """
    # Separate value columns into field name and aggregation method
    value_columns = validate.value_column_stats(value_columns, default_aggfxn)
    value_columns_aggmethods = [i[1] for i in value_columns]
    # Weighted-average columns are paired with their weighting-factor
    # column; all other columns are passed through by themselves.
    vc_field_wfactor = []
    for col, aggmethod, wfactor in value_columns:
        if aggmethod.lower() == 'weighted_average':
            vc_field_wfactor.append([col, wfactor])
        else:
            vc_field_wfactor.append(col)
    # define the Statistic objects that will be passed to `rec_groupby`
    statfxns = []
    for agg in value_columns_aggmethods:
        statfxns.append(partial(
            utils.stats_with_ignored_values,
            statfxn=analysis.AGG_METHOD_DICT[agg.lower()],
            ignored_value=ignored_value
        ))
    # Result columns are named by prefixing the source column with the
    # first three (upper-cased) letters of the aggregation method.
    res_columns = [
        '{}{}'.format(prefix[:3].upper(), col)
        for col, prefix, _ in value_columns
    ]
    stats = [
        utils.Statistic(srccol, statfxn, rescol)
        for srccol, statfxn, rescol in zip(vc_field_wfactor, statfxns, res_columns)
    ]
    # create a unique list of columns we need
    # from the subcatchment layer
    target_fields = []
    for s in stats:
        if numpy.isscalar(s.srccol):
            target_fields.append(s.srccol)
        else:
            # weighted-average entries carry [column, weight-column]
            target_fields.extend(s.srccol)
    target_fields = numpy.unique(target_fields)
    # split the stream at the subcatchment boundaries and then
    # aggregate all of the stream w/i each subcatchment
    # into single geometries/records.
    split_streams_layer = analysis.aggregate_streams_by_subcatchment(
        stream_layer=streams_layer,
        subcatchment_layer=subcatchments_layer,
        id_col=id_col,
        ds_col=ds_col,
        other_cols=target_fields,
        output_layer=output_layer,
        agg_method="first",  # first works b/c all values are equal
    )
    # Add target_field columns back to spilt_stream_layer.
    final_fields = [s.rescol for s in stats]
    for field in final_fields:
        utils.add_field_with_value(
            table=split_streams_layer,
            field_name=field,
            field_value=None,
            field_type='DOUBLE',
        )
    # load the split/aggregated streams' attribute table
    split_streams_table = utils.load_attribute_table(
        split_streams_layer, id_col, ds_col, *final_fields
    )
    # load the subcatchment attribute table
    subcatchments_table = utils.load_attribute_table(
        subcatchments_layer, id_col, ds_col, *target_fields
    )
    # Gather, for each stream segment, the attributes of every
    # subcatchment upstream of it ...
    upstream_attributes = analysis.collect_upstream_attributes(
        subcatchments_table=subcatchments_table,
        target_subcatchments=split_streams_table,
        id_col=id_col,
        ds_col=ds_col,
        preserved_fields=target_fields
    )
    # ... then reduce those to one aggregated record per segment.
    aggregated_properties = utils.rec_groupby(upstream_attributes, id_col, *stats)
    # Update output layer with aggregated values.
    utils.update_attribute_table(
        layerpath=split_streams_layer,
        attribute_array=aggregated_properties,
        id_column=id_col,
        orig_columns=final_fields,
    )
    # Remove extraneous columns
    required_columns = [id_col, ds_col, 'FID', 'Shape', 'Shape_Length', 'Shape_Area', 'OBJECTID']
    fields_to_remove = filter(
        lambda name: name not in required_columns and name not in final_fields,
        [f.name for f in arcpy.ListFields(split_streams_layer)]
    )
    utils.delete_columns(split_streams_layer, *fields_to_remove)
    return split_streams_layer
class Propagator(base_tbx.BaseToolbox_Mixin):
    """
    ArcGIS Python toolbox to propagate water quality metrics upstream
    through subcatchments in a watershed.

    Parameters
    ----------
    None

    See also
    --------
    Accumulator
    """
    def __init__(self):
        """
        Define the tool (tool name is the name of the class).
        """
        # std attributes
        self.label = "1 - Propagate WQ scores to upstream subcatchments"
        self.description = dedent("""
            TDB
        """)
        # lazy properties -- backing fields for the arcpy.Parameter
        # properties defined here and on the mixin; each is built on
        # first access.
        self._workspace = None
        self._subcatchments = None
        self._ID_column = None
        self._downstream_ID_column = None
        self._monitoring_locations = None
        self._ml_type_col = None
        self._included_ml_types = None
        self._value_columns = None
        self._output_layer = None
        self._streams = None
        self._add_output_to_map = None

    @property
    def monitoring_locations(self):
        """ The monitoring location points whose data will be
        propagated to the subcatchments. """
        if self._monitoring_locations is None:
            self._monitoring_locations = arcpy.Parameter(
                displayName="Monitoring Locations",
                name="monitoring_locations",
                datatype="DEFeatureClass",
                parameterType="Required",
                direction="Input",
                multiValue=False
            )
            # The feature class is looked up relative to the workspace
            # parameter.
            self._set_parameter_dependency(self._monitoring_locations, self.workspace)
        return self._monitoring_locations

    @property
    def ml_type_col(self):
        # Field of `monitoring_locations` holding the station type used
        # to filter which locations participate in the propagation.
        if self._ml_type_col is None:
            self._ml_type_col = arcpy.Parameter(
                displayName="Monitoring Location Type Column",
                name="ml_type_col",
                datatype="Field",
                parameterType="Required",
                direction="Input",
                multiValue=False
            )
            self._set_parameter_dependency(self._ml_type_col, self.monitoring_locations)
        return self._ml_type_col

    @property
    def included_ml_types(self):
        # Station-type values to keep; choices are filled in by
        # `updateParameters` from the values found in `ml_type_col`.
        if self._included_ml_types is None:
            self._included_ml_types = arcpy.Parameter(
                displayName="Monitoring Location Types To Include",
                name="included_ml_types",
                datatype="GPString",
                parameterType="Required",
                direction="Input",
                multiValue=True,
            )
            self._included_ml_types.filter.type = "ValueList"
        return self._included_ml_types

    @property
    def value_columns(self):
        """ The names of the fields to be propagated into upstream
        subcatchments.

        Note on property 'multiValue': it appears that by setting
        datatype to 'Value Table' the multiValue becomes irrevlant.
        Regardless on how we set the value here, when the function
        is called a False value is assigned to multiValue. However,
        the toolbox will still accept multiple entries.
        """
        if self._value_columns is None:
            self._value_columns = arcpy.Parameter(
                displayName="Values to be Propagated",
                name="value_columns",
                datatype="Value Table",
                parameterType="Required",
                direction="Input",
                multiValue=True,
            )
            # Each row of the value table pairs a field with the
            # aggregation method applied to it.
            self._value_columns.columns = [
                ['String', 'Values To Propagate'],
                ['String', 'Aggregation Method']
            ]
            self._set_parameter_dependency(self._value_columns, self.monitoring_locations)
        return self._value_columns

    def updateParameters(self, parameters):
        """ESRI toolbox hook: refresh dependent choice lists whenever
        the user edits a parameter in the GUI."""
        params = self._get_parameter_dict(parameters)
        param_vals = self._get_parameter_values(parameters)
        ws = param_vals.get('workspace', '.')
        vc = params['value_columns']
        with utils.WorkSpace(ws):
            ml = param_vals['monitoring_locations']
            if params['ml_type_col'].altered:
                # Offer the distinct values of the chosen type column
                # as the station-type choices.
                col = param_vals['ml_type_col']
                values = utils.unique_field_values(ml, col).tolist()
                params['included_ml_types'].filter.list = values
            if params['monitoring_locations'].value:
                # weighted_average needs a weight column, which this
                # tool does not collect -- drop it from the choices.
                agg_methods = analysis.AGG_METHOD_DICT.copy()
                agg_methods.pop('weighted_average', None)
                fields = analysis._get_wq_fields(ml, ['dry', 'wet'])
                self._set_filter_list(vc.filters[0], fields)
                self._set_filter_list(vc.filters[1], list(agg_methods.keys()))
                self._update_value_table_with_default(vc, 'average')

    def _params_as_list(self):
        # Order matters: this is the order the parameters appear in
        # the tool dialog (and in the list passed to `execute`).
        params = [
            self.workspace,
            self.subcatchments,
            self.ID_column,
            self.downstream_ID_column,
            self.monitoring_locations,
            self.ml_type_col,
            self.included_ml_types,
            self.value_columns,
            self.streams,
            self.output_layer,
            self.add_output_to_map,
        ]
        return params

    def analyze(self, **params):
        """ Propagates water quality scores from monitoring locations
        to upstream subcatchments. Calls directly to :func:`propagate`.
        """
        # analysis options
        ws = params.pop('workspace', '.')
        overwrite = params.pop('overwrite', True)
        add_output_to_map = params.pop('add_output_to_map', False)
        output_layer = params.pop('output_layer', None)
        # subcatchment info
        sc = params.pop('subcatchments', None)
        ID_col = params.pop('ID_column', None)
        downstream_ID_col = params.pop('downstream_ID_column', None)
        # monitoring location info
        ml = params.pop('monitoring_locations', None)
        ml_type_col = params.pop('ml_type_col', None)
        included_ml_types = validate.non_empty_list(
            params.pop('included_ml_types', None),
            on_fail='create'
        )
        # monitoring location type filter function
        if ml_type_col is not None and len(included_ml_types) > 0:
            ml_filter = lambda row: row[ml_type_col] in included_ml_types
        else:
            ml_filter = None
        # value columns and aggregations; ' #' appears to be the GUI's
        # placeholder for an unset aggregation cell and is replaced by
        # the default method 'average' before splitting the rows.
        value_cols_string = params.pop('value_columns', None)
        value_columns = [vc.split(' ') for vc in value_cols_string.replace(' #', ' average').split(';')]
        # streams data
        streams = params.pop('streams', None)
        # perform the analysis
        with utils.WorkSpace(ws), utils.OverwriteState(overwrite):
            output_layers = propagate(
                subcatchments=sc,
                id_col=ID_col,
                ds_col=downstream_ID_col,
                monitoring_locations=ml,
                ml_filter=ml_filter,
                ml_filter_cols=ml_type_col,
                value_columns=value_columns,
                output_path=output_layer,
                streams=streams,
                verbose=True,
                asMessage=True,
            )
            if add_output_to_map:
                for lyr in output_layers:
                    self._add_to_map(lyr)
        return output_layers
class Accumulator(base_tbx.BaseToolbox_Mixin):
    """
    ArcGIS Python toolbox to accumulate subcatchments attributes and
    water quality parameters downstream through a stream.

    Parameters
    ----------
    None

    See also
    --------
    Propagator
    """
    def __init__(self):
        """Define the tool (tool name is the name of the class)."""
        # std attributes
        self.label = "2 - Accumulate subcatchment properties to stream"
        self.description = dedent("""
            TDB
        """)
        # lazy properties -- backing fields for the arcpy.Parameter
        # properties; each is built on first access.
        self._workspace = None
        self._subcatchments = None
        self._ID_column = None
        self._downstream_ID_column = None
        self._value_columns = None
        self._streams = None
        self._output_layer = None
        self._add_output_to_map = None

    def _params_as_list(self):
        # Order matters: this is the order the parameters appear in
        # the tool dialog (and in the list passed to `execute`).
        params = [
            self.workspace,
            self.subcatchments,
            self.ID_column,
            self.downstream_ID_column,
            self.value_columns,
            self.streams,
            self.output_layer,
            self.add_output_to_map,
        ]
        return params

    @property
    def value_columns(self):
        """ The names of the fields to be propagated into upstream
        subcatchments.

        Note on property 'multiValue': it appears that by setting
        datatype to 'Value Table' the multiValue becomes irrevlant.
        Regardless on how we set the value here, when the function
        is called a False value is assigned to multiValue. However,
        the toolbox will still accept multiple entries.
        """
        if self._value_columns is None:
            self._value_columns = arcpy.Parameter(
                displayName="Values to be Accumulated",
                name="value_columns",
                datatype="Value Table",
                parameterType="Required",
                direction="Input",
                multiValue=True,
            )
            # Unlike Propagator, each row also carries a weighting
            # factor (used by the weighted_average method).
            self._value_columns.columns = [
                ['String', 'Values To Accumulate'],
                ['String', 'Accumulation Method'],
                ['String', 'Weighting Factor']
            ]
            self._set_parameter_dependency(self._value_columns, self.subcatchments)
        return self._value_columns

    def updateParameters(self, parameters):
        """ESRI toolbox hook: refresh dependent choice lists whenever
        the user edits a parameter in the GUI."""
        params = self._get_parameter_dict(parameters)
        param_vals = self._get_parameter_values(parameters)
        ws = param_vals.get('workspace', '.')
        vc = params['value_columns']
        with utils.WorkSpace(ws):
            sc = param_vals['subcatchments']
            # handles field name from Propagator output
            prefix = [i[0:3] for i in analysis.AGG_METHOD_DICT.keys()]
            # handles unmodified field name
            prefix.extend(['area', 'imp', 'dry', 'wet'])
            if params['subcatchments'].value:
                fields = analysis._get_wq_fields(sc, prefix)
                # 'n/a' is offered for rows that need no weight column.
                fields.append('n/a')
                self._set_filter_list(vc.filters[0], fields)
                self._set_filter_list(vc.filters[1], list(analysis.AGG_METHOD_DICT.keys()))
                self._set_filter_list(vc.filters[2], fields)
                self._update_value_table_with_default(vc, ['sum', 'n/a'])

    def analyze(self, **params):
        """ Accumulates subcatchments properties from upstream
        subcatchments into stream. Calls directly to :func:`accumulate`.
        """
        # analysis options
        ws = params.pop('workspace', '.')
        overwrite = params.pop('overwrite', True)
        add_output_to_map = params.pop('add_output_to_map', False)
        # input parameters
        sc = params.pop('subcatchments', None)
        ID_col = params.pop('ID_column', None)
        downstream_ID_col = params.pop('downstream_ID_column', None)
        # value columns and aggregations; ' #' appears to be the GUI's
        # placeholder for an unset cell and is replaced by the default
        # method 'average' before splitting the rows.
        value_cols_string = params.pop('value_columns', None)
        value_columns = [vc.split(' ') for vc in value_cols_string.replace(' #', ' average').split(';')]
        streams = params.pop('streams', None)
        output_layer = params.pop('output_layer', None)
        with utils.WorkSpace(ws), utils.OverwriteState(overwrite):
            output_layers = accumulate(
                subcatchments_layer=sc,
                id_col=ID_col,
                ds_col=downstream_ID_col,
                value_columns=value_columns,
                streams_layer=streams,
                output_layer=output_layer,
                verbose=True,
                asMessage=True,
            )
            if add_output_to_map:
                self._add_to_map(output_layers)
        return output_layers
| {
"repo_name": "gcfang/python-propagator",
"path": "propagator/toolbox.py",
"copies": "2",
"size": "22047",
"license": "bsd-3-clause",
"hash": 6078687056194891000,
"line_mean": 33.3411214953,
"line_max": 104,
"alpha_frac": 0.5993559214,
"autogenerated": false,
"ratio": 4.177150435771125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002465461898056427,
"num_lines": 642
} |
""" ArcGIS python toolboxes for python-tidegates.
This contains Classes compatible with ArcGIS python toolbox
infrastructure.
(c) Geosyntec Consultants, 2015.
Released under the BSD 3-clause license (see LICENSE file for more info)
Written by Paul Hobson (phobson@geosyntec.com)
"""
import os
from textwrap import dedent
from collections import OrderedDict
import arcpy
import numpy
import tidegates
from tidegates import utils
# ALL ELEVATIONS IN FEET
# Sea level rise scenarios: 0 through 6 ft in 1-ft increments.
SEALEVELRISE = numpy.arange(7)
# Storm-surge elevations (ft) keyed by recurrence scenario; an
# OrderedDict preserves the MHHW, 10yr, 50yr, 100yr ordering.
SURGES = OrderedDict(MHHW=4.0)
SURGES['10yr'] = 8.0
SURGES['50yr'] = 9.6
SURGES['100yr'] = 10.5
class StandardScenarios(object):
""" ArcGIS Python toolbox to analyze floods during the standard sea
level rise and storm surge scenarios.
Parameters
----------
None
See also
--------
Flooder
"""
def __init__(self):
    """Define the tool (tool name is the name of the class)."""
    # std attributes read by the ESRI toolbox framework
    self.label = "2 - Evaluate all standard scenarios"
    self.canRunInBackground = True
    self.description = dedent("""
        Allows the user to recreate the standard scenarios with their
        own input.
        The standard scenarios are each combination of storm surges
        (MHHW, 10-yr, 50-yr, 100-yr) and sea level rise up to 6 feet in
        1-ft increments.
    """)
    # lazy properties -- backing fields for the arcpy.Parameter
    # properties; each is built on first access.
    self._workspace = None
    self._dem = None
    self._zones = None
    self._ID_column = None
    self._flood_output = None
    self._building_output = None
    self._wetland_output = None
    self._wetlands = None
    self._buildings = None
def isLicensed(self):
    """ESRI toolbox hook controlling whether the tool may run.

    Always returns True: this is an open-source, BSD-licensed project,
    so no license gating is performed.
    """
    return True
def updateMessages(self, parameters):  # pragma: no cover
    """ESRI toolbox hook called after internal validation.

    No custom validation messages are produced, so this is a no-op.
    """
    return
def updateParameters(self, parameters):  # pragma: no cover
    """ESRI toolbox hook invoked whenever a GUI parameter changes.

    Parameter dependencies are wired up when each parameter property
    is first created, so nothing needs to happen here; the hook is a
    deliberate no-op.
    """
    return
def getParameterInfo(self):
    """ESRI toolbox hook returning every parameter definition.

    Rather than building the parameters inline here (as ESRI suggests),
    the list is assembled from the class's named parameter properties
    so each one can be accessed meaningfully elsewhere.
    """
    return self._params_as_list()
def execute(self, parameters, messages):  # pragma: no cover
    """ESRI toolbox hook that actually runs the tool.

    The opaque parameter list ESRI passes in is first converted to a
    name -> text-value dictionary (with ``elevation`` allowed to carry
    multiple ';'-separated values), then handed off to
    ``main_execute``, which performs the scenario loop and analysis.
    """
    kwargs = self._get_parameter_values(parameters, multivals=['elevation'])
    self.main_execute(**kwargs)
    return None
@staticmethod
def _set_parameter_dependency(downstream, *upstream):
    """Record *upstream* parameters as dependencies of *downstream*.

    Parameters
    ----------
    downstream : arcpy.Parameter
        The parameter that relies on the others.
    *upstream : arcpy.Parameter
        Any number of parameters whose names become the dependency
        list of ``downstream``.

    Returns
    -------
    None

    See Also
    --------
    http://goo.gl/HcR6WJ
    """
    downstream.parameterDependencies = [dep.name for dep in upstream]
@staticmethod
def _show_header(title, verbose=True):
    """Format *title* as an underlined banner and display it.

    Parameters
    ----------
    title : str
        The message to be shown.
    verbose : bool, optional (True)
        Forwarded to ``utils._status`` to control whether the banner
        is actually printed/messaged.

    Returns
    -------
    header : str
        The formatted banner text (leading newline, title, dashes).
    """
    banner = '\n{}\n{}'.format(title, '-' * len(title))
    utils._status(banner, verbose=verbose, asMessage=True, addTab=False)
    return banner
@staticmethod
def _add_to_map(layerfile, mxd=None):
    """Add a layer or raster to a map document.

    Parameters
    ----------
    layerfile : str
        Path to the layer or raster that will be added.
    mxd : str, optional
        Path to an ESRI map document; defaults to the currently open
        document ("CURRENT") when omitted.

    Returns
    -------
    ezmd : utils.EasyMapDoc
        The wrapper around the map document ``layerfile`` was added to.
    """
    target = 'CURRENT' if mxd is None else mxd
    ezmd = utils.EasyMapDoc(target)
    if ezmd.mapdoc is not None:
        ezmd.add_layer(layerfile)
    return ezmd
@staticmethod
def _add_scenario_columns(layer, elev=None, surge=None, slr=None):
    """Stamp scenario metadata fields onto a shapefile/layer in place.

    Parameters
    ----------
    layer : str or arcpy.mapping.Layer
        The path to the layer, or the layer object, to modify.
    elev, slr : float, optional
        Final flood elevation and sea level rise for the scenario.
    surge : str, optional
        Storm-surge label for the scenario (e.g., MHHW, 100yr).

    Returns
    -------
    None
    """
    # Shared keyword arguments for every add_field_with_value call.
    reporting = dict(verbose=True, asMessage=True)
    if elev is not None:
        utils.add_field_with_value(
            table=layer,
            field_name="flood_elev",
            field_value=float(elev),
            msg="Adding 'flood_elev' field to ouput",
            **reporting
        )
    if surge is not None:
        utils.add_field_with_value(
            table=layer,
            field_name="surge",
            field_value=str(surge),
            field_length=10,
            msg="Adding storm surge field to ouput",
            **reporting
        )
    if slr is not None:
        utils.add_field_with_value(
            table=layer,
            field_name="slr",
            field_value=int(slr),
            msg="Adding sea level rise field to ouput",
            **reporting
        )
@staticmethod
def _get_parameter_values(parameters, multivals=None):
    """Convert ESRI's opaque parameter list into a name -> value dict.

    Parameters
    ----------
    parameters : list of arcpy.Parameter-type thingies
        The list ESRI passes to the :meth:`.execute` method.
    multivals : str or list of str, optional
        Names of parameters whose text value is a ';'-separated list
        and should be split into a python list.

    Returns
    -------
    params : dict
        Parameter names mapped to their text values (or lists of
        text values for the names in ``multivals``).
    """
    if multivals is None:
        multivals = []
    elif numpy.isscalar(multivals):
        multivals = [multivals]
    return {
        p.name: p.valueAsText.split(';') if p.name in multivals else p.valueAsText
        for p in parameters
    }
@staticmethod
def _prep_flooder_input(elev=None, surge=None, slr=None, num=None,
                        flood_output=None):
    """Work out the elevation, header text, and temp filename for one
    flooding scenario.

    Parameters
    ----------
    elev, slr : float, optional
        Explicit flood elevation, or the sea level rise to add to the
        *surge* scenario's elevation when *elev* is not given.
    surge : str, optional
        Storm-surge label (a key of ``SURGES``, e.g., MHHW, 100yr).
    num : int, optional
        Sequence number forwarded to ``utils.create_temp_filename``.
    flood_output : str
        Path/filename where the final flooded areas will be saved
        (required).

    Returns
    -------
    elevation : float
        Flood elevation for this scenario.
    title : str
        Header text to display as an arcpy message.
    temp_fname : str
        Path/name of the temporary intermediate-output file.
    """
    if elev is None:
        # Scenario elevation = storm surge elevation + sea level rise.
        elevation = float(slr + SURGES[surge])
        title = "Analyzing flood elevation: {} ft ({}, {})".format(elevation, surge, slr)
    else:
        elevation = float(elev)
        title = "Analyzing flood elevation: {} ft".format(elevation)
    if flood_output is None:
        raise ValueError('must provide a `flood_output`')
    root, ext = os.path.splitext(flood_output)
    tagged = root + str(elevation).replace('.', '_') + ext
    temp_fname = utils.create_temp_filename(tagged, num=num, prefix='', filetype='shape')
    return elevation, title, temp_fname
@property
def workspace(self):
""" The directory or geodatabase in which the analysis will
occur.
"""
if self._workspace is None:
self._workspace = arcpy.Parameter(
displayName="Analysis WorkSpace",
name='workspace',
datatype="DEWorkspace",
parameterType="Required",
direction="Input",
multiValue=False
)
return self._workspace
@property
def dem(self):
""" DEM file (topography) to be used in the analysis.
"""
if self._dem is None:
self._dem = arcpy.Parameter(
displayName="Digital Elevation Model",
name="dem",
datatype="DERasterDataset",
parameterType="Required",
direction="Input",
multiValue=False
)
self._set_parameter_dependency(self._dem, self.workspace)
return self._dem
@property
def zones(self):
""" The Zones of influence polygons to be used in the analysis.
"""
if self._zones is None:
self._zones = arcpy.Parameter(
displayName="Tidegate Zones of Influence",
name="zones",
datatype="DEFeatureClass",
parameterType="Required",
direction="Input",
multiValue=False
)
self._set_parameter_dependency(self._zones, self.workspace)
return self._zones
@property
def ID_column(self):
""" Name of the field in `zones` that uniquely identifies
each zone of influence.
"""
if self._ID_column is None:
self._ID_column = arcpy.Parameter(
displayName="Column with Tidegate IDs",
name="ID_column",
datatype="Field",
parameterType="Required",
direction="Input",
multiValue=False
)
self._set_parameter_dependency(self._ID_column, self.zones)
return self._ID_column
@property
def flood_output(self):
""" Where the flooded areas for each scenario will be saved.
"""
if self._flood_output is None:
self._flood_output = arcpy.Parameter(
displayName="Output floods layer/filename",
name="flood_output",
datatype="GPString",
parameterType="Required",
direction="Input"
)
return self._flood_output
@property
def building_output(self):
""" Where the flooded buildings for each scenario will be saved.
"""
if self._building_output is None:
self._building_output = arcpy.Parameter(
displayName="Output layer/filename of impacted buildings",
name="building_output",
datatype="GPString",
parameterType="Optional",
direction="Input"
)
return self._building_output
@property
def wetland_output(self):
""" Where the flooded wetlands for each scenario will be saved.
"""
if self._wetland_output is None:
self._wetland_output = arcpy.Parameter(
displayName="Output layer/filename of impacted wetlands",
name="wetland_output",
datatype="GPString",
parameterType="Optional",
direction="Input"
)
return self._wetland_output
@property
def wetlands(self):
""" Input layer of wetlands.
"""
if self._wetlands is None:
self._wetlands = arcpy.Parameter(
displayName="Wetlands",
name="wetlands",
datatype="DEFeatureClass",
parameterType="Optional",
direction="Input",
multiValue=False
)
self._set_parameter_dependency(self._wetlands, self.workspace)
return self._wetlands
@property
def buildings(self):
""" Input layer of building footprints.
"""
if self._buildings is None:
self._buildings = arcpy.Parameter(
displayName="Buildings footprints",
name="buildings",
datatype="DEFeatureClass",
parameterType="Optional",
direction="Input",
multiValue=False
)
self._set_parameter_dependency(self._buildings, self.workspace)
return self._buildings
def _params_as_list(self):
params = [
self.workspace,
self.dem,
self.zones,
self.ID_column,
self.flood_output,
self.wetlands,
self.wetland_output,
self.buildings,
self.building_output,
]
return params
def make_scenarios(self, **params):
""" Makes a list of dictionaries of all scenario parameters that
will be analyzed by the toolbox.
Parameters
----------
**params : keyword arguments
Keyword arguments of analysis parameters generated by
:meth:`._get_parameter_values`
Returns
-------
scenarios : list of dictionaries
A list of dictionaries describing each scenario to be
analyzed. Keys of the dictionaries will be:
- *elev* - the custom elevation
- *surge_name* - the name of a storm surge event
- *surge_elev* - the elevation associated with "surge_name"
- *slr* - the amount of sea level rise to be considered.
When analyzing custom elevations, all other entries are set
to None. Likewise, when evaluating standard scenarios,
"elev" is None.
"""
scenario_list = []
# if elevation is in the parameters, then we *know* this is
# a custom flood elevation. Otherwise, we're evaluating the
# standard scenarios.
elevations = params.get('elevation', None)
if numpy.isscalar(elevations):
elevations = [elevations]
# standard scenarios
if elevations is None:
for surge_name, surge_elev in SURGES.items():
for slr in SEALEVELRISE:
scenario = {
'elev': None,
'surge_name': surge_name,
'surge_elev': surge_elev,
'slr': slr,
}
scenario_list.append(scenario)
# custom floods
else:
for elev in elevations:
scenario = {
'elev': float(elev),
'surge_name': None,
'surge_elev': None,
'slr': None,
}
scenario_list.append(scenario)
return scenario_list
    def analyze(self, topo_array, zones_array, template,
                elev=None, surge=None, slr=None, num=0, **params):
        """ Tool-agnostic helper function for :meth:`.main_execute`.

        Parameters
        ----------
        topo_array : numpy array
            Floating point array of the digital elevation model.
        zones_array : numpy array
            Categorical (integer) array of where each non-zero value
            delineates a tidegate's zone of influence.
        template : arcpy.Raster or tidegates.utils.RasterTemplate
            A raster or raster-like object that define the spatial
            extent of the analysis area. Required attributes are:
              - template.meanCellWidth
              - template.meanCellHeight
              - template.extent.lowerLeft
        elev : float, optional
            Custom elevation to be analyzed
        slr : float, optional
            Sea level rise associated with the standard scenario.
        surge : str, optional
            The name of the storm surge associated with the scenario
            (e.g., MHHW, 100yr).
        num : int, optional
            Scenario counter used to keep temporary filenames unique.
        **params : keyword arguments
            Keyword arguments of analysis parameters generated by
            `self._get_parameter_values`

        Returns
        -------
        floods, flooded_wetlands, flooded_buildings : arcpy.mapping.Layers
            Layers (or None) of the floods and flood-impacted wetlands
            and buildings, respectively.
        """
        # prep input (computes the final elevation, message header, and
        # the per-scenario temporary output path)
        elev, title, floods_path = self._prep_flooder_input(
            flood_output=params['flood_output'],
            elev=elev,
            surge=surge,
            slr=slr,
            num=num,
        )
        # define the scenario in the message windows
        self._show_header(title)
        # run the scenario and add its info to the output attribute table
        flooded_zones = tidegates.flood_area(
            topo_array=topo_array,
            zones_array=zones_array,
            template=template,
            ID_column=params['ID_column'],
            elevation_feet=elev,
            filename=floods_path,
            num=num,
            verbose=True,
            asMessage=True
        )
        self._add_scenario_columns(flooded_zones.dataSource, elev=elev, surge=surge, slr=slr)
        # setup temporary files for impacted wetlands and buildings
        wl_path = utils.create_temp_filename(floods_path, prefix="_wetlands_", filetype='shape', num=num)
        bldg_path = utils.create_temp_filename(floods_path, prefix="_buildings_", filetype='shape', num=num)
        # assess impacts due to flooding; wetlands/buildings inputs are
        # optional, so assess_impact may return None for those layers
        fldlyr, wtlndlyr, blgdlyr = tidegates.assess_impact(
            floods_path=floods_path,
            flood_idcol=params['ID_column'],
            wetlands_path=params.get('wetlands', None),
            wetlands_output=wl_path,
            buildings_path=params.get('buildings', None),
            buildings_output=bldg_path,
            cleanup=False,
            verbose=True,
            asMessage=True,
        )
        if wtlndlyr is not None:
            self._add_scenario_columns(wtlndlyr.dataSource, elev=elev, surge=surge, slr=slr)
        return fldlyr, wtlndlyr, blgdlyr
    @staticmethod
    @utils.update_status()
    def finish_results(outputname, results, **kwargs):
        """ Merges and cleans up compiled output from `analyze`.

        Parameters
        ----------
        outputname : str
            Path to where the final file should be saved. If None, no
            merging happens, but cleanup (below) still runs.
        results : list of str
            Lists of all of the floods, flooded wetlands, and flooded
            buildings, respectively, that will be merged and deleted.
        sourcename : str, optional
            Path to the original source file of the results. If
            provided, its attributes will be spatially joined to the
            concatenated results.
        cleanup : bool, optional (default True)
            When True, the intermediate per-scenario results are
            deleted after merging.

        Returns
        -------
        None

        """
        sourcename = kwargs.pop('sourcename', None)
        cleanup = kwargs.pop('cleanup', True)

        if outputname is not None:
            if sourcename is not None:
                # concatenate into a temp file, join the source layer's
                # attributes onto it, then drop the temp file
                tmp_fname = utils.create_temp_filename(outputname, filetype='shape')
                utils.concat_results(tmp_fname, *results)
                utils.join_results_to_baseline(
                    outputname,
                    utils.load_data(tmp_fname, 'layer'),
                    utils.load_data(sourcename, 'layer')
                )
                utils.cleanup_temp_results(tmp_fname)
            else:
                utils.concat_results(outputname, *results)

        # NOTE: cleanup runs even when outputname is None, so the
        # intermediate results are removed regardless of merging.
        if cleanup:
            utils.cleanup_temp_results(*results)
    def main_execute(self, **params):
        """ Performs the flood-impact analysis on multiple flood
        elevations.

        Parameters
        ----------
        workspace : str
            The folder or geodatabase where the analysis will be
            executed.
        dem : str
            Filename of the digital elevation model (topography data)
            to be used in determining the inundated areas.
        zones : str
            Name of zones of influence layer.
        ID_column : str
            Name of the field in ``zones`` that uniquely identifies
            each zone of influence.
        elevation : list, optional
            List of (custom) flood elevations to be analyzed. If this is
            not provided, *all* of the standard scenarios will be
            evaluated.
        flood_output : str
            Filename where the extent of flooding and damage will be
            saved.
        wetlands, buildings : str, optional
            Names of the wetland and building footprint layers.
        wetland_output, building_output : str, optional
            Filenames where the flooded wetlands and building footprints
            will be saved.

        Returns
        -------
        None

        """
        wetlands = params.get('wetlands', None)
        buildings = params.get('buildings', None)

        # per-scenario intermediate results, merged at the end
        all_floods = []
        all_wetlands = []
        all_buildings = []

        # run everything inside the requested workspace with overwrite on
        with utils.WorkSpace(params['workspace']), utils.OverwriteState(True):
            # rasters are converted to arrays once, shared by every scenario
            topo_array, zones_array, template = tidegates.process_dem_and_zones(
                dem=params['dem'],
                zones=params['zones'],
                ID_column=params['ID_column']
            )

            for num, scenario in enumerate(self.make_scenarios(**params)):
                fldlyr, wtlndlyr, blgdlyr = self.analyze(
                    topo_array=topo_array,
                    zones_array=zones_array,
                    template=template,
                    elev=scenario['elev'],
                    surge=scenario['surge_name'],
                    slr=scenario['slr'],
                    num=num,
                    **params
                )
                all_floods.append(fldlyr.dataSource)
                if wetlands is not None:
                    all_wetlands.append(wtlndlyr.dataSource)

                if buildings is not None:
                    all_buildings.append(blgdlyr.dataSource)

            self.finish_results(
                params['flood_output'],
                all_floods,
                msg="Merging and cleaning up all flood results",
                verbose=True,
                asMessage=True,
            )

            if wetlands is not None:
                # NOTE(review): dict.get's default only applies when the
                # key is absent; if 'wetland_output' is present but None,
                # wtld_output is None and finish_results skips merging —
                # confirm that's the intended behavior for optional params.
                wtld_output = params.get(
                    'wetland_output',
                    utils.create_temp_filename(params['wetlands'], prefix='output_', filetype='shape')
                )
                self.finish_results(
                    wtld_output,
                    all_wetlands,
                    sourcename=params['wetlands'],
                    msg="Merging and cleaning up all wetlands results",
                    verbose=True,
                    asMessage=True,
                )

            if buildings is not None:
                # same dict.get caveat as for wetland_output above
                bldg_output = params.get(
                    'building_output',
                    utils.create_temp_filename(params['buildings'], prefix='output_', filetype='shape')
                )
                self.finish_results(
                    bldg_output,
                    all_buildings,
                    sourcename=params['buildings'],
                    msg="Merging and cleaning up all buildings results",
                    verbose=True,
                    asMessage=True,
                )
class Flooder(StandardScenarios):
    """ ArcGIS Python toolbox to analyze custom flood elevations.

    Identical to :class:`StandardScenarios` except that it exposes an
    extra multi-value *elevation* parameter, so the user supplies the
    water surface elevations directly instead of running the standard
    surge/sea level rise matrix.

    Parameters
    ----------
    None

    See also
    --------
    StandardScenarios

    """
    def __init__(self):
        # std attributes (label/description shown in the ArcGIS UI)
        super(Flooder, self).__init__()
        self.label = "1 - Create flood scenarios"
        self.description = dedent("""
            Allows the user to create a custom flooding scenario given the
            following:
            1) A DEM of the coastal area
            2) A polygon layer describing the zones of influence of each
            tidegate
        """)

        # lazy properties (built on first access, see `elevation` below)
        self._elevation = None

    def _params_as_list(self):
        # Same parameter order as the base class, with `elevation`
        # inserted between ID_column and flood_output.
        params = [
            self.workspace,
            self.dem,
            self.zones,
            self.ID_column,
            self.elevation,
            self.flood_output,
            self.wetlands,
            self.wetland_output,
            self.buildings,
            self.building_output,
        ]
        return params

    @property
    def elevation(self):
        """ The flood elevation for a custom scenario (multi-value,
        lazily-built arcpy.Parameter).
        """
        if self._elevation is None:
            self._elevation = arcpy.Parameter(
                displayName="Water Surface Elevation",
                name="elevation",
                datatype="GPDouble",
                parameterType="Required",
                direction="Input",
                multiValue=True
            )
        return self._elevation
| {
"repo_name": "Geosyntec/python-tidegates",
"path": "tidegates/toolbox.py",
"copies": "1",
"size": "28613",
"license": "bsd-3-clause",
"hash": 6415376307750490000,
"line_mean": 30.3739035088,
"line_max": 108,
"alpha_frac": 0.5479327578,
"autogenerated": false,
"ratio": 4.538144329896907,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5586077087696908,
"avg_score": null,
"num_lines": null
} |
#ArcGIS Server 10.1 service editor
#view your service properties at: http://[your server URL]/arcgis/admin/services/
#put a ?f=json at the end of a service name to see the json properties -
#the JSON is what is being edited here
#Loops through the services in a particular folder and edits the
#listSupportedCRS property (adds "EPSG:3857" - to be google-riffic) for each WMS service in the folder
#created by Doug Curl, Kentucky Geological Survey, 9/12/2013
# For HTTP calls
import httplib, urllib, json
# For system tools
import sys
# For reading passwords without echoing
import getpass
def main(argv=None):
    """Prompt for ArcGIS Server connection details, then add "EPSG:3857"
    to the listSupportedCRS property of the WMS extension of every
    MapServer service in the chosen folder (via the admin REST API).
    """
    # Ask for server name & port
    #serverName = "kgs.uky.edu"
    serverName = raw_input("Enter server name (server URL): ")
    # Ask for server port - usually 6080:
    serverPort = raw_input("Enter server port (usually 6080): ")
    #Ask for server admin directory:
    serverFolder = raw_input("Enter folder in your service directory to edit services (assumes the root is '/arcgis/admin/services/'): ")
    # Ask for admin/publisher user name and password
    username = raw_input("Enter admin user name: ")
    password = getpass.getpass("Enter password: ")
    # Get a token
    token = getToken(username, password, serverName, serverPort)
    # Get the root info
    #serverURL = "/arcgis/admin/services/aasggeothermal/"
    serverURL = "/arcgis/admin/services/"+serverFolder+"/"
    # This request only needs the token and the response formatting parameter
    params = urllib.urlencode({'token': token, 'f': 'json'})
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
    # Connect to URL and post parameters
    httpConn = httplib.HTTPConnection(serverName, serverPort)
    httpConn.request("POST", serverURL, params, headers)
    # Read response
    response = httpConn.getresponse()
    if (response.status != 200):
        httpConn.close()
        print "Could not read folder information."
        return
    else:
        data = response.read()
        # Check that data returned is not an error object
        if not assertJsonSuccess(data):
            print "Error when reading server information. " + str(data)
            return
        else:
            print "Processed server information successfully. Now processing folders..."
        # Deserialize response into Python object
        dataObj = json.loads(data)
        httpConn.close()
        # Loop through each service in the folder
        for item in dataObj['services']:
            print item["serviceName"]
            print item["type"]
            # Only MapServer services can carry a WMS extension
            if item["type"] == "MapServer":
                service = item["serviceName"]+"."+item["type"]
                #sUrl = "/arcgis/admin/services/%s.%s" %(item["serviceName"], item["type"])
                print service
                serviceURL = serverURL + service
                print serviceURL
                # This request only needs the token and the response formatting parameter
                params = urllib.urlencode({'token': token, 'f': 'json'})
                headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
                # Connect to service to get its current JSON definition
                httpConn = httplib.HTTPConnection(serverName, serverPort)
                httpConn.request("POST", serviceURL, params, headers)
                # Read response
                response = httpConn.getresponse()
                if (response.status != 200):
                    httpConn.close()
                    print "Could not read service information."
                    return
                else:
                    data = response.read()
                    # Check that data returned is not an error object
                    if not assertJsonSuccess(data):
                        print "Error when reading service information. " + str(data)
                    else:
                        print "Service information read successfully. Now changing properties..."
                        # Deserialize response into Python object
                        dataObj = json.loads(data)
                        httpConn.close()
                        #print data
                        for ext in dataObj["extensions"]:
                            if ext["typeName"] == "WMSServer":
                                #Edit the supported CRS property - add the one for google for WMS:
                                ext["properties"]["listSupportedCRS"] = "EPSG:3857"
                        # Serialize back into JSON
                        updatedSvcJson = json.dumps(dataObj)
                        #print updatedSvcJson
                        # Call the edit operation on the service. Pass in modified JSON.
                        # NOTE(review): httpConn was closed above; this relies
                        # on httplib's auto-reopen on request() — confirm.
                        editSvcURL = serverURL + service + "/edit"
                        params = urllib.urlencode({'token': token, 'f': 'json', 'service': updatedSvcJson})
                        httpConn.request("POST", editSvcURL, params, headers)
                        # Read service edit response
                        editResponse = httpConn.getresponse()
                        if (editResponse.status != 200):
                            httpConn.close()
                            print "Error while executing edit."
                            return
                        else:
                            editData = editResponse.read()
                            # Check that data returned is not an error object
                            if not assertJsonSuccess(editData):
                                print "Error returned while editing service" + str(editData)
                            else:
                                print "Service edited successfully."
                            #httpConn.close()
                            #return
            else:
                # Close the connection to the current service
                httpConn.close()
# A function to generate a token given username, password and the adminURL.
def getToken(username, password, serverName, serverPort):
# Token URL is typically http://server[:port]/arcgis/admin/generateToken
tokenURL = "/arcgis/admin/generateToken"
params = urllib.urlencode({'username': username, 'password': password, 'client': 'requestip', 'f': 'json'})
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
# Connect to URL and post parameters
httpConn = httplib.HTTPConnection(serverName, serverPort)
httpConn.request("POST", tokenURL, params, headers)
# Read response
response = httpConn.getresponse()
if (response.status != 200):
httpConn.close()
print "Error while fetching tokens from admin URL. Please check the URL and try again."
return
else:
data = response.read()
httpConn.close()
# Check that data returned is not an error object
if not assertJsonSuccess(data):
return
# Extract the token from it
token = json.loads(data)
return token['token']
# A function that checks that the input JSON object
# is not an error object.
def assertJsonSuccess(data):
    parsed = json.loads(data)
    # ArcGIS admin error responses carry {"status": "error", ...}.
    if 'status' in parsed and parsed['status'] == "error":
        print("Error: JSON object returns an error. " + str(parsed))
        return False
    return True
# Script start: run main() with the CLI arguments and use its return
# value as the process exit status.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
| {
"repo_name": "usgin/AddEPSG3857PrjArcServer",
"path": "AddEPSG3857Projection101.py",
"copies": "1",
"size": "7623",
"license": "mit",
"hash": -6994348278446689000,
"line_mean": 39.3333333333,
"line_max": 137,
"alpha_frac": 0.5697232061,
"autogenerated": false,
"ratio": 4.797356828193832,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016148134271518275,
"num_lines": 189
} |
# Sets of integer IDs grouped by category — presumably Age of Empires
# scenario unit/graphic IDs (repo path suggests the agescx scenario
# library); TODO confirm against the game's ID tables.

archers = {4, 5, 24, 39, 474, 492}
arows = {54, 97, 245, 246, 315, 316, 317,
         318, 319, 320, 321, 322, 328, 360, 363,
         364, 367, 372, 375, 378, 381, 385, 466,
         470, 471, 475, 476, 477, 478, 485, 503,
         504, 505, 507, 509, 510, 511, 512, 514,
         516, 517, 518, 519, 521, 522, 523, 524,
         525, 537, 538, 540, 541, 627, 628, 746,
         747, 786, 787}
etas = {76, 158, 299, 444, 479, 493, 544,
        571, 573, 575, 577, 748, 749}
buildings = {10, 12, 14, 18, 19, 20, 30, 31,
             32, 33, 45, 47, 49, 50, 51, 68, 70, 71,
             72, 79, 82, 84, 86, 87, 101, 103, 104,
             105, 109, 110, 116, 129, 130, 131, 132,
             133, 137, 141, 142, 150, 153, 179, 199,
             208, 209, 210, 234, 235, 236, 276, 278,
             345, 445, 446, 463, 464, 465, 481, 482,
             483, 484, 498, 562, 563, 564, 565, 584,
             585, 586, 587, 597, 598, 599, 605, 606,
             607, 608, 609, 610, 611, 612, 613, 614,
             615, 616, 617, 618, 619, 620, 621, 624,
             625, 626, 655, 684, 685, 689, 690, 696,
             712, 713, 714, 715, 716, 717, 718, 719,
             738, 739, 740, 741, 742, 743, 785, 805,
             806, 807, 808, 826}
castle = {6, 7, 8, 11, 25, 40, 41, 42, 46, 73,
          94, 232, 239, 281, 282, 291, 331, 434, 440,
          441, 530, 531, 534, 553, 554, 555, 556, 557,
          558, 559, 560, 561, 583, 596, 692, 694, 725,
          726, 755, 757, 759, 761, 763, 765, 771, 773,
          827, 829}
cavalry = {37, 38, 283, 329, 330, 448, 546, 569}
cliffs = {265, 266, 270, 271, 272, 273}
# NOTE(review): the bare 97 below sits between 496 and 501 — possibly a
# typo for 497 (97 already appears in `arows`); confirm before changing.
dead = {3, 16, 22, 23, 26, 27, 28, 34, 43, 44, 58,
        60, 62, 98, 99, 100, 107, 111, 113, 115, 121,
        134, 135, 136, 138, 139, 140, 149, 151, 152,
        154, 157, 178, 180, 181, 194, 205, 211, 213,
        215, 217, 219, 221, 223, 224, 225, 226, 227,
        228, 229, 230, 233, 237, 238, 353, 355, 356,
        423, 425, 431, 435, 449, 480, 494, 495, 496,
        97, 501, 502, 543, 547, 549, 568, 570, 572, 574,
        576, 578, 580, 582, 589, 591, 593, 595, 622,
        630, 631, 633, 675, 687, 693, 695, 705, 708,
        735, 750, 754, 756, 762, 764, 772, 776, 778,
        780, 784, 811, 813, 815, 823, 825, 828, 839,
        841, 843, 853}
heroes = {160, 161, 163, 164, 165, 166, 167, 168,
          169, 170, 171, 172, 173, 174, 175, 176, 177,
          195, 196, 197, 198, 200, 202, 424, 426, 428,
          430, 432, 629, 632, 634, 636, 638, 640, 642,
          644, 646, 648, 650, 652, 678, 680, 682, 683,
          686, 698, 700, 702, 704, 706, 707, 729, 730,
          731, 733, 777, 779, 781, 783, 824, 838, 840,
          842, 844, 845, 847, 849, 852, 860, 861}
infantry = {74, 75, 77, 93, 358, 359, 473, 567,
            751, 752}
market = {84, 110, 116, 128, 137, 204}
misc = {159, 242, 244, 247, 248, 249, 252, 253,
        262, 274, 310, 311, 313, 314, 324, 325, 326,
        327, 352, 365, 366, 368, 369, 371, 374, 376,
        377, 380, 417, 447, 452, 453, 454, 459, 462,
        468, 469, 506, 508, 513, 515, 520, 526, 551,
        552, 656, 657, 658, 676, 677, 728, 736, 737,
        767}
monastery = {125, 285, 286, 287, 288, 289, 290,
             292, 294, 295, 296, 297, 298, 300, 301, 302,
             303, 304, 305, 306, 307, 308, 775}
others = {48, 53, 59, 65, 66, 69, 89, 96, 102, 126,
          143, 144, 145, 146, 147, 148, 241, 333, 334, 335,
          336, 337, 338, 339, 340, 341, 389, 396, 450, 451,
          455, 456, 457, 458, 499, 594, 600, 601, 602, 603,
          604, 623, 688, 709, 710, 711, 720, 721, 722, 723,
          744, 745, 810, 812, 814, 816, 817, 818, 819, 820,
          821, 822, 833, 835, 837, 851, 854, 855, 856, 857,
          858, 859, 862, 863, 864, 865}
ships = {13, 15, 17, 21, 61, 250, 420, 436, 438,
         443, 527, 528, 529, 532, 533, 535, 536, 539,
         545, 691, 831, 832}
siege = {5, 35, 36, 279, 280, 422, 542, 548, 550, 588}
trees = {284, 348, 349, 350, 351, 399, 400, 401,
         402, 403, 404, 405, 406, 407, 408, 409, 410,
         411, 413, 414, 415, 809}
# `units` is the union of the military/unit categories above.
# NOTE(review): 5 appears twice in this literal; set syntax collapses
# duplicates, so behavior is unaffected.
units = {4, 5, 5, 6, 7, 8, 11, 24, 25, 35, 36, 39,
         40, 41, 42, 46, 73, 74, 75, 77, 93, 94, 125, 204,
         232, 239, 279, 280, 281, 282, 291, 331, 358, 359,
         422, 434, 440, 441, 473, 474, 492, 530, 531, 534,
         542, 548, 550, 553, 554, 555, 556, 557, 558, 559,
         560, 561, 567, 583, 588, 596, 692, 694, 725, 726,
         751, 752, 755, 757, 759, 761, 763, 765, 771, 773,
         775, 827, 829}
villagers = {56, 57, 83, 118, 120, 122, 123, 124, 156,
             206, 207, 212, 214, 216, 218, 220, 222, 259, 293,
             354, 579, 581, 590, 592}
walls = {63, 64, 67, 78, 80, 81, 85, 88, 90, 91, 92,
         95, 117, 155, 487, 488, 490, 491, 659, 660, 661,
         662, 663, 664, 665, 666, 667, 668, 669, 670, 671,
         672, 673, 674, 789, 790, 791, 792, 793, 794, 795,
         796, 797, 798, 799, 800, 801, 802, 803, 804}
# Single resource-pile IDs (presumably mine graphics); TODO confirm.
gold = 66
stone = 102
| {
"repo_name": "dderevjanik/agescx",
"path": "agescx/groups/groups.py",
"copies": "1",
"size": "4493",
"license": "mit",
"hash": -7677254699939410000,
"line_mean": 43.4851485149,
"line_max": 54,
"alpha_frac": 0.5464055197,
"autogenerated": false,
"ratio": 1.8894028595458368,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.7836250569318088,
"avg_score": 0.01991156198554963,
"num_lines": 101
} |
#arch -i386 python
import Skype4Py
import time
import re
import os
import glob
import sys
# Import all the response classes from the responses folder.
# Each responses/*.py module (not starting with '_') is imported and the
# last attribute that can be instantiated with no arguments becomes that
# module's registered action.
os.chdir("responses")
sys.path.append(os.getcwd())
response_modules = [n[:-3] for n in glob.glob("*.py") if n[0] != '_']
module_dict = {}
for module_name in response_modules:
    module = __import__(module_name)
    for item_name in dir(module):
        try:
            new_action = getattr(module, item_name)()
            # NOTE(review): keyed by module name, so only the LAST
            # successfully-instantiated attribute survives per module —
            # confirm each module defines exactly one response class.
            module_dict[module_name] = new_action
        except Exception:
            # Best-effort: most module attributes aren't zero-arg
            # callables. Narrowed from a bare `except:` so that
            # SystemExit/KeyboardInterrupt are no longer swallowed.
            pass
class SkypeBot(object):
    """
    Skype chat bot that matches incoming messages against regex commands
    and replies using the classes loaded from the responses folder.

    To add additional responses to the skype bot:
        1. decide on the command you want to implement: e.g !sayhi
        2. Create a new file and class in the responses folder: sayhi.py
        3. Implement a response method.
        4. Update the commands dictionary below. The key is your regexp to match. Value is the filename.
        5. Go to response/help.py and put in instructions on how to use your command
    """

    # regex pattern -> responses/<module name>; the captured group is
    # passed to the module's response() method
    commands = {
        #"!help *(.*)": "help",
        "^!help *(.*)": "help",
        "^!popcorn *(.*)": "popcorn",
        "^!happy *(.*)": "happy",
        "^!sad *(.*)": "sad",
        "^!welldone *(.*)": "well_done",
        "^!be *(.*)":"be",
        }

    def __init__(self):
        # Attach to the locally running Skype client; Skype4Py invokes
        # the *Status methods below as event callbacks.
        self.skype = Skype4Py.Skype(Events=self)
        self.skype.FriendlyName = "SK Skype Bot"
        self.skype.Attach()

    def __del__(self):
        # may want to to save state across sessions
        pass

    def AttachmentStatus(self, status):
        # Re-attach whenever the Skype API becomes available again.
        if status == Skype4Py.apiAttachAvailable:
            self.skype.Attach()

    def MessageStatus(self, msg, status):
        # Only react to newly received messages, and only to the first
        # command pattern that matches (dict order — effectively
        # arbitrary on older Pythons, so patterns should not overlap).
        if status == Skype4Py.cmsReceived:
            #if "Dev Chat" in msg.Chat.Topic:
            #if msg.Chat.Type in (Skype4Py.chatTypeDialog, Skype4Py.chatTypeLegacyDialog):
                for regexp, target in self.commands.items():
                    match = re.match(regexp, msg.Body, re.IGNORECASE)
                    if match:
                        msg.MarkAsSeen()
                        reply = module_dict[target].response(*match.groups())
                        if reply:
                            msg.Chat.SendMessage(reply)
                            if target == "be": # say 2 things
                                reply2 = module_dict[target].response(*match.groups())
                                msg.Chat.SendMessage(reply2)
                        break
if __name__ == "__main__":
    # Create the bot (attaches to Skype) and idle forever; Skype4Py
    # presumably delivers events via its own callbacks, so the loop only
    # keeps the process alive.
    bot = SkypeBot()
    while True:
        time.sleep(1.0)
| {
"repo_name": "james-huang/sk_skype_bot",
"path": "SKSkypeBot.py",
"copies": "1",
"size": "2276",
"license": "mit",
"hash": -6642915616393689000,
"line_mean": 26.756097561,
"line_max": 99,
"alpha_frac": 0.6234622144,
"autogenerated": false,
"ratio": 3.347058823529412,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9387014344148088,
"avg_score": 0.016701338756264617,
"num_lines": 82
} |
# architect.py -- the source code to diagram designer/editor
import sys, re, dia, shelve
from pprint import pprint
def create_block_group(block_list):
    """Create a dia group of UML Node objects (one per block), the
    transitions between them, and a UML State per statement.

    `block_list` is a list of dicts with an 'index' key, a 'stmt_list'
    key, and optionally a 'dest_list' of {'index': ...} targets.
    Returns the group created via dia.group_create().
    """
    grp = []
    conn_dict = {}
    # create block group: stack one UML Node per block vertically
    x = 100
    y = 0
    for block in block_list:
        # pprint(block)
        object_type = dia.get_object_type("UML - Node")
        obj, _, _ = object_type.create(x, y)
        # print(obj.properties['elem_width'].value, obj.properties['elem_height'].value)
        y += obj.properties['elem_height'].value + 2
        index = block['index']
        obj.properties['name'] = str(index)
        grp.append(obj)
        # remember each node by its block index for the wiring pass below
        conn_dict[index] = obj
    # create connections: one UML Transition per (block -> dest) edge,
    # anchored to fixed connection points (6: source, 1: destination)
    for block in block_list:
        if 'dest_list' in block:
            for dest in block['dest_list']:
                object_type = dia.get_object_type("UML - Transition")
                obj, h1, h2 = object_type.create(0, 0)
                src_obj = conn_dict[block['index']]
                dest_obj = conn_dict[dest['index']]
                h1.connect(src_obj.connections[6])
                h2.connect(dest_obj.connections[1])
                grp.append(obj)
    # create stmt list: a UML State per statement, all at the origin
    # (NOTE(review): states are unnamed/unpositioned — looks unfinished;
    # the name assignment is still commented out)
    for block in block_list:
        index = block['index']
        stmt_list = block['stmt_list']
        for stmt in stmt_list:
            # pprint(stmt)
            object_type = dia.get_object_type("UML - State")
            obj, h1, h2 = object_type.create(0, 0)
            # obj.properties['name'] = "{0}:".format(stmt['type'])
            grp.append(obj)
        pass
    return dia.group_create(grp)
def import_python_shelve_dat(filename, diagram_data):
    """dia import callback: read a Python shelve file whose 'source' key
    describes functions, and render each function as a named UML Node
    grouped with its block diagram (see create_block_group).
    """
    d = shelve.open(filename)
    source = d['source']
    # type_list
    layer = diagram_data.add_layer('fun_list')
    diagram_data.set_active_layer(layer)
    # lay the functions out left-to-right
    x = 0
    y = 0
    for fun in source['fun_list']:
        object_type = dia.get_object_type("UML - Node")
        obj, _, _ = object_type.create(x, y)
        obj.properties['name'] = fun['name']
        # group the function's label node together with its blocks
        grp = create_block_group(fun['block_list'])
        grp = dia.group_create([obj, grp])
        layer.add_object(grp)
        x += 10
        #y += 10
    diagram_data.update_extents()
    d.close()
def architect_new(data, flags):
    """dia action callback: create, display, and flush a new
    'architect.dia' diagram.

    `data` and `flags` are supplied by dia's action interface (the
    incoming `data` argument is unused). Returns the new diagram's
    data object.
    """
    diagram = dia.new('architect.dia')
    # keep a separate name instead of shadowing the `data` parameter
    new_data = diagram.data
    diagram.display()
    diagram.flush()
    return new_data
def architect_update(data, flags):
    """dia action callback: dump the active layer's objects, their
    connections, and their properties to stdout (debugging aid).
    """
    diagram = dia.active_display().diagram
    layer = diagram.data.active_layer
    # print(dir(dia))
    objs = layer.objects
    for obj in objs:
        print(obj.type)
        for conn in set(obj.connections):
            print(conn.connected, conn.object, conn.pos)
        props = obj.properties
        print(props.keys())
        # has_key() is Python-2-only and the file otherwise uses
        # print(); membership via keys() matches the keys() usage below.
        if 'text' in props.keys():
            text = props['text']
            # don't shadow the loop variable `obj`
            text_value = text.value
            print(text_value.text, text_value.color)
        for key in props.keys():
            print(key, props[key].name, props[key].value)
def architect_demo(data, flags):
    """Placeholder dia action callback; does nothing yet."""
    return None
# Hook the callbacks into dia: one file importer for shelve .dat files
# and three menu actions (paths are dia UI menu locations).
dia.register_import("python shelve dat", "dat", import_python_shelve_dat)
dia.register_action("Architect new", "Architect new", "/ToolboxMenu/File/FileExtensionStart", architect_new);
dia.register_action("Architect update", "Architect update", "/DisplayMenu/Dialogs/DialogsExtensionStart", architect_update);
dia.register_action("Architect demo", "Architect demo", "/DisplayMenu/Dialogs/DialogsExtensionStart", architect_demo);
| {
"repo_name": "liunx/MachineLearn",
"path": "dia/architect.py",
"copies": "1",
"size": "3466",
"license": "mit",
"hash": 7192325591859079000,
"line_mean": 29.6725663717,
"line_max": 124,
"alpha_frac": 0.5877091748,
"autogenerated": false,
"ratio": 3.452191235059761,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4539900409859761,
"avg_score": null,
"num_lines": null
} |
""" Architecture description for web assembly """
from .. import ir
from ..arch.arch import VirtualMachineArchitecture
from ..arch.stack import FramePointerLocation
from ..arch.registers import Register, RegisterClass
from ..arch.arch_info import ArchInfo, TypeInfo
# Define 'registers' that are actually wasm local variables:
class I32Register(Register):
    """32-bit integer wasm local."""
    bitsize = 32

class I64Register(Register):
    """64-bit integer wasm local."""
    bitsize = 64

class F32Register(Register):
    """32-bit float wasm local."""
    bitsize = 32

class F64Register(Register):
    """64-bit float wasm local."""
    bitsize = 64
# TODO: Note: having u64 in an i64 register might be wrong..
# Map IR types onto the wasm 'register' (local variable) kinds above.
# Note that ir.u32 deliberately lands in the i64 class (see TODO above).
register_classes = [
    RegisterClass(
        "i32",
        [ir.i8, ir.u8, ir.i16, ir.u16, ir.i32, ir.ptr],
        I32Register,
        None,
    ),
    RegisterClass("i64", [ir.u32, ir.i64, ir.u64], I64Register, None),
    RegisterClass("f32", [ir.f32], F32Register, None),
    RegisterClass("f64", [ir.f64], F64Register, None),
]
class WasmArchitecture(VirtualMachineArchitecture):
    """ Web assembly architecture description.

    Declares wasm's type sizes/alignments and maps IR types to the
    local-variable 'register' classes defined above.
    """
    name = "wasm"

    def __init__(self):
        super().__init__()
        self.info = ArchInfo(
            type_infos={
                ir.i8: TypeInfo(1, 1),
                ir.u8: TypeInfo(1, 1),
                ir.i16: TypeInfo(2, 1),
                ir.u16: TypeInfo(2, 1),
                ir.i32: TypeInfo(4, 1),
                # NOTE: u32 is stored in wasm i64 type:
                ir.u32: TypeInfo(8, 1),
                ir.i64: TypeInfo(8, 1),
                ir.u64: TypeInfo(8, 1),
                ir.f32: TypeInfo(4, 1),
                ir.f64: TypeInfo(8, 1),
                "int": ir.i32,
                "ptr": ir.i32,
            },
            register_classes=register_classes,
        )
        # wasm grows its stack down from the frame pointer's bottom
        # NOTE(review): inferred from the enum name only — confirm.
        self.fp_location = FramePointerLocation.BOTTOM
| {
"repo_name": "windelbouwman/ppci-mirror",
"path": "ppci/wasm/arch.py",
"copies": "1",
"size": "1792",
"license": "bsd-2-clause",
"hash": 3159779105112243700,
"line_mean": 26.1515151515,
"line_max": 70,
"alpha_frac": 0.5725446429,
"autogenerated": false,
"ratio": 3.4863813229571985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4558925965857198,
"avg_score": null,
"num_lines": null
} |
""" Architecture information carriers.
This module contains classes with information about a specific target.
The information present:
- endianness
- type sizes and alignment
- int size for the machine
"""
import enum
from .. import ir
class Endianness(enum.Enum):
    """ Define endianness as little or big """

    # The numeric values are arbitrary member identifiers — presumably
    # nothing depends on the numbers themselves; confirm before changing.
    LITTLE = 1
    BIG = 1000
class TypeInfo:
    """ Target specific type information.

    Attributes
    ----------
    size : int
        Size of the type in bytes.
    alignment : int
        Required alignment of the type in bytes.
    """

    def __init__(self, size, alignment):
        self.size = size
        self.alignment = alignment

    def __repr__(self):
        # Debug-friendly representation (new; no callers depend on the
        # default object repr).
        return "TypeInfo(size={}, alignment={})".format(self.size, self.alignment)
class ArchInfo:
    """ A collection of information for language frontends.

    Holds per-type size/alignment data, endianness, and the mapping
    from IR types to machine register classes.
    """

    def __init__(
        self,
        type_infos=None,
        endianness=Endianness.LITTLE,
        register_classes=(),
    ):
        self.type_infos = type_infos
        assert isinstance(endianness, Endianness)
        self.endianness = endianness
        self.register_classes = register_classes
        self._registers_by_name = {}
        # Build the IR-type -> register-class-type mapping, rejecting a
        # type claimed by more than one register class, and index every
        # named register for get_register()/has_register().
        mapping = {}
        for register_class in self.register_classes:
            for ty in register_class.ir_types:
                if ty in mapping:
                    raise ValueError("Duplicate type assignment {}".format(ty))
                mapping[ty] = register_class.typ
            if register_class.registers:
                for register in register_class.registers:
                    self._registers_by_name[register.name] = register
        self.value_classes = mapping

    def get_register(self, name):
        """ Retrieve the machine register by name. """
        return self._registers_by_name[name]

    def has_register(self, name):
        """ Test if this architecture has a register with the given name. """
        return name in self._registers_by_name

    def get_type_info(self, typ):
        """ Retrieve type information for the given type.

        A string argument is first resolved through type_infos (e.g.
        "int" -> ir.i32) before the real lookup.
        """
        if isinstance(typ, str):
            typ = self.type_infos[typ]
        assert isinstance(typ, ir.Typ)
        return self.type_infos[typ]

    def get_size(self, typ):
        """ Get the size (in bytes) of the given type """
        return self.get_type_info(typ).size

    def get_alignment(self, typ):
        """ Get the alignment for the given type """
        return self.get_type_info(typ).alignment
| {
"repo_name": "windelbouwman/ppci-mirror",
"path": "ppci/arch/arch_info.py",
"copies": "1",
"size": "2234",
"license": "bsd-2-clause",
"hash": 3542890067626368000,
"line_mean": 28.012987013,
"line_max": 79,
"alpha_frac": 0.6145926589,
"autogenerated": false,
"ratio": 4.296153846153846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 77
} |
## Architecture
# Take in inputs from the screen and preprocess them
# Pass them into an NN
# Update the weights of the NN using gradient descent
# weights['1'] - Matrix that holds weights of pixels passing into hidden layer. Dimensions: [200 x 80 x 80] -> [200 x 6400]
# weights['2'] - Matrix that holds weights of hidden layer passing into output. Dimensions: [1 x 200]
# Process is:
# processed_observations = image vector - [6400 x 1] array
# Compute hidden_layer_values = weights['1'] dot processed_observations ([200 x 6400] dot [6400 x 1]) -> [200 x 1] - this gives initial activation values.
# Next we need to transform those either via a sigmoid or an ReLU of some sort. Let's use ReLU
# ReLU(hidden_layer_values)
# Next we need to pass this one layer further
# output_layer_value = weights['2'] dot hidden_layer_values ([1 x 200] dot [200 x 1] -> [1 x 1])
# Now our output layer is the probability of going up or down. Let's make sure this output is between 0 and 1 by passing it through a sigmoid
# p = sigmoid(output_layer_value)
# Learning after round has finished:
# Figure out the result
# Compute the error
# Use the error to calculate the gradient
# The below dimensions all assume we had exactly 10 frames in the round (not necessarily true!)
# dC_dw2 = hidden_layer_values^T dot gradient_log_p ([200 x 2000] dot [2000 x 1] -> [200 x 1])
# delta_1 = gradient_log_p outer_product weights['2'] = [2000 x 1] outer_product [1 x 200] ([2000 x 200])
# dC_dw1 = delta_1^T dot input_observations ([200 x 2000] dot [2000 x 6400] -> [200 x 6400])
# After some batch size of rounds has finished,
# Use rmsprop to move weights['1'] and weights['2'] in the direction of the gradient
# Repeat!
import gym
import numpy as np
def downsample(image):
    """Halve the image resolution by keeping every other pixel in both
    spatial dimensions; the channel axis is left untouched."""
    return image[::2, ::2, :]
def remove_color(image):
    """Collapse the RGB axis (third dimension) by keeping only channel 0."""
    return image[:, :, 0]
def remove_background(image):
    """Zero out the two Pong background pixel values, in place, and
    return the same array."""
    for background_value in (144, 109):
        image[image == background_value] = 0
    return image
def preprocess_observations(input_observation, prev_processed_observation, input_dimensions):
    """ convert the 210x160x3 uint8 frame into a 6400 float vector

    Args:
        input_observation: raw frame from the gym environment.
        prev_processed_observation: processed frame from the previous call,
            or None on the first frame of an episode.
        input_dimensions: length of the flattened frame (80 * 80 = 6400).

    Returns:
        (frame_delta, processed_frame): the difference image fed to the
        network (all zeros on the first frame) and the processed current
        frame to carry into the next call.
    """
    processed_observation = input_observation[35:195]  # crop to the play area
    processed_observation = downsample(processed_observation)
    processed_observation = remove_color(processed_observation)
    processed_observation = remove_background(processed_observation)
    processed_observation[processed_observation != 0] = 1  # everything else (paddles, ball) just set to 1
    # Convert from 80 x 80 matrix to 6400 x 1 matrix.
    # FIX: the deprecated np.float alias was removed in NumPy 1.24; the
    # builtin float (== np.float64 here) is the supported spelling.
    processed_observation = processed_observation.astype(float).ravel()
    # subtract the previous frame from the current one so we are only
    # processing on changes in the game
    if prev_processed_observation is not None:
        input_observation = processed_observation - prev_processed_observation
    else:
        input_observation = np.zeros(input_dimensions)
    # return the current processed frame so the caller can subtract from
    # it next time (renamed from the shadow-prone plural local)
    return input_observation, processed_observation
def sigmoid(x):
    """Logistic function: squash x into the open interval (0, 1)."""
    return 1.0 / (1.0 + np.exp(-x))
def relu(vector):
    """Clamp negative entries to zero, in place, and return the array."""
    negative = vector < 0
    vector[negative] = 0
    return vector
def apply_neural_nets(observation_matrix, weights):
    """Forward pass of the two-layer policy network.

    Returns (hidden_layer_values, up_probability): the ReLU hidden
    activations and the sigmoid-squashed output probability.
    """
    hidden_layer_values = relu(np.dot(weights['1'], observation_matrix))
    output_layer_values = sigmoid(np.dot(hidden_layer_values, weights['2']))
    return hidden_layer_values, output_layer_values
def choose_action(probability):
    """Sample an action: 2 (up in openai gym) with the given probability,
    otherwise 3 (down in openai gym)."""
    return 2 if np.random.uniform() < probability else 3
def compute_gradient(gradient_log_p, hidden_layer_values, observation_values, weights):
    """Backpropagate the policy gradient through both layers.

    See: http://neuralnetworksanddeeplearning.com/chap2.html
    Returns a dict with the same keys as `weights` holding the gradients.
    """
    delta_L = gradient_log_p
    dC_dw2 = np.dot(hidden_layer_values.T, delta_L).ravel()
    # Propagate through the hidden layer; relu zeroes where the unit was off.
    delta_l2 = relu(np.outer(delta_L, weights['2']))
    dC_dw1 = np.dot(delta_l2.T, observation_values)
    return {'1': dC_dw1, '2': dC_dw2}
def update_weights(weights, expectation_g_squared, g_dict, decay_rate, learning_rate):
    """RMSProp weight update, mutating all three dicts in place.

    See: http://sebastianruder.com/optimizing-gradient-descent/index.html#rmsprop
    """
    epsilon = 1e-5
    for layer_name in weights:
        grad = g_dict[layer_name]
        # Exponential moving average of the squared gradient.
        expectation_g_squared[layer_name] = (
            decay_rate * expectation_g_squared[layer_name]
            + (1 - decay_rate) * grad ** 2
        )
        weights[layer_name] += (learning_rate * grad) / (
            np.sqrt(expectation_g_squared[layer_name] + epsilon)
        )
        # Reset the batch gradient buffer.
        g_dict[layer_name] = np.zeros_like(weights[layer_name])
def discount_rewards(rewards, gamma):
    """Discount rewards backwards in time with factor gamma.

    Actions taken long before the end matter less than recent ones. A
    non-zero reward marks a game boundary (pong specific!), so the
    running sum is reset there before accumulating.
    """
    discounted = np.zeros_like(rewards)
    running_add = 0
    for t in reversed(range(rewards.size)):
        if rewards[t] != 0:
            running_add = 0  # game boundary (pong specific!)
        running_add = running_add * gamma + rewards[t]
        discounted[t] = running_add
    return discounted
def discount_with_rewards(gradient_log_p, episode_rewards, gamma):
    """Scale the policy gradient by the standardized discounted rewards."""
    discounted = discount_rewards(episode_rewards, gamma)
    # Standardize to unit normal: helps control gradient estimator variance.
    discounted -= np.mean(discounted)
    discounted /= np.std(discounted)
    return gradient_log_p * discounted
def main():
    """Train a two-layer policy network on Pong with REINFORCE + RMSProp.

    Runs forever: plays episodes, accumulates policy gradients, and
    applies an RMSProp weight update every `batch_size` episodes.
    """
    env = gym.make("Pong-v0")
    observation = env.reset() # This gets us the image
    # hyperparameters
    batch_size = 10
    gamma = 0.99 # discount factor for reward
    decay_rate = 0.99
    num_hidden_layer_neurons = 200
    input_dimensions = 80 * 80
    learning_rate = 1e-4
    # FIX: episode_number was previously initialized twice; once is enough.
    episode_number = 0
    reward_sum = 0
    running_reward = None
    prev_processed_observations = None
    # Xavier-style initialization keeps initial activations well scaled.
    weights = {
        '1': np.random.randn(num_hidden_layer_neurons, input_dimensions) / np.sqrt(input_dimensions),
        '2': np.random.randn(num_hidden_layer_neurons) / np.sqrt(num_hidden_layer_neurons)
    }
    # To be used with rmsprop algorithm (http://sebastianruder.com/optimizing-gradient-descent/index.html#rmsprop)
    expectation_g_squared = {}
    g_dict = {}
    for layer_name in weights.keys():
        expectation_g_squared[layer_name] = np.zeros_like(weights[layer_name])
        g_dict[layer_name] = np.zeros_like(weights[layer_name])
    episode_hidden_layer_values, episode_observations, episode_gradient_log_ps, episode_rewards = [], [], [], []
    while True:
        env.render()
        processed_observations, prev_processed_observations = preprocess_observations(observation, prev_processed_observations, input_dimensions)
        hidden_layer_values, up_probability = apply_neural_nets(processed_observations, weights)
        episode_observations.append(processed_observations)
        episode_hidden_layer_values.append(hidden_layer_values)
        action = choose_action(up_probability)
        # carry out the chosen action
        observation, reward, done, info = env.step(action)
        reward_sum += reward
        episode_rewards.append(reward)
        # Fake label trick for the cross-entropy gradient;
        # see here: http://cs231n.github.io/neural-networks-2/#losses
        fake_label = 1 if action == 2 else 0
        loss_function_gradient = fake_label - up_probability
        episode_gradient_log_ps.append(loss_function_gradient)
        if done: # an episode finished
            episode_number += 1
            # Combine the following values for the episode
            episode_hidden_layer_values = np.vstack(episode_hidden_layer_values)
            episode_observations = np.vstack(episode_observations)
            episode_gradient_log_ps = np.vstack(episode_gradient_log_ps)
            episode_rewards = np.vstack(episode_rewards)
            # Tweak the gradient of the log_ps based on the discounted rewards
            episode_gradient_log_ps_discounted = discount_with_rewards(episode_gradient_log_ps, episode_rewards, gamma)
            gradient = compute_gradient(
                episode_gradient_log_ps_discounted,
                episode_hidden_layer_values,
                episode_observations,
                weights
            )
            # Sum the gradient for use when we hit the batch size
            for layer_name in gradient:
                g_dict[layer_name] += gradient[layer_name]
            if episode_number % batch_size == 0:
                update_weights(weights, expectation_g_squared, g_dict, decay_rate, learning_rate)
            episode_hidden_layer_values, episode_observations, episode_gradient_log_ps, episode_rewards = [], [], [], [] # reset values
            observation = env.reset() # reset env
            # Exponential moving average of per-episode reward for reporting.
            running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01
            print('resetting env. episode reward total was %f. running mean: %f' % (reward_sum, running_reward))
            reward_sum = 0
            prev_processed_observations = None

main()
| {
"repo_name": "Froskekongen/oslodatascience-rl",
"path": "pavlo/me_pong.py",
"copies": "1",
"size": "9857",
"license": "mit",
"hash": -2455530471388638700,
"line_mean": 43.0044642857,
"line_max": 154,
"alpha_frac": 0.6828649691,
"autogenerated": false,
"ratio": 3.7224320241691844,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4905296993269184,
"avg_score": null,
"num_lines": null
} |
"""Archive and extract tars."""
import logging
import tarfile
from pypyr.errors import KeyNotInContextError
# logger means the log level will be set correctly
logger = logging.getLogger(__name__)
def run_step(context):
    """Archive and/or extract tars with or without compression.

    Args:
        context: dictionary-like. Mandatory. Expects a 'tar' key holding
            'extract' and/or 'archive' lists of {in, out} mappings, plus an
            optional 'format' ('' = none, gz, bz2, xz; default lzma/xz).

    Runs whatever combination of extract and archive is present; execution
    order is always extract first, then archive. Source and destination
    paths support {key} string interpolation.

    Never extract archives from untrusted sources without prior inspection:
    members may escape the target path via absolute names or "..".

    Raises:
        KeyNotInContextError: when neither extract nor archive is given.
    """
    logger.debug("started")
    assert context, f"context must have value for {__name__}"
    context.assert_key_has_value('tar', __name__)
    tar_context = context.get_formatted('tar')

    handled_something = False
    if tar_context.get('extract', None):
        handled_something = True
        tar_extract(tar_context)
    if tar_context.get('archive', None):
        handled_something = True
        tar_archive(tar_context)

    if not handled_something:
        # This will raise exception on first item with a problem.
        raise KeyNotInContextError('pypyr.steps.tar must have either extract '
                                   'or archive specified under the tar key. '
                                   'Or both of these. It has neither.')
    logger.debug("done")
def get_file_mode_for_reading(context_tar):
    """Derive the tarfile read mode from tar['format'].

    Returns r:*, r:gz, r:bz2 or r:xz; an unset/None format yields r:*,
    which auto-deduces the compression. If the user specified something
    wacky, that's their business.
    """
    tar_format = context_tar.get('format', None)
    # Exists-but-empty ('') deliberately means "no compression", so it must
    # not fall through to the auto-detect default.
    if tar_format or tar_format == '':
        return f"r:{tar_format}"
    return 'r:*'
def get_file_mode_for_writing(context_tar):
    """Derive the tarfile write mode from tar['format'].

    Returns w:, w:gz, w:bz2 or w:xz; an unset/None format defaults to
    lzma (w:xz). If the user specified something wacky, that's their
    business.
    """
    tar_format = context_tar.get('format', None)
    # Slightly weird double-check: a falsy format could mean format is
    # absent, OR present-but-empty. Present-but-empty has special meaning -
    # no compression - so only absent/None falls back to xz.
    if tar_format or tar_format == '':
        return f"w:{tar_format}"
    return 'w:xz'
def tar_archive(context_tar):
    """Archive each configured source path to a tar archive.

    Args:
        context_tar: dictionary-like. Mandatory. context_tar['archive'] is
            a list of mappings with 'in' (path to archive) and 'out'
            (destination tar path).

    Example:
        tar:
          archive:
            - in: path/to/dir
              out: path/to/destination.tar.xs
            - in: another/my.file
              out: ./my.tar.xs
    """
    logger.debug("start")
    mode = get_file_mode_for_writing(context_tar)
    for item in context_tar['archive']:
        source = item['in']          # path to archive
        destination = item['out']    # destination tar
        with tarfile.open(destination, mode) as archive_me:
            logger.debug("Archiving '%s' to '%s'", source, destination)
            # arcname='.' roots the archive at the source itself.
            archive_me.add(source, arcname='.')
            logger.info("Archived '%s' to '%s'", source, destination)
    logger.debug("end")
def tar_extract(context_tar):
    """Extract all members of each configured tar archive.

    Args:
        context_tar: dictionary-like. Mandatory. context_tar['extract'] is
            a list of mappings with 'in' (tar to extract) and 'out'
            (destination directory).

    Example:
        tar:
          extract:
            - in: path/to/my.tar.xs
              out: /path/extract/here
            - in: another/tar.xs
              out: .

    NOTE(review): extractall is used without a member filter, so members
    with absolute paths or ".." can escape the destination - only extract
    trusted archives (as the step docs already warn).
    """
    logger.debug("start")
    mode = get_file_mode_for_reading(context_tar)
    for item in context_tar['extract']:
        source = item['in']          # tar to extract
        destination = item['out']    # output directory
        with tarfile.open(source, mode) as extract_me:
            logger.debug("Extracting '%s' to '%s'", source, destination)
            extract_me.extractall(destination)
            logger.info("Extracted '%s' to '%s'", source, destination)
    logger.debug("end")
| {
"repo_name": "pypyr/pypyr-cli",
"path": "pypyr/steps/tar.py",
"copies": "1",
"size": "5642",
"license": "apache-2.0",
"hash": -4867263574476197000,
"line_mean": 30.6966292135,
"line_max": 79,
"alpha_frac": 0.5937610776,
"autogenerated": false,
"ratio": 4.163837638376384,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 178
} |
"""Archive a road conditions plot every 5 minutes.
Called from RUN_5MIN.sh
"""
import datetime
import os
import sys
import subprocess
import tempfile
from pyiem.util import utc, logger
import pytz
import requests
LOG = logger()
def do(now):
    """Run for a given timestamp.

    Fetches the road-conditions plot from the local web service for the
    given (timezone-aware) timestamp and inserts it into the archive via
    pqinsert, unless the archived PNG already exists.
    """
    # Skip work when this timestamp was already archived.
    fn = now.strftime(
        "/mesonet/ARCHIVE/data/%Y/%m/%d/iaroads/iaroads_%H%M.png"
    )
    if os.path.isfile(fn):
        LOG.debug("skipping as file %s exists", fn)
        return
    LOG.debug("running for %s", now)
    # CAREFUL, web takes valid in CST/CDT
    service = now.astimezone(pytz.timezone("America/Chicago")).strftime(
        "http://iem.local/roads/iem.php?valid=%Y-%m-%d%%20%H:%M"
    )
    # Recent timestamps (< 10 minutes old) get "ac" routes (archive +
    # current) and use the live endpoint without an explicit valid time.
    routes = "ac" if (utc() - now) < datetime.timedelta(minutes=10) else "a"
    if routes == "ac":
        service = "http://iem.local/roads/iem.php"
    req = requests.get(service, timeout=60)
    # Spool the fetched PNG to a temp file for pqinsert; delete=False so
    # the external process can read it, unlinked manually below.
    tmpfd = tempfile.NamedTemporaryFile(delete=False)
    tmpfd.write(req.content)
    tmpfd.close()
    pqstr = "plot %s %s iaroads.png iaroads/iaroads_%s.png png" % (
        routes,
        now.strftime("%Y%m%d%H%M"),
        now.strftime("%H%M"),
    )
    LOG.debug(pqstr)
    # NOTE(review): shell=True with string formatting; inputs are internal
    # timestamps here, but a list argv would be safer.
    subprocess.call("pqinsert -i -p '%s' %s" % (pqstr, tmpfd.name), shell=True)
    os.unlink(tmpfd.name)
def main(argv):
    """Entry point: archive the plot for the given time plus two backfills."""
    # Timestamp arrives as "YYYY mm dd HH MM" on the command line.
    base = utc(*[int(part) for part in argv[1:6]])
    # Re-run for now, 24 hours ago, and 36 hours ago to catch late data.
    for minutes_ago in (0, 1440, 1440 + 720):
        do(base - datetime.timedelta(minutes=minutes_ago))


if __name__ == "__main__":
    main(sys.argv)
| {
"repo_name": "akrherz/iem",
"path": "scripts/roads/archive_roadsplot.py",
"copies": "1",
"size": "1523",
"license": "mit",
"hash": -4381844053349975000,
"line_mean": 24.813559322,
"line_max": 79,
"alpha_frac": 0.609980302,
"autogenerated": false,
"ratio": 3.0039447731755424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9113925075175542,
"avg_score": 0,
"num_lines": 59
} |
""" ArchiveBot wpull 2.x plugin (replaces 1.x hooks)
This module implements the integration layer between ArchiveBot and wpull. In
particular, it handles ignore settings, settings changes, dashboard reporting,
and aborts.
"""
# The ArchiveBot plugin will be split across multiple modules, but
# sys.path for plugins does not include the plugin file's directory.
# We add that here.
import os
import sys
import random
import time
import logging
import re
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
# Import wpull bits used by the plugin.
from wpull.application.hook import Actions
from wpull.application.plugin import WpullPlugin, PluginFunctions, hook, event
from wpull.pipeline.app import AppSession
from wpull.pipeline.item import URLRecord
from wpull.pipeline.session import ItemSession
from wpull.stats import Statistics
from wpull.url import URLInfo
from archivebot import shared_config
from archivebot.control import Control
from archivebot.wpull import settings as mod_settings
# dupespotter plugin:
import archivebot.wpull.plugin
def _extract_response_code(item_session: ItemSession) -> int:
    """Best-effort extraction of a status code from the session response.

    Tries the HTTP-shaped response first, then the FTP-shaped one; the
    last accessor that succeeds wins. Returns 0 when neither matches.
    """
    statcode = 0
    accessors = (
        # duck typing: wpull.protocol.http.request.Response
        lambda session: session.response.status_code,
        # duck typing: wpull.protocol.ftp.request.Response
        lambda session: session.response.reply.code,
    )
    for accessor in accessors:
        try:
            statcode = accessor(item_session)
        except (AttributeError, KeyError):
            pass
    return statcode
def _extract_item_size(item_session: ItemSession) -> int:
    """Size of the response body in bytes, or 0 when unavailable."""
    try:
        return item_session.response.body.size()
    except Exception:
        return 0
def is_error(statcode, err):
    '''
    Determines whether a given status code/error code combination should be
    flagged as an error: any 5xx status, or any non-OK wpull error code.
    '''
    return statcode >= 500 or err != 'OK'
def is_warning(statcode):
    '''
    Determines whether a given status code should be flagged as a warning:
    any 4xx status.
    '''
    return 400 <= statcode < 500
class ArchiveBotPlugin(WpullPlugin):
    """Integration layer between ArchiveBot and wpull 2.x.

    Bridges wpull's hook/event plugin API to ArchiveBot's redis-backed
    control channel: applies ignore patterns, reports download results to
    the dashboard, reacts to live settings changes, and honours aborts.
    """

    # Class-level defaults; real values are assigned in activate().
    last_age = 0
    ident = None
    redis_url = None
    log_key = None
    log_channel = None
    pipeline_channel = None
    control = None
    settings = None
    settings_listener = None
    logger = None

    def log_ignore(self, url, pattern, source):
        """Report an ignored URL to the control channel."""
        packet = dict(
            ts=time.time(),
            url=url,
            pattern=pattern,
            type='ignore',
            source=source
        )
        self.control.log(packet, self.ident, self.log_key)

    def maybe_log_ignore(self, url, pattern, source):
        """Log an ignore unless ignore reports are suppressed by settings."""
        if not self.settings.suppress_ignore_reports():
            self.log_ignore(url, pattern, source)
        self.logger.info('Ignore %s using pattern %s', url, pattern)

    def log_result(self, url, statcode, error):
        """Report a download result, with error/warning flags, to the channel."""
        packet = dict(
            ts=time.time(),
            url=url,
            response_code=statcode,
            wget_code=error,
            is_error=is_error(statcode, error),
            is_warning=is_warning(statcode),
            type='download'
        )
        self.control.log(packet, self.ident, self.log_key)

    def print_log(self, *args):
        """Print to stdout (flushed) and mirror to the plugin logger."""
        print(*args)
        sys.stdout.flush()
        self.logger.info(' '.join(str(arg) for arg in args))

    def handle_result(self, item_session: ItemSession, error_info:
                      BaseException=None):
        """Common result handler for handle_response/handle_error.

        Reports the result, applies ignores, refreshes live settings and
        services abort requests. Returns a wpull Actions value.
        """
        error = 'OK'
        statcode = _extract_response_code(item_session)
        self.control.update_bytes_downloaded(_extract_item_size(item_session))
        # Check raw and normalized URL against ignore list
        pattern = self.settings.ignore_url(item_session.url_record)
        if pattern:
            self.maybe_log_ignore(item_session.url_record.url, pattern, 'handle_result')
            return Actions.FINISH
        if error_info:
            error = str(error_info)
        self.log_result(item_session.url_record.url, statcode, error)
        # Apply any settings that changed since we last looked.
        settings_age = self.settings.age()
        if self.last_age < settings_age:
            self.last_age = settings_age
            self.print_log("Settings updated: ", self.settings.inspect())
            self.app_session.factory['PipelineSeries'].concurrency = self.settings.concurrency()
        # See that the settings listener is online
        self.settings_listener.check()
        if self.settings.abort_requested():
            self.print_log("Wpull terminating on bot command")
            # Retry until the controller acknowledges the abort.
            while True:
                try:
                    self.control.mark_aborted(self.ident)
                    # Since wpull does not call .deactivate() as at 2.0.1:
                    self.settings_listener.stop()
                    break
                except ConnectionError as err:
                    self.print_log("Failed to mark job aborted in controller:"
                                   " {}".format(err))
                    time.sleep(5)
            return Actions.STOP
        return Actions.NORMAL

    def activate(self):
        """Wire the plugin to redis, settings and the dupespotter on startup."""
        self.ident = os.environ['ITEM_IDENT']
        self.redis_url = os.environ['REDIS_URL']
        self.log_key = os.environ['LOG_KEY']
        self.log_channel = shared_config.log_channel()
        self.pipeline_channel = shared_config.pipeline_channel()
        self.control = Control(self.redis_url, self.log_channel, self.pipeline_channel)
        self.settings = mod_settings.Settings()
        self.settings_listener = mod_settings.Listener(self.redis_url, self.settings,
                                                       self.control, self.ident)
        self.settings_listener.start()
        self.last_age = 0
        self.logger = logging.getLogger('archivebot.pipeline.wpull_plugin')
        self.logger.info('wpull plugin initialization complete for job ID '
                         '{}'.format(self.ident))
        archivebot.wpull.plugin.activate(self.app_session)
        self.logger.info('wpull dupespotter subsystem loaded for job ID '
                         '{}'.format(self.ident))
        super().activate()
        self.logger.info('wpull plugin activated')

    def deactivate(self):
        """Tear down the settings listener on shutdown."""
        super().deactivate()
        self.logger.info('stopping settings listener')
        self.settings_listener.stop()
        self.logger.info('wpull plugin deactivated')

    @hook(PluginFunctions.accept_url)
    def accept_url(self,
                   item_session: ItemSession,
                   verdict: bool,
                   reasons: dict):
        """Veto URLs with unsupported schemes or matching an ignore pattern."""
        url = item_session.url_record.url_info
        if (url.scheme not in ['https', 'http', 'ws', 'wss', 'ftp', 'gopher']
                or url.path is None
                or url.host in [None, '']):
            return False
        pattern = self.settings.ignore_url(item_session.url_record)
        if pattern:
            self.maybe_log_ignore(url.raw, pattern, 'accept_url')
            return False
        return verdict

    @event(PluginFunctions.queued_url)
    def queued_url(self, url_info: URLInfo):
        # Report one URL added to the queue
        self.control.update_items_queued(1)

    @event(PluginFunctions.dequeued_url)
    def dequeued_url(self, url_info: URLInfo, record_info: URLRecord):
        # Report one URL removed from the queue
        self.control.update_items_downloaded(1)

    @hook(PluginFunctions.handle_pre_response)
    def handle_pre_response(self, item_session: ItemSession):
        """Abort ICY (icecast/shoutcast) streams before the body downloads."""
        url = item_session.url_record.url_info
        try:
            # duck typing: assume it was HTTP-like
            # like wpull.protocol.http.request.Response
            response = item_session.response
            ICY_FIELD_PATTERN = re.compile('Icy-|Ice-|X-Audiocast-')
            ICY_VALUE_PATTERN = re.compile('icecast', re.IGNORECASE)
            # FIX: was `response.version is 'ICY'` - identity comparison
            # with a str literal only works by interning accident and is a
            # SyntaxWarning on CPython >= 3.8; use equality.
            if response.version == 'ICY':
                # FIX: pass url.raw like every other maybe_log_ignore call
                # site, instead of the URLInfo object itself.
                self.maybe_log_ignore(url.raw, '[icy version]', 'handle_pre_response')
                return Actions.FINISH
            for field, value in response.fields.get_all():
                if ICY_FIELD_PATTERN.match(field):
                    self.maybe_log_ignore(url.raw, '[icy version]',
                                          'handle_pre_response')
                    return Actions.FINISH
                if field == 'Server' and ICY_VALUE_PATTERN.match(value):
                    self.maybe_log_ignore(url.raw, '[icy server]',
                                          'handle_pre_response')
                    return Actions.FINISH
        except (AttributeError, KeyError):
            pass
        return Actions.NORMAL

    @hook(PluginFunctions.handle_response)
    def handle_response(self, item_session: ItemSession):
        return self.handle_result(item_session)

    @hook(PluginFunctions.handle_error)
    def handle_error(self, item_session: ItemSession, error: BaseException):
        return self.handle_result(item_session, error)

    @event(PluginFunctions.finishing_statistics)
    def finishing_statistics(self,
                             app_session: AppSession,
                             statistics: Statistics):
        self.print_log(" ", statistics.size, "bytes.")

    @hook(PluginFunctions.exit_status)
    def exit_status(self, app_session: AppSession, exit_code: int):
        """Advise the control task and settings listener before exit."""
        self.logger.info('Advising control task {} and settings listener to stop '
                         'pending termination for ident '
                         '{}'.format(self.control, self.ident))
        self.control.advise_exiting()
        self.settings_listener.stop()
        return exit_code

    @hook(PluginFunctions.wait_time)
    def wait_time(self, seconds: float, item_session: ItemSession, error):
        """Random inter-request delay, in seconds, from the settings range (ms)."""
        sl, sh = self.settings.delay_time_range()
        return random.uniform(sl, sh) / 1000
# vim: ts=4:sw=4:et:tw=78
| {
"repo_name": "falconkirtaran/ArchiveBot",
"path": "pipeline/archive_bot_plugin.py",
"copies": "3",
"size": "10056",
"license": "mit",
"hash": -6516339720512217000,
"line_mean": 32.0789473684,
"line_max": 96,
"alpha_frac": 0.6134645982,
"autogenerated": false,
"ratio": 4.09113100081367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0019894659610626662,
"num_lines": 304
} |
# archivebox init
# archivebox add
import os
import subprocess
from pathlib import Path
import json, shutil
import sqlite3
from archivebox.config import OUTPUT_PERMISSIONS
from .fixtures import *
def test_init(tmp_path, process):
assert "Initializing a new ArchiveBox" in process.stdout.decode("utf-8")
def test_update(tmp_path, process):
    """Re-running init on an existing collection updates, not re-creates."""
    os.chdir(tmp_path)
    result = subprocess.run(['archivebox', 'init'], capture_output=True)
    assert "updating existing ArchiveBox" in result.stdout.decode("utf-8")
def test_add_link(tmp_path, process, disable_extractors_dict):
    """`archivebox add <url>` produces index.json/index.html with the page title."""
    disable_extractors_dict.update({"USE_WGET": "true"})
    os.chdir(tmp_path)
    # FIX: the CompletedProcess was bound to an unused `add_process` local;
    # only the command's on-disk side effects are inspected.
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'],
                   capture_output=True, env=disable_extractors_dict)
    archived_item_path = list(tmp_path.glob('archive/**/*'))[0]
    assert "index.json" in [x.name for x in archived_item_path.iterdir()]
    with open(archived_item_path / "index.json", "r", encoding="utf-8") as f:
        output_json = json.load(f)
    assert "Example Domain" == output_json['history']['title'][0]['output']
    with open(archived_item_path / "index.html", "r", encoding="utf-8") as f:
        output_html = f.read()
    assert "Example Domain" in output_html
def test_add_link_support_stdin(tmp_path, process, disable_extractors_dict):
    """URLs piped on stdin are archived just like CLI arguments."""
    disable_extractors_dict.update({"USE_WGET": "true"})
    os.chdir(tmp_path)
    proc = subprocess.Popen(
        ["archivebox", "add"],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        env=disable_extractors_dict,
    )
    proc.communicate(input="http://127.0.0.1:8080/static/example.com.html".encode())
    archived_item_path = list(tmp_path.glob('archive/**/*'))[0]
    assert "index.json" in [entry.name for entry in archived_item_path.iterdir()]
    with open(archived_item_path / "index.json", "r", encoding="utf-8") as f:
        output_json = json.load(f)
    assert "Example Domain" == output_json['history']['title'][0]['output']
def test_correct_permissions_output_folder(tmp_path, process):
    """Top-level index files carry the configured OUTPUT_PERMISSIONS."""
    for name in ('index.sqlite3', 'archive'):
        mode = oct((tmp_path / name).stat().st_mode)[-3:]
        assert mode == OUTPUT_PERMISSIONS
def test_correct_permissions_add_command_results(tmp_path, process, disable_extractors_dict):
    """Every file produced by `archivebox add` carries OUTPUT_PERMISSIONS."""
    os.chdir(tmp_path)
    # FIX: the CompletedProcess was bound to an unused `add_process` local;
    # only the on-disk permissions are inspected.
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'],
                   capture_output=True, env=disable_extractors_dict)
    archived_item_path = list(tmp_path.glob('archive/**/*'))[0]
    for path in archived_item_path.iterdir():
        assert oct(path.stat().st_mode)[-3:] == OUTPUT_PERMISSIONS
def test_collision_urls_different_timestamps(tmp_path, process, disable_extractors_dict):
    """Re-init skips snapshot dirs whose URL duplicates another snapshot.

    Archives two pages, then rewrites the first snapshot's index.json so
    its URL collides with the second's; `archivebox init` should then
    skip both the duplicate-url dir and the now-inconsistent one.
    """
    os.chdir(tmp_path)
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'], capture_output=True,
                    env=disable_extractors_dict)
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/iana.org.html'], capture_output=True,
                    env=disable_extractors_dict)
    # Snapshot folder names are float timestamps; pick the earliest one.
    archive_folders = [x.name for x in (tmp_path / "archive").iterdir()]
    first_archive = tmp_path / "archive" / str(min([float(folder) for folder in archive_folders]))
    json_index = str(first_archive / "index.json")
    with open(json_index, "r", encoding="utf-8") as f:
        link_details = json.loads(f.read())
    # Forge a URL collision with the second snapshot.
    link_details["url"] = "http://127.0.0.1:8080/static/iana.org.html"
    with open(json_index, "w", encoding="utf-8") as f:
        json.dump(link_details, f)
    init_process = subprocess.run(['archivebox', 'init'], capture_output=True, env=disable_extractors_dict)
    # 1 from duplicated url, 1 from corrupted index
    assert "Skipped adding 2 invalid link data directories" in init_process.stdout.decode("utf-8")
    assert init_process.returncode == 0
def test_collision_timestamps_different_urls(tmp_path, process, disable_extractors_dict):
    """Re-init skips snapshot dirs whose timestamp collides with another.

    Archives two pages, then rewrites the first snapshot's index.json so
    its timestamp duplicates the second snapshot's folder name.
    """
    os.chdir(tmp_path)
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'], capture_output=True,
                    env=disable_extractors_dict)
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/iana.org.html'], capture_output=True,
                    env=disable_extractors_dict)
    # Snapshot folder names are float timestamps; pick the earliest one
    # and keep the other folder's name for forging the collision.
    archive_folders = [x.name for x in (tmp_path / "archive").iterdir()]
    first_archive = tmp_path / "archive" / str(min([float(folder) for folder in archive_folders]))
    archive_folders.remove(first_archive.name)
    json_index = str(first_archive / "index.json")
    with open(json_index, "r", encoding="utf-8") as f:
        link_details = json.loads(f.read())
    # Forge a timestamp collision with the remaining snapshot folder.
    link_details["timestamp"] = archive_folders[0]
    with open(json_index, "w", encoding="utf-8") as f:
        json.dump(link_details, f)
    init_process = subprocess.run(['archivebox', 'init'], capture_output=True, env=disable_extractors_dict)
    assert "Skipped adding 1 invalid link data directories" in init_process.stdout.decode("utf-8")
    assert init_process.returncode == 0
def test_orphaned_folders(tmp_path, process, disable_extractors_dict):
    """Re-init re-adopts snapshots present in the JSON index but missing
    from the sqlite database.

    Archives a page, exports the JSON index, deletes all snapshot rows
    from sqlite, and expects init to recover the orphaned link from JSON.
    """
    os.chdir(tmp_path)
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'], capture_output=True,
                    env=disable_extractors_dict)
    # Snapshot the JSON view of the collection before corrupting sqlite.
    list_process = subprocess.run(["archivebox", "list", "--json", "--with-headers"], capture_output=True)
    with open(tmp_path / "index.json", "wb") as f:
        f.write(list_process.stdout)
    # Wipe the snapshot table so the archived dir becomes an orphan.
    conn = sqlite3.connect("index.sqlite3")
    c = conn.cursor()
    c.execute("DELETE from core_snapshot")
    conn.commit()
    conn.close()
    init_process = subprocess.run(['archivebox', 'init'], capture_output=True, env=disable_extractors_dict)
    assert "Added 1 orphaned links from existing JSON index" in init_process.stdout.decode("utf-8")
    assert init_process.returncode == 0
def test_unrecognized_folders(tmp_path, process, disable_extractors_dict):
    """A junk directory inside archive/ is skipped as invalid on re-init."""
    os.chdir(tmp_path)
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'],
                   capture_output=True, env=disable_extractors_dict)
    (tmp_path / "archive" / "some_random_folder").mkdir()
    result = subprocess.run(['archivebox', 'init'], capture_output=True, env=disable_extractors_dict)
    assert "Skipped adding 1 invalid link data directories" in result.stdout.decode("utf-8")
    assert result.returncode == 0
def test_tags_migration(tmp_path, disable_extractors_dict):
    """Legacy comma-separated tags on core_snapshot must be migrated into
    core_tag rows linked through core_snapshot_tags by `archivebox init`."""
    fixture_dir = Path(__file__).parent / 'tags_migration'
    if os.path.exists(tmp_path):
        shutil.rmtree(tmp_path)
    shutil.copytree(str(fixture_dir), tmp_path)
    os.chdir(tmp_path)

    def fetch(query):
        # Run one query against the index DB and return all rows as sqlite3.Row.
        db = sqlite3.connect("index.sqlite3")
        db.row_factory = sqlite3.Row
        rows = db.execute(query).fetchall()
        db.commit()
        db.close()
        return rows

    old_tags = {row['id']: row['tags'] for row in fetch("SELECT id, tags from core_snapshot")}
    subprocess.run(['archivebox', 'init'], capture_output=True, env=disable_extractors_dict)
    migrated = fetch("""
    SELECT core_snapshot.id, core_tag.name from core_snapshot
    JOIN core_snapshot_tags on core_snapshot_tags.snapshot_id=core_snapshot.id
    JOIN core_tag on core_tag.id=core_snapshot_tags.tag_id
    """)
    for row in migrated:
        # Check each tag migrated is in the previous field
        assert row["name"] in old_tags[row["id"]]
| {
"repo_name": "pirate/bookmark-archiver",
"path": "tests/test_init.py",
"copies": "1",
"size": "7971",
"license": "mit",
"hash": 7180298746655652000,
"line_mean": 44.2897727273,
"line_max": 132,
"alpha_frac": 0.663153933,
"autogenerated": false,
"ratio": 3.3948040885860307,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9510202249503543,
"avg_score": 0.009551154416497582,
"num_lines": 176
} |
""" Archived flu data
http://webarchive.nationalarchives.gov.uk/20130107105354/http://www.dh.gov.uk/en/Publicationsandstatistics/Statistics/Performancedataandstatistics/DailySituationReports/index.htm
"""
import collections
import calendar
import datetime
import re
import urllib
from lxml.html import fromstring, tostring
import requests
import slugify
from publish.lib.helpers import to_markdown, anchor_to_resource, get_dom, hd
# UK National Archives snapshot of the DH daily situation reports index page.
ROOT = "http://webarchive.nationalarchives.gov.uk/20130107105354/http://www.dh.gov.uk/en/Publicationsandstatistics/Statistics/Performancedataandstatistics/DailySituationReports/index.htm"
# Intro text of the page; populated by scrape() and read by scrape_block().
DESCRIPTION = None
def scrape_block(block, title):
    """Build a single dataset dict from one .itemContainer DOM block."""
    global DESCRIPTION
    resources = [anchor_to_resource(a) for a in block.cssselect('.itemLinks li a')]
    # The archived page prefixes every link label with "Download ".
    for resource in resources:
        resource['description'] = resource['description'].replace('Download ', '')
    dataset = {
        "title": title,
        "name": slugify.slugify(title).lower(),
        "notes": DESCRIPTION,
        "tags": ["sitrep", "winter"],
        "origin": ROOT,
        "resources": resources,
        "groups": ['winter'],
    }
    return dataset
def scrape(workspace):
print "Scraping Archived Flu Data with workspace {}".format(workspace)
global DESCRIPTION
datasets = []
page = get_dom(ROOT)
DESCRIPTION = to_markdown(unicode(page.cssselect('.introText')[0].text_content().strip()))
containers = page.cssselect('.itemContainer')[1:]
datasets.append(scrape_block(containers[0], "Daily Hospital Situation Report 2011-12"))
datasets.append(scrape_block(containers[1], "Daily Hospital Situation Report 2010-11"))
datasets.append(scrape_block(containers[2], "Daily Flu Situation Report 2010-11"))
datasets.append(scrape_block(containers[3], "Daily SitRep Guidance 2011-12"))
datasets = filter(lambda x: x is not None, datasets)
print "Found {} datasets".format(len(datasets))
return datasets | {
"repo_name": "nhsengland/publish-o-matic",
"path": "datasets/nhse_stats/topics/archived_flu.py",
"copies": "1",
"size": "1941",
"license": "mit",
"hash": 6561314126686701000,
"line_mean": 36.3461538462,
"line_max": 187,
"alpha_frac": 0.7181865018,
"autogenerated": false,
"ratio": 3.641651031894934,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9840631291299429,
"avg_score": 0.003841248479101038,
"num_lines": 52
} |
# Archived method for visualizing functional load
#import igraph as ig
class FLGraph:
    """Visualize functional-load results as a circular igraph plot.

    NOTE(review): the ``import igraph as ig`` at the top of this module is
    commented out, so constructing this class raises NameError on ``ig``
    unless the caller imports igraph itself -- this is archived/dead code.
    """

    # Plots a graph based on the results of functional load calculation
    # by taking a list of the phonemes that were calculated, which will be the graph's vertices,
    # and a list of functional load values, which will be the graph's edges, from the results of
    # the functional load dialog.
    def __init__(self, results_dict):
        # results_dict: iterable of result rows with keys
        # 'First segment', 'Second segment' and 'Result'.
        self.graph = ig.Graph()
        self.segments = []    # unique segment labels -> graph vertices
        self.fl_weights = []  # [seg1, seg2, weight] triples -> graph edges
        for result in results_dict:
            segment_1 = result['First segment']
            segment_2 = result['Second segment']
            fl_weight = result['Result']
            self.fl_weights.append( [segment_1, segment_2, fl_weight] )
            if not (segment_1 in self.segments):
                self.segments.append(segment_1)
            if not (segment_2 in self.segments):
                self.segments.append(segment_2)
        self.construct_graph()
        # Plots a circular graph with edges connecting all the vertices, where the width of each edge is
        # 50 * functional load
        # NOTE(review): the expression below computes 1 - 50*weight, which
        # disagrees with the comment above -- confirm the intended formula.
        ig.plot(self.graph, layout=self.graph.layout_circle(), vertex_label=self.graph.vs["name"],
                edge_width=[1-(50 * weight) for weight in self.graph.es["weight"]])

    def construct_graph(self):
        # Creates a graph from self.segments and self.fl_weights
        self.graph.add_vertices(len(self.segments))
        self.graph.vs["name"] = self.segments
        for weight_list in self.fl_weights:
            # Resolve each endpoint label back to its vertex before adding the edge.
            segment_1 = self.graph.vs.find(name=weight_list[0])
            segment_2 = self.graph.vs.find(name=weight_list[1])
            self.graph.add_edge(segment_1.index, segment_2.index, weight = weight_list[2])
| {
"repo_name": "PhonologicalCorpusTools/CorpusTools",
"path": "corpustools/gui/graph.py",
"copies": "1",
"size": "1773",
"license": "bsd-3-clause",
"hash": -5017766390830916000,
"line_mean": 44.4615384615,
"line_max": 104,
"alpha_frac": 0.6283135928,
"autogenerated": false,
"ratio": 3.8796498905908097,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5007963483390809,
"avg_score": null,
"num_lines": null
} |
""" Archive manager.
Create or update an archive. Or extract object files from the archive.
"""
import argparse
from .base import base_parser, LogSetup
from .. import api
from ..binutils.objectfile import get_object
from ..binutils.archive import get_archive
# Command-line interface: `archive <command> ...` with two sub-commands.
parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description=__doc__,
    parents=[base_parser],
)
# required=True makes argparse reject an invocation with no sub-command.
subparsers = parser.add_subparsers(dest="command", required=True)
# `create`: bundle one or more object files into a new archive.
create_parser = subparsers.add_parser("create", help="create new archive")
create_parser.add_argument(
    "archive", type=argparse.FileType("w"), help="Archive filename."
)
create_parser.add_argument(
    "obj", type=argparse.FileType("r"), nargs="*", help="the object to link"
)
# `display`: list the objects and symbols inside an existing archive.
display_parser = subparsers.add_parser(
    "display", help="display contents of an archive."
)
display_parser.add_argument(
    "archive", type=argparse.FileType("r"), help="Archive filename."
)
def archive(args=None):
    """Entry point: parse CLI args and run the requested archive sub-command."""
    args = parser.parse_args(args)
    with LogSetup(args):
        command = args.command
        if command == "create":
            # Load every object file, bundle them, and write the archive out.
            members = [get_object(o) for o in args.obj]
            lib = api.archive(members)
            lib.save(args.archive)
        elif command == "display":
            # Dump each member object followed by its symbols.
            for member in get_archive(args.archive):
                print(member)
                for symbol in member.symbols:
                    print(" ", symbol)
        else:  # pragma: no cover
            raise NotImplementedError(command)


if __name__ == "__main__":
    archive()
| {
"repo_name": "windelbouwman/ppci-mirror",
"path": "ppci/cli/archive.py",
"copies": "1",
"size": "1602",
"license": "bsd-2-clause",
"hash": -3235839845664914400,
"line_mean": 29.2264150943,
"line_max": 76,
"alpha_frac": 0.6485642946,
"autogenerated": false,
"ratio": 3.936117936117936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5084682230717936,
"avg_score": null,
"num_lines": null
} |
"""Archive one or more alerts."""
from baseCmd import *
from baseResponse import *
class archiveAlertsCmd (baseCmd):
    """Request object for the archiveAlerts CloudStack API call."""
    typeInfo = {}

    def __init__(self):
        self.isAsync = "false"
        # End of the date range to archive, inclusive.
        # Format "yyyy-MM-dd" or "yyyy-MM-ddThh:mm:ss".
        self.enddate = None
        self.typeInfo['enddate'] = 'date'
        # IDs of the alerts to archive.
        self.ids = []
        self.typeInfo['ids'] = 'list'
        # Start of the date range to archive, inclusive (same formats).
        self.startdate = None
        self.typeInfo['startdate'] = 'date'
        # Restrict archiving to a single alert type.
        self.type = None
        self.typeInfo['type'] = 'string'
        # No parameter is mandatory for this call.
        self.required = []
class archiveAlertsResponse (baseResponse):
    """Response object for the archiveAlerts CloudStack API call."""
    typeInfo = {}

    def __init__(self):
        # Human-readable text describing success or failure.
        self.displaytext = None
        self.typeInfo['displaytext'] = 'string'
        # True when the operation executed successfully.
        self.success = None
        self.typeInfo['success'] = 'boolean'
| {
"repo_name": "MissionCriticalCloud/marvin",
"path": "marvin/cloudstackAPI/archiveAlerts.py",
"copies": "1",
"size": "1196",
"license": "apache-2.0",
"hash": -7168970963380534000,
"line_mean": 32.2222222222,
"line_max": 136,
"alpha_frac": 0.5986622074,
"autogenerated": false,
"ratio": 3.8333333333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4931995540733334,
"avg_score": null,
"num_lines": null
} |
"""Archive one or more events."""
from baseCmd import *
from baseResponse import *
class archiveEventsCmd (baseCmd):
    """Request object for the archiveEvents CloudStack API call."""
    typeInfo = {}

    def __init__(self):
        self.isAsync = "false"
        # End of the date range to archive, inclusive.
        # Format "yyyy-MM-dd" or "yyyy-MM-ddThh:mm:ss".
        self.enddate = None
        self.typeInfo['enddate'] = 'date'
        # IDs of the events to archive.
        self.ids = []
        self.typeInfo['ids'] = 'list'
        # Start of the date range to archive, inclusive (same formats).
        self.startdate = None
        self.typeInfo['startdate'] = 'date'
        # Restrict archiving to a single event type.
        self.type = None
        self.typeInfo['type'] = 'string'
        # No parameter is mandatory for this call.
        self.required = []
class archiveEventsResponse (baseResponse):
    """Response object for the archiveEvents CloudStack API call."""
    typeInfo = {}

    def __init__(self):
        # Human-readable text describing success or failure.
        self.displaytext = None
        self.typeInfo['displaytext'] = 'string'
        # True when the operation executed successfully.
        self.success = None
        self.typeInfo['success'] = 'boolean'
| {
"repo_name": "MissionCriticalCloud/marvin",
"path": "marvin/cloudstackAPI/archiveEvents.py",
"copies": "1",
"size": "1196",
"license": "apache-2.0",
"hash": 5079152821762764000,
"line_mean": 32.2222222222,
"line_max": 136,
"alpha_frac": 0.5986622074,
"autogenerated": false,
"ratio": 3.858064516129032,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49567267235290324,
"avg_score": null,
"num_lines": null
} |
"""Archive processor test suite."""
import collections.abc
import itertools
import pathlib
import pytest
import holocron
from holocron._processors import archive
@pytest.fixture(scope="function")
def testapp():
    """Provide a fresh Holocron application with the site URL configured."""
    metadata = {"url": "https://yoda.ua"}
    return holocron.Application(metadata)
def test_item(testapp):
    """Archive processor has to work!"""
    stream = archive.process(
        testapp, [holocron.Item({"title": "The Force", "content": "Obi-Wan"})]
    )
    assert isinstance(stream, collections.abc.Iterable)
    # The processor passes the item through and appends one archive index page.
    expected_index = holocron.WebSiteItem(
        {
            "source": pathlib.Path("archive://index.html"),
            "destination": pathlib.Path("index.html"),
            "template": "archive.j2",
            "items": [
                holocron.Item({"title": "The Force", "content": "Obi-Wan"})
            ],
            "baseurl": testapp.metadata["url"],
        }
    )
    assert list(stream) == [
        holocron.Item({"title": "The Force", "content": "Obi-Wan"}),
        expected_index,
    ]
@pytest.mark.parametrize(
    ["amount"],
    [
        pytest.param(0),
        pytest.param(1),
        pytest.param(2),
        pytest.param(5),
        pytest.param(10),
    ],
)
def test_item_many(testapp, amount):
    """Archive processor has to work with stream."""

    def make_items():
        # Fresh, equal item instances for both input and expectation.
        return [
            holocron.Item({"title": "The Force (part #%d)" % i})
            for i in range(amount)
        ]

    stream = archive.process(testapp, make_items())
    assert isinstance(stream, collections.abc.Iterable)
    expected_index = holocron.WebSiteItem(
        {
            "source": pathlib.Path("archive://index.html"),
            "destination": pathlib.Path("index.html"),
            "template": "archive.j2",
            "items": make_items(),
            "baseurl": testapp.metadata["url"],
        }
    )
    # All input items pass through unchanged, then the index page is appended.
    assert list(stream) == make_items() + [expected_index]
def test_args_template(testapp):
    """Archive processor has respect 'template' argument."""
    stream = archive.process(
        testapp,
        [holocron.Item({"title": "The Force", "content": "Obi-Wan"})],
        template="foobar.txt",
    )
    assert isinstance(stream, collections.abc.Iterable)
    # The custom template name must land on the generated index page.
    expected_index = holocron.WebSiteItem(
        {
            "source": pathlib.Path("archive://index.html"),
            "destination": pathlib.Path("index.html"),
            "template": "foobar.txt",
            "items": [
                holocron.Item({"title": "The Force", "content": "Obi-Wan"})
            ],
            "baseurl": testapp.metadata["url"],
        }
    )
    assert list(stream) == [
        holocron.Item({"title": "The Force", "content": "Obi-Wan"}),
        expected_index,
    ]
@pytest.mark.parametrize(
    ["save_as"],
    [
        pytest.param(pathlib.Path("posts", "skywalker.luke"), id="deep"),
        pytest.param(pathlib.Path("yoda.jedi"), id="flat"),
    ],
)
def test_args_save_as(testapp, save_as):
    """Archive processor has to respect 'save_as' argument."""
    stream = archive.process(
        testapp,
        [holocron.Item({"title": "The Force", "content": "Obi-Wan"})],
        save_as=str(save_as),
    )
    assert isinstance(stream, collections.abc.Iterable)
    # Both source and destination of the index page follow save_as.
    expected_index = holocron.WebSiteItem(
        {
            "source": pathlib.Path("archive://", save_as),
            "destination": save_as,
            "template": "archive.j2",
            "items": [
                holocron.Item({"title": "The Force", "content": "Obi-Wan"})
            ],
            "baseurl": testapp.metadata["url"],
        }
    )
    assert list(stream) == [
        holocron.Item({"title": "The Force", "content": "Obi-Wan"}),
        expected_index,
    ]
@pytest.mark.parametrize(
    ["args", "error"],
    [
        pytest.param(
            {"save_as": 42},
            "save_as: 42 is not of type 'string'",
            id="save_as-int",
        ),
        pytest.param(
            {"template": 42},
            "template: 42 is not of type 'string'",
            id="template-int",
        ),
        pytest.param(
            {"save_as": [42]},
            "save_as: [42] is not of type 'string'",
            id="save_as-list",
        ),
        pytest.param(
            {"template": [42]},
            "template: [42] is not of type 'string'",
            id="template-list",
        ),
        pytest.param(
            {"save_as": {"x": 1}},
            "save_as: {'x': 1} is not of type 'string'",
            id="save_as-dict",
        ),
        pytest.param(
            {"template": {"y": 2}},
            "template: {'y': 2} is not of type 'string'",
            id="template-dict",
        ),
    ],
)
def test_args_bad_value(testapp, args, error):
    """Archive processor has to validate input arguments."""
    # Validation fires lazily, so the stream must be advanced once.
    with pytest.raises(ValueError) as excinfo:
        stream = archive.process(testapp, [], **args)
        next(stream)
    assert str(excinfo.value) == error
| {
"repo_name": "ikalnytskyi/holocron",
"path": "tests/_processors/test_archive.py",
"copies": "1",
"size": "5430",
"license": "bsd-3-clause",
"hash": 3443619822366047000,
"line_mean": 27.7301587302,
"line_max": 79,
"alpha_frac": 0.4767955801,
"autogenerated": false,
"ratio": 4.031180400890869,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 189
} |
# archive.py
import struct
class Archive(object):
    """In-memory typed node tree serialized to the SinGE binary archive format.

    Usage: construct an ``Archive``, describe the value tree with the mutator
    methods (each asserts the current node is still ``NODE_NULL`` before
    assigning it a type), then call :meth:`to_binary` for the wire bytes.
    Containers are built with :meth:`as_object` / :meth:`as_generic_array`
    plus the ``push_*`` methods to descend into a child node and :meth:`pop`
    to return to the parent.

    Wire format: every node is a one-byte type tag followed by its payload,
    packed little-endian via ``struct``.  Objects and generic arrays are
    additionally prefixed with an element count and a byte span so readers
    can skip them.
    """

    # Node type tags -- presumably mirrored by the C++ deserializer; keep in sync.
    NODE_NULL = 0
    NODE_BOOLEAN = 1
    NODE_INT8 = 2
    NODE_UINT8 = 3
    NODE_INT16 = 4
    NODE_UINT16 = 5
    NODE_INT32 = 6
    NODE_UINT32 = 7
    NODE_INT64 = 8
    NODE_UINT64 = 9
    NODE_FLOAT = 10
    NODE_DOUBLE = 11
    NODE_STRING = 12
    NODE_ARRAY_BOOLEAN = 13
    NODE_ARRAY_INT8 = 14
    NODE_ARRAY_UINT8 = 15
    NODE_ARRAY_INT16 = 16
    NODE_ARRAY_UINT16 = 17
    NODE_ARRAY_INT321 = 18  # historical typo, kept for backward compatibility
    NODE_ARRAY_INT32 = 18   # corrected alias for NODE_ARRAY_INT321
    NODE_ARRAY_UINT32 = 19
    NODE_ARRAY_INT64 = 20
    NODE_ARRAY_UINT64 = 21
    NODE_ARRAY_FLOAT = 22
    NODE_ARRAY_DOUBLE = 23
    NODE_ARRAY_GENERIC = 24
    NODE_OBJECT = 25

    class Node(object):
        """A single tree node: a type tag plus its payload."""

        def __init__(self):
            self.type = Archive.NODE_NULL
            self.data = None

    def __init__(self):
        # Stack of ancestor nodes; `root` is the node currently being written.
        self.stack = list()
        self.root = Archive.Node()

    def pop(self):
        """Return to the parent node after filling in a pushed child."""
        self.root = self.stack.pop()

    def null(self):
        """Leave the current node as an explicit null."""
        # Make sure this node is null
        assert(self.root.type == Archive.NODE_NULL)

    def u8(self, value):
        """Type the current node as an unsigned 8-bit integer."""
        # Make sure this node type is null
        assert(self.root.type == Archive.NODE_NULL)
        self.root.type = Archive.NODE_UINT8
        self.root.data = value

    def u32(self, value):
        """Type the current node as an unsigned 32-bit integer."""
        # Make sure this node type is null
        assert(self.root.type == Archive.NODE_NULL)
        self.root.type = Archive.NODE_UINT32
        self.root.data = value

    def f32(self, value):
        """Type the current node as a 32-bit float."""
        # Make sure this node type is null
        assert(self.root.type == Archive.NODE_NULL)
        self.root.type = Archive.NODE_FLOAT
        self.root.data = value

    def string(self, string):
        """Type the current node as a length-prefixed string."""
        # Make sure this node is null
        assert(self.root.type == Archive.NODE_NULL)
        self.root.type = Archive.NODE_STRING
        self.root.data = string

    def typed_array_i8(self, array):
        """Type the current node as an array of signed 8-bit integers."""
        # Make sure this node is null
        assert(self.root.type == Archive.NODE_NULL)
        self.root.type = Archive.NODE_ARRAY_INT8
        self.root.data = array

    def typed_array_i16(self, array):
        """Type the current node as an array of signed 16-bit integers."""
        # Make sure this node is null
        assert(self.root.type == Archive.NODE_NULL)
        self.root.type = Archive.NODE_ARRAY_INT16
        self.root.data = array

    def typed_array_u16(self, array):
        """Type the current node as an array of unsigned 16-bit integers."""
        # Make sure this node is null
        assert(self.root.type == Archive.NODE_NULL)
        self.root.type = Archive.NODE_ARRAY_UINT16
        self.root.data = array

    def typed_array_u32(self, array):
        """Type the current node as an array of unsigned 32-bit integers."""
        # Make sure this node is null
        assert(self.root.type == Archive.NODE_NULL)
        self.root.type = Archive.NODE_ARRAY_UINT32
        self.root.data = array

    def typed_array_f32(self, array):
        """Type the current node as an array of 32-bit floats."""
        # Make sure this node is null
        assert(self.root.type == Archive.NODE_NULL)
        self.root.type = Archive.NODE_ARRAY_FLOAT
        self.root.data = array

    def as_generic_array(self):
        """Turn the current node into an (initially empty) generic array."""
        # Make sure this node is null
        assert(self.root.type == Archive.NODE_NULL)
        self.root.type = Archive.NODE_ARRAY_GENERIC
        self.root.data = list()

    def push_generic_array_element(self):
        """Append a new null element to the generic array and descend into it.

        Call :meth:`pop` when done filling the element in.
        """
        # Make sure this node is a generic array
        assert(self.root.type == Archive.NODE_ARRAY_GENERIC)
        # Add a new node as an element of the root
        new_root = Archive.Node()
        self.root.data.append(new_root)
        self.stack.append(self.root)
        self.root = new_root

    def as_object(self):
        """Turn the current node into an (initially empty) object."""
        # Make sure this node is null
        assert(self.root.type == Archive.NODE_NULL)
        self.root.type = Archive.NODE_OBJECT
        self.root.data = list()

    def push_object_member(self, name):
        """Add a named null member to the object and descend into it.

        Call :meth:`pop` when done filling the member in.
        """
        # Make sure this node is an object
        assert(self.root.type == Archive.NODE_OBJECT)
        # Add a new node as a member of the root
        new_root = Archive.Node()
        self.root.data.append((name, new_root))
        self.stack.append(self.root)
        self.root = new_root

    def to_binary(self):
        """Serialize the whole tree and return it as a bytearray."""
        buffer = bytearray()
        Archive.binary_node(buffer, self.root)
        return buffer

    @staticmethod
    def binary_node_type(buffer, type_v):
        """Append the one-byte node type tag."""
        buffer += struct.pack('B', type_v)

    @staticmethod
    def binary_number(buffer, node, fmt):
        """Append tag + scalar payload packed with struct format `fmt`."""
        Archive.binary_node_type(buffer, node.type)
        buffer += struct.pack(fmt, node.data)

    @staticmethod
    def binary_string(buffer, node):
        """Append tag + 32-bit length + UTF-8 encoded characters."""
        Archive.binary_node_type(buffer, Archive.NODE_STRING)
        buffer += struct.pack('I', len(node.data))
        buffer += struct.pack('{}s'.format(len(node.data)), node.data.encode())

    @staticmethod
    def binary_typed_array(buffer, node, fmt):
        """Append tag + 32-bit element count + packed elements."""
        Archive.binary_node_type(buffer, node.type)
        buffer += struct.pack('I', len(node.data))
        buffer += struct.pack('{}{}'.format(len(node.data), fmt), *node.data)

    @staticmethod
    def binary_generic_array(buffer, node):
        """Append tag + count + byte span + serialized elements."""
        # Put the node type and array size into this buffer
        Archive.binary_node_type(buffer, Archive.NODE_ARRAY_GENERIC)
        buffer += struct.pack('I', len(node.data))
        # Serialize the array into a new buffer
        array_buff = bytearray()
        for element in node.data:
            # Serialize element
            Archive.binary_node(array_buff, element)
        # Append the span to the original buffer (accounts for indicator byte, size, span, and contents)
        buffer += struct.pack('I', 1 + 4 + 4 + len(array_buff))
        # Append the array buffer
        buffer += array_buff

    @staticmethod
    def binary_object(buffer, node):
        """Append tag + member count + byte span + (name, node) members."""
        # Put the node type and object size into this buffer
        Archive.binary_node_type(buffer, Archive.NODE_OBJECT)
        buffer += struct.pack('I', len(node.data))
        # Serialize the object into a new buffer
        object_buff = bytearray()
        for name, member in node.data:
            # Add name and null-terminating byte
            object_buff += struct.pack('{}s'.format(len(name)), name.encode())
            object_buff += struct.pack('B', 0)
            # Serialize node
            Archive.binary_node(object_buff, member)
        # Append the span to the original buffer (accounts for indicator byte, size, span, and contents)
        buffer += struct.pack('I', 1 + 4 + 4 + len(object_buff))
        # Append the object buffer
        buffer += object_buff

    @staticmethod
    def binary_node(buffer, node):
        """Dispatch on the node type and append its serialized form."""
        if node.type == Archive.NODE_NULL:
            Archive.binary_node_type(buffer, Archive.NODE_NULL)
        elif node.type == Archive.NODE_UINT8:
            Archive.binary_number(buffer, node, 'B')
        elif node.type == Archive.NODE_UINT32:
            Archive.binary_number(buffer, node, 'I')
        elif node.type == Archive.NODE_FLOAT:
            Archive.binary_number(buffer, node, 'f')
        elif node.type == Archive.NODE_STRING:
            Archive.binary_string(buffer, node)
        elif node.type == Archive.NODE_ARRAY_INT8:
            Archive.binary_typed_array(buffer, node, 'b')
        elif node.type == Archive.NODE_ARRAY_INT16:
            Archive.binary_typed_array(buffer, node, 'h')
        elif node.type == Archive.NODE_ARRAY_UINT16:
            Archive.binary_typed_array(buffer, node, 'H')
        elif node.type == Archive.NODE_ARRAY_UINT32:
            Archive.binary_typed_array(buffer, node, 'I')
        elif node.type == Archive.NODE_ARRAY_FLOAT:
            Archive.binary_typed_array(buffer, node, 'f')
        elif node.type == Archive.NODE_ARRAY_GENERIC:
            Archive.binary_generic_array(buffer, node)
        elif node.type == Archive.NODE_OBJECT:
            Archive.binary_object(buffer, node)
| {
"repo_name": "willcassella/SinGE",
"path": "Tools/SinGED/archive.py",
"copies": "1",
"size": "7678",
"license": "mit",
"hash": 3437826083158689000,
"line_mean": 31.2605042017,
"line_max": 104,
"alpha_frac": 0.6056264652,
"autogenerated": false,
"ratio": 3.574487895716946,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46801143609169454,
"avg_score": null,
"num_lines": null
} |
# Status values reported by the archiver pipeline.
ARCHIVER_INITIATED = 'INITIATED'
ARCHIVER_FAILURE = 'FAILURE'
ARCHIVER_SUCCESS = 'SUCCESS'
ARCHIVER_SENT = 'SENT'
ARCHIVER_PENDING = 'ARCHIVING'
ARCHIVER_CHECKING = 'CHECKING'
ARCHIVER_SENDING = 'SENDING'
# Specific failure modes.
ARCHIVER_NETWORK_ERROR = 'NETWORK_ERROR'
ARCHIVER_SIZE_EXCEEDED = 'SIZE_EXCEEDED'
ARCHIVER_FILE_NOT_FOUND = 'FILE_NOT_FOUND'
ARCHIVER_FORCED_FAILURE = 'FORCED_FAILURE'
ARCHIVER_UNCAUGHT_ERROR = 'UNCAUGHT_ERROR'
# Any status in this set means the archive attempt failed terminally.
ARCHIVER_FAILURE_STATUSES = {
    ARCHIVER_FAILURE,
    ARCHIVER_NETWORK_ERROR,
    ARCHIVER_SIZE_EXCEEDED,
    ARCHIVER_FILE_NOT_FOUND,
    ARCHIVER_FORCED_FAILURE,
    ARCHIVER_UNCAUGHT_ERROR,
}
# Sentinel marking accounts exempt from the archive size limit.
NO_ARCHIVE_LIMIT = 'high_upload_limit'
class StatResult(object):
    """
    Helper class to collect metadata about a single file
    """
    # A leaf always represents exactly one file.
    num_files = 1

    def __init__(self, target_id, target_name, disk_usage=0):
        self.target_id = target_id
        self.target_name = target_name
        # Normalized to float so aggregate sums are uniform.
        self.disk_usage = float(disk_usage)

    def __str__(self):
        return str(self._to_dict())

    def _to_dict(self):
        # Flat summary consumed by __str__ and by AggregateStatResult.
        return dict(
            target_id=self.target_id,
            target_name=self.target_name,
            disk_usage=self.disk_usage,
        )
class AggregateStatResult(object):
    """
    Helper class to collect metadata about arbitrary depth file/addon/node file trees
    """

    def __init__(self, target_id, target_name, targets=None):
        self.target_id = target_id
        self.target_name = target_name
        # Drop falsy entries (e.g. None placeholders) from the child list.
        self.targets = [child for child in (targets or []) if child]

    def __str__(self):
        return str(self._to_dict())

    def _to_dict(self):
        return {
            'target_id': self.target_id,
            'target_name': self.target_name,
            'targets': [child._to_dict() for child in self.targets],
            'num_files': self.num_files,
            'disk_usage': self.disk_usage,
        }

    @property
    def num_files(self):
        # Recursive: children may be StatResult leaves or nested aggregates.
        return sum(child.num_files for child in self.targets)

    @property
    def disk_usage(self):
        return sum(child.disk_usage for child in self.targets)
| {
"repo_name": "aaxelb/osf.io",
"path": "website/archiver/__init__.py",
"copies": "6",
"size": "2176",
"license": "apache-2.0",
"hash": -1600871273440980500,
"line_mean": 26.2,
"line_max": 85,
"alpha_frac": 0.6148897059,
"autogenerated": false,
"ratio": 3.426771653543307,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7041661359443306,
"avg_score": null,
"num_lines": null
} |
"""Archive the NWS WaWA map.
https://mesonet.agron.iastate.edu/timemachine/#59.0
"""
import subprocess
import tempfile
import os
from pyiem.util import exponential_backoff, logger, utc
import requests
# Module-level logger and the upstream URL of the national WaWA PNG.
LOG = logger()
SRC = "https://forecast.weather.gov/wwamap/png/US.png"
def main():
    """Fetch the current NWS WaWA map and hand it to pqinsert for archival."""
    utcnow = utc()
    resp = exponential_backoff(requests.get, SRC, timeout=15)
    if resp is None:
        LOG.info("Failed to fetch %s", SRC)
        return
    if resp.status_code != 200 or len(resp.content) == 0:
        LOG.info(
            "Fail %s status_code: %s len(content): %s",
            SRC,
            resp.status_code,
            len(resp.content),
        )
        return
    # Stage the PNG in a temp file so pqinsert can pick it up.
    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        tmp.write(resp.content)
    stamp = utcnow.strftime("%Y%m%d%H%M")
    pqstr = f"plot a {stamp} bogus wwa/wwa_{stamp}.png png"
    LOG.debug(pqstr)
    subprocess.call(f"pqinsert -i -p '{pqstr}' {tmp.name}", shell=True)
    os.unlink(tmp.name)


if __name__ == "__main__":
    main()
| {
"repo_name": "akrherz/iem",
"path": "scripts/cache/nws_wawa_archive.py",
"copies": "1",
"size": "1089",
"license": "mit",
"hash": 1046885741890760300,
"line_mean": 24.9285714286,
"line_max": 73,
"alpha_frac": 0.6051423324,
"autogenerated": false,
"ratio": 3.1203438395415475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9225486171941547,
"avg_score": 0,
"num_lines": 42
} |
# Archivo con funciones de control para SAGE
import datetime
# Las Tuplas de cada puesto deben tener los horarios de inicio y de cierre para que
# pueda funcionar [(7:00,7:00), (19:00,19:00)]
# Suponiendo que cada estacionamiento tiene una estructura "matricial" lista de listas
# donde si m es una matriz, m[i,j] las i corresponden a los puestos y las j corresponden a tuplas
# con el horario inicio y fin de las reservas
# [[(horaIn,horaOut),(horaIn,horaOut)],[],....]
# chequeo de horarios de extended
def HorarioEstacionamiento(HoraInicio, HoraFin, ReservaInicio, ReservaFin):
    """Validate the lot's opening hours against its reservation window.

    Returns (True, '') when consistent, otherwise (False, <Spanish message>)
    for the first rule violated, in the original check order.
    """
    checks = [
        (HoraInicio >= HoraFin,
         'El horario de apertura debe ser menor al horario de cierre'),
        (ReservaInicio >= ReservaFin,
         'El horario de inicio de reserva debe ser menor al horario de cierre'),
        (ReservaInicio < HoraInicio,
         'El horario de inicio de reserva debe mayor o igual al horario de apertura del estacionamiento'),
        (ReservaInicio > HoraFin,
         'El horario de comienzo de reserva debe ser menor al horario de cierre del estacionamiento'),
        (ReservaFin < HoraInicio,
         'El horario de apertura de estacionamiento debe ser menor al horario de finalización de reservas'),
        (ReservaFin > HoraFin,
         'El horario de cierre de estacionamiento debe ser mayor o igual al horario de finalización de reservas'),
    ]
    for failed, message in checks:
        if failed:
            return (False, message)
    return (True, '')
# Find a free spot in the parking structure.
def buscar(hin, hout, estacionamiento):
    """Find a (row, position) where the reservation [hin, hout] fits.

    Returns (row, position, True) on success, or (-1, -1, False) when the
    input is invalid or no row can accommodate the requested window.
    """
    valid_times = isinstance(hin, datetime.time) and isinstance(hout, datetime.time)
    if not isinstance(estacionamiento, list) or not estacionamiento or not valid_times:
        return (-1, -1, False)
    for row, reservas in enumerate(estacionamiento):
        position, fits = busquedaBin(hin, hout, reservas)
        if fits:
            return (row, position, fits)
    return (-1, -1, False)
def binaria(valor, inicio, fin, lista):
    """Binary search over tuples sorted by their first element.

    Returns the index whose tuple starts with `valor`, or the insertion
    point within [inicio, fin) when it is absent.  Iterative version of
    the original recursion; identical results.
    """
    while inicio != fin:
        centro = (inicio + fin) // 2
        arranque = lista[centro][0]
        if arranque > valor:
            fin = centro
        elif arranque < valor:
            inicio = centro + 1
        else:
            return centro
    return inicio
# Search a sorted list for the position where a new tuple can be inserted,
# and report whether it can be inserted at all, i.e. its values do not
# overlap an existing reservation.
# Precondition: the list already contains the smallest and largest
# possible sentinel tuples.
def busquedaBin(hin, hout, listaTuplas):
    """Locate the insertion index for (hin, hout) in a sorted reservation list.

    Returns (index, ok): ok is True when the new window does not overlap
    its neighbours.
    """
    if not isinstance(listaTuplas, list):
        return (0, False)
    if not listaTuplas:
        return (0, True)
    if not (isinstance(hin, datetime.time) and isinstance(hout, datetime.time)):
        return (0, False)
    index = binaria(hin, 0, len(listaTuplas), listaTuplas)
    # Never insert before the low sentinel.
    index = max(index, 1)
    fits = listaTuplas[index][0] >= hout and listaTuplas[index - 1][1] <= hin
    return (index, fits)
# Insert a reservation, keeping the list ordered by start time.
def insertarReserva(hin, hout, puesto, listaReserva):
    """Insert the (hin, hout) tuple at position `puesto`.

    Does not re-validate the slot -- callers are expected to run `buscar`
    first.  Returns the mutated list; returns the list unchanged when it is
    empty or the times are not datetime.time, and None when `listaReserva`
    is not a list.
    """
    if not isinstance(listaReserva, list):
        return None
    if not listaReserva:
        return listaReserva
    if not (isinstance(hin, datetime.time) and isinstance(hout, datetime.time)):
        return listaReserva
    listaReserva.insert(puesto, (hin, hout))
    return listaReserva
def reservar(hin, hout, estacionamiento):
    """Book the window [hin, hout] in the first row that can take it.

    Returns the updated parking structure, or 1 on invalid input or when
    no slot is available (original error-code convention kept).
    """
    valid = (
        isinstance(estacionamiento, list)
        and len(estacionamiento) > 0
        and isinstance(hin, datetime.time)
        and isinstance(hout, datetime.time)
    )
    if not valid:
        return 1
    fila, posicion, encontrado = buscar(hin, hout, estacionamiento)
    if not encontrado:
        return 1
    estacionamiento[fila] = insertarReserva(hin, hout, posicion, estacionamiento[fila])
    return estacionamiento
def validarHorarioReserva(ReservaInicio, ReservaFin, HorarioApertura, HorarioCierre):
    """Validate a reservation window against the lot's opening hours.

    Returns (True, '') when acceptable, otherwise (False, <Spanish message>).
    The reservation must last at least one full hour and lie within
    [HorarioApertura, HorarioCierre].

    Bug fix: the minimum-duration check previously compared only the `.hour`
    fields, so any sub-hour reservation that crossed an hour boundary
    (e.g. 7:30-8:15) was accepted.  It now compares total minutes.
    """
    if ReservaInicio >= ReservaFin:
        return (False, 'El horario de apertura debe ser menor al horario de cierre')
    # Compare full minutes, not just the hour component.
    inicio_min = ReservaInicio.hour * 60 + ReservaInicio.minute
    fin_min = ReservaFin.hour * 60 + ReservaFin.minute
    if fin_min - inicio_min < 60:
        return (False, 'El tiempo de reserva debe ser al menos de 1 hora')
    if ReservaFin > HorarioCierre:
        # NOTE(review): this message mentions the start time though the check
        # is on the end time -- looks swapped with the next branch; messages
        # kept as-is to avoid breaking callers that match on them.
        return (False, 'El horario de inicio de reserva debe estar en un horario válido')
    if ReservaInicio < HorarioApertura:
        return (False, 'El horario de cierre de reserva debe estar en un horario válido')
    return (True, '')
| {
"repo_name": "Sealos/SAGE",
"path": "src/SAGE/estacionamientos/controller.py",
"copies": "1",
"size": "4488",
"license": "mit",
"hash": -174221339331046050,
"line_mean": 37.6465517241,
"line_max": 121,
"alpha_frac": 0.7381217934,
"autogenerated": false,
"ratio": 2.4891726818434203,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8545056032678979,
"avg_score": 0.03644768851288824,
"num_lines": 116
} |
# Script that performs every buy/sell operation and drives a console menu.
# Import the domain classes used below.
from agencia import Agencia
from vendedor import Vendedor
from cliente import Cliente
from automovil import Automovil
from camion import Camion
from motocicleta import Motocicleta
# The dealership where all purchases/sales are registered.
agencia = Agencia("COMPRA Y VENTA DE VEHICULOS CHIDOS 'POKIMON :D'","Enrique Segoviano #8, Colonia Zaka Mo-Koh")
# Pre-rendered dealership banner shown at the top of every menu screen.
datosAgencia = "\n{0}\nVisitanos en: {1}\n".format(agencia.getNombre(), agencia.getDireccion())
# Five hard-coded sellers (W.E.T. — repeated literals on purpose).
Vend1 = Vendedor("Gokú", "Kakaroto", "Chavez", "30", "Monte Paozu #1", "4658-46873", "1")
Vend2 = Vendedor("Nikola", "Tesla", "Edison", "39", "Smiljan #1006", "3491-6581", "2")
Vend3 = Vendedor("José Doroteo", "Arango", "Arámbula", "45", "Division del Norte #1913", "3291-8781", "3")
Vend4 = Vendedor("Benito Amilcare", "Andrea", "Mussolini", "61", "Predappio #29", "5491-3881", "4")
Vend5 = Vendedor("Napoleone", "di", "Buonaparte", "51", "Ajaccio #15", "1281-9671", "5")
# Five hard-coded clients.
Cl1 = Cliente("Juanito", "Lopez", "Lopez", "35", "Bonita #23", "5159-4864")
Cl2 = Cliente("Lalito", "Flores", "Peña", "29", "Privada pública #40", "9786-8451")
Cl3 = Cliente("Rubencito", "Martinez", "Salinas", "40", "Carmen Salinas S/N", "2135-4886")
Cl4 = Cliente("Germancito", "Hernandez", "Gil", "27", "Florencia #651", "6584-6685")
Cl5 = Cliente("Ramoncito", "Garza", "Perez", "25", "Sin Datos", "6548-9447")
# Five hard-coded cars.
Aut1 = Automovil("Ford", "Mustang", "Rojo", "Automatico", "6 cilindros", "494,000", "Motor V6", "5", "2 puertas", "Equipado: No", "14km/L")
Aut2 = Automovil("KIA", "Optima", "Negro", "Automatico", "4 cilindros", "389,400", "2.4L GDI ", "5", "4 puertas", "Equipado: Si", "18.36km/L")
Aut3 = Automovil("Honda", "Accord", "Rojo", "Manual", "4 cilindros", "393,900", "2.4L EXL V6", "5", "4 puertas", "Equipado: Si", "18.5km/L")
Aut4 = Automovil("Chevrolet", "Camaro", "Amarillo", "Manual", "6 cilindros", "473,400", " 2.0L Turbo", "5", "2 puertas", "Equipado: No", "13.1km/L")
Aut5 = Automovil("Dodge", "Charger", "Gris", "Automatico", "8 cilindros", "711,900", "Hemi V8 5.7L", "5", "4 puertas", "Equipado: Si", "13km/L")
# Five hard-coded trucks (several fields left as placeholder text).
Cam1 = Camion("Ford", "Truck T-1", "Negro", "Manual", "cilindros", "", "motor", "5", "ejes", "potencia")
Cam2 = Camion("Ford", "Truck T-2", "Azul", "Manual", "cilindros", "precio", "motor", "5", "ejes", "potencia")
Cam3 = Camion("Ford", "Truck T-3", "Blanco", "Manual", "cilindros", "precio", "motor", "5", "ejes", "potencia")
Cam4 = Camion("Ford", "Truck T-4", "Rojo", "Manual", "cilindros", "precio", "motor", "5", "ejes", "potencia")
Cam5 = Camion("Ford", "Truck T-5", "Gris", "Manual", "cilindros", "precio", "motor", "5", "ejes", "potencia")
# Five hard-coded motorcycles.
Moto1 = Motocicleta("Honda", "Zoomer X", "Azul/Naranja", "VMATIC", "2 cilindros", "43,990", "SOHC 2V", "5", "Motoneta", "110cc")
Moto2 = Motocicleta("Honda", "CRF 230 F", "Rojo", "Tipo Retorno", "4 cilindros", "77,990", "Motor OHC", "5", "Off Road", "223cc")
Moto3 = Motocicleta("Honda", "CB190R", "Rojo", "Tipo Retorno", "4 cilindros", "50,990", "Motor OHC", "5", "Deportiva", "184.4cc")
Moto4 = Motocicleta("Harley-Davidson", "Street Road", "Negro", "N/A", "6 cilindros", "181,500", "High Output Revolution X", "5", "Deportiva", "750cc")
Moto5 = Motocicleta("Harley-Davidson", "Wide Glide", "Negro", "N/A", "6 cilindros", "290,900", "Twin Cam 103", "5", "Deportiva", "1,690 cc")
# Session log of completed sales.
listaVenta = []
# Session log of completed purchases.
listaCompra = []
# Sentinel menu option: 10 matches no branch, so the menu loop keeps running.
opcion = 10
# Catalog describing each vehicle type once, so every submenu below can be
# data-driven instead of repeating the same code per vehicle. "ventas" keeps
# the original, arbitrary (cliente, vendedor) pairing used when the n-th
# vehicle of that type is sold.
catalogo = {
    1: {"plural": "Automoviles",
        "vehiculos": [Aut1, Aut2, Aut3, Aut4, Aut5],
        "informe": lambda v: v.informeAutomovil(),
        "titulo_venta": "Automovil a vender: ",
        "titulo_compra": "¿Qué auto deseas comprar?",
        "ventas": [(Cl3, Vend1), (Cl5, Vend2), (Cl4, Vend3), (Cl2, Vend4), (Cl3, Vend5)]},
    2: {"plural": "Camiones",
        "vehiculos": [Cam1, Cam2, Cam3, Cam4, Cam5],
        "informe": lambda v: v.informeCamion(),
        "titulo_venta": "¿Qué camion deseas comprar?",
        "titulo_compra": "¿Qué camion deseas comprar?",
        "ventas": [(Cl4, Vend5), (Cl2, Vend4), (Cl5, Vend3), (Cl3, Vend2), (Cl1, Vend1)]},
    3: {"plural": "Motocicletas",
        "vehiculos": [Moto1, Moto2, Moto3, Moto4, Moto5],
        "informe": lambda v: v.informeMotocicleta(),
        "titulo_venta": "¿Qué motocicleta deseas comprar?",
        "titulo_compra": "¿Qué motocicleta deseas comprar?",
        "ventas": [(Cl2, Vend3), (Cl4, Vend4), (Cl3, Vend1), (Cl5, Vend5), (Cl2, Vend2)]},
}

def nombre_completo(persona):
    # Full name "Nombre ApellidoPaterno ApellidoMaterno" of a client/seller.
    return (persona.getNombre() + " " + persona.getApellidoPaterno()
            + " " + persona.getApellidoMaterno())

def leer_opcion():
    # Read a numeric menu option. Empty input shows a warning and returns 10
    # (an always-invalid option), exactly like the original guard did.
    cond = input("")
    if cond == "":
        print("Favor de seleccionar una opcion")
        input("Pulse Enter para continuar")
        return 10
    return int(cond)

def imprimir_tipos(titulo, singular):
    # Print the vehicle-type submenu; wording is singular ("Automovil.") for
    # buy/sell and plural ("Automoviles.") for the listing screens.
    print("\n"*25 + titulo)
    if singular:
        print("1: Automovil.")
        print("2: Camion.")
        print("3: Motocicleta.")
    else:
        print("1: Automoviles.")
        print("2: Camiones.")
        print("3: Motocicletas.")

def imprimir_modelos(tipo, titulo):
    # List the five models of one vehicle type, numbered 1..5.
    print("\n"*25 + titulo)
    for indice, vehiculo in enumerate(tipo["vehiculos"], start=1):
        print("{0}: {1} {2}".format(indice, vehiculo.getMarca(), vehiculo.getModelo()))

# Main menu loop: runs until the user chooses 0 (exit).
while opcion != 0:
    print("\n"*25 + datosAgencia)
    print("BIENVENIDO A NUESTRA AGENCIA")
    print("¿Qué operación deseas realizar?")
    print("1: Ver vehiculos.")
    print("2: Existencia de Vehiculos.")
    print("3: Vender vehiculo.")
    print("4: Comprar vehiculo.")
    print("5: Ver Clientes.")
    print("6: Ver Vendedores.")
    print("7: Reportes.")
    print("0: Salir.")
    opcion = leer_opcion()
    if opcion == 1:
        # Full data sheet of every vehicle of the chosen type.
        imprimir_tipos("¿Qué deseas ver?", singular=False)
        op = leer_opcion()
        if op in catalogo:
            tipo = catalogo[op]
            print("\n"*25)
            for vehiculo in tipo["vehiculos"]:
                print(tipo["informe"](vehiculo) + "\n" + "="*80)
            input("Pulse Enter para continuar")
    elif opcion == 2:
        # Stock report for the chosen type.
        imprimir_tipos("¿Qué deseas ver?", singular=False)
        op = leer_opcion()
        if op in catalogo:
            tipo = catalogo[op]
            print("\n"*25 + tipo["plural"] + ":\n")
            for vehiculo in tipo["vehiculos"]:
                print(vehiculo.getExistencia())
            input("\nPulse Enter para continuar")
    elif opcion == 3:
        # Sell one unit: pick type and model, check stock, register the sale
        # and decrement the stock count.
        imprimir_tipos("¿Qué se venderá?", singular=True)
        op = leer_opcion()
        if op in catalogo:
            tipo = catalogo[op]
            imprimir_modelos(tipo, tipo["titulo_venta"])
            op2 = leer_opcion()
            if 1 <= op2 <= 5:
                vehiculo = tipo["vehiculos"][op2 - 1]
                cliente, vendedor = tipo["ventas"][op2 - 1]
                if int(vehiculo.getNumExistencia()) == 0:
                    print("No hay {0} {1} disponibles por el momento.SRRY".format(vehiculo.getMarca(), vehiculo.getModelo()))
                    input("Pulse Enter para continuar")
                else:
                    venta = agencia.ventaVehiculo(nombre_completo(cliente),
                                                  (vehiculo.getMarca() + " " + vehiculo.getModelo()),
                                                  nombre_completo(vendedor))
                    listaVenta.append(venta)
                    # BUGFIX: the original computed ``menos`` for Cam2 but
                    # never called decrementaExist, so selling that truck
                    # never reduced its stock. The generic path fixes it.
                    vehiculo.decrementaExist(int(vehiculo.existencia) - 1)
    elif opcion == 4:
        # Buy n units: register the purchase and increment the stock count.
        imprimir_tipos("¿Qué deseas comprar?", singular=True)
        op = leer_opcion()
        if op in catalogo:
            tipo = catalogo[op]
            imprimir_modelos(tipo, tipo["titulo_compra"])
            # BUGFIX: the original used a bare int(input("")) here and crashed
            # on empty input; leer_opcion applies the same guard used by every
            # other prompt.
            op2 = leer_opcion()
            if 1 <= op2 <= 5:
                vehiculo = tipo["vehiculos"][op2 - 1]
                n = int(input("Cantidad a comprar: "))
                compra = agencia.compraVehiculo(n, (vehiculo.getMarca() + " " + vehiculo.getModelo()))
                listaCompra.append(compra)
                vehiculo.incrementaExist(str(int(vehiculo.existencia) + n))
    elif opcion == 5:
        # Client directory.
        print("\n"*25)
        for cliente in (Cl1, Cl2, Cl3, Cl4, Cl5):
            print(cliente.informeCliente() + "\n" + "="*80)
        input("Pulse Enter para continuar")
    elif opcion == 6:
        # Seller directory.
        print("\n"*25)
        for vendedor in (Vend1, Vend2, Vend3, Vend4, Vend5):
            print(vendedor.informeVendedor() + "\n" + "="*80)
        input("Pulse Enter para continuar")
    elif opcion == 7:
        # Purchase / sale reports accumulated during this session.
        print("\n"*25 + "Reporte de: ")
        print("1: Compras.")
        print("2: Ventas.")
        op = leer_opcion()
        if op == 1:
            print("\n"*25)
            for numero, compra in enumerate(listaCompra, start=1):
                print("Compra {0}:\n{1}".format(numero, compra))
                print("="*80)
            input("Pulse Enter para continuar")
        elif op == 2:
            print("\n"*25)
            for numero, venta in enumerate(listaVenta, start=1):
                print("Venta {0}:\n{1}".format(numero, venta))
                print("="*80)
            input("Pulse Enter para continuar")
| {
"repo_name": "AnhellO/DAS_Sistemas",
"path": "Ago-Dic-2017/Cuauhtémoc Martínez/Practica1/main.py",
"copies": "2",
"size": "30579",
"license": "mit",
"hash": 1218829685314259700,
"line_mean": 50.2516778523,
"line_max": 150,
"alpha_frac": 0.5196097689,
"autogenerated": false,
"ratio": 2.986799647990613,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4506409416890613,
"avg_score": null,
"num_lines": null
} |
# Score log, opened in append mode for the whole session.
# NOTE(review): never explicitly closed — relies on interpreter shutdown to
# flush the final write done at the end of Main.__init__.
archivo = open("lista.txt","a")
from imports import *
class Main:
    """Game entry object: builds the world, runs the menus and the race loop.

    The entire game runs inside ``__init__`` (menu phase, then game phase);
    instantiating the class plays a full session.
    """
    def __init__(self,ancho = 800,alto = 600,tit = "grafiCAR"):
        # Window, background, track, car and input-event helpers.
        self.ventana = Ventana(ancho,alto,tit)
        self.fondo = Fondo(ancho,alto)
        self.pista = Pista()
        self.auto = Auto(color ='rojo',pista = self.pista,posx = 400,posy = 200)
        self.evento = Evento()
        self.menu = Menu(ancho,alto)
        # Random scenery: obstacles on the track, houses on both sides, trees.
        self.obstaculos = Obstaculos([Obstaculo(randint(225,565),randint(100,600),randint(10,40)) for i in range(5)])
        self.casas = Casas([Casa(randint(100,200),randint(100,600),randint(25,40)) for i in range(4)])
        self.casas.casas += [Casa(randint(600,800),randint(100,600),randint(25,40)) for i in range(4)]
        self.arboles = Arboles([Arbol(randint(100,400),randint(100,400),20) for i in range(5)] )
        # NOTE(review): timer is created but never used afterwards.
        timer = pygame.time.Clock()
        self.loop = True
        onMenu = True
        nameDecided = False
        difficultyDecided = False
        hardness = ["facil","medio","dificil"]
        id = 0
        string = ""
        # Menu phase: pick difficulty, then player name, then car colour.
        while onMenu:
            self.evento.update()
            # Difficulty selection: a/d cycles through hardness, Tab confirms.
            while not difficultyDecided:
                self.evento.update()
                if self.evento.nameFinished(): difficultyDecided = True
                self.menu.update()
                self.auto.update()
                drawText("seleccione la dificultad a y d y presione Tab", 150, 400)
                drawText(hardness[id], 350, 100)
                if self.evento.rotate() > 0: id= (id+1)%3
                elif self.evento.rotate() < 0:
                    id = (id-1)%3
                self.ventana.update()
            # Name entry: characters accumulate until Tab is pressed.
            while not nameDecided:
                self.evento.update()
                if self.evento.nameFinished(): nameDecided = True
                else:
                    string += self.evento.charReturner()
                self.menu.update()
                self.auto.update()
                drawText("ingrese su nombre y presione Tab", 150, 400)
                self.ventana.update()
            self.jugador = Jugador(string)
            onMenu = self.evento.check_exit()
            # Colour selection: the brake key ("s") ends this loop and, via
            # onMenu, the whole menu phase.
            while self.loop:
                self.evento.update()
                self.loop =not self.evento.brake()
                onMenu = self.loop
                self.menu.update()
                self.auto.update()
                drawText("Escoja el color de su auto con las teclas a y d", 150, 400)
                self.auto.changeColor(self.evento.rotate())
                drawText("Presione s para quedarse con el color actual",150,100)
                self.ventana.update()
        self.loop = True
        # Rebuild the car with the chosen colour for the race itself.
        self.auto = Auto(color = self.auto.color,pista = self.pista)
        # Game phase: runs until the window is closed or HP reaches zero.
        while self.loop and self.jugador.hp > 0:
            self.evento.update()
            self.loop = self.evento.check_exit()
            self.auto.colicionando(self.obstaculos,self.arboles,self.jugador)
            if self.evento.forward(): self.auto.acelerar()
            self.auto.rotate(self.evento.rotate())
            if self.evento.brake(): self.auto.brake()
            self.fondo.update()
            self.pista.update()
            # Random obstacle spawning; the hardest difficulty (id == 2) gets
            # two extra, more frequent spawn chances per frame.
            if randint(1,1000)%321 == 0 or (id == 2 and randint(1,1000) % 117 ==0) or (id == 2 and randint(1,1000) %17 == 0 ):
                self.obstaculos.adRandom()
            self.jugador.updatePoints(750*self.auto.vel)
            self.obstaculos.update()
            self.casas.update()
            self.arboles.update()
            self.auto.update()
            # HUD overlay (name, HP, score, speed, elapsed seconds).
            drawText("name: " + self.jugador.getName(),600,170)
            drawText("hp : " + str(self.jugador.hp),600,145)
            drawText("score: " + str(self.jugador.get_highscore()),600,122)
            drawText("speed: " + str(self.auto.getvel()), 600,95)
            drawText("time: " + str(pygame.time.get_ticks()/1000.0),600,70)
            ##drawText("best lap: no laps yet",500,40)
            self.ventana.update()
        self.ventana.destroy()
        # Final score to console and to the session-wide score log file.
        print "Puntaje Final: " + str(self.jugador.get_highscore())
        archivo.write(self.jugador.nombre + " : " + str(self.jugador.get_highscore()) + "\n")
    def printAuto(self):
        # Debug helper: dump the car object (Python 2 print statement).
        print self.auto
# Instantiate Main at import time; its constructor runs the whole game.
m = Main()
| {
"repo_name": "bsubercaseaux/dcc",
"path": "Modelación y Computación Gráfica/graficaAutos/main.py",
"copies": "1",
"size": "4188",
"license": "mit",
"hash": -8667539528363265000,
"line_mean": 43.5531914894,
"line_max": 126,
"alpha_frac": 0.5475167144,
"autogenerated": false,
"ratio": 3.3665594855305465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9334189912322913,
"avg_score": 0.015977257521526636,
"num_lines": 94
} |
"""archvyrt domain module"""
# stdlib
import logging
# 3rd-party
import libvirt
# archvyrt
from archvyrt.libvirt import LibvirtDomain
from archvyrt.libvirt import LibvirtDisk
from archvyrt.libvirt import LibvirtNetwork
from archvyrt.libvirt import LibvirtRng
LOG = logging.getLogger(__name__)
class Domain:
    """
    High-level domain object

    Wraps a libvirt connection plus the LibvirtDomain/Disk/Network/Rng
    helpers and defines the domain on construction.
    """
    def __init__(self, domain_info, libvirt_url=None):
        """
        Initialize libvirt domain

        :param domain_info - JSON definition of domain
        :param libvirt_url - URL for libvirt connection
        """
        self._conn = libvirt.open(libvirt_url)
        self._domain_info = domain_info
        self._domain = LibvirtDomain(self.fqdn)
        self._domain.memory = int(self.memory)
        self._domain.vcpu = int(self.vcpu)
        self._disks = []
        self._init_disks()
        self._networks = []
        self._init_networks()
        self._init_rng()
        # Define the domain, then read back the XML libvirt actually stored
        # (it may differ from what was submitted).
        self._conn.defineXML(str(self._domain))
        self._domain.xml = self._conn.lookupByName(self.fqdn).XMLDesc()
        LOG.info('New domain %s', self.fqdn)
        LOG.debug(
            'Define new domain %s: %s',
            self.fqdn,
            str(self._domain).replace('\n', ' ').replace('\r', '')
        )
    def __del__(self):
        """
        Make sure to cleanup connection when object is destroyed
        """
        # BUGFIX: the original nested one ``try/except libvirt.libvirtError``
        # inside another identical handler — the outer handler was redundant.
        # AttributeError is also caught so a partially constructed object
        # (e.g. libvirt.open() raised in __init__) can be destroyed quietly.
        try:
            if self._conn:
                self._conn.close()
        except (AttributeError, libvirt.libvirtError):
            pass
    def _init_disks(self):
        """
        Initialize disks

        will create libvirt disks and attach them to the domain
        """
        # Sorted by alias so disk creation/attachment order is deterministic.
        for alias, details in sorted(self._domain_info['disks'].items()):
            disk_name = '%s-%s' % (self.fqdn, alias)
            self._disks.append(
                LibvirtDisk(
                    self._conn,
                    disk_name,
                    alias,
                    **details
                )
            )
        for disk in self._disks:
            self._domain.add_device(disk.xml)
            LOG.debug('Add disk %s to domain %s', disk.name, self.fqdn)
    def _init_networks(self):
        """
        Initialize networks
        """
        for alias, details in sorted(self._domain_info['networks'].items()):
            self._networks.append(
                LibvirtNetwork(
                    alias,
                    **details
                )
            )
        for network in self._networks:
            self._domain.add_device(network.xml)
            LOG.debug('Add network %s to domain %s', network.name, self.fqdn)
    def _init_rng(self):
        """Initialize rng (only when the JSON definition requests one)"""
        if 'rng' in self._domain_info:
            rng_bytes = self._domain_info['rng'].get('bytes', 2048)
            rng = LibvirtRng(rng_bytes=rng_bytes)
            self._domain.add_device(rng.xml)
            LOG.debug('Add rng to domain %s', self.fqdn)
    def start(self):
        """
        Start domain

        Warning: Will not check if the domain is provisioned yet...
        """
        domain = self._conn.lookupByName(self.fqdn)
        domain.create()
    def stop(self):
        """
        Stop domain (hard power-off via libvirt destroy)
        """
        domain = self._conn.lookupByName(self.fqdn)
        domain.destroy()
    def autostart(self, autostart):
        """
        Set autostart option of domain

        :param autostart - True/False
        """
        domain = self._conn.lookupByName(self.fqdn)
        domain.setAutostart(autostart)
    @property
    def sshkeys(self):
        """
        sshkeys (from JSON representation); None when no access section
        """
        if self._domain_info.get('access', {}):
            return self._domain_info.get('access').get('ssh-keys', {})
        return None
    @property
    def password(self):
        """
        password (encrypted, salted hash from JSON representation)
        """
        if self._domain_info.get('access', {}):
            return self._domain_info.get('access').get('password', None)
        return None
    @property
    def guesttype(self):
        """
        Type of domain (archlinux, plain, ...)
        """
        return self._domain_info.get('guesttype')
    @property
    def disks(self):
        """
        Disks attached to this domain
        """
        return self._disks
    @property
    def networks(self):
        """
        Networks attached to this domain
        """
        return self._networks
    @property
    def fqdn(self):
        """
        FQDN of this domain
        """
        return self._domain_info.get('fqdn')
    @property
    def hostname(self):
        """
        hostname of this domain
        """
        return self._domain_info.get('hostname')
    @property
    def memory(self):
        """
        Memory (in MB) of this domain
        """
        return self._domain_info.get('memory')
    @property
    def vcpu(self):
        """
        Number of virtual cpus for this domain
        """
        return self._domain_info.get('vcpu')
    @property
    def xml(self):
        """
        Libvirt XML for this domain (provisioned state)
        """
        return self._domain.xml
| {
"repo_name": "andrekeller/archvyrt",
"path": "archvyrt/domain.py",
"copies": "1",
"size": "5271",
"license": "mit",
"hash": -721414896864102300,
"line_mean": 25.223880597,
"line_max": 77,
"alpha_frac": 0.5249478277,
"autogenerated": false,
"ratio": 4.345424567188788,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 201
} |
"""archvyrt libvirt device module"""
import xml.etree.ElementTree as ElementTree
import xml.dom.minidom
class LibvirtXml:
    """Base wrapper holding an ElementTree fragment for a libvirt device."""
    def __init__(self):
        # Placeholder root element; concrete devices replace it through the
        # ``xml`` setter below.
        self._xml = ElementTree.Element('root')
    @staticmethod
    def format_xml(et_xml):
        """
        Return a pretty formatted XML

        :param et_xml - ElementTree XML object
        """
        serialized = ElementTree.tostring(et_xml, encoding='unicode')
        pretty = xml.dom.minidom.parseString(serialized)
        return pretty.toprettyxml(indent=" ").strip()
    def __str__(self):
        """
        Pretty-printed XML representation of this device
        """
        return self.format_xml(self.xml)
    @property
    def xml(self):
        """
        ElementTree representation of this device
        """
        return self._xml
    @xml.setter
    def xml(self, value):
        """
        Replace the device XML from a string or an ElementTree element
        """
        if isinstance(value, ElementTree.Element):
            self._xml = value
        elif isinstance(value, str):
            self._xml = ElementTree.fromstring(value)
        else:
            raise TypeError('Expected str or ElementTree got %s' % type(value))
| {
"repo_name": "andrekeller/archvyrt",
"path": "archvyrt/libvirt/xml.py",
"copies": "1",
"size": "1210",
"license": "mit",
"hash": -8244610049041430000,
"line_mean": 24.2083333333,
"line_max": 79,
"alpha_frac": 0.5760330579,
"autogenerated": false,
"ratio": 4.531835205992509,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 48
} |
"""archvyrt libvirt disk module"""
# stdlib
import logging
import re
import xml.etree.ElementTree as ElementTree
# 3rd-party
import libvirt
# archvyrt
from .xml import LibvirtXml
# Module-level logger, namespaced to this module.
LOG = logging.getLogger(__name__)
class LibvirtDisk(LibvirtXml):
    """
    LibVirt Disk device object.
    """

    def __init__(self, conn, name, alias, **kwargs):
        """
        Initialize a libvirt disk.

        This will create a Qcow2 image file and its libvirt XML representation.

        :param conn - Libvirt connection (already established)
        :param name - Name of the virtual disk
        :param alias - Short name of the virtual disk
        :param kwargs - Additional properties of the disk:
                        pool - Storage pool name
                        fstype - Type of filesystem
                        target - Target device in guest (vda, vdb, ...)
                        mountpoint - Where to mount the disk in the guest
                        capacity - Disk capacity in GB
        """
        super().__init__()
        self._alias = alias
        self._name = name
        self._properties = kwargs
        # Create the qcow2 backing volume in the configured storage pool.
        lv_pool = conn.storagePoolLookupByName(self.pool)
        lv_pool.createXML(
            self._volume_xml(),
            libvirt.VIR_STORAGE_VOL_CREATE_PREALLOC_METADATA
        )
        lv_volume = lv_pool.storageVolLookupByName(self.name)
        self._path = lv_volume.path()
        # Build the <disk> element used inside the domain XML.
        self._xml = ElementTree.Element('disk')
        self._xml.attrib['type'] = 'file'
        self._xml.attrib['device'] = 'disk'
        driver_element = ElementTree.Element('driver')
        driver_element.attrib['name'] = 'qemu'
        driver_element.attrib['type'] = 'qcow2'
        self._xml.append(driver_element)
        target_element = ElementTree.Element('target')
        target_element.attrib['dev'] = self.target
        target_element.attrib['bus'] = 'virtio'
        self._xml.append(target_element)
        source_element = ElementTree.Element('source')
        source_element.attrib['file'] = self.path
        self._xml.append(source_element)
        alias_element = ElementTree.Element('alias')
        alias_element.attrib['name'] = 'virtio-%s' % self.alias
        self._xml.append(alias_element)
        LOG.debug("Define virtual disk %s (%s bytes)", self.name, self.capacity)

    def _volume_xml(self):
        """
        Generate Libvirt Volume XML, to create the actual Qcow2 image
        """
        volume_xml = ElementTree.Element('volume')
        name_element = ElementTree.Element('name')
        name_element.text = self.name
        volume_xml.append(name_element)
        # capacity == allocation: the volume is fully allocated up front.
        capacity_element = ElementTree.Element('capacity')
        capacity_element.text = self.capacity
        volume_xml.append(capacity_element)
        allocation_element = ElementTree.Element('allocation')
        allocation_element.text = self.capacity
        volume_xml.append(allocation_element)
        target_element = ElementTree.Element('target')
        format_element = ElementTree.Element('format')
        format_element.attrib['type'] = 'qcow2'
        target_element.append(format_element)
        volume_xml.append(target_element)
        return self.format_xml(volume_xml)

    @property
    def mountpoint(self):
        """
        Where to mount this disk in the guest
        """
        return self._properties.get('mountpoint')

    @property
    def fstype(self):
        """
        Filesystem this disk (ext4, swap...) will hold
        """
        return self._properties.get('fstype')

    @property
    def alias(self):
        """
        Short name for this disk
        """
        return self._alias

    @property
    def number(self):
        """
        Disk number, assumes alias is numbered (f.e. disk0, disk1, etc.)
        """
        # NOTE(review): raises AttributeError when the alias has no trailing
        # digits -- callers appear to guarantee numbered aliases; confirm.
        return re.match(r'^.*?([0-9]+)$', self._alias).groups()[0]

    @property
    def capacity(self):
        """
        Disk capacity in bytes
        """
        # GB value from the properties, converted to bytes (1 GiB = 2**30).
        return str(int(self._properties.get('capacity')) * 1073741824)

    @property
    def name(self):
        """
        Full disk name, including qcow2 suffix
        """
        return '%s.qcow2' % self._name

    @property
    def path(self):
        """
        Path to backing volume file of this disk
        """
        return self._path

    @property
    def pool(self):
        """
        Name of this disks storage pool
        """
        return self._properties.get('pool')

    @property
    def target(self):
        """
        Target (guest) device name for this disk (vda, vdb, vdc...)
        """
        return self._properties.get('target')
| {
"repo_name": "andrekeller/archvyrt",
"path": "archvyrt/libvirt/disk.py",
"copies": "1",
"size": "4635",
"license": "mit",
"hash": 5141158112735128000,
"line_mean": 29.4934210526,
"line_max": 80,
"alpha_frac": 0.5816612729,
"autogenerated": false,
"ratio": 4.28373382624769,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.536539509914769,
"avg_score": null,
"num_lines": null
} |
"""archvyrt libvirt domain module"""
# stdlib
import logging
import xml.etree.ElementTree as ElementTree
# archvyrt
from .xml import LibvirtXml
# Module-level logger, namespaced to this module.
LOG = logging.getLogger(__name__)
class LibvirtDomain(LibvirtXml):
    """
    Libvirt Domain object
    """

    def __init__(self, name):
        """
        Initialize domain, including default hardware and behaviour

        :param name - FQDN of domain
        """
        super().__init__()
        self._xml = ElementTree.Element('domain')
        self._xml.attrib['type'] = 'kvm'
        # Goes through the ``name`` property setter below, which creates
        # the <name> element.
        self.name = name
        self._set_default_behaviour()
        self._set_default_hardware()

    def _set_default_behaviour(self):
        """
        Setup behaviour on start, stop and reboot.
        """
        poweroff_element = ElementTree.Element('on_poweroff')
        poweroff_element.text = 'destroy'
        self._xml.append(poweroff_element)
        reboot_element = ElementTree.Element('on_reboot')
        reboot_element.text = 'restart'
        self._xml.append(reboot_element)
        crash_element = ElementTree.Element('on_crash')
        crash_element.text = 'destroy'
        self._xml.append(crash_element)

    def _set_default_hardware(self):
        """
        Setup default hardware, such as clock, display, memory etc.
        """
        self._set_clock()
        self._set_features()
        self._set_memory_backing()
        self._set_os()
        self._set_resource_partition()
        self._set_devices()

    def _set_clock(self):
        """
        Setup clock device
        """
        clock_element = ElementTree.Element('clock')
        clock_element.attrib['offset'] = 'utc'
        self._xml.append(clock_element)

    def _set_devices(self):
        """
        Setup default devices, such as emulator, console and input devices
        """
        devices_element = ElementTree.Element('devices')
        emulator_element = ElementTree.Element('emulator')
        emulator_element.text = '/usr/bin/qemu-system-x86_64'
        devices_element.append(emulator_element)
        devices_element.append(self.__prepare_serial_devices())
        devices_element.append(self.__prepare_console_devices())
        # PS/2 keyboard and mouse input devices.
        keyboard_element = ElementTree.Element('input')
        keyboard_element.attrib['type'] = 'keyboard'
        keyboard_element.attrib['bus'] = 'ps2'
        devices_element.append(keyboard_element)
        mouse_element = ElementTree.Element('input')
        mouse_element.attrib['type'] = 'mouse'
        mouse_element.attrib['bus'] = 'ps2'
        devices_element.append(mouse_element)
        devices_element.append(self.__prepare_graphics_devices())
        devices_element.append(self.__prepare_video_devices())
        devices_element.append(self.__prepare_memballoon_devices())
        self._xml.append(devices_element)

    @staticmethod
    def __prepare_console_devices():
        """
        Prepare console devices
        """
        # NOTE(review): the pty path /dev/pts/10 is hard-coded here and in
        # __prepare_serial_devices -- presumably libvirt reassigns the pty
        # at runtime; confirm before relying on the value.
        console_element = ElementTree.Element('console')
        console_element.attrib['type'] = 'pty'
        console_element.attrib['tty'] = '/dev/pts/10'
        source_element = ElementTree.Element('source')
        source_element.attrib['path'] = '/dev/pts/10'
        console_element.append(source_element)
        target_element = ElementTree.Element('target')
        target_element.attrib['type'] = 'serial'
        target_element.attrib['port'] = str(0)
        console_element.append(target_element)
        alias_element = ElementTree.Element('alias')
        alias_element.attrib['name'] = 'serial0'
        console_element.append(alias_element)
        return console_element

    @staticmethod
    def __prepare_graphics_devices():
        """
        Prepare graphic devices.

        Sets up graphics and spice server for graphical console in libvirt
        """
        # Spice server restricted to the loopback address.
        graphics_element = ElementTree.Element('graphics')
        graphics_element.attrib['type'] = 'spice'
        graphics_element.attrib['port'] = '5900'
        graphics_element.attrib['autoport'] = 'yes'
        graphics_element.attrib['listen'] = '127.0.0.1'
        listen_element = ElementTree.Element('listen')
        listen_element.attrib['type'] = 'address'
        listen_element.attrib['address'] = '127.0.0.1'
        graphics_element.append(listen_element)
        return graphics_element

    @staticmethod
    def __prepare_memballoon_devices():
        """
        Setup memory balloning device
        """
        memballoon_element = ElementTree.Element('memballoon')
        memballoon_element.attrib['model'] = 'virtio'
        alias_element = ElementTree.Element('alias')
        alias_element.attrib['name'] = 'balloon0'
        memballoon_element.append(alias_element)
        return memballoon_element

    @staticmethod
    def __prepare_serial_devices():
        """
        Setup serial devices
        """
        serial_element = ElementTree.Element('serial')
        serial_element.attrib['type'] = 'pty'
        source_element = ElementTree.Element('source')
        source_element.attrib['path'] = '/dev/pts/10'
        serial_element.append(source_element)
        target_element = ElementTree.Element('target')
        target_element.attrib['port'] = str(0)
        serial_element.append(target_element)
        alias_element = ElementTree.Element('alias')
        alias_element.attrib['name'] = 'serial0'
        serial_element.append(alias_element)
        return serial_element

    @staticmethod
    def __prepare_video_devices():
        """
        Setup virtualized graphic card
        """
        video_element = ElementTree.Element('video')
        model_element = ElementTree.Element('model')
        model_element.attrib['type'] = 'virtio'
        model_element.attrib['heads'] = '1'
        video_element.append(model_element)
        alias_element = ElementTree.Element('alias')
        alias_element.attrib['name'] = 'video0'
        video_element.append(alias_element)
        return video_element

    def _set_features(self):
        """
        Setup machine features such as ACPI and PAE
        """
        features_element = ElementTree.Element('features')
        acpi_element = ElementTree.Element('acpi')
        apic_element = ElementTree.Element('apic')
        pae_element = ElementTree.Element('pae')
        features_element.append(acpi_element)
        features_element.append(apic_element)
        features_element.append(pae_element)
        self._xml.append(features_element)

    def _set_memory_backing(self):
        """
        Setup hugepages memory backend, for improved performance
        """
        memorybacking_element = ElementTree.Element('memoryBacking')
        hugepages_element = ElementTree.Element('hugepages')
        memorybacking_element.append(hugepages_element)
        self._xml.append(memorybacking_element)

    def _set_os(self, arch='x86_64'):
        """
        Set OS/architecture specific configuration

        :param arch - guest architecture (defaults to x86_64)
        """
        os_element = ElementTree.Element('os')
        type_element = ElementTree.Element('type')
        type_element.attrib['arch'] = arch
        type_element.attrib['machine'] = 'pc'
        type_element.text = 'hvm'
        os_element.append(type_element)
        # Boot from the first hard disk.
        boot_element = ElementTree.Element('boot')
        boot_element.attrib['dev'] = 'hd'
        os_element.append(boot_element)
        self._xml.append(os_element)

    def _set_resource_partition(self):
        """
        Setup default resource partitioning
        """
        resource_element = ElementTree.Element('resource')
        partition_element = ElementTree.Element('partition')
        partition_element.text = '/machine'
        resource_element.append(partition_element)
        self._xml.append(resource_element)

    def add_device(self, device_xml):
        """
        Add additional device to this libvirt domain.

        :param device_xml - XML of device to add
        """
        devices_node = self._xml.find('devices')
        devices_node.append(device_xml)

    @property
    def vcpu(self):
        """
        Number of virtual CPUs (as the string stored in the XML)
        """
        return self._xml.find('vcpu').text

    @vcpu.setter
    def vcpu(self, value):
        """
        Number of virtual CPUs
        """
        # Update the existing element or create it on first assignment.
        vcpu_element = self._xml.find('vcpu')
        if isinstance(vcpu_element, ElementTree.Element):
            vcpu_element.text = str(value)
        else:
            vcpu_element = ElementTree.Element('vcpu')
            vcpu_element.attrib['placement'] = 'static'
            vcpu_element.text = str(value)
            self._xml.append(vcpu_element)

    @property
    def memory(self):
        """
        Memory as stored in the domain XML (a string, in KiB --
        the setter converts from MegaBytes)
        """
        return self._xml.find('memory').text

    @memory.setter
    def memory(self, value):
        """
        Memory in MegaBytes (stored as KiB in both <memory> and
        <currentMemory>)
        """
        memory_element = self._xml.find('memory')
        if isinstance(memory_element, ElementTree.Element):
            memory_element.text = str(int(value) * 1024)
        else:
            memory_element = ElementTree.Element('memory')
            memory_element.attrib['unit'] = 'KiB'
            memory_element.text = str(int(value) * 1024)
            self._xml.append(memory_element)
        cmemory_element = self._xml.find('currentMemory')
        if isinstance(cmemory_element, ElementTree.Element):
            cmemory_element.text = str(int(value) * 1024)
        else:
            cmemory_element = ElementTree.Element('currentMemory')
            cmemory_element.attrib['unit'] = 'KiB'
            cmemory_element.text = str(int(value) * 1024)
            self._xml.append(cmemory_element)

    @property
    def name(self):
        """
        Name of libvirt domain
        """
        return self._xml.find('name').text

    @name.setter
    def name(self, value):
        """
        Name of libvirt domain
        """
        name_element = self._xml.find('name')
        if isinstance(name_element, ElementTree.Element):
            name_element.text = value
        else:
            name_element = ElementTree.Element('name')
            name_element.text = value
            self._xml.append(name_element)
| {
"repo_name": "andrekeller/archvyrt",
"path": "archvyrt/libvirt/domain.py",
"copies": "1",
"size": "10140",
"license": "mit",
"hash": -7416143646161850000,
"line_mean": 33.6075085324,
"line_max": 74,
"alpha_frac": 0.6039447732,
"autogenerated": false,
"ratio": 4.184894758563764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5288839531763764,
"avg_score": null,
"num_lines": null
} |
"""archvyrt libvirt network module"""
# stdlib
import ipaddress
import logging
import xml.etree.ElementTree as ElementTree
# archvyrt
from .xml import LibvirtXml
# Module-level logger, namespaced to this module.
LOG = logging.getLogger(__name__)
class LibvirtNetwork(LibvirtXml):
    """
    Libvirt Network device object
    """

    def __init__(self, name, **kwargs):
        """
        Build XML representation

        :param name - Short name of this network device (eth0, eth1, ...)
        :param kwargs - optional properties: ipv4/ipv6 (address dicts),
                        vlan (tag id), bridge (bridge name)
        """
        super().__init__()
        # dict.get() never raises KeyError, so the former try/except
        # KeyError wrappers around these two lookups were dead code.
        self._ipv4 = kwargs.get('ipv4')
        self._ipv6 = kwargs.get('ipv6')
        self._name = name
        self._vlan = kwargs.get('vlan')
        self._bridge = kwargs.get('bridge')
        self._xml = ElementTree.Element('interface')
        self._xml.attrib['type'] = 'bridge'
        source_element = ElementTree.Element('source')
        source_element.attrib['bridge'] = self._bridge
        self._xml.append(source_element)
        if self.vlan:
            vlan_element = ElementTree.Element('vlan')
            tag_element = ElementTree.Element('tag')
            tag_element.attrib['id'] = str(self.vlan)
            vlan_element.append(tag_element)
            self._xml.append(vlan_element)
            # NOTE(review): VLAN tagging in libvirt bridge interfaces
            # requires an openvswitch virtualport -- confirm plain bridges
            # are used for untagged interfaces.
            virtualport_element = ElementTree.Element('virtualport')
            virtualport_element.attrib['type'] = 'openvswitch'
            self._xml.append(virtualport_element)
        model_element = ElementTree.Element('model')
        model_element.attrib['type'] = 'virtio'
        self._xml.append(model_element)

    @property
    def name(self):
        """
        Name of this network device (eth0, eth1, ...)
        """
        return self._name

    @property
    def netctl(self):
        """
        Netctl configuration representation of this network device
        (returns a list, each element representing a line)
        """
        config = list()
        config.append('Description="%s network"' % self.name)
        config.append('Interface=%s' % self.name)
        config.append('Connection=ethernet')
        if self.ipv4_address:
            config.append('IP=static')
            config.append("Address=('%s')" % self.ipv4_address.with_prefixlen)
            if self.ipv4_gateway:
                config.append("Gateway='%s'" % str(self.ipv4_gateway))
        else:
            config.append('IP=no')
        if self.ipv6_address:
            config.append('IP6=static')
            config.append("Address6=('%s')" % self.ipv6_address.with_prefixlen)
            if self.ipv6_gateway:
                config.append("Gateway6='%s'" % str(self.ipv6_gateway))
        else:
            config.append('IP6=no')
        if self.dns:
            dns = []
            for server in self.dns:
                dns.append("'%s'" % str(server))
            config.append('DNS=(%s)' % " ".join(dns))
        return config

    @property
    def interfaces(self):
        """
        Debian-style interfaces representation of this network device
        (returns a list, each element representing a line)
        """
        config = list()
        config.append('auto %s' % self.name)
        if not self.ipv4_address and not self.ipv6_address:
            # No addresses configured: just bring the interface up.
            config.append('iface %s inet manual' % self.name)
            config.append(' up ifconfig %s up' % self.name)
        else:
            # NOTE(review): the "netmask" lines carry the prefix length
            # (e.g. 24), not a dotted mask -- ifupdown accepts both.
            if self.ipv4_address:
                config.append('iface %s inet static' % self.name)
                config.append(' address %s' % self.ipv4_address.ip)
                config.append(' netmask %s' % self.ipv4_address.with_prefixlen.split('/', 1)[1])
                if self.ipv4_gateway:
                    config.append(' gateway %s' % str(self.ipv4_gateway))
            if self.ipv6_address:
                config.append('iface %s inet6 static' % self.name)
                config.append(' address %s' % self.ipv6_address.ip)
                config.append(' netmask %s' % self.ipv6_address.with_prefixlen.split('/', 1)[1])
                if self.ipv6_gateway:
                    config.append(' gateway %s' % str(self.ipv6_gateway))
        return config

    @property
    def dns(self):
        """
        DNS servers configured for this network (returns a list)
        """
        dns = []
        try:
            for server in self._ipv4.get('dns', []):
                dns.append(ipaddress.ip_address(server))
            for server in self._ipv6.get('dns', []):
                dns.append(ipaddress.ip_address(server))
        except AttributeError:
            # Best effort: either family may be None entirely; keep
            # whatever servers were collected before the failure.
            pass
        return dns

    @property
    def ipv4_address(self):
        """
        IPv4 address for this interface, or None if unset/invalid
        """
        try:
            return ipaddress.ip_interface(self._ipv4['address'])
        except (KeyError, ValueError, TypeError):
            return None

    @property
    def ipv4_gateway(self):
        """
        IPv4 default gateway for this interface, or None if unset/invalid
        """
        try:
            return ipaddress.ip_address(self._ipv4['gateway'])
        except (KeyError, ValueError, TypeError):
            return None

    @property
    def ipv6_address(self):
        """
        IPv6 address for this interface, or None if unset/invalid
        """
        try:
            return ipaddress.ip_interface(self._ipv6['address'])
        except (KeyError, ValueError, TypeError):
            return None

    @property
    def ipv6_gateway(self):
        """
        IPv6 default gateway for this interface, or None if unset/invalid
        """
        try:
            return ipaddress.ip_address(self._ipv6['gateway'])
        except (KeyError, ValueError, TypeError):
            return None

    @property
    def bridge(self):
        """
        Bridge, this interface will be a member of
        """
        return self._bridge

    @property
    def vlan(self):
        """
        VLAN tag used on the bridge of this interface
        """
        return self._vlan

    @property
    def mac(self):
        """
        MAC address of this interface, or None if no <mac> element exists
        """
        try:
            return self.xml.find('mac').attrib['address']
        except (AttributeError, KeyError, TypeError):
            # FIX: AttributeError added -- find() returns None when no
            # <mac> element is present, which previously escaped uncaught.
            return None
| {
"repo_name": "andrekeller/archvyrt",
"path": "archvyrt/libvirt/network.py",
"copies": "1",
"size": "6212",
"license": "mit",
"hash": -1522431048245795600,
"line_mean": 29.9054726368,
"line_max": 97,
"alpha_frac": 0.546522859,
"autogenerated": false,
"ratio": 4.28118538938663,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.532770824838663,
"avg_score": null,
"num_lines": null
} |
"""archvyrt setup module"""
from pathlib import Path
import re
from setuptools import setup, find_packages
def find_version(source_file):
    """Read ``__version__`` from *source_file*.

    :param source_file: path to a python file containing a line like
        ``__version__ = '1.2.3'``
    :returns: the version string
    :raises RuntimeError: when no ``__version__`` assignment is found
    """
    with open(source_file) as version_file:
        # FIX: the pattern previously contained a stray literal space
        # before the opening quote (``\s* ['\"]``), so assignments written
        # without a space after ``=`` (``__version__='x'``) never matched.
        version_match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]",
                                  version_file.read(), re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError('Unable to find package version')
# Package definition: metadata, classifiers and the ``archvyrt`` console
# entry point (maps to archvyrt.main()).
setup(
    name='archvyrt',
    # Version is read from archvyrt/version.py (single source of truth).
    version=find_version(str(Path('./archvyrt/version.py'))),
    description='libvirt provisioner for archlinux libvirt hosts',
    url='https://github.com/andrekeller/archvyrt',
    author='Andre Keller',
    author_email='ak@0x2a.io',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: System Administrators',
        'Topic :: System :: Systems Administration',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
    ],
    packages=find_packages(),
    install_requires=[
        'libvirt-python'
    ],
    python_requires='>=3.4',
    entry_points={
        'console_scripts': [
            'archvyrt = archvyrt:main',
        ],
    },
)
| {
"repo_name": "andrekeller/archvyrt",
"path": "setup.py",
"copies": "1",
"size": "1280",
"license": "mit",
"hash": -7701418998391370000,
"line_mean": 27.4444444444,
"line_max": 77,
"alpha_frac": 0.58828125,
"autogenerated": false,
"ratio": 3.902439024390244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4990720274390244,
"avg_score": null,
"num_lines": null
} |
import time
from reqs.twisted.internet import reactor
from core.constants import *
from core.decorators import *
from core.plugins import ProtocolPlugin
class BlockTrackerPlugin(ProtocolPlugin):
    """Tracks per-world block edits and lets ops inspect or revert them."""

    # Chat command name -> handler method name.
    commands = {
        "checkblock": "commandCheckBlock",
        "checkplayer": "commandCheckPlayer",
        "restoreplayer": "commandRestorePlayer",
        "cb": "commandCheckBlock",
        "cp": "commandCheckPlayer",
        "rp": "commandRestorePlayer"
    }
    # Server hook name -> handler method name.
    hooks = {
        "blockchange": "blockChanged"
    }

    def gotClient(self):
        # Per-client state: whether the next block change is an inspection
        # request, and how many edits a pending restore should revert.
        self.isChecking = False
        self.num = 0

    def sendCallbackRestorePlayer(self, data):
        """Blocktracker callback: revert up to self.num edits (newest first);
        self.num == "all" reverts everything."""
        total = len(data)
        done = []
        # FIX: was ``self.num is "all"`` -- identity comparison on a string
        # literal only worked thanks to CPython interning; use equality.
        if self.num == "all":
            for _ in range(total):
                done.append(data.pop())
            reverted = len(done)
        elif total > self.num:
            for _ in range(self.num):
                done.append(data.pop())
            done.reverse()
            reverted = len(done)
        else:
            done = data
            reverted = len(data)
        world = self.client.world
        try:
            # done[0][3] is the player name; failing here means no edits.
            name = done[0][3].encode("ascii", "ignore")
        except Exception:
            self.client.sendServerMessage("No edits could be found for that player!")
        else:
            self.client.sendServerMessage("Reverting %s edits for %s (out of %s)..." % (reverted, name, total))
            for offset, before, after, player, date in done:
                x, y, z = world.get_coords(offset)
                # Restore the pre-edit block both in the world data and
                # for connected clients.
                world[x, y, z] = chr(before)
                self.client.queueTask(TASK_BLOCKSET, (x, y, z, before), world=world)
                self.client.sendBlock(x, y, z, before)
            self.client.sendServerMessage("Reverted %s edits." % reverted)
            self.num = 0

    def sendCallbackPlayer(self, data):
        """Blocktracker callback: list the last 10 edits made by a player."""
        # FIX: capture the total before popping so the "(out of N)" count
        # reports the full number of edits, consistent with the restore
        # callback (previously len(data) was taken after mutation).
        total = len(data)
        if total > 10:
            done = []
            for _ in range(10):
                done.append(data.pop())
            done.reverse()
        else:
            done = data
        try:
            name = done[0][3].encode("ascii", "ignore")
        except Exception:
            self.client.sendServerMessage("No edits could be found for that player!")
        else:
            self.client.sendServerMessage("Listing last %s edits for %s (out of %s)..." % (len(done), name, total))
            for offset, before, after, player, date in done:
                date = time.strftime("%d/%m %H:%M:%S", time.gmtime(date))
                coords = self.client.world.get_coords(offset)
                self.client.sendServerMessage("[%s] (%s, %s, %s) %s -> %s" % (date, coords[0], coords[1], coords[2], before, after))

    def sendCallbackBlock(self, data):
        """Blocktracker callback: list the last 10 edits of a single block."""
        # FIX: capture the total before popping (see sendCallbackPlayer).
        total = len(data)
        if total > 10:
            done = []
            for _ in range(10):
                done.append(data.pop())
            done.reverse()
        else:
            done = data
        try:
            # Probe for emptiness; the name itself is not shown here.
            name = done[0][3].encode("ascii", "ignore")
        except Exception:
            self.client.sendServerMessage("No edits could be found for that block!")
        else:
            self.client.sendServerMessage("Listing last %s edits (out of %s)..." % (len(done), total))
            for offset, before, after, player, date in done:
                date = time.strftime("%d/%m %H:%M:%S", time.gmtime(date))
                self.client.sendServerMessage("[%s] %s: %s -> %s" % (date, player.encode("ascii", "ignore"), before, after))

    def blockChanged(self, x, y, z, block, selected_block, fromloc):
        # Note: block is what the user placed, selected_block is what their
        # client program has selected (which can be overriden by stuff like /paint)
        if self.isChecking:
            # Inspection mode: report this block's history instead of
            # applying the change, then return the stored block value so
            # the change is effectively cancelled.
            edits = self.client.world.blocktracker.getblockedits(self.client.world.get_offset(x, y, z))
            edits.addCallback(self.sendCallbackBlock)
            self.isChecking = False
            block = self.client.world.blockstore[x, y, z]
            if block == u'':
                block = 0
            else:
                block = ord(block)
            return block
        else:
            # Normal change: record (offset, before, after, who, when).
            before_block = self.client.world.blockstore[x, y, z]
            if before_block == u'':
                before_block = 0
            else:
                before_block = ord(before_block)
            self.client.world.blocktracker.add((self.client.world.get_offset(x, y, z), before_block, block, self.client.username.lower(), time.mktime(time.localtime())))

    @build_list
    @op_only
    def commandCheckBlock(self, parts, fromloc, overriderank):
        "/checkblock - Op\nAliases: cb\nChecks the next edited block for past edits"
        if not self.isChecking:
            self.client.sendServerMessage("Checking for edits: Place or remove a block!")
            self.isChecking = True
        else:
            self.client.sendServerMessage("Already checking for edits: Place or remove a block!")

    @build_list
    @op_only
    def commandCheckPlayer(self, parts, fromloc, overriderank):
        "/checkplayer playername [before|after|all] [blocktype] - Op\nAliases: cp\nChecks a player's edits on this world.\nSpecify 'before' and 'after' with the block type to show edits\nabout that type of block only."
        if len(parts) >= 1:
            # ``edit_filter`` (renamed from ``filter`` to avoid shadowing
            # the builtin) narrows the listing to before/after a blocktype.
            edit_filter = "all"
            block = "all"
            if len(parts) >= 3:
                if parts[2].lower() not in ["all", "before", "after"]:
                    self.client.sendServerMessage("Please specify 'before', 'after' or 'all'.")
                    return
                else:
                    edit_filter = parts[2].lower()
                if edit_filter != "all":
                    if len(parts) >= 4:
                        block = self.client.GetBlockValue(parts[3])
                        # FIX: ``is None`` instead of ``== None``.
                        if block is None:
                            return
                    else:
                        self.client.sendServerMessage("You need to specify the block type to view, or specify 'all'.")
                        return
            edits = self.client.world.blocktracker.getplayeredits(parts[1].lower(), edit_filter, block)
            edits.addCallback(self.sendCallbackPlayer)
        else:
            self.client.sendServerMessage("Syntax: /checkplayer playername [before|after|all] [blocktype]")

    @build_list
    @mod_only
    def commandRestorePlayer(self, parts, fromloc, overriderank):
        "/restoreplayer username n - Mod\n: Reverse n edits on the current world by username."
        if len(parts) > 2:
            if parts[2] != "all":
                try:
                    self.num = int(parts[2])
                except Exception:
                    self.client.sendServerMessage("n must be a number or \"all\"!")
                else:
                    if self.num > 0:
                        edits = self.client.world.blocktracker.getplayeredits(parts[1])
                        edits.addCallback(self.sendCallbackRestorePlayer)
                    else:
                        self.client.sendServerMessage("n must be greater than 0!")
            else:
                self.num = "all"
                edits = self.client.world.blocktracker.getplayeredits(parts[1])
                edits.addCallback(self.sendCallbackRestorePlayer)
        else:
            self.client.sendServerMessage("Syntax: /restoreplayer playername number|all")
| {
"repo_name": "TheArchives/Nexus",
"path": "core/plugins/blocktracker.py",
"copies": "1",
"size": "7910",
"license": "bsd-2-clause",
"hash": 5917905945966233000,
"line_mean": 42.9444444444,
"line_max": 218,
"alpha_frac": 0.5524652339,
"autogenerated": false,
"ratio": 4.019308943089431,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5071774176989431,
"avg_score": null,
"num_lines": null
} |
"""arc_net URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from redsun.views import SearchResult
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Search landing page, served by the redsun app.
    url(r'^home/',SearchResult,name="home"),
]
# Serve static/media files from Django itself -- development only; in
# production the web server should handle these paths.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| {
"repo_name": "rsunder10/PopularityBased-SearchEngine",
"path": "arc_net/arc_net/urls.py",
"copies": "1",
"size": "1106",
"license": "mit",
"hash": 1553591569808762600,
"line_mean": 37.1379310345,
"line_max": 82,
"alpha_frac": 0.7224231465,
"autogenerated": false,
"ratio": 3.5562700964630225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47786932429630224,
"avg_score": null,
"num_lines": null
} |
"""arco
Revision ID: 015
Revises: 014
Create Date: 2014-05-27 22:32:33.767037
"""
# revision identifiers, used by Alembic.
revision = '015'  # this migration (015_arco.py)
down_revision = '014'  # applied on top of migration 014
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``arco`` table (bow equipment and stabilization setup)."""
    columns = [
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('id_usuario', sa.Integer, sa.ForeignKey('usuario.id')),
        sa.Column('nombre', sa.String(1024), nullable=False),
        sa.Column('comentario', sa.Text),
        sa.Column('foto_path', sa.Text),
        sa.Column('tipo_arco', sa.String(255), nullable=False),
        sa.Column('draw', sa.Float),
        sa.Column('id_marca_barra_larga_estabilizacion', sa.Integer, sa.ForeignKey('marca_estabilizacion.id')),
        sa.Column('modelo_barra_larga_estabilizacion', sa.String(1024)),
        sa.Column('largo_barra_larga_estabilizacion', sa.Integer),
        sa.Column('peso_adicional_barra_larga', sa.Integer),
        sa.Column('id_marca_barra_lateral_estabilizacion', sa.Integer, sa.ForeignKey('marca_estabilizacion.id')),
        sa.Column('modelo_barra_lateral_estabilizacion', sa.String(1024)),
        sa.Column('largo_barra_lateral_estabilizacion', sa.Integer),
        sa.Column('peso_adicional_barra_lateral', sa.Integer),
        sa.Column('id_marca_extender_estabilizacion', sa.Integer, sa.ForeignKey('marca_estabilizacion.id')),
        sa.Column('modelo_extender_estabilizacion', sa.String(1024)),
        sa.Column('largo_extender_estabilizacion', sa.Integer),
        sa.Column('modelo_vbar_estabilizacion', sa.String(1024)),
        sa.Column('vbar_angulo_apertura', sa.Integer),
        sa.Column('vbar_angulo_inclinacion', sa.Integer),
        sa.Column('modelo_rest', sa.String(1024)),
    ]
    op.create_table('arco', *columns)
def downgrade():
    """Revert this migration by dropping the arco table."""
    op.drop_table('arco')
| {
"repo_name": "tzulberti/entrenamiento-arqueria",
"path": "alembic/versions/015_arco.py",
"copies": "1",
"size": "1795",
"license": "mit",
"hash": -8416695667464776000,
"line_mean": 34.9,
"line_max": 113,
"alpha_frac": 0.6640668524,
"autogenerated": false,
"ratio": 2.9718543046357615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9122901001145982,
"avg_score": 0.002604031177955768,
"num_lines": 50
} |
"""arco recurvado
Revision ID: 016
Revises: 015
Create Date: 2014-05-27 22:36:06.936698
"""
# revision identifiers, used by Alembic.
revision = '016'  # this migration (016_arco_recurvado.py)
down_revision = '015'  # applied on top of migration 015
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``arco_recurvado`` table (recurve-bow specific setup,
    extending ``arco`` via a shared primary key)."""
    columns = [
        sa.Column('id', sa.Integer, sa.ForeignKey('arco.id'), primary_key=True),
        sa.Column('id_marca_riser', sa.Integer, sa.ForeignKey('marca_riser.id')),
        sa.Column('modelo_riser', sa.String(1024)),
        sa.Column('id_largo_riser', sa.Integer, sa.ForeignKey('largo_riser.id')),
        sa.Column('id_tipo_encastre', sa.Integer, sa.ForeignKey('tipo_encastre.id')),
        sa.Column('usa_barras_cortas', sa.Boolean, nullable=False, default=False),
        sa.Column('id_marca_palas', sa.Integer, sa.ForeignKey('marca_palas.id')),
        sa.Column('modelo_palas', sa.String(1024)),
        sa.Column('libraje_palas', sa.Integer),
        sa.Column('libraje_real', sa.Integer),
        sa.Column('id_largo_palas', sa.Integer, sa.ForeignKey('largo_palas.id')),
        sa.Column('usa_honguitos', sa.Boolean, nullable=False, default=False),
        sa.Column('tiller', sa.Float),
        sa.Column('brace', sa.Float),
        sa.Column('altura_nocking_point', sa.Float),
        sa.Column('modelo_clicker', sa.String(1024)),
        sa.Column('id_marca_mira', sa.Integer, sa.ForeignKey('marca_mira.id')),
        sa.Column('modelo_mira', sa.String(1024)),
        sa.Column('usa_peewees', sa.Boolean, nullable=False, default=False),
        sa.Column('modelo_cushion_plunger', sa.String(1024)),
        sa.Column('id_tipo_hilo_cuerda', sa.Integer, sa.ForeignKey('tipo_hilo_cuerda.id')),
        sa.Column('cantidad_hilos_cuerda', sa.Integer),
        sa.Column('largo_cuerda', sa.Integer),
        sa.Column('cantidad_vueltas_cuerda', sa.Integer),
    ]
    op.create_table('arco_recurvado', *columns)
def downgrade():
    """Drop the ``arco_recurvado`` table (reverses :func:`upgrade`)."""
    op.drop_table('arco_recurvado')
| {
"repo_name": "tzulberti/entrenamiento-arqueria",
"path": "alembic/versions/016_arco_recurvado.py",
"copies": "1",
"size": "1902",
"license": "mit",
"hash": 8549167772033783000,
"line_mean": 39.4680851064,
"line_max": 91,
"alpha_frac": 0.6456361725,
"autogenerated": false,
"ratio": 2.908256880733945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9033343857666815,
"avg_score": 0.004109839113425981,
"num_lines": 47
} |
"""Arcrest is a Python binding to the ArcGIS REST Server API, similar to the
JavaScript or Flex API in program structure as well as in the way it
interfaces with ArcGIS servers.
Getting Started with Arcrest
============================
A simple example of connecting to a server:
>>> import arcrest
>>> catalog = arcrest.Catalog("http://sampleserver1.arcgisonline.com/arcgis/rest/services")
>>> catalog.services
[<GeometryServer ('http://sampleserver1.arcgisonline.com/arcgis/rest/services/Geometry/GeometryServer/?f=json')>]
Getting a service from a catalog:
>>> locator = catalog.Locators.ESRI_Geocode_USA
>>> locator.url
'http://sampleserver1.arcgisonline.com/arcgis/rest/services/Locators/ESRI_Geocode_USA/GeocodeServer/?f=json'
>>> candidates = locator.FindAddressCandidates(Address="380 NEW YORK ST", City="Redlands", State="CA")
>>> candidates
<FindAddressCandidatesResult('http://sampleserver1.arcgisonline.com/arcgis/rest/services/Locators/ESRI_Geocode_USA/GeocodeServe...')>
>>> candidates.candidates[0]
{'attributes': {}, 'score': 81, 'location': POINT(-117.19568 34.05752), 'address': '380 NEW YORK ST, REDLANDS, CA, 92373'}
Getting a service from a URL:
>>> service = arcrest.GPService("http://sampleserver1.arcgisonline.com/ArcGIS/rest/services/Specialty/ESRI_Currents_World/GPServer")
>>> service.url
'http://sampleserver1.arcgisonline.com/ArcGIS/rest/services/Specialty/ESRI_Currents_World/GPServer/?f=json'
Inspecting and executing a geoprocessing service:
>>> service.tasks
[<GPTask('http://sampleserver1.arcgisonline.com/ArcGIS/rest/services/Specialty/ESRI_Currents_World/GPServer...')>]
Getting a task by name:
>>> task = service.MessageInABottle
>>> task.url
'http://sampleserver1.arcgisonline.com/ArcGIS/rest/services/Specialty/ESRI_Currents_World/GPServer/MessageInABottle/?f=json'
>>> task.synchronous
True
>>> task.name
'MessageInABottle'
Inspecting tasks:
>>> [(param['name'], param['dataType']) for param in task.parameters if param['direction'] == 'esriGPParameterDirectionInput']
[('Input_Point', 'GPFeatureRecordSetLayer'), ('Days', 'GPDouble')]
Executing a job:
>>> results = service.MessageInABottle(arcrest.Point(-11, 38, arcrest.projections.geographic.GCS_WGS_1984), 2)
>>> import time
>>> while results.running:
... time.sleep(0.25)
...
>>> results.Output.features
[{'geometry': MULTILINESTRING((-11.00000 38.00000,-10.85327 37.73851,-10.83942 37.71683,-10.83734 37.71359,-10.83702 37.71310,-10.83697 37.71303)), 'attributes': {'shape_length': 0.330091748127675, 'fid': 1, 'fnode_': 0}}]
"""
from arcrest.geometry import *
from arcrest.gptypes import *
from arcrest.server import *
from arcrest.projections import projected, geographic
| {
"repo_name": "Esri/arcpy-server-util-rest",
"path": "arcrest/__init__.py",
"copies": "1",
"size": "2974",
"license": "apache-2.0",
"hash": 1107823540656010100,
"line_mean": 43.0606060606,
"line_max": 228,
"alpha_frac": 0.668796234,
"autogenerated": false,
"ratio": 3.387243735763098,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4556039969763098,
"avg_score": null,
"num_lines": null
} |
""" Arc-specific Vector provider helpers.
"""
from operator import add
from ..Core import KnownUnknown
from ..py3_compat import reduce
geometry_types = {
'Point': 'esriGeometryPoint',
'LineString': 'esriGeometryPolyline',
'Polygon': 'esriGeometryPolygon',
'MultiPoint': 'esriGeometryMultipoint',
'MultiLineString': 'esriGeometryPolyline',
'MultiPolygon': 'esriGeometryPolygon'
}
class _amfFeatureSet(dict):
    """ Registered PyAMF class for com.esri.ags.tasks.FeatureSet

        http://help.arcgis.com/en/webapi/flex/apiref/com/esri/ags/FeatureSet.html
    """
    def __init__(self, spatial_reference, geometry_type, features):
        # Mirror every value both as an attribute (typed AMF access) and as
        # a dict entry (plain JSON serialization of the same payload).
        payload = {'geometryType': geometry_type,
                   'spatialReference': spatial_reference,
                   'features': features}
        for key, value in payload.items():
            setattr(self, key, value)
        dict.__init__(self, payload)
class _amfSpatialReference(dict):
    """ Registered PyAMF class for com.esri.ags.SpatialReference

        http://help.arcgis.com/en/webapi/flex/apiref/com/esri/ags/SpatialReference.html
    """
    def __init__(self, wkid, wkt):
        # A spatial reference is identified either by a well-known ID or by
        # WKT text; wkid takes precedence when both are truthy.
        # NOTE(review): if both wkid and wkt are falsy, dict.__init__ is never
        # called and the instance is an empty dict -- presumably callers
        # always supply at least one; verify against reserialize_to_arc.
        if wkid:
            self.wkid = wkid
            dict.__init__(self, {'wkid': wkid})
        elif wkt:
            self.wkt = wkt
            dict.__init__(self, {'wkt': wkt})
class _amfFeature(dict):
    """ Registered PyAMF class for com.esri.ags.Feature

        No URL for class information - this class shows up in AMF responses
        from ESRI webservices but does not seem to be otherwise documented.
    """
    def __init__(self, attributes, geometry):
        # Expose the pair as attributes and as the dict content alike.
        self.attributes = attributes
        self.geometry = geometry
        super(_amfFeature, self).__init__(attributes=attributes,
                                          geometry=geometry)
class _amfGeometryMapPoint(dict):
    """ Registered PyAMF class for com.esri.ags.geometry.MapPoint

        http://help.arcgis.com/en/webapi/flex/apiref/com/esri/ags/geometry/MapPoint.html
    """
    def __init__(self, sref, x, y):
        # Coordinates and spatial reference live both as attributes and
        # as dict entries, matching the AMF wire shape.
        self.spatialReference = sref
        self.x = x
        self.y = y
        super(_amfGeometryMapPoint, self).__init__(x=x, y=y,
                                                   spatialReference=sref)
class _amfGeometryPolyline(dict):
    """ Registered PyAMF class for com.esri.ags.geometry.Polyline

        http://help.arcgis.com/en/webapi/flex/apiref/com/esri/ags/geometry/Polyline.html
    """
    def __init__(self, sref, paths):
        # `paths` is a list of point lists; kept as attribute + dict entry.
        self.paths = paths
        self.spatialReference = sref
        dict.__init__(self, {'spatialReference': sref, 'paths': paths})
class _amfGeometryPolygon(dict):
    """ Registered PyAMF class for com.esri.ags.geometry.Polygon

        http://help.arcgis.com/en/webapi/flex/apiref/com/esri/ags/geometry/Polygon.html
    """
    def __init__(self, sref, rings):
        # `rings` is a list of point lists; kept as attribute + dict entry.
        self.rings = rings
        self.spatialReference = sref
        dict.__init__(self, {'spatialReference': sref, 'rings': rings})
# Mapping used to register the helper classes above with PyAMF under their
# fully-qualified ActionScript class names.
pyamf_classes = {
    _amfFeatureSet: 'com.esri.ags.tasks.FeatureSet',
    _amfSpatialReference: 'com.esri.ags.SpatialReference',
    _amfGeometryMapPoint: 'com.esri.ags.geometry.MapPoint',
    _amfGeometryPolyline: 'com.esri.ags.geometry.Polyline',
    _amfGeometryPolygon: 'com.esri.ags.geometry.Polygon',
    _amfFeature: 'com.esri.ags.Feature'
}
def reserialize_to_arc(content, point_objects):
    """ Convert from "geo" (GeoJSON) to ESRI's GeoServices REST serialization.

        Second argument is a boolean flag for whether to use the class
        _amfGeometryMapPoint for points in ring and path arrays, or tuples.
        The formal class is needed for AMF responses, plain tuples otherwise.

        Much of this cribbed from sample server queries and page 191+ of:
        http://www.esri.com/library/whitepapers/pdfs/geoservices-rest-spec.pdf
    """
    # Point factories: real AMF classes when point_objects is truthy,
    # otherwise bare tuples (inside paths/rings) and dicts (lone points).
    mapPointList = point_objects and _amfGeometryMapPoint or (lambda s, x, y: (x, y))
    mapPointDict = point_objects and _amfGeometryMapPoint or (lambda s, x, y: {'x': x, 'y': y})
    # Arc feature sets are homogeneous. Multi* types map to the same esri
    # type as their singular counterpart, so e.g. Polygon + MultiPolygon
    # still collapses to a single entry here.
    found_geometry_types = set([feat['geometry']['type'] for feat in content['features']])
    found_geometry_types = set([geometry_types.get(type) for type in found_geometry_types])
    if len(found_geometry_types) > 1:
        raise KnownUnknown('Arc serialization needs a single geometry type, not ' + ', '.join(found_geometry_types))
    crs = content['crs']
    sref = _amfSpatialReference(crs.get('wkid', None), crs.get('wkt', None))
    geometry_type, features = None, []
    for feature in content['features']:
        geometry = feature['geometry']
        if geometry['type'] == 'Point':
            arc_geometry = mapPointDict(sref, *geometry['coordinates'])
        elif geometry['type'] == 'LineString':
            # single-path polyline
            path = geometry['coordinates']
            paths = [[mapPointList(sref, *xy) for xy in path]]
            arc_geometry = _amfGeometryPolyline(sref, paths)
        elif geometry['type'] == 'Polygon':
            rings = geometry['coordinates']
            rings = [[mapPointList(sref, *xy) for xy in ring] for ring in rings]
            arc_geometry = _amfGeometryPolygon(sref, rings)
        elif geometry['type'] == 'MultiPoint':
            points = geometry['coordinates']
            points = [mapPointList(sref, *xy) for xy in points]
            arc_geometry = {'points': points}
        elif geometry['type'] == 'MultiLineString':
            paths = geometry['coordinates']
            paths = [[mapPointList(sref, *xy) for xy in path] for path in paths]
            arc_geometry = _amfGeometryPolyline(sref, paths)
        elif geometry['type'] == 'MultiPolygon':
            # Arc has no multi-polygon notion: flatten all parts' rings
            # into one ring list. `reduce` comes from ..py3_compat above.
            rings = reduce(add, geometry['coordinates'])
            rings = [[mapPointList(sref, *xy) for xy in ring] for ring in rings]
            arc_geometry = _amfGeometryPolygon(sref, rings)
        else:
            raise Exception(geometry['type'])
        arc_feature = _amfFeature(feature['properties'], arc_geometry)
        # remember the (single) esri geometry type for the feature set
        geometry_type = geometry_types[geometry['type']]
        features.append(arc_feature)
    return _amfFeatureSet(sref, geometry_type, features)
| {
"repo_name": "TileStache/TileStache",
"path": "TileStache/Vector/Arc.py",
"copies": "1",
"size": "6091",
"license": "bsd-3-clause",
"hash": -4641867942046351000,
"line_mean": 38.0448717949,
"line_max": 116,
"alpha_frac": 0.6383188311,
"autogenerated": false,
"ratio": 3.653869226154769,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9780645470893945,
"avg_score": 0.002308517272164812,
"num_lines": 156
} |
""" Arc-specific Vector provider helpers.
"""
from operator import add
from ..Core import KnownUnknown
geometry_types = {
'Point': 'esriGeometryPoint',
'LineString': 'esriGeometryPolyline',
'Polygon': 'esriGeometryPolygon',
'MultiPoint': 'esriGeometryMultipoint',
'MultiLineString': 'esriGeometryPolyline',
'MultiPolygon': 'esriGeometryPolygon'
}
class _amfFeatureSet(dict):
    """ Registered PyAMF class for com.esri.ags.tasks.FeatureSet

        http://help.arcgis.com/en/webapi/flex/apiref/com/esri/ags/FeatureSet.html
    """
    def __init__(self, spatial_reference, geometry_type, features):
        # Values are exposed both as attributes (typed AMF access) and as
        # dict entries (plain JSON serialization).
        self.spatialReference = spatial_reference
        self.geometryType = geometry_type
        self.features = features
        dict.__init__(self, {'geometryType': geometry_type,
                             'spatialReference': spatial_reference,
                             'features': features})
class _amfSpatialReference(dict):
    """ Registered PyAMF class for com.esri.ags.SpatialReference

        http://help.arcgis.com/en/webapi/flex/apiref/com/esri/ags/SpatialReference.html
    """
    def __init__(self, wkid, wkt):
        # Identified by well-known ID or WKT text; wkid takes precedence.
        # NOTE(review): both-falsy leaves the dict empty -- presumably
        # callers always supply at least one; verify.
        if wkid:
            self.wkid = wkid
            dict.__init__(self, {'wkid': wkid})
        elif wkt:
            self.wkt = wkt
            dict.__init__(self, {'wkt': wkt})
class _amfFeature(dict):
    """ Registered PyAMF class for com.esri.ags.Feature

        No URL for class information - this class shows up in AMF responses
        from ESRI webservices but does not seem to be otherwise documented.
    """
    def __init__(self, attributes, geometry):
        # attribute + dict-entry duality, matching the AMF wire shape
        self.attributes = attributes
        self.geometry = geometry
        dict.__init__(self, {'attributes': attributes, 'geometry': geometry})
class _amfGeometryMapPoint(dict):
    """ Registered PyAMF class for com.esri.ags.geometry.MapPoint

        http://help.arcgis.com/en/webapi/flex/apiref/com/esri/ags/geometry/MapPoint.html
    """
    def __init__(self, sref, x, y):
        # coordinates + spatial reference as attributes and dict entries
        self.x = x
        self.y = y
        self.spatialReference = sref
        dict.__init__(self, {'spatialReference': sref, 'x': x, 'y': y})
class _amfGeometryPolyline(dict):
    """ Registered PyAMF class for com.esri.ags.geometry.Polyline

        http://help.arcgis.com/en/webapi/flex/apiref/com/esri/ags/geometry/Polyline.html
    """
    def __init__(self, sref, paths):
        # `paths` is a list of point lists
        self.paths = paths
        self.spatialReference = sref
        dict.__init__(self, {'spatialReference': sref, 'paths': paths})
class _amfGeometryPolygon(dict):
    """ Registered PyAMF class for com.esri.ags.geometry.Polygon

        http://help.arcgis.com/en/webapi/flex/apiref/com/esri/ags/geometry/Polygon.html
    """
    def __init__(self, sref, rings):
        # `rings` is a list of point lists
        self.rings = rings
        self.spatialReference = sref
        dict.__init__(self, {'spatialReference': sref, 'rings': rings})
# Mapping used to register the helper classes above with PyAMF under their
# fully-qualified ActionScript class names.
pyamf_classes = {
    _amfFeatureSet: 'com.esri.ags.tasks.FeatureSet',
    _amfSpatialReference: 'com.esri.ags.SpatialReference',
    _amfGeometryMapPoint: 'com.esri.ags.geometry.MapPoint',
    _amfGeometryPolyline: 'com.esri.ags.geometry.Polyline',
    _amfGeometryPolygon: 'com.esri.ags.geometry.Polygon',
    _amfFeature: 'com.esri.ags.Feature'
}
def reserialize_to_arc(content, point_objects):
    """ Convert from "geo" (GeoJSON) to ESRI's GeoServices REST serialization.

        Second argument is a boolean flag for whether to use the class
        _amfGeometryMapPoint for points in ring and path arrays, or tuples.
        The formal class is needed for AMF responses, plain tuples otherwise.

        Much of this cribbed from sample server queries and page 191+ of:
        http://www.esri.com/library/whitepapers/pdfs/geoservices-rest-spec.pdf
    """
    # BUG FIX: reduce() is not a builtin on Python 3; this module never
    # imported it, so the MultiPolygon branch raised NameError.
    from functools import reduce

    # Point factories: real AMF classes when point_objects is truthy,
    # otherwise bare tuples (inside paths/rings) and dicts (lone points).
    mapPointList = point_objects and _amfGeometryMapPoint or (lambda s, x, y: (x, y))
    mapPointDict = point_objects and _amfGeometryMapPoint or (lambda s, x, y: {'x': x, 'y': y})

    # Arc feature sets are homogeneous; refuse mixed-geometry GeoJSON.
    # (renamed loop variable to avoid shadowing the builtin `type`)
    found_geometry_types = set([feat['geometry']['type'] for feat in content['features']])
    found_geometry_types = set([geometry_types.get(geom_type) for geom_type in found_geometry_types])

    if len(found_geometry_types) > 1:
        raise KnownUnknown('Arc serialization needs a single geometry type, not ' + ', '.join(found_geometry_types))

    crs = content['crs']
    sref = _amfSpatialReference(crs.get('wkid', None), crs.get('wkt', None))
    geometry_type, features = None, []

    for feature in content['features']:
        geometry = feature['geometry']

        if geometry['type'] == 'Point':
            arc_geometry = mapPointDict(sref, *geometry['coordinates'])

        elif geometry['type'] == 'LineString':
            # single-path polyline
            path = geometry['coordinates']
            paths = [[mapPointList(sref, *xy) for xy in path]]
            arc_geometry = _amfGeometryPolyline(sref, paths)

        elif geometry['type'] == 'Polygon':
            rings = geometry['coordinates']
            rings = [[mapPointList(sref, *xy) for xy in ring] for ring in rings]
            arc_geometry = _amfGeometryPolygon(sref, rings)

        elif geometry['type'] == 'MultiPoint':
            points = geometry['coordinates']
            points = [mapPointList(sref, *xy) for xy in points]
            arc_geometry = {'points': points}

        elif geometry['type'] == 'MultiLineString':
            paths = geometry['coordinates']
            paths = [[mapPointList(sref, *xy) for xy in path] for path in paths]
            arc_geometry = _amfGeometryPolyline(sref, paths)

        elif geometry['type'] == 'MultiPolygon':
            # Arc has no multi-polygon notion: flatten all parts' rings
            # into a single ring list.
            rings = reduce(add, geometry['coordinates'])
            rings = [[mapPointList(sref, *xy) for xy in ring] for ring in rings]
            arc_geometry = _amfGeometryPolygon(sref, rings)

        else:
            raise Exception(geometry['type'])

        arc_feature = _amfFeature(feature['properties'], arc_geometry)
        geometry_type = geometry_types[geometry['type']]
        features.append(arc_feature)

    return _amfFeatureSet(sref, geometry_type, features)
| {
"repo_name": "Billups/TileStache",
"path": "TileStache/Vector/Arc.py",
"copies": "2",
"size": "6130",
"license": "bsd-3-clause",
"hash": -6593072711286386000,
"line_mean": 38.8051948052,
"line_max": 116,
"alpha_frac": 0.6301794454,
"autogenerated": false,
"ratio": 3.699456849728425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02008741576487691,
"num_lines": 154
} |
""" Arc-specific Vector provider helpers.
"""
from operator import add
from TileStache.Core import KnownUnknown
geometry_types = {
'Point': 'esriGeometryPoint',
'LineString': 'esriGeometryPolyline',
'Polygon': 'esriGeometryPolygon',
'MultiPoint': 'esriGeometryMultipoint',
'MultiLineString': 'esriGeometryPolyline',
'MultiPolygon': 'esriGeometryPolygon'
}
class _amfFeatureSet(dict):
    """ Registered PyAMF class for com.esri.ags.tasks.FeatureSet

        http://help.arcgis.com/en/webapi/flex/apiref/com/esri/ags/FeatureSet.html
    """
    def __init__(self, spatial_reference, geometry_type, features):
        # Values are exposed both as attributes (typed AMF access) and as
        # dict entries (plain JSON serialization).
        self.spatialReference = spatial_reference
        self.geometryType = geometry_type
        self.features = features
        dict.__init__(self, {'geometryType': geometry_type,
                             'spatialReference': spatial_reference,
                             'features': features})
class _amfSpatialReference(dict):
    """ Registered PyAMF class for com.esri.ags.SpatialReference

        http://help.arcgis.com/en/webapi/flex/apiref/com/esri/ags/SpatialReference.html
    """
    def __init__(self, wkid, wkt):
        # Identified by well-known ID or WKT text; wkid takes precedence.
        # NOTE(review): both-falsy leaves the dict empty -- presumably
        # callers always supply at least one; verify.
        if wkid:
            self.wkid = wkid
            dict.__init__(self, {'wkid': wkid})
        elif wkt:
            self.wkt = wkt
            dict.__init__(self, {'wkt': wkt})
class _amfFeature(dict):
    """ Registered PyAMF class for com.esri.ags.Feature

        No URL for class information - this class shows up in AMF responses
        from ESRI webservices but does not seem to be otherwise documented.
    """
    def __init__(self, attributes, geometry):
        # attribute + dict-entry duality, matching the AMF wire shape
        self.attributes = attributes
        self.geometry = geometry
        dict.__init__(self, {'attributes': attributes, 'geometry': geometry})
class _amfGeometryMapPoint(dict):
    """ Registered PyAMF class for com.esri.ags.geometry.MapPoint

        http://help.arcgis.com/en/webapi/flex/apiref/com/esri/ags/geometry/MapPoint.html
    """
    def __init__(self, sref, x, y):
        # coordinates + spatial reference as attributes and dict entries
        self.x = x
        self.y = y
        self.spatialReference = sref
        dict.__init__(self, {'spatialReference': sref, 'x': x, 'y': y})
class _amfGeometryPolyline(dict):
    """ Registered PyAMF class for com.esri.ags.geometry.Polyline

        http://help.arcgis.com/en/webapi/flex/apiref/com/esri/ags/geometry/Polyline.html
    """
    def __init__(self, sref, paths):
        # `paths` is a list of point lists
        self.paths = paths
        self.spatialReference = sref
        dict.__init__(self, {'spatialReference': sref, 'paths': paths})
class _amfGeometryPolygon(dict):
    """ Registered PyAMF class for com.esri.ags.geometry.Polygon

        http://help.arcgis.com/en/webapi/flex/apiref/com/esri/ags/geometry/Polygon.html
    """
    def __init__(self, sref, rings):
        # `rings` is a list of point lists
        self.rings = rings
        self.spatialReference = sref
        dict.__init__(self, {'spatialReference': sref, 'rings': rings})
# Mapping used to register the helper classes above with PyAMF under their
# fully-qualified ActionScript class names.
pyamf_classes = {
    _amfFeatureSet: 'com.esri.ags.tasks.FeatureSet',
    _amfSpatialReference: 'com.esri.ags.SpatialReference',
    _amfGeometryMapPoint: 'com.esri.ags.geometry.MapPoint',
    _amfGeometryPolyline: 'com.esri.ags.geometry.Polyline',
    _amfGeometryPolygon: 'com.esri.ags.geometry.Polygon',
    _amfFeature: 'com.esri.ags.Feature'
}
def reserialize_to_arc(content, point_objects):
    """ Convert from "geo" (GeoJSON) to ESRI's GeoServices REST serialization.

        Second argument is a boolean flag for whether to use the class
        _amfGeometryMapPoint for points in ring and path arrays, or tuples.
        The formal class is needed for AMF responses, plain tuples otherwise.

        Much of this cribbed from sample server queries and page 191+ of:
        http://www.esri.com/library/whitepapers/pdfs/geoservices-rest-spec.pdf
    """
    # BUG FIX: reduce() is not a builtin on Python 3; this module never
    # imported it, so the MultiPolygon branch raised NameError.
    from functools import reduce

    # Point factories: real AMF classes when point_objects is truthy,
    # otherwise bare tuples (inside paths/rings) and dicts (lone points).
    mapPointList = point_objects and _amfGeometryMapPoint or (lambda s, x, y: (x, y))
    mapPointDict = point_objects and _amfGeometryMapPoint or (lambda s, x, y: {'x': x, 'y': y})

    # Arc feature sets are homogeneous; refuse mixed-geometry GeoJSON.
    # (renamed loop variable to avoid shadowing the builtin `type`)
    found_geometry_types = set([feat['geometry']['type'] for feat in content['features']])
    found_geometry_types = set([geometry_types.get(geom_type) for geom_type in found_geometry_types])

    if len(found_geometry_types) > 1:
        raise KnownUnknown('Arc serialization needs a single geometry type, not ' + ', '.join(found_geometry_types))

    crs = content['crs']
    sref = _amfSpatialReference(crs.get('wkid', None), crs.get('wkt', None))
    geometry_type, features = None, []

    for feature in content['features']:
        geometry = feature['geometry']

        if geometry['type'] == 'Point':
            arc_geometry = mapPointDict(sref, *geometry['coordinates'])

        elif geometry['type'] == 'LineString':
            # single-path polyline
            path = geometry['coordinates']
            paths = [[mapPointList(sref, *xy) for xy in path]]
            arc_geometry = _amfGeometryPolyline(sref, paths)

        elif geometry['type'] == 'Polygon':
            rings = geometry['coordinates']
            rings = [[mapPointList(sref, *xy) for xy in ring] for ring in rings]
            arc_geometry = _amfGeometryPolygon(sref, rings)

        elif geometry['type'] == 'MultiPoint':
            points = geometry['coordinates']
            points = [mapPointList(sref, *xy) for xy in points]
            arc_geometry = {'points': points}

        elif geometry['type'] == 'MultiLineString':
            paths = geometry['coordinates']
            paths = [[mapPointList(sref, *xy) for xy in path] for path in paths]
            arc_geometry = _amfGeometryPolyline(sref, paths)

        elif geometry['type'] == 'MultiPolygon':
            # Arc has no multi-polygon notion: flatten all parts' rings
            # into a single ring list.
            rings = reduce(add, geometry['coordinates'])
            rings = [[mapPointList(sref, *xy) for xy in ring] for ring in rings]
            arc_geometry = _amfGeometryPolygon(sref, rings)

        else:
            raise Exception(geometry['type'])

        arc_feature = _amfFeature(feature['properties'], arc_geometry)
        geometry_type = geometry_types[geometry['type']]
        features.append(arc_feature)

    return _amfFeatureSet(sref, geometry_type, features)
| {
"repo_name": "robpvn/Vector-Tile-Research",
"path": "TileStache/Vector/Arc.py",
"copies": "13",
"size": "6139",
"license": "bsd-3-clause",
"hash": 7968451868734726000,
"line_mean": 38.8636363636,
"line_max": 116,
"alpha_frac": 0.6308845089,
"autogenerated": false,
"ratio": 3.695966285370259,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02008741576487691,
"num_lines": 154
} |
#ArcSwath
#NB 08/2014
import arcpy as ap
import pythonaddins
import math
import numpy as np
import os as os
import numpy.ma as ma
import csv
from subprocess import Popen, PIPE
# Matplotlib is optional: without it the tool still writes CSV output, but
# plotting is disabled (plot choice forced to NONE).
try:
    import matplotlib.pyplot as plt
    Matplotlibexists = True
except ImportError:
    # fixed typo in the user-facing message: "mathplotlib" -> "matplotlib"
    print("NO matplotlib module found. Plot can only be NONE. CSV will be written.")
    Matplotlibexists = False
class ButtonClass9(object):
    """Implementation for SP3_addin.button (Button)

    Runs the swath computation: builds a rectangle of width W around the
    profile line digitized with the two point tools, clips the selected
    raster and/or point layer to it, writes swath statistics / projected
    point samples to CSV, and optionally triggers plotting.
    """
    def __init__(self):
        self.enabled = True
        self.checked = False
        # Plot selection is only meaningful when matplotlib is available.
        if Matplotlibexists == True:
            combobox_1.enabled = True

    def onClick(self):
        # Determining all variables from the combo/tool boxes.
        x5 = tool.x5
        y5 = tool.y5
        x6 = tool_1.x6
        y6 = tool_1.y6
        W = combobox_2.W
        if Matplotlibexists == True:
            plot = combobox_1.plot
        else:
            # BUG FIX: was `plot == 0` -- a comparison against an undefined
            # name (NameError at runtime); assignment was intended.
            plot = 0
        if hasattr(combobox, 'layer'):
            r_in_layer = combobox.layer
            print("r_in_layer.name: " + r_in_layer.name)
        if hasattr(combobox_4, 'layer'):
            p_in_layer = combobox_4.layer
            print("p_in_layer.name: " + p_in_layer.name)
        if combobox_3.enabled:
            increment = combobox_3.increment
        if combobox_5.enabled:
            pointfeature = combobox_5.feature
        print("Starting process...")
        # Calculating geometry from the two selected points.
        # THIS WON'T WORK FOR A VERTICAL LINE
        deltax = x6 - x5
        deltay = y6 - y5
        lineslope = deltay / deltax
        lineangle = math.atan(lineslope)
        # The four corners of the swath rectangle, offset W/2 to either
        # side of the profile line.
        # Point 1
        x1 = x5 + math.cos(math.radians(90 - math.degrees(lineangle))) * (float(W) / 2.)
        y1 = y5 - math.sin(math.radians(90 - math.degrees(lineangle))) * (float(W) / 2.)
        # Point 2
        x2 = x6 + math.cos(math.radians(90 - math.degrees(lineangle))) * (float(W) / 2.)
        y2 = y6 - math.sin(math.radians(90 - math.degrees(lineangle))) * (float(W) / 2.)
        # Point 3
        x3 = x6 - math.cos(math.radians(90 - math.degrees(lineangle))) * (float(W) / 2.)
        y3 = y6 + math.sin(math.radians(90 - math.degrees(lineangle))) * (float(W) / 2.)
        # Point 4
        x4 = x5 - math.cos(math.radians(90 - math.degrees(lineangle))) * (float(W) / 2.)
        y4 = y5 + math.sin(math.radians(90 - math.degrees(lineangle))) * (float(W) / 2.)
        # Make a Polygon
        swathpoints = ap.Array()
        swathpoints.add(ap.Point(x1, y1))
        swathpoints.add(ap.Point(x2, y2))
        swathpoints.add(ap.Point(x3, y3))
        swathpoints.add(ap.Point(x4, y4))
        swathpoints.add(ap.Point(x1, y1))
        polygon = ap.Polygon(swathpoints)
        # Check to see if the user selected a raster layer. If not, set r_layertype to be empty.
        try:
            r_in_layer
        except NameError:
            r_layertype = ""
        else:
            r_desc = ap.Describe(r_in_layer)
            r_layertype = r_desc.datasetType
        # Check to see if the user selected a point layer. If not, set p_layertype to be empty.
        try:
            p_in_layer
        except NameError:
            p_layertype = ""
        else:
            p_desc = ap.Describe(p_in_layer)
            p_layertype = p_desc.datasetType
        if r_layertype == "RasterDataset":
            # --- raster swath ---
            r_out_layer = r_in_layer.name[0:2] + "_swath"
            # Change to working directory
            workdir = r_in_layer.workspacePath
            ap.env.workspace = workdir
            os.chdir(workdir)
            # Naming the output and checking that files with same name do not exist.
            swathnum = ""
            for n in range(100):
                r_out_layer_name = r_out_layer
                if ap.Exists(r_out_layer_name):
                    swathnum = n + 1
                    r_out_layer = r_in_layer.name[0:2] + "_swath" + str(swathnum)
                else:
                    break
            # Clipping data from input by polygon to output
            ap.Clip_management(r_in_layer, "#", r_out_layer, polygon, "#", "ClippingGeometry")
            # Set length to fit the swathprofile (constant increments).
            length = math.sqrt(((x6 - x5) ** 2) + ((y6 - y5) ** 2))
            ninc = int(length / float(increment))
            if (np.mod(length, float(increment)) >= 1.0e-10):
                ninc = ninc + 1
            increment = length / float(ninc)
            # These arrays will be filled with data later on.
            swathdist = np.zeros(ninc)
            swathmax = np.zeros(ninc)
            swathmean = np.zeros(ninc)
            swathmin = np.zeros(ninc)
            # Does not work for vertical or horizontal line.
            if lineslope * deltay >= 0.0 and deltax >= 0.0:
                increment = -increment
            # Taking one step at a time (increment), creating arrays and finding the max, mean, min values.
            print("Calculating swath ranges...")
            for n in range(ninc):
                print("Processing row " + str(n + 1) + " of " + str(ninc))
                # Point A
                xA = x1 - (n) * float(increment) * math.cos(lineangle)
                yA = y1 - (n) * float(increment) * math.sin(lineangle)
                # Point B
                xB = x1 - (n + 1) * float(increment) * math.cos(lineangle)
                yB = y1 - (n + 1) * float(increment) * math.sin(lineangle)
                # Point C
                xC = x4 - (n + 1) * float(increment) * math.cos(lineangle)
                yC = y4 - (n + 1) * float(increment) * math.sin(lineangle)
                # Point D
                xD = x4 - (n) * float(increment) * math.cos(lineangle)
                yD = y4 - (n) * float(increment) * math.sin(lineangle)
                # Make the actual polygon
                clippoints = ap.Array()
                clippoints.add(ap.Point(xA, yA))
                clippoints.add(ap.Point(xB, yB))
                clippoints.add(ap.Point(xC, yC))
                clippoints.add(ap.Point(xD, yD))
                clippoints.add(ap.Point(xA, yA))
                swathpolygon = ap.Polygon(clippoints)
                ap.Delete_management("in_memory/swathclip")
                # The actual clipping
                ap.Clip_management(r_out_layer, "#", "in_memory/swathclip", swathpolygon, "#", "ClippingGeometry")
                # Find the NoDataValue, use in mask (later)
                finddata = ap.Describe(r_in_layer)
                nodata = finddata.noDataValue
                # Convert the clipped area to Numpy Array
                swatharray = ma.MaskedArray(ap.RasterToNumPyArray("in_memory/swathclip"))
                swatharray.mask = (swatharray == nodata)
                swathdist[n] = abs(increment) * n
                swathmax[n] = np.max(swatharray)
                swathmean[n] = np.mean(swatharray)
                swathmin[n] = np.min(swatharray)
            # Write swath arrays to a CSV file
            swathprofile = r_in_layer.name[0:2] + "_rasterswath" + str(swathnum) + ".csv"
            for n in range(100):
                if os.path.exists(swathprofile):
                    swathprofile = r_in_layer.name[0:2] + "_rasterswath" + str(swathnum) + "-" + str(n + 1) + ".csv"
                else:
                    break
            fileout = open(swathprofile, 'w')
            fileout.write('Point1: ' + ',' + str(x5) + ',' + str(y5) + ',' + 'Point2: ' + ',' + str(x6) + ',' + str(y6) + '\n')
            fileout.write('Distance [m], Max. Elevation [m], Mean Elevation [m], Min. Elevation [m]\n')
            for n in range(ninc):
                currentrow = '{0:.3f},{1:.3f},{2:.3f},{3:.3f}\n'.format(swathdist[n], swathmax[n], swathmean[n], swathmin[n])
                fileout.write(currentrow)
            fileout.close()
            # Test here for whether user wants to see the plot (comes from combobox in toolbar)
            # If yes call show_plot("swathplot.py")
            if plot == 1 or plot == 3:
                fileout = open('C:\WorkSpace\plotdir.txt', 'w')
                fileout.write(r_in_layer.workspacePath + "\n")
                fileout.write(str(plot))
                fileout.close()
        if p_layertype == "FeatureClass":
            # --- point-data (shapefile) swath ---
            p_out_layer = p_in_layer.name[0:2] + "_swath_pts"
            # Change to working directory
            workdir = p_in_layer.workspacePath
            ap.env.workspace = workdir
            os.chdir(workdir)
            # Naming the output and checking that files with same name do not exist.
            p_swathnum = ""
            for n in range(100):
                p_out_layer_name = p_out_layer + ".shp"
                if ap.Exists(p_out_layer_name):
                    p_swathnum = n + 1
                    p_out_layer = p_in_layer.name[0:2] + "_swath_pts" + str(p_swathnum)
                else:
                    break
            # Clip points which are inside the polygon
            ap.Clip_analysis(p_in_layer, polygon, p_out_layer)
            # Shorter term for lineslope
            k = lineslope
            # Works at least with shapefiles
            cutpointsfile = p_out_layer
            describe = ap.Describe(cutpointsfile)
            shapefieldname = describe.ShapeFieldName
            rows = ap.SearchCursor(cutpointsfile)
            pointarray = []
            print("pointfeature: " + str(pointfeature))
            # Loop collects the values to csv-file.
            print("Finding points in swath area...")
            for row in rows:
                # Create the geometry object 'feat'
                feat = row.getValue(shapefieldname)
                pnt = feat.getPart()
                xp = pnt.X
                yp = pnt.Y
                # To change the data, check the name of the field from table
                data = row.getValue(pointfeature)
                # Project the point onto the profile line and measure the
                # along-line distance from the first profile point.
                xx = (xp + ((k ** 2) * x5) + (yp * k) - (y5 * k)) / (k ** 2 + 1)
                yx = k * (xx - x5) + y5
                d = math.sqrt((xx - x5) ** 2 + (yx - y5) ** 2)
                pointarray.append([d, data, xx, yx])
            # Write results into CSV file
            pointdatafile = p_in_layer.name[0:2] + "_pointswath" + str(p_swathnum) + ".csv"
            for n in range(100):
                if os.path.exists(pointdatafile):
                    pointdatafile = p_in_layer.name[0:2] + "_pointswath" + str(p_swathnum) + "-" + str(n + 1) + ".csv"
                else:
                    break
            openpointdata = open(pointdatafile, 'w')
            openpointdata.write('Point1: ' + ',' + str(x5) + ',' + str(y5) + ',' + 'Point2: ' + ',' + str(x6) + ',' + str(y6) + '\n')
            openpointdata.write('Distance [m],' + str(pointfeature) + "," + 'X-coordinate, Y-coordinate\n')
            for n in pointarray:
                datarow = str(n[0]) + "," + str(n[1]) + "," + str(n[2]) + "," + str(n[3]) + "\n"
                openpointdata.write(datarow)
            openpointdata.close()
            # Plotting pointdata
            if plot == 2 or plot == 3:
                pointout = open('C:\WorkSpace\plotpointdir.txt', 'w')
                pointout.write(p_in_layer.workspacePath + "\n")
                pointout.write(str(plot))
                pointout.close()
            # Plot the right way
            if plot == 2:
                fileout = open('C:\WorkSpace\plotdir.txt', 'w')
                fileout.write("\n" + str(plot))
                fileout.close()
            if plot == 1:
                pointout = open('C:\WorkSpace\plotpointdir.txt', 'w')
                pointout.write("\n" + str(plot))
                pointout.close()
        if plot != 0:
            show_plot("swathplot.py")
class ComboBoxClass5(object):
    """Implementation for SP3_addin.combobox (ComboBox)

    Raster-layer selector: the chosen layer is stored on ``self.layer`` and
    drives which plot options / increment choices are available.
    """
    def __init__(self):
        self.editable = True
        self.enabled = True
        self.value = 'None'
    def onSelChange(self, selection):
        # When a new layer is selected, sync the dependent widgets.
        if selection == 'None':
            # NOTE(review): assumes a layer was previously chosen --
            # `del self.layer` raises AttributeError otherwise; confirm.
            del self.layer
            if 'Points' in combobox_1.items and Matplotlibexists:
                combobox_1.items = ['None', 'Points']
            combobox_3.enabled = False
        else:
            self.layer = ap.mapping.ListLayers(self.mxd, selection)[0]
            combobox_3.enabled = True
            if Matplotlibexists:
                if 'Points' in combobox_1.items:
                    combobox_1.items = ['None', 'Raster', 'Points', 'Raster+Points']
                else:
                    combobox_1.items = ['None', 'Raster']
            # Error if not in UTM
            r_desc = ap.Describe(selection)
            r_spatialref = r_desc.spatialReference
            if "UTM" not in r_spatialref.name:
                print ('***ERROR***')
                print ('Input raster file is not in metric UTM coordinate system. Use UTM coordinate system.')
                button.enabled = False
            else:
                button.enabled = True
        ap.RefreshActiveView()
    def onEditChange(self, text):
        pass
    def onFocus(self, focused):
        # When the combo box has focus, update the combo box with the list of layer names.
        if focused:
            self.mxd = ap.mapping.MapDocument('current')
            layers = ap.mapping.ListLayers(self.mxd)
            self.items = ['None']
            for layer in layers:
                self.items.append(layer.name)
    def onEnter(self):
        pass
    def refresh(self):
        pass
class ComboBoxClass39(object):
    """Implementation for SP3_addin.combobox_4 (ComboBox)

    Point-layer selector: the chosen layer is stored on ``self.layer`` and
    enables the point-field combobox (combobox_5).
    """
    def __init__(self):
        self.editable = True
        self.enabled = True
        self.value = 'None'
    def onSelChange(self, selection):
        # When a new layer is selected
        # Different from the combobox 5 and has a "good" bug
        if selection == 'None':
            # NOTE(review): assumes a layer was previously chosen --
            # `del self.layer` raises AttributeError otherwise; confirm.
            del self.layer
            if Matplotlibexists == True:
                if 'Raster' in combobox_1.items:
                    combobox_1.items = ['None', 'Raster']
                else:
                    combobox_1.items = ['None']
            combobox_5.enabled = False
        else:
            self.layer = ap.mapping.ListLayers(self.mxd, selection)[0]
            combobox_5.enabled = True
            if Matplotlibexists:
                if 'Raster' in combobox_1.items:
                    combobox_1.items = ['None', 'Raster', 'Points', 'Raster+Points']
                else:
                    combobox_1.items = ['None', 'Points']
            # Error if not in UTM
            p_desc = ap.Describe(selection)
            p_spatialref = p_desc.spatialReference
            if "UTM" not in p_spatialref.name:
                print ('***ERROR***')
                print ('Input point file is not in metric UTM coordinate system. Use UTM coordinate system.')
                button.enabled = False
            else:
                button.enabled = True
        ap.RefreshActiveView()
    def onEditChange(self, text):
        pass
    def onFocus(self, focused):
        # When the combo box has focus, update the combo box with the list of layer names.
        if focused:
            self.mxd = ap.mapping.MapDocument('current')
            layers = ap.mapping.ListLayers(self.mxd)
            self.items = ['None']
            for layer in layers:
                self.items.append(layer.name)
    def onEnter(self):
        pass
    def refresh(self):
        pass
class ComboBoxClass3(object):
    """Field selector for the chosen point layer (SP_addin.combobox_5).

    Disabled until a point layer is picked in combobox_4; the selected
    field name is remembered in self.feature.
    """

    def __init__(self):
        self.editable = True
        self.enabled = False

    def onSelChange(self, selection):
        # Remember which attribute field the user picked.
        self.feature = selection

    def onEditChange(self, text):
        pass

    def onFocus(self, focused):
        # Populate the dropdown with the point layer's field names.
        pfields = ap.Describe(combobox_4.layer).fields
        self.items = [f.baseName for f in pfields]

    def onEnter(self):
        pass

    def refresh(self):
        pass
class ComboBoxClass6(object):
    """Plot-type selector (SP3_addin.combobox_1).

    Maps the chosen label to an integer code in self.plot:
    0 = none, 1 = raster, 2 = points, 3 = raster + points.
    """

    _PLOT_CODES = {'None': 0, 'Raster': 1, 'Points': 2, 'Raster+Points': 3}

    def __init__(self):
        self.editable = True
        self.enabled = False
        self.dropdownWidth = 'WWWWWW'
        self.width = 'WWWWWW'

    def onSelChange(self, selection):
        # Translate the label into its numeric code; an unknown label
        # leaves self.plot untouched, same as the original if/elif chain.
        code = self._PLOT_CODES.get(selection)
        if code is not None:
            self.plot = code

    def onEditChange(self, text):
        pass

    def onFocus(self, focused):
        pass

    def onEnter(self):
        pass

    def refresh(self):
        pass
class ComboBoxClass7(object):
    """Swath-width selector (SP3_addin.combobox_2).

    The chosen (int) or typed (str) value is kept in self.W.
    """

    def __init__(self):
        self.items = [1000, 5000, 10000, 20000]
        self.editable = True
        self.enabled = True
        self.dropdownWidth = 'WWWWWW'
        self.width = 'WWWWWW'

    def onSelChange(self, selection):
        # Dropdown pick: store the value exactly as supplied.
        self.W = selection

    def onEditChange(self, text):
        # Typed entry: stored as the raw string, exactly as entered.
        self.W = text

    def onFocus(self, focused):
        pass

    def onEnter(self):
        pass

    def refresh(self):
        pass
class ComboBoxClass8(object):
    """Sampling-increment selector (SP3_addin.combobox_3).

    The chosen (int) or typed (str) value is kept in self.increment.
    """

    def __init__(self):
        self.items = [180, 360, 720, 1080, 1440, 1800]
        self.editable = True
        self.enabled = False
        self.dropdownWidth = 'WWWWWW'
        self.width = 'WWWWWW'

    def onSelChange(self, selection):
        # Dropdown pick: store the value exactly as supplied.
        self.increment = selection

    def onEditChange(self, text):
        # Typed entry: stored as the raw string, exactly as entered.
        self.increment = text

    def onFocus(self, focused):
        pass

    def onEnter(self):
        pass

    def refresh(self):
        pass
class ToolClass2(object):
    """Implementation for SP3_addin.tool (Tool).

    First map-point picker: a click stores the map coordinates in
    self.x5/self.y5 for later use as one end of the swath baseline.
    """
    def __init__(self):
        self.enabled = True
        # "NONE": no interactive shape drawing; set to "Line", "Circle" or
        # "Rectangle" to activate the onLine/Polygon/Circle event sinks.
        self.shape = "NONE"
        self.cursor = 3  # crosshair cursor
    def onMouseDownMap(self, x, y, button, shift):
        """Record the clicked map coordinates and echo them to the console."""
        self.x5 = x
        self.y5 = y
        # Parenthesised single-argument print works in both Python 2 and 3
        # (the original used the Python-2-only print statement).
        print("Point 1: "+str(x)+","+str(y))
class ToolClass4(object):
    """Implementation for SP3_addin.tool_1 (Tool).

    Second map-point picker: a click stores the map coordinates in
    self.x6/self.y6 for later use as the other end of the swath baseline.
    """
    def __init__(self):
        self.enabled = True
        # "NONE": no interactive shape drawing; set to "Line", "Circle" or
        # "Rectangle" to activate the onLine/Polygon/Circle event sinks.
        self.shape = "NONE"
        self.cursor = 3  # crosshair cursor
    def onMouseDownMap(self, x, y, button, shift):
        """Record the clicked map coordinates and echo them to the console."""
        self.x6 = x
        self.y6 = y
        # Parenthesised single-argument print works in both Python 2 and 3
        # (the original used the Python-2-only print statement).
        print("Point 2: "+str(x)+","+str(y))
def show_plot(file_name):
file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), file_name)
proc = Popen(file_path, shell=True, stdout=PIPE, bufsize=1)
stdoutdata, stderrdata = proc.communicate()
return stdoutdata | {
"repo_name": "HUGG/ArcSwath",
"path": "Install/ArcSwath_addin.py",
"copies": "1",
"size": "19717",
"license": "mit",
"hash": -2680452891958736000,
"line_mean": 36.2741020794,
"line_max": 157,
"alpha_frac": 0.5259927981,
"autogenerated": false,
"ratio": 3.8027000964320155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48286928945320157,
"avg_score": null,
"num_lines": null
} |
"""Arcyd - daemon to watch git repos, create and land reviews automatically.
Intended to make it easy for large teams to start using Differential without
individual contributors needing to install and configure Arcanist.
Individual contributors are still free to use Arcanist if they wish, Arcyd
provides a zero-config layer over Git to get them started.
Arcyd does the following:
- watches for specially named branches and automatically creates revisions
- automatically updates revisions when the branch changes
- automatically lands revisions when they are approved
minimal user workflow:
$ git checkout feature/mywork
~ commit some work on the branch ~
$ git push origin feature/mywork:arcyd-review/mywork/master
.. Arcyd sees the 'arcyd-review' branch and creates a review ..
.. Reviewer accepts the change ..
.. Arcyd squashes the 'arcyd-review' branch onto master and deletes it ..
"""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdcmd_arcyd
#
# Public Functions:
# main
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
import argparse
import phlsys_subcommand
import abdcmd_addphabricator
import abdcmd_addrepo
import abdcmd_addrepohost
import abdcmd_arcydstatushtml
import abdcmd_devstatushtml
import abdcmd_fetch
import abdcmd_fsck
import abdcmd_init
import abdcmd_instaweb
import abdcmd_listrepos
import abdcmd_repostatushtml
import abdcmd_rmrepo
import abdcmd_start
import abdcmd_stop
_USAGE_EXAMPLES = """
usage example:
To setup arcyd using the example accounts baked into the 'phabricator-tools'
vagrant/puppet installation. (see ./README)
$ mkdir arcyd
$ cd arcyd
$ arcyd init --arcyd-email arcyd@localhost
$ arcyd add-phabricator \\
--name local \\
--instance-uri http://127.0.0.1/api/ \\
--review-url-format 'http://127.0.0.1/D{review}' \\
--admin-emails 'local-phab-admin@localhost' \\
--arcyd-user phab \\
--arcyd-cert \\
xnh5tpatpfh4pff4tpnvdv74mh74zkmsualo4l6mx7bb262zqr55vcachxgz7ru3lrvafgzqu\
zl3geyjxw426ujcyqdi2t4ktiv7gmrtlnc3hsy2eqsmhvgifn2vah2uidj6u6hhhxo2j3y2w6lcseh\
s2le4msd5xsn4f333udwvj6aowokq5l2llvfsl3efcucraawtvzw462q2sxmryg5y5rpicdk3lyr3u\
vot7fxrotwpi3ty2b2sa2kvlpf
$ arcyd add-repohost \\
--name local_repos \\
--repo-url-format '/path/to/repos/{}'
$ arcyd add-repo \\
--name local_1 \\
--repo-url local_1 \\
--repo-desc local_1 \\
--phabricator-name local \\
--repohost-name local_repos \\
--admin-emails 'local-repo1-admin@localhost'
$ arcyd start
run each command with the '--help' option for more information, e.g.:
$ arcyd init --help
"""
def main():
    """Parse command-line arguments and dispatch to the chosen subcommand."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=__doc__,
        epilog=_USAGE_EXAMPLES)
    subparsers = parser.add_subparsers()

    # Register each subcommand module under its command-line name.
    subcommands = (
        ("arcyd-status-html", abdcmd_arcydstatushtml),
        ("repo-status-html", abdcmd_repostatushtml),
        ("dev-status-html", abdcmd_devstatushtml),
        ("instaweb", abdcmd_instaweb),
        ("init", abdcmd_init),
        ("list-repos", abdcmd_listrepos),
        ("add-phabricator", abdcmd_addphabricator),
        ("add-repohost", abdcmd_addrepohost),
        ("add-repo", abdcmd_addrepo),
        ("rm-repo", abdcmd_rmrepo),
        ("start", abdcmd_start),
        ("stop", abdcmd_stop),
        ("fsck", abdcmd_fsck),
        ("fetch", abdcmd_fetch),
    )
    for command_name, command_module in subcommands:
        phlsys_subcommand.setup_parser(command_name, command_module, subparsers)

    args = parser.parse_args()
    return args.func(args)
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| {
"repo_name": "valhallasw/phabricator-tools",
"path": "py/abd/abdcmd_arcyd.py",
"copies": "1",
"size": "5265",
"license": "apache-2.0",
"hash": -2177470798054003200,
"line_mean": 33.1883116883,
"line_max": 79,
"alpha_frac": 0.6554605888,
"autogenerated": false,
"ratio": 3.507661558960693,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4663122147760693,
"avg_score": null,
"num_lines": null
} |
"""Arcyd operations that can be scheduled with phlsys_scheduleunreliables."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdi_operation
#
# Public Classes:
# Sleep
# .do
# RefreshCaches
# .do
# KillFileError
# CheckSpecialFiles
# .do
# CycleReportJson
# .do
# .getDelay
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
import json
import logging
import os
import time
import phlsys_strtotime
import phlsys_subprocess
import phlsys_timer
import abdt_errident
import abdt_tryloop
_LOGGER = logging.getLogger(__name__)
class Sleep(object):

    """Block for a number of seconds, ticking a reporter once per second."""

    def __init__(self, secs, reporter):
        # :secs: how long to sleep in total
        # :reporter: receives start_sleep/update_sleep/finish_sleep events
        self._secs = secs
        self._reporter = reporter

    def do(self):
        """Sleep for the configured duration; always return True."""
        remaining = self._secs
        self._reporter.start_sleep(remaining)
        while remaining > 0:
            # Report the countdown before each one-second nap.
            self._reporter.update_sleep(remaining)
            time.sleep(1)
            remaining -= 1
        self._reporter.finish_sleep()
        return True
class RefreshCaches(object):

    """Refresh the conduit caches and the git url watcher between cycles."""

    def __init__(self, conduits, url_watcher, reporter):
        # :conduits: dict mapping keys to conduit objects to be refreshed
        # :url_watcher: object with a .refresh() that re-snoops git urls
        # :reporter: receives start/finish events and timing tags
        super(RefreshCaches, self).__init__()
        self._conduits = conduits
        self._url_watcher = url_watcher
        self._reporter = reporter

    def do(self):
        """Refresh all caches, retrying transient failures; return True."""
        self._reporter.start_cache_refresh()
        with self._reporter.tag_timer_context('refresh conduit cache'):
            for key in self._conduits:
                conduit = self._conduits[key]
                # critical_tryloop retries so one flaky conduit does not
                # abort the whole refresh pass.
                abdt_tryloop.critical_tryloop(
                    conduit.refresh_cache_on_cycle,
                    abdt_errident.CONDUIT_REFRESH,
                    conduit.describe())
        with self._reporter.tag_timer_context('refresh git watcher'):
            abdt_tryloop.critical_tryloop(
                self._url_watcher.refresh, abdt_errident.GIT_SNOOP, '')
        self._reporter.finish_cache_refresh()
        return True
class KillFileError(Exception):

    """Raised when the kill file is found, to request that arcyd shut down."""

    pass
class CheckSpecialFiles(object):

    """Watch for a 'kill file'; if it appears, remove it and raise."""

    def __init__(self, kill_file):
        # :kill_file: path to watch for; None or empty disables the check
        self._kill_file = kill_file

    def do(self):
        """Raise KillFileError if the kill file exists, else return True.

        The file is deleted before raising so the next start does not
        immediately shut down again.
        """
        path = self._kill_file
        if path and os.path.isfile(path):
            os.remove(path)
            raise KillFileError("kill file: " + path)
        return True
class CycleReportJson(object):

    "Pipes a json report object to stdin of 'report_command' every cycle."

    def __init__(self, report_command):
        self._report_command = report_command
        self._timer = phlsys_timer.Timer()
        self._timer.start()
        # Back-off schedule consumed by getDelay(): retry after 10 minutes,
        # then after 1 hour, then give up (getDelay returns None).
        duration = phlsys_strtotime.duration_string_to_time_delta
        self._delays = [duration("10 minutes"), duration("1 hours")]

    def do(self):
        """Send a JSON report of the last cycle time; True on success."""
        payload = json.dumps({"cycle_time_secs": self._timer.restart()})
        try:
            phlsys_subprocess.run(self._report_command, stdin=payload)
        except phlsys_subprocess.CalledProcessError as e:
            _LOGGER.error("CycleReportJson: {}".format(e))
            return False
        return True

    def getDelay(self):
        """Pop and return the next retry delay, or None when exhausted."""
        if self._delays:
            return self._delays.pop(0)
        return None
# -----------------------------------------------------------------------------
# Copyright (C) 2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| {
"repo_name": "valhallasw/phabricator-tools",
"path": "py/abd/abdi_operation.py",
"copies": "1",
"size": "4255",
"license": "apache-2.0",
"hash": 5974392326095310000,
"line_mean": 28.3448275862,
"line_max": 79,
"alpha_frac": 0.5673325499,
"autogenerated": false,
"ratio": 4.107142857142857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5174475407042857,
"avg_score": null,
"num_lines": null
} |
"""Arcyd-specific interactions with the filesystem."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdt_fs
#
# Public Classes:
# Error
# Layout
# .phabricator_config
# .repohost_config
# .repo_config
# .repo_try
# .repo_ok
# .repo
# Accessor
# .set_pid
# .get_pid_or_none
# .create_root_config
# .create_phabricator_config
# .get_phabricator_config_rel_path
# .create_repohost_config
# .get_repohost_config_rel_path
# .create_repo_config
# .remove_repo_config
# .repo_name_list
# .repo_config_path_list
# .lockfile_context
# .layout
#
# Public Functions:
# make_default_accessor
# initialise_here
# raise_if_config_name_not_valid
# is_config_name_valid
#
# Public Assignments:
# CONFIG_NAME_REGEX
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
import contextlib
import os
import re
import phlgit_commit
import phlgit_diffindex
import phlsys_fs
import phlsys_git
import phlsys_subprocess
# Restrict the names that may be used for config files for repos, phab
# instances, repo hosts. The names may be used in URLs and filenames, err on
# the side of caution when allowing characters.
#
# Only allow lowercase names so that we don't get tripped up by
# case-insensitive file systems.
#
# Check that all the characters are either dash, underscore, lowercase a-z or
# numbers 0-9. Require a match against the whole string (^$).
#
CONFIG_NAME_REGEX = '^[_a-z0-9-]+$'
_README = """
This is an Arcyd repository.
Run 'arcyd --help' for options.
""".strip()
_CONFIG_README = """
In this directory all the configuration is stored, from which all other
run-time data may be generated.
It is important that this directory is preserved and versioned so that it's
possible to see when and why things were changed.
""".strip()
_CONFIG_PHABRICATOR_README = """
In this directory all the phabricator instance configuration is stored.
It is important that this directory is preserved and versioned so that it's
possible to see when and why things were changed.
""".strip()
_CONFIG_REPOSITORY_README = """
In this directory all the repository configuration is stored.
It is important that this directory is preserved and versioned so that it's
possible to see when and why things were changed.
""".strip()
_VAR_README = """
In this directory all the repositories, logs and other run-time generated data
is stored.
It is safe to clean this directory when Arcyd is not running, you should save
any logs that you'd like to keep beforehand of course.
This is really a stand-in for using '/var' on the machine, this makes it
convenient to run arcyd where it can't be installed as root whilst keeping it
conceivable to move to a packaged install process later.
""".strip()
_VAR_REPO_README = """
This is where Arcyd keeps all the local clones of repositories that it is
managing.
""".strip()
_VAR_LOG_README = """
This is where Arcyd keeps all activity logs.
""".strip()
_VAR_STATUS_README = """
This is where Arcyd keeps all status information.
""".strip()
_VAR_COMMAND_README = """
This is where Arcyd looks for command files, e.g. to pause or stop.
""".strip()
_VAR_RUN_README = """
This is where Arcyd puts it's pidfile.
""".strip()
class Error(Exception):

    """Base exception for errors raised by this module."""

    pass
class Layout(object):

    """Relative paths of the files that make up an arcyd instance."""

    arcydroot = '.arcydroot'
    root_config = 'configfile'
    pid = 'var/run/arcyd.pid'
    stdout = 'var/log/stdout'
    stderr = 'var/log/stderr'
    log_info = 'var/log/info'

    # BUG FIX: this previously read 'config/repository' (a copy-paste of
    # the repository line below); phabricator_config() below shows that
    # phabricator configs live under 'config/phabricator'.
    phabricator_config_dir = 'config/phabricator'
    repository_config_dir = 'config/repository'

    urlwatcher_cache_path = '.arcyd.urlwatcher.cache'
    lockfile = 'var/lockfile'
    dir_run = 'var/run'

    @staticmethod
    def phabricator_config(name):
        """Return the string path to the phabricator config 'name'.

        :name: string name of the new config, see CONFIG_NAME_REGEX
        :returns: the string relative path of the new file

        """
        return 'config/phabricator/{}'.format(name)

    @staticmethod
    def repohost_config(name):
        """Return the string path to the repohost config 'name'.

        :name: string name of the new config, see CONFIG_NAME_REGEX
        :returns: the string relative path of the new file

        """
        return 'config/repohost/{}'.format(name)

    @staticmethod
    def repo_config(name):
        """Return the string path to the repo config 'name'.

        :name: string name of the new config, see CONFIG_NAME_REGEX
        :returns: the string relative path of the new file

        """
        return 'config/repository/{}'.format(name)

    @staticmethod
    def repo_try(name):
        """Return the string path to the 'try' file for the repo."""
        return "var/status/{}.try".format(name)

    @staticmethod
    def repo_ok(name):
        """Return the string path to the 'ok' file for the repo."""
        return "var/status/{}.ok".format(name)

    @staticmethod
    def repo(name):
        """Return the string path to repo 'name'."""
        return "var/repo/{}".format(name)
class Accessor(object):

    """Read and write the files of an arcyd instance directory.

    Config mutations are committed to the instance's own git repository
    so that changes are auditable.
    """

    def __init__(self, layout, path):
        """Attach to the arcyd instance at 'path'.

        :layout: a Layout describing relative paths within the instance
        :path: string path to the instance root (must contain .arcydroot)

        """
        self._layout = layout
        self._root = os.path.abspath(path)
        self._repo = phlsys_git.Repo(path)
        self._check_arcydroot()

    def _make_abspath(self, relative_path):
        """Return a string of the absolute path to the file in the layout.

        :relative_path: a string of the path relative to .arcydroot
        :returns: a string of the absolute path

        """
        return os.path.join(self._root, relative_path)

    def _check_arcydroot(self):
        # Refuse to operate on a directory that is not an arcyd root.
        arcydroot_path = self._make_abspath(self._layout.arcydroot)
        if not os.path.exists(arcydroot_path):
            raise Exception('did not find {}'.format(
                self._layout.arcydroot))

    def set_pid(self, pid):
        """Set the pid for the current arcyd instance.

        :pid: the integer pid of the current arcyd instance
        :returns: None

        """
        pid_path = self._make_abspath(self._layout.pid)
        phlsys_fs.write_text_file(pid_path, str(pid))

    def get_pid_or_none(self):
        """Return the pid for the current arcyd instance.

        :returns: the integer pid of the current arcyd instance or None

        """
        pid = None
        pid_path = self._make_abspath(self._layout.pid)
        if os.path.isfile(pid_path):
            with open(pid_path) as f:
                pid = int(f.read())
        return pid

    def _create_config(self, rel_path, content, message):
        """Create and commit a new config file.

        :rel_path: the string relative path to the config file
        :content: the string contents of the new config file
        :message: the string commit message for the file

        """
        # BUG FIX: previously this only *printed* the Error, so a dirty git
        # index would silently be committed along with the new config;
        # raise so the caller must resolve the staged changes first.
        if phlgit_diffindex.is_index_dirty(self._repo):
            raise Error("git index has staged changes")

        path = self._make_abspath(rel_path)

        if os.path.exists(path):
            raise Error("config already exists")

        phlsys_fs.write_text_file(path, content)
        self._repo('add', rel_path)
        phlgit_commit.index(self._repo, message)

    def _remove_config(self, rel_path, message):
        """Remove and commit the removal of an existing config file.

        :rel_path: the string relative path to the config file
        :message: the string commit message for the file

        """
        # BUG FIX: raise (not print) on a dirty index, as in _create_config.
        if phlgit_diffindex.is_index_dirty(self._repo):
            raise Error("git index has staged changes")

        path = self._make_abspath(rel_path)

        if not os.path.exists(path):
            raise Error("config doesn't exist: {}".format(rel_path))

        self._repo('rm', rel_path)
        phlgit_commit.index(self._repo, message)

    def create_root_config(self, content):
        """Create and commit the root config file.

        :content: the string content of the new config file
        :returns: None

        """
        rel_path = self._layout.root_config
        self._create_config(rel_path, content, 'Create root config')

    def create_phabricator_config(self, name, content):
        """Create a new phabricator config file.

        :name: string name of the new config, see CONFIG_NAME_REGEX
        :content: the string content of the new config file
        :returns: None

        """
        raise_if_config_name_not_valid(name)
        rel_path = self._layout.phabricator_config(name)
        self._create_config(
            rel_path, content, 'Add phabricator config: {}'.format(name))

    def get_phabricator_config_rel_path(self, name):
        """Return the string relative path of the phabricator config 'name'.

        Raise Exception if the config does not exist.

        :name: string name of the config, see CONFIG_NAME_REGEX
        :returns: the string relative path of the config file

        """
        rel_path = self._layout.phabricator_config(name)
        path = self._make_abspath(rel_path)

        if not os.path.isfile(path):
            raise Exception('{} has no phabricator config'.format(name))

        return rel_path

    def create_repohost_config(self, name, content):
        """Create a new repohost config file.

        :name: string name of the new config, see CONFIG_NAME_REGEX
        :content: the string content of the new config file
        :returns: None

        """
        raise_if_config_name_not_valid(name)
        rel_path = self._layout.repohost_config(name)
        self._create_config(
            rel_path, content, 'Add repohost config: {}'.format(name))

    def get_repohost_config_rel_path(self, name):
        """Return the string relative path of the repohost config 'name'.

        Raise Exception if the config does not exist.

        :name: string name of the config, see CONFIG_NAME_REGEX
        :returns: the string relative path of the config file

        """
        rel_path = self._layout.repohost_config(name)
        path = self._make_abspath(rel_path)

        if not os.path.isfile(path):
            raise Exception('{} has no repohost config'.format(name))

        return rel_path

    def create_repo_config(self, name, content):
        """Create a new repo config file.

        :name: string name of the new config, see CONFIG_NAME_REGEX
        :content: the string content of the new config file
        :returns: None

        """
        raise_if_config_name_not_valid(name)
        rel_path = self._layout.repo_config(name)
        self._create_config(
            rel_path, content, 'Add repo config: {}'.format(name))

    def remove_repo_config(self, name):
        """Remove an existing repo config file.

        :name: string name of the existing config, see CONFIG_NAME_REGEX
        :returns: None

        """
        rel_path = self._layout.repo_config(name)
        self._remove_config(
            rel_path, 'Remove repo config: {}'.format(name))

    def repo_name_list(self):
        """Return a list of string names of managed repositories.

        :returns: list of string

        """
        p = self.layout.repository_config_dir
        return [r for r in os.listdir(p) if r != 'README']

    def repo_config_path_list(self):
        """Return a list of string paths to repo configs.

        :returns: list of string

        """
        p = self.layout.repository_config_dir
        return [os.path.join(p, r) for r in os.listdir(p) if r != 'README']

    @contextlib.contextmanager
    def lockfile_context(self):
        """Context manager that holds the instance lockfile while active."""
        retry_context = phlsys_fs.lockfile_retry_context
        with retry_context(self.layout.lockfile, attempts=3, wait_secs=1):
            yield
        # TODO: raise some 'usage error' if the lockfile can't be acquired

    @property
    def layout(self):
        return self._layout
def make_default_accessor():
    """Return an Accessor for the current directory, using Layout.

    The current working directory must already be an initialised arcyd
    root (contain '.arcydroot'); otherwise Accessor's check raises.

    :returns: a new Accessor

    """
    return Accessor(Layout(), '.')
def initialise_here():
    """Return a new default Accessor after initialising the current directory.

    Creates the directory skeleton with README files and commits it as the
    first commit of a fresh git repository rooted here.

    :returns: a new Accessor, mounted at the current directory

    """
    layout = Layout()
    phlsys_subprocess.run('git', 'init')
    repo = phlsys_git.Repo('.')
    # create filesystem hierarchy
    phlsys_fs.write_text_file(layout.arcydroot, 'this dir is an arcydroot')
    phlsys_fs.write_text_file('README', _README)
    phlsys_fs.write_text_file('config/README', _CONFIG_README)
    phlsys_fs.write_text_file(
        'config/phabricator/README', _CONFIG_PHABRICATOR_README)
    phlsys_fs.write_text_file(
        'config/repository/README', _CONFIG_REPOSITORY_README)
    phlsys_fs.write_text_file('var/README', _VAR_README)
    phlsys_fs.write_text_file('var/repo/README', _VAR_REPO_README)
    phlsys_fs.write_text_file('var/log/README', _VAR_LOG_README)
    phlsys_fs.write_text_file('var/status/README', _VAR_STATUS_README)
    phlsys_fs.write_text_file('var/command/README', _VAR_COMMAND_README)
    phlsys_fs.write_text_file('var/run/README', _VAR_RUN_README)
    # NOTE(review): the 'var' READMEs are staged here before .gitignore
    # (written below) starts ignoring 'var', so they land in the initial
    # commit — presumably intentional, to document the layout; confirm.
    repo('add', '.')
    phlsys_fs.write_text_file('.gitignore', 'var\n')
    repo('add', '.')
    phlgit_commit.index(repo, 'Initialised new Arcyd instance')
    return Accessor(Layout(), '.')
def raise_if_config_name_not_valid(name):
    """Raise Error if the supplied 'name' is not a valid config name.

    :name: a string candidate config name
    :returns: None

    """
    # XXX: We could add some helpful hints as to why it's not valid in the
    #      error message.
    if is_config_name_valid(name):
        return
    raise Error("config name is invalid: {}".format(name))
def is_config_name_valid(name):
    """Return True if 'name' is valid for a config, False otherwise.

    A valid name matches CONFIG_NAME_REGEX: only lowercase a-z, digits,
    dash and underscore are allowed.

    Usage examples:
        >>> is_config_name_valid('my_phabricator99')
        True

        >>> is_config_name_valid('My_phabricator99')
        False

        >>> is_config_name_valid('my-phabricator99')
        True

        >>> is_config_name_valid('my_phabricator.99')
        False

    :name: a string candidate config name
    :returns: True or False

    """
    return bool(re.match(CONFIG_NAME_REGEX, name))
# -----------------------------------------------------------------------------
# Copyright (C) 2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| {
"repo_name": "valhallasw/phabricator-tools",
"path": "py/abd/abdt_fs.py",
"copies": "1",
"size": "15117",
"license": "apache-2.0",
"hash": 8660821039225938000,
"line_mean": 29.4778225806,
"line_max": 79,
"alpha_frac": 0.6275716081,
"autogenerated": false,
"ratio": 3.783979974968711,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49115515830687106,
"avg_score": null,
"num_lines": null
} |
# Arda Mavi
import numpy as np
from os import listdir
from skimage import io
from scipy.misc import imresize
from keras.preprocessing.image import array_to_img, img_to_array, load_img
def get_img(data_path):
    """Read the image at data_path and return it resized to 64x64 RGB."""
    # Getting image array from path:
    target_size = 64
    raw = io.imread(data_path)
    return imresize(raw, (target_size, target_size, 3))
def get_dataset(dataset_path='Data/Train_Data'):
    """Load the training set, building and caching .npy arrays if needed.

    Images are expected under dataset_path/<label>/<image>. Labels are
    encoded as consecutive integers in directory-listing order, then
    one-hot encoded.

    :dataset_path: directory containing one sub-directory per category
    :returns: (X, X_test, Y, Y_test) with pixel values scaled to [0, 1]
    """
    try:
        X = np.load('Data/npy_train_data/X.npy')
        Y = np.load('Data/npy_train_data/Y.npy')
    except (IOError, OSError):
        # Cache miss or unreadable cache: rebuild from the image files.
        # (Previously a bare 'except:', which also hid unrelated errors
        # such as KeyboardInterrupt and typos; catch only load failures.)
        labels = listdir(dataset_path)  # category sub-directory names
        print('Categories:\n', labels)
        len_datas = sum(
            len(listdir(dataset_path + '/' + label)) for label in labels)
        X = np.zeros((len_datas, 64, 64, 3), dtype='float64')
        Y = np.zeros(len_datas)
        count_data = 0
        for label_index, label in enumerate(labels):
            datas_path = dataset_path + '/' + label
            for data in listdir(datas_path):
                X[count_data] = get_img(datas_path + '/' + data)
                # Encode the label as its index in the category listing.
                Y[count_data] = label_index
                count_data += 1
        # One-hot encode the integer labels and cache both arrays.
        import keras
        Y = keras.utils.to_categorical(Y)
        import os
        if not os.path.exists('Data/npy_train_data/'):
            os.makedirs('Data/npy_train_data/')
        np.save('Data/npy_train_data/X.npy', X)
        np.save('Data/npy_train_data/Y.npy', Y)
    # Scale pixels to [0, 1]; the cached arrays hold raw 0-255 values.
    X /= 255.
    from sklearn.model_selection import train_test_split
    X, X_test, Y, Y_test = train_test_split(
        X, Y, test_size=0.1, random_state=42)
    return X, X_test, Y, Y_test
| {
"repo_name": "ardamavi/Dog-Cat-Classifier",
"path": "get_dataset.py",
"copies": "1",
"size": "1900",
"license": "apache-2.0",
"hash": 2631299432122438700,
"line_mean": 34.8490566038,
"line_max": 81,
"alpha_frac": 0.5684210526,
"autogenerated": false,
"ratio": 3.4234234234234235,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44918444760234233,
"avg_score": null,
"num_lines": null
} |
# Arda Mavi
import os
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
def save_model(model):
    """Persist a Keras model: architecture to JSON, weights to HDF5."""
    model_dir = 'Data/Model/'
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    # Architecture as JSON:
    with open("Data/Model/model.json", "w") as model_file:
        model_file.write(model.to_json())
    # serialize weights to HDF5
    model.save_weights("Data/Model/weights.h5")
    print('Model and weights saved')
    return
def get_model(num_classes=2):
    """Build and compile the CNN classifier.

    Three conv/relu/maxpool stages (32, 32, 64 filters) followed by a
    64-unit dense relu layer with dropout and a softmax output.
    """
    model = Sequential()

    # First conv stage declares the 64x64 RGB input shape.
    model.add(Conv2D(32, (3, 3), input_shape=(64, 64, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # Remaining conv stages, identical apart from the filter count.
    for n_filters in (32, 64):
        model.add(Conv2D(n_filters, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    return model
if __name__ == '__main__':
save_model(get_model())
| {
"repo_name": "ardamavi/Dog-Cat-Classifier",
"path": "get_model.py",
"copies": "1",
"size": "1283",
"license": "apache-2.0",
"hash": -1184265861308370700,
"line_mean": 25.7291666667,
"line_max": 94,
"alpha_frac": 0.6492595479,
"autogenerated": false,
"ratio": 3.1600985221674875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9295147326338269,
"avg_score": 0.0028421487458436165,
"num_lines": 48
} |
# Ard_to_py.py
# Interface between Arduino and Python
# <Chad Hobbs>
import serial
from graphics import *
def createwin():  # Build the basic window and greet the user
    """Create the 400x400 main window with a 0-100 coordinate grid."""
    window = GraphWin("Arduino Interface", 400, 400)
    window.setCoords(0, 0, 100, 100)
    window.setBackground("White")
    greeting = Text(Point(50, 90), "Here is Python and Arduinos together")
    greeting.draw(window)
    return window
def draw_main(win):
    """Poll the Arduino forever, redrawing the indicators each cycle."""
    while True:
        readings = get_data()
        print(readings)
        # Two LED-style buttons (on/off) and two size-scaled squares.
        draw_button(win, 33, 60, 'red', readings[0])
        draw_button(win, 66, 60, 'blue', readings[1])
        draw_square(win, 33, 23, readings[2])
        draw_square(win, 66, 23, readings[3])
def draw_button(win, X, Y, color, on):
    """Draw a radius-10 circle at (X, Y): 'color' when on, white when off."""
    fill = color if on != 0 else 'white'
    indicator = Circle(Point(X, Y), 10)
    indicator.setOutline(fill)
    indicator.setFill(fill)
    indicator.draw(win)
    return
def draw_square(win, X, Y, size):
    """Draw a black square of side 'size' centred at (X, Y).

    Four white rectangles are drawn around the square to erase any larger
    square previously drawn in the same 42x42 cell.

    :win: the GraphWin to draw into
    :X:, :Y: centre of the cell in window coordinates
    :size: side length; halved first so the square spans +/- size
    """
    if size > 1:
        size = size / 2
    square = Rectangle(Point(X - size, Y - size), Point(X + size, Y + size))
    square.setOutline("black")
    square.setFill("black")
    square.draw(win)
    # White masking rectangles (top, bottom, left, right) — previously four
    # copy-pasted outline/fill/draw stanzas, now a single loop.
    masks = [
        Rectangle(Point(X - 21, Y + size + 1), Point(X + 21, Y + 21)),
        Rectangle(Point(X - 21, Y - 21), Point(X + 21, Y - size - 1)),
        Rectangle(Point(X - 21, Y - 21), Point(X - size - 1, Y + 21)),
        Rectangle(Point(X + size + 1, Y - 21), Point(X + 21, Y + 21)),
    ]
    for mask in masks:
        mask.setOutline("white")
        mask.setFill("white")
        mask.draw(win)
    return
def get_data():
    """Read one line of four dash-separated integers from the Arduino.

    Assumes the line looks like "<junk> - v1 - v2 - v3 - v4" — TODO
    confirm against the Arduino sketch's output format.

    :returns: list of four ints [button1, button2, size1, size2]
    """
    ser = serial.Serial(3, 9600)
    try:
        data = ser.readline()
    finally:
        ser.close()  # release the COM port between polls (was leaked)
    # Decode the serial bytes; str(bytes) would embed the b'...' repr and
    # break int() parsing below.
    text = data.decode('ascii', 'replace') if isinstance(data, bytes) else str(data)
    # BUG FIX: the original called .strip(' ') on the *list* returned by
    # str.split('-'), which raises AttributeError; strip each field instead.
    fields = [field.strip() for field in text.split('-')]
    print(fields)
    return [int(fields[i]) for i in range(1, 5)]
def main():
    # Entry point: build the window, then poll the Arduino and redraw
    # forever (draw_main never returns).
    win = createwin()
    draw_main(win)
main()
| {
"repo_name": "itsallvoodoo/random",
"path": "Arduino-Python-Interface/Ard_to_py.py",
"copies": "1",
"size": "2129",
"license": "apache-2.0",
"hash": 7293727885710677000,
"line_mean": 24.9493670886,
"line_max": 73,
"alpha_frac": 0.576326914,
"autogenerated": false,
"ratio": 3.0855072463768116,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41618341603768116,
"avg_score": null,
"num_lines": null
} |
#Arduino connection, Adafruit16C connection and Servo test script
#
from time import sleep
# Serial port the Arduino is plugged into.
port = "COM4"
arduino = Runtime.start("arduino","Arduino")
arduino.connect(port)
# 16-channel PWM servo driver on the Arduino's I2C bus 1 at address 0x40.
adaFruit16c = Runtime.createAndStart("AdaFruit16C","Adafruit16CServoDriver");
adaFruit16c.setController("arduino","1","0x40");
adaFruit16c.setPWMFreq(0,50);  # 50 Hz is the standard analog-servo frame rate
servoPin1 = 15
servoPin2 = 2
servo01 = Runtime.start("servo01","Servo")
servo02 = Runtime.start("servo02","Servo")
# attach(controller, pin, initial position, velocity); -1 = unrestricted speed
servo01.attach(adaFruit16c,servoPin1,90,-1);
servo02.attach(adaFruit16c,servoPin2,90,-1);
#servo02.sync(servo01)
def servoMoveTo(restPos,delta):
    """Move servo01 to restPos + delta and publish the new state to the GUI."""
    servo01.moveTo(restPos + delta)
    #servo02.moveTo(restPos + delta)
    servo01.broadcastState()
    #servo02.broadcastState()
#servo02.addServoEventListener(servo01)
#servo02.eventsEnabled(True)
#servo01.eventsEnabled(True)
#servo02.moveTo(90)
# Center position for the sweep; delta is a leftover default, unused at module level.
restPos = 45
delta = 0
def moveservos1():
    """Sweep servo01 around restPos four times via servoMoveTo, 5 degrees a step."""
    for sweep in range(1, 5):
        # up from 10 to 155, then back down from 160 to 15
        for leg in (range(10, 160, 5), range(160, 10, -5)):
            for offset in leg:
                servoMoveTo(restPos, offset)
                # two quarter-second pauses so the servo can reach the target
                # and the GUI can catch up
                sleep(0.25)
                sleep(0.25)
def moveservos2():
    """Sweep servo01 directly (no helper) between 20 and 155 degrees, ten passes."""
    for cycle in range(1, 10):
        for target in list(range(20, 160, 5)) + list(range(160, 20, -5)):
            servo01.moveTo(target)
            sleep(0.25)
            servo01.broadcastState()  # refresh the servo position shown in the GUI
            sleep(0.5)


moveservos2()
| {
"repo_name": "MyRobotLab/pyrobotlab",
"path": "home/CheekyMonkey/arduino-servo test script.py",
"copies": "1",
"size": "1576",
"license": "apache-2.0",
"hash": 6507070467604140000,
"line_mean": 20.0133333333,
"line_max": 77,
"alpha_frac": 0.7100253807,
"autogenerated": false,
"ratio": 2.5337620578778135,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8385704361328523,
"avg_score": 0.07161661544985796,
"num_lines": 75
} |
"""Arduino-like library for Python on BeagleBone"""
import time
HIGH = "HIGH"
LOW = "LOW"
OUTPUT = "OUTPUT"
INPUT = "INPUT"
pinList = [] # needed for unexport()
startTime = time.time() # needed for millis()
digitalPinDef = {
"P8.3": 38,
"P8.4": 39,
"P8.5": 34,
"P8.6": 35,
"P8.7": 66,
"P8.8": 67,
"P8.9": 69,
"P8.10": 68,
"P8.11": 45,
"P8.12": 44,
"P8.13": 23,
"P8.14": 26,
"P8.15": 47,
"P8.16": 46,
"P8.17": 27,
"P8.18": 65,
"P8.19": 22,
"P8.20": 63,
"P8.21": 62,
"P8.22": 37,
"P8.23": 36,
"P8.24": 33,
"P8.25": 32,
"P8.26": 61,
"P8.27": 86,
"P8.28": 88,
"P8.29": 87,
"P8.30": 89,
"P8.31": 10,
"P8.32": 11,
"P8.33": 9,
"P8.34": 81,
"P8.35": 8,
"P8.36": 80,
"P8.37": 78,
"P8.38": 79,
"P8.39": 76,
"P8.40": 77,
"P8.41": 74,
"P8.42": 75,
"P8.43": 72,
"P8.44": 73,
"P8.45": 70,
"P8.46": 71,
"P9.11": 30,
"P9.12": 60,
"P9.13": 31,
"P9.14": 50,
"P9.15": 48,
"P9.16": 51,
"P9.17": 5,
"P9.18": 4,
"P9.19": 13,
"P9.20": 12,
"P9.21": 3,
"P9.22": 2,
"P9.23": 49,
"P9.24": 15,
"P9.25": 117,
"P9.26": 14,
"P9.27": 115,
"P9.28": 113,
"P9.29": 111,
"P9.30": 112,
"P9.31": 110,
"P9.41": 20,
"P9.42": 7}
pinMuxDef = {
"P8.3": "gpmc_ad6",
"P8.4": "gpmc_ad7",
"P8.5": "gpmc_ad2",
"P8.6": "gpmc_ad3",
"P8.7": "gpmc_advn_ale",
"P8.8": "gpmc_oen_ren",
"P8.9": "gpmc_ben0_cle",
"P8.10": "gpmc_wen",
"P8.11": "gpmc_ad13",
"P8.12": "gpmc_ad12",
"P8.13": "gpmc_ad9",
"P8.14": "gpmc_ad10",
"P8.15": "gpmc_ad15",
"P8.16": "gpmc_ad14",
"P8.17": "gpmc_ad11",
"P8.18": "gpmc_clk",
"P8.19": "gpmc_ad8",
"P8.20": "gpmc_csn2",
"P8.21": "gpmc_csn1",
"P8.22": "gpmc_ad5",
"P8.23": "gpmc_ad4",
"P8.24": "gpmc_ad1",
"P8.25": "gpmc_ad0",
"P8.26": "gpmc_csn0",
"P8.27": "lcd_vsync",
"P8.28": "lcd_pclk",
"P8.29": "lcd_hsync",
"P8.30": "lcd_ac_bias_en",
"P8.31": "lcd_data14",
"P8.32": "lcd_data15",
"P8.33": "lcd_data13",
"P8.34": "lcd_data11",
"P8.35": "lcd_data12",
"P8.36": "lcd_data10",
"P8.37": "lcd_data8",
"P8.38": "lcd_data9",
"P8.39": "lcd_data6",
"P8.40": "lcd_data7",
"P8.41": "lcd_data4",
"P8.42": "lcd_data5",
"P8.43": "lcd_data2",
"P8.44": "lcd_data3",
"P8.45": "lcd_data0",
"P8.46": "lcd_data1",
"P9.11": "gpmc_wait0",
"P9.12": "gpmc_ben1",
"P9.13": "gpmc_wpn",
"P9.14": "gpmc_a2",
"P9.15": "gpmc_a0",
"P9.16": "gpmc_a3",
"P9.17": "spi0_cs0",
"P9.18": "spi0_d1",
"P9.19": "uart1_rtsn",
"P9.20": "uart1_ctsn",
"P9.21": "spi0_d0",
"P9.22": "spi0_sclk",
"P9.23": "gpmc_a1",
"P9.24": "uart1_txd",
"P9.25": "mcasp0_ahclkx",
"P9.26": "uart1_rxd",
"P9.27": "mcasp0_fsr",
"P9.28": "mcasp0_ahclkr",
"P9.29": "mcasp0_fsx",
"P9.30": "mcasp0_axr0",
"P9.31": "mcasp0_ahclkx",
"P9.41": "xdma_event_intr0",
"P9.42": "ecap0_in_pwm0_out"}
analogPinDef = {
"P9.33": "ain4",
"P9.35": "ain6",
"P9.36": "ain5",
"P9.37": "ain2",
"P9.38": "ain3",
"P9.39": "ain0",
"P9.40": "ain1"}
def pinMode(pin, direction):
"""pinMode(pin, direction) opens (exports) a pin for use, sets the pinmux, and
sets the direction"""
if pin in digitalPinDef: # if we know how to refer to the pin:
fw = file("/sys/class/gpio/export", "w")
fw.write("%d" % (digitalPinDef[pin])) # write the pin to export to userspace
fw.close()
fileName = "/sys/class/gpio/gpio%d/direction" % (digitalPinDef[pin])
fw = file(fileName, "w")
if direction == INPUT:
fw.write("in") # write the diretion
muxfile = file("/sys/kernel/debug/omap_mux/" + pinMuxDef[pin], "w") # open its mux file
muxfile.write("2F") # put it into mode 7 input, no pulldown
muxfile.close
else:
fw.write("out") # write the diretion
muxfile = file("/sys/kernel/debug/omap_mux/" + pinMuxDef[pin], "w") # open its mux file
muxfile.write("7") # put it into mode 7 output)
muxfile.close
fw.close()
pinList.append(digitalPinDef[pin]) # Keep a list of exported pins so that we can unexport them.
else: #if we don't know how to refer to a pin:
print "pinMode error: Pin " + pin + " is not defined as a digital I/O pin in the pin definition."
def digitalWrite(pin, status):
"""digitalWrite(pin, status) sets a pin HIGH or LOW"""
if digitalPinDef[pin] in pinList: # check if we exported the pin in pinMode
fileName = "/sys/class/gpio/gpio%d/value" % (digitalPinDef[pin])
fw = file(fileName, "w") # open the pin's value file for writing
if status == HIGH:
fw.write("1") # Set the pin HIGH by writing 1 to its value file
if status == LOW:
fw.write("0") # Set the pin LOW by writing 0 to its value file
fw.close()
else: # if we haven't exported the pin, print an error:
print "digitalWrite error: Pin mode for " + pin + " has not been set. Use pinMode(pin, INPUT) first."
def digitalRead(pin):
"""digitalRead(pin) returns HIGH or LOW for a given pin."""
if digitalPinDef[pin] in pinList: # check if we exported the pin in pinMode
fileName = "/sys/class/gpio/gpio%d/value" % (digitalPinDef[pin])
fw = file(fileName, "r") # open the pin's value file for reading
inData = fw.read()
fw.close()
if inData == "0\n": # a 0 means it's low
return LOW
if inData == "1\n": # a 1 means it's high
return HIGH
else: # if we haven't exported the pin, print an error (not working for some reason):
print "digitalRead error: Pin mode for " + pin + " has not been set. Use pinMode(pin, OUTPUT) first."
return -1;
def analogRead(pin): #under construction!
"""analogRead(pin) returns analog value for a given pin."""
if pin in analogPinDef:
fileName = "/sys/devices/platform/tsc/" + (analogPinDef[pin])
fw = file(fileName, "r")
data = fw.read()
fw.close()
return data
else:
print "analogRead error: Pin " + pin + " is not defined as an analog in pin in the pin definition."
return -1;
def pinUnexport(pin): # helper function for cleanup()
    """Close (unexport) a sysfs GPIO pin; usually called by cleanup() at exit."""
    unexport = file("/sys/class/gpio/unexport", "w")
    unexport.write("%d" % (pin))
    unexport.close()
def cleanup():
""" takes care of stepping through pins that were set with
pinMode and unExports them. Prints result"""
def find_key(dic, val):
return [k for k, v in dic.iteritems() if v == val][0] # helper function for getting friendly name of pin
print ""
print "Cleaning up. Unexporting the following pins:",
for pin in pinList: # for each pin we exported...
pinUnexport(pin) # ...unexport it...
print find_key(digitalPinDef, pin), #...and print the friendly name of the pin
def delay(millis):
    """Block the caller for the given number of milliseconds."""
    seconds = millis / 1000.0
    time.sleep(seconds)
def millis():
    """Return the whole milliseconds elapsed since the module was imported."""
    elapsed = time.time() - startTime
    return int(elapsed * 1000)
def run(setup, main): # from PyBBIO by Alexander Hiam - ahiam@marlboro.edu - www.alexanderhiam.com https://github.com/alexanderhiam/PyBBIO
    """ The main loop; must be passed a setup and a main function.
    First the setup function will be called once, then the main
    function wil be continuously until a stop signal is raised,
    e.g. CTRL-C or a call to the stop() function from within the
    main function. """
    try:
        setup()
        while (True):
            main()
    except KeyboardInterrupt:
        # Manual exit signal, clean up and exit happy
        cleanup()
    except Exception, e:
        # Something may have gone wrong, clean up and print exception
        # (pins stay exported otherwise, blocking the next run)
        cleanup()
        print e
| {
"repo_name": "mrichardson23/mrBBIO",
"path": "mrbbio.py",
"copies": "2",
"size": "7656",
"license": "apache-2.0",
"hash": 6397321419360216000,
"line_mean": 27.7819548872,
"line_max": 138,
"alpha_frac": 0.5902560084,
"autogenerated": false,
"ratio": 2.212077434267553,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3802333442667553,
"avg_score": null,
"num_lines": null
} |
# Try the usual Linux Arduino device first; fall back to asking the user.
try:
    PORT = '/dev/ttyACM0'
    board = pyfirmata.Arduino(PORT)
except:
    PORT = raw_input("Enter full Arduino device location (ie: 'dev/ttyUSB0' etc): ")
    board = pyfirmata.Arduino(PORT)

### SET PIN NUMBER ###
# Digital pin driving the LED/buzzer used to "transmit" the morse code.
pin = int(raw_input('Enter Arduino digital pin number used: '))
def program():
### DELAY FACTORS ###
delay = 0.15 # Length of time LED between inter elemental lighting (dits/dahs in a single letter)
delay_factor = 1 # Speed up or slow down entire program by set percentage (Default: 1)
delay_intraletter = delay * delay_factor * 3 # Length of time between letters (standard morse = 3x inter element delay)
delay_interword = delay * delay_factor * 7 # Length of time between words (standard morse = 7x inter element delay)
delay_dit = delay * delay_factor # Length of illumination/buzz for dot/dit (.)
delay_dah = delay_intraletter # Lenght of illumination/buzz for dash/dah (-)
### SELECT WORD ###
user_list = [] # Blank
user_input = raw_input("\nEnter word to convert to morse code: ").lower() # Take user input, convert to lowercase
### CONVERT USER INPUT TO MORSE CODE ###
for x in user_input:
user_list.extend(morse.get(x))
print user_list
print '\n Creating morse message now...'
user_list = ''.join(user_list)
### 'TRANSMIT' MORSE CODE ###
for l in user_list:
if l == '.':
board.pass_time(delay_intraletter)
board.digital[pin].write(1)
print 'TRANSMITTING: .'
board.pass_time(delay_dit)
board.digital[pin].write(0)
elif l == '-':
board.pass_time(delay_intraletter)
board.digital[pin].write(1)
print 'TRANSMITTING: -'
board.pass_time(delay_dah)
board.digital[pin].write(0)
elif l == ' ':
board.pass_time(delay_interword)
print 'NEW WORD'
else:
pass
program()
| {
"repo_name": "joshkouri/morse",
"path": "morse.py",
"copies": "1",
"size": "3706",
"license": "mit",
"hash": -8937228539966866000,
"line_mean": 28.8870967742,
"line_max": 122,
"alpha_frac": 0.5936319482,
"autogenerated": false,
"ratio": 3.2942222222222224,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43878541704222224,
"avg_score": null,
"num_lines": null
} |
import serial
import os
import time
def timestamp():
    """Return the current wall-clock time formatted as "[HH:MM:SS]"."""
    clock = time.ctime().split()[3]
    return "[" + clock + "]"
def cleanSerialData(data):
    """Decode a raw serial line and drop the trailing LF then CR."""
    text = data.decode('utf-8')
    # strip newline(s) first, then the carriage return(s) that precede them
    text = text.rstrip("\n")
    return text.rstrip("\r")
if(__name__ == "__main__"):
while(1):
try:
arduinoSerialData = serial.Serial('com6', 9600)
break
except serial.serialutil.SerialException:
pass
while(1):
mode = ""
try:
if(arduinoSerialData.inWaiting() > 0):
mode = cleanSerialData(arduinoSerialData.readline())
except serial.serialutil.SerialException:
try:
arduinoSerialData = serial.Serial('com6', 9600)
except serial.serialutil.SerialException:
pass
if(mode == "fetch"):
print(timestamp()+" fetching data")
data = ""
while(1):
if(arduinoSerialData.inWaiting() > 0):
data = cleanSerialData(arduinoSerialData.readline())
data = data.split(" ")
file = open("settings.txt", "r")
file2 = open("temp.txt", "w")
flag = 0
count = -1
if(data[0] == "1"):
for line in file:
if(count!=-1):
if(count == 1):
file2.write("Present Moisture : "+data[1]+"\n")
count = -1
continue
count+=1
if(flag == 0 and line[:9] == "Node ID :"):
flag = 1
count = 0
file2.write(line)
file.close()
file2.close()
os.remove("settings.txt")
os.rename("temp.txt", "settings.txt")
else:
flag2 = 0
for line in file:
if(count!=-1):
if(count == 1):
file2.write("Present Moisture : "+data[1]+"\n")
count = -1
continue
count+=1
if(flag == 0 and line[:9] == "Node ID :"):
if(flag2 == 0):
flag2 = 1
else:
flag = 1
count = 0
file2.write(line)
file.close()
file2.close()
os.remove("settings.txt")
os.rename("temp.txt", "settings.txt")
print(timestamp()+" fetched and uploaded to settings.txt")
mode = ""
break
elif(mode == "throw"):
print(timestamp()+" sending settings.txt data to arduino")
file = open("settings.txt", "r")
line = file.readline()
line = line.rstrip("\n")
time.sleep(1.1)
while(1):
try:
arduinoSerialData.write(line[line.rfind(" ")+1:].encode())
break
except:
pass
line = file.readline()
line = line.rstrip("\n")
time.sleep(1.1)
while(1):
try:
arduinoSerialData.write(line[line.rfind(" ")+1:].encode())
break
except:
pass
line = file.readline()
line = file.readline()
line = line.rstrip("\n")
time.sleep(1.1)
while(1):
try:
arduinoSerialData.write(line[line.rfind(" ")+1:].encode())
break
except:
pass
line = file.readline()
line = file.readline()
line = file.readline()
line = line.rstrip("\n")
time.sleep(1.1)
while(1):
try:
arduinoSerialData.write(line[line.rfind(" ")+1:].encode())
break
except:
pass
time.sleep(1.1)
file.close()
print(timestamp()+" sent data")
mode = ""
| {
"repo_name": "snehalgupta/Intelliwater",
"path": "arduino.py",
"copies": "1",
"size": "3336",
"license": "apache-2.0",
"hash": 8205386344215470000,
"line_mean": 20.5405405405,
"line_max": 63,
"alpha_frac": 0.5317745803,
"autogenerated": false,
"ratio": 2.9865711727842434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8723501127419231,
"avg_score": 0.058968925133002434,
"num_lines": 148
} |
arduino = Runtime.createAndStart("arduino","Arduino")
arduino.connect("COM3")
arduino.setSampleRate(30000)
readAnalogPin = 0
arduino.analogReadPollingStart(readAnalogPin)
arduino.addListener("publishPin", "python", "input")
pid = Runtime.createAndStart("pid","PID")
pid.setMode(1)
#set the range of the "correction"
pid.setOutputRange(-5, 5)
#set Kp, kd, ki kp = gain, how strong it react kd = how fast it react ki= take care of the sum of errors (differences between target and actual value) in the time
pid.setPID(10.0, 0, 1.0)
pid.setControllerDirection(0)
#set a starting analog value, which will pilot the MOSFET on the Gate
heaterValue = 512
def input():
    """Pin-event callback: run one PID step and drive the heater MOSFET on pin 4."""
    thermistorPin = msg_arduino_publishPin.data[0]
    print 'thermistor value is', thermistorPin.value
    global heaterValue
    global futureHeaterValue
    #target of temperature or target value
    pid.setSetpoint(150)
    #input value
    pid.setInput(thermistorPin.value)
    pid.compute()
    correction = pid.getOutput()
    futureHeaterValue = (heaterValue + correction)
    # only accept corrections that keep the drive inside the 10-bit PWM range
    if (futureHeaterValue < 1024) and (futureHeaterValue >0):
        heaterValue = futureHeaterValue
        arduino.analogWrite(4,futureHeaterValue)
        print heaterValue
    else :
        arduino.analogWrite(4,heaterValue)
        print heaterValue
| {
"repo_name": "DarkRebel/myrobotlab",
"path": "src/resource/Python/examples/PID.temperatureControl.py",
"copies": "6",
"size": "1240",
"license": "apache-2.0",
"hash": -5027350029327452000,
"line_mean": 28.5238095238,
"line_max": 162,
"alpha_frac": 0.7693548387,
"autogenerated": false,
"ratio": 2.9879518072289155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03480179835551919,
"num_lines": 42
} |
arduino = Runtime.createAndStart("arduino","Arduino")
arduino.connect("COM3")
# arduino.setSampleRate(30000)
readAnalogPin = 0
arduino.arduino.enablePin(readAnalogPin)
arduino.addListener("publishPin", "python", "input")
pid = Runtime.createAndStart("pid","Pid")
pid.setMode(1)
#set the range of the "correction"
pid.setOutputRange(-5, 5)
#set Kp, kd, ki kp = gain, how strong it react kd = how fast it react ki= take care of the sum of errors (differences between target and actual value) in the time
pid.setPID(10.0, 0, 1.0)
pid.setControllerDirection(0)
#set a starting analog value, which will pilot the MOSFET on the Gate
heaterValue = 512
def input():
thermistorPin = msg_arduino_publishPin.data[0]
print 'thermistor value is', thermistorPin.value
global heaterValue
global futureHeaterValue
#target of temperature or target value
pid.setSetpoint(150)
#input value
pid.setInput(thermistorPin.value)
pid.compute()
correction = pid.getOutput()
futureHeaterValue = (heaterValue + correction)
if (futureHeaterValue < 1024) and (futureHeaterValue >0):
heaterValue = futureHeaterValue
arduino.analogWrite(4,futureHeaterValue)
print heaterValue
else :
arduino.analogWrite(4,heaterValue)
print heaterValue
| {
"repo_name": "MyRobotLab/pyrobotlab",
"path": "service/PID2.py",
"copies": "2",
"size": "1237",
"license": "apache-2.0",
"hash": 360294456449370100,
"line_mean": 28.4523809524,
"line_max": 162,
"alpha_frac": 0.7663702506,
"autogenerated": false,
"ratio": 2.9664268585131897,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9558788117335595,
"avg_score": 0.03480179835551919,
"num_lines": 42
} |
arduino = Runtime.createAndStart("arduino","Arduino")
arduino.connect("COM9")
mouth = Runtime.create("mouth","Speech")
s8 = Runtime.createAndStart("s8","Servo")
s9 = Runtime.createAndStart("s9","Servo")
s10 = Runtime.createAndStart("s10","Servo")
s11 = Runtime.createAndStart("s11","Servo")
s13 = Runtime.createAndStart("s13","Servo")
s14 = Runtime.createAndStart("s14","Servo")
# s15 = Runtime.createAndStart("s15","Servo")
s16 = Runtime.createAndStart("s16","Servo")
s17 = Runtime.createAndStart("s17","Servo")
# s34 = Runtime.createAndStart("s43","Servo")
# s35 = Runtime.createAndStart("s35","Servo")
# s30 = Runtime.createAndStart("s30","Servo")
# s31 = Runtime.createAndStart("s31","Servo")
# s36 = Runtime.createAndStart("s36","Servo")
# s37 = Runtime.createAndStart("s37","Servo")
# s38 = Runtime.createAndStart("s38","Servo")
# s39 = Runtime.createAndStart("s39","Servo")
# s42 = Runtime.createAndStart("s42","Servo")
# s43 = Runtime.createAndStart("s43","Servo")
# s26 = Runtime.createAndStart("s26","Servo")
s8.setRest(12) # Left Elbow
s9.setRest(75) # left arm Turn
s10.setRest(103) # left Shaulder up
s11.setRest(123) # Left Omniplate
s13.setRest(81) # Neck Turn
s14.setRest(93) # Right Arm Up
# s15.setRest(63) # Right Omniplate
s16.setRest(79) # Right Arm Turn
s17.setRest(138) # Right Elbow
# s34.setRest(66) # Left Hip
# s35.setRest(126) # Right Hip
# s30.setRest(97) # Left Leg Turn
# s31.setRest(120) # Right Leg Turn
# s36.setRest(54) # Left Leg Up
# s37.setRest(46) # Right Leg up
# s38.setRest(75) # Knee Left
# s39.setRest(75) # Knee Right
# s42.setRest(132) # Ankle Left
# s43.setRest(89) # Ankle Right
# s26.setRest(71) # Mouth
s8.attach("arduino",8)
s9.attach("arduino",9)
s10.attach("arduino",10)
s11.attach("arduino",11)
s13.attach("arduino",13)
s17.attach("ardiuno",17)
s16.attach("ardiuno",16)
s14.attach("ardiuno",14)
def rest():
    """Send every attached joint back to its configured rest position."""
    # same order as the original hand-written calls
    for servo in (s8, s9, s10, s13, s11, s17, s16, s14):
        servo.rest()
def wave():
    """Raise the left arm, oscillate the elbow to wave, then lower it."""
    # (servo, target position, pause in seconds after the move)
    motion = [
        (s8, 9, 0), (s9, 80, 0), (s8, 7, 0), (s8, 105, 0),
        (s9, 157, 0), (s10, 6, 0),
        (s8, 149, 0.4), (s8, 60, 0.4), (s8, 149, 0.4), (s8, 60, 0.4),
        (s8, 135, 0), (s10, 102, 0), (s9, 68, 0), (s8, 1, 0),
    ]
    for servo, position, pause in motion:
        servo.moveTo(position)
        if pause:
            sleep(pause)
def bye():
    """Raise the right arm and wave goodbye, then return it to rest posture."""
    # (servo, target position, pause in seconds after the move)
    motion = [
        (s17, 139, 0), (s16, 81, 0), (s17, 53, 0), (s16, 157, 0),
        (s14, 0, 0), (s17, 5, 0),
        (s17, 74, 0.4), (s17, 1, 0.4), (s17, 74, 0.4), (s17, 1, 0.4),
        (s17, 32, 0), (s14, 98, 0), (s16, 83, 0), (s17, 136, 0),
    ]
    for servo, position, pause in motion:
        servo.moveTo(position)
        if pause:
            sleep(pause)
def attach():
    """Energize every servo so the arms and neck hold their positions."""
    for servo in (s8, s9, s10, s11, s13, s14, s16, s17):
        servo.attach()
def detach():
    """De-energize every servo so the joints go limp."""
    for servo in (s8, s9, s10, s11, s13, s14, s16, s17):
        servo.detach()
# Run the full talk-and-wave demo routine three times.
# NOTE(review): indentation was flattened in this copy - the whole body below
# is assumed to sit inside the for loop; confirm against the original file.
for x in range(0, 3):
    attach()
    # do a gesture
    rest()
    mouth.speakBlocking("I want to show you what i can do")
    mouth.speakBlocking("I am the new Amby bot made by Adolph")
    mouth.speakBlocking("Hello every one")
    wave()
    sleep(1)
    rest()
    sleep(1)
    # wait for a second
    sleep(1)
    # do another gesture
    mouth.speakBlocking("I think you are going to like me")
    s13.moveTo(36) # Neck Turn
    mouth.speakBlocking("I am alive")
    sleep(0.5)
    s13.moveTo(109) # Neck Turn
    sleep(0.5)
    mouth.speakBlocking("I am alive thrue mirobotlab")
    s13.moveTo(62) # Neck Turn
    sleep(0.5)
    mouth.speakBlocking("I like you all")
    s13.moveTo(36) # Neck Turn
    sleep(0.2)
    s13.moveTo(109) # Neck Turn
    sleep(0.2)
    s13.rest()
    mouth.speakBlocking("You all are nice")
    mouth.speakBlocking("Bye Bye Guys")
    wave()
    sleep(1)
    rest()
    sleep(1)
| {
"repo_name": "MyRobotLab/pyrobotlab",
"path": "home/AdolphSmith/AmbyByeBye.py",
"copies": "2",
"size": "3745",
"license": "apache-2.0",
"hash": 5931850001320249000,
"line_mean": 20.9005847953,
"line_max": 59,
"alpha_frac": 0.6929238985,
"autogenerated": false,
"ratio": 2.336244541484716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40291684399847166,
"avg_score": null,
"num_lines": null
} |
arduino = Runtime.createAndStart("arduino","Arduino")
joystick = runtime.createAndStart("joystick","Joystick")
hand = Runtime.createAndStart("hand","Servo")
arduino.connect("COM3", 57600, 8, 1, 0)
sleep(4)
arduino.attach(hand.getName() , 2)
b = 100
print b
hand.moveTo(b)
def x():
global b
x = msg_joystick_XAxisRaw.data[0]
print x
if (x == 1):
b += 1
print b
hand.moveTo(b)
elif (x == -1):
b -= 1
print b
hand.moveTo(b)
return
def a():
# the API is 0 based cause arrays are 0 based - but when I count button I start with 1
# so now buttons start @ 1 msg_joystick_button1 = button1 - its the "right" thing to do ...
# anyway Alessandruino said we are men not machines - so I will make it manly and not sissy machine !
a = msg_joystick_button1.data[0]
print a
if (a == 1):
print 'button pressed'
elif ( a == 0):
print 'button not pressed'
#create a message route from joy to python so we can listen for button
# x() receives X-axis ticks, a() receives button-1 state changes
joystick.addListener("XAxisRaw", python.name, "x")
joystick.addListener("button1", python.name, "a")
| {
"repo_name": "mecax/pyrobotlab",
"path": "toSort/Joystick.robotHand.py",
"copies": "5",
"size": "1117",
"license": "apache-2.0",
"hash": 3949608159274851300,
"line_mean": 27.641025641,
"line_max": 105,
"alpha_frac": 0.6392121755,
"autogenerated": false,
"ratio": 3.1114206128133706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.04738404090070801,
"num_lines": 39
} |
arduino = Runtime.createAndStart("arduino","Arduino")
arduino.connect("COM5")
# arduino.setSampleRate(30000)
readAnalogPin = 0
arduino.enablePin(readAnalogPin)
arduino.addListener("publishPin", "python", "input")
pid = Runtime.start("pid","Pid")
#set Kp, kd, ki kp = gain, how strong it react kd = how fast it react ki= take care of the sum of errors (differences between target and actual value) in the time
pid.setPID("x", 10.0, 0, 1.0)
pid.setMode("x", 1)
#set the range of the "correction"
pid.setOutputRange("x", -5, 5)
# pid.setControllerDirection("x", 0)
#set a starting analog value, which will pilot the MOSFET on the Gate
heaterValue = 512
def onPin(data):
thermistorPin = data
print 'thermistor value is', thermistorPin.value
global heaterValue
global futureHeaterValue
#target of temperature or target value
pid.setSetpoint(150)
#input value
pid.setInput(thermistorPin.value)
pid.compute()
correction = pid.getOutput()
futureHeaterValue = (heaterValue + correction)
if (futureHeaterValue < 1024) and (futureHeaterValue >0):
heaterValue = futureHeaterValue
arduino.analogWrite(4,futureHeaterValue)
print heaterValue
else :
arduino.analogWrite(4,heaterValue)
print heaterValue
| {
"repo_name": "MyRobotLab/pyrobotlab",
"path": "service/Pid.py",
"copies": "2",
"size": "1266",
"license": "apache-2.0",
"hash": -8677735712170527000,
"line_mean": 26.7727272727,
"line_max": 162,
"alpha_frac": 0.7259083728,
"autogenerated": false,
"ratio": 2.99290780141844,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4718816174218439,
"avg_score": null,
"num_lines": null
} |
# Arduino-Windows2x.py
#
# DEMO Arduino Diecimila Board and below, serial access, using standard
# Python and Windows 32 bit installs.
#
# This is an experimental idea only to test the Arduino Diecimila
# development board under Python 2.2.x to 2.6.x; (assume Python 2.6.x).
# (It is assumed that the Python install uses the default folders.)
# This working idea is copyright, (C)2008, B.Walker, G0LCU.
#
# Pyserial is NOT needed AT ALL for this to work.
#
# NOW issued entirely as Public Domain. You may do with it as you please...
#
# Copy this 'Arduino-Windows2x.py' file into the 'C:\Python26\Lib\' folder
# and you will be ready to roll... ;-)
#
# To run type:-
# >>> execfile("C:\\Python26\\Lib\\Arduino-Windows2x.py")<RETURN/ENTER>
# OR......
# >>> import Arduino-Windows2x<RETURN/ENTER>
#
# And away you go...
#
# Press ~Ctrl C~ to QUIT, OR, set input to maximum of 5V, i.e. 255.
# Do any imports as required.
import os
# Start off with a basic cleared screen...
os.system('CLS')  # Windows-only console clear
# The program proper.
def main():
print
print ' Arduino Diecimila Dev Board access demonsration Python 2.x.x code.'
print ' Original idea copyright, (C)2008, B.Walker, G0LCU.'
print ' Press ~Ctrl C~ to QUIT.'
print
# This is set up for my COM(n) port on this old P IV machine.
# You WILL have to change it to suit the COM port number generated
# by your particular machine. For example just change my COM5: to
# your COMx: number in the lines below using a simple text editor.
os.system("MODE COM5: BAUD=1200 PARITY=N DATA=8 STOP=1 to=on")
while 1:
# Open up a channel for USB/Serial reading on the Arduino board.
pointer = open('COM5:', 'rb', 2)
# Transfer an 8 bit number into `mybyte`.
mybyte = str(pointer.read(1))
# Immediately close the channel.
pointer.close()
# Place a wire link between ANALOG IN 0 and Gnd.
# Replace the wire link between ANALOG IN 0 and 3V3.
# Replace the wire link between ANALOG IN 0 and 5V.
# Watch the values change.
# Print the decimal value on screen.
print 'Decimal value at Arduino ADC Port0 is:-',ord(mybyte),'. '
# Ensure one safe getout when running!
if mybyte == chr(255): break
main()
# End of DEMO...
# Enjoy finding simple solutions to often very difficult problems... ;o)
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/577648_Arduino_DiecimilBoard_Access_Inside_Windows_32/recipe-577648.py",
"copies": "1",
"size": "2370",
"license": "mit",
"hash": 8820548762363327000,
"line_mean": 16.5555555556,
"line_max": 82,
"alpha_frac": 0.6679324895,
"autogenerated": false,
"ratio": 3.0980392156862746,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9207868991956338,
"avg_score": 0.011620542645987289,
"num_lines": 135
} |
"""ArduPET is an office automation implementation. It is originally designed to
be use at the Tutorial Education Program (PET in pt-BR, hence the name) to turn
lamps on and off and unlock the door, while also checking the statuses.
Routes:
GET /lamp -- lists all lamps statuses
GET /lamp/<id> -- get the status of the lamp identified by <id>
PUT /lamp/<id> -- update the status of the lamp identified by <id>
GET /door -- get the status of the door
PUT /door -- unlock the door (unlocking relocks after some seconds)
"""
import flask
import serial
app = flask.Flask(__name__)
# Serial link to the Arduino controlling the lamps and door lock.
arduino = serial.Serial('/dev/ttyACM0', 9600)
# Number of lamps wired to the board; valid ids are 0..LAMP_COUNT-1.
LAMP_COUNT = 2
@app.route('/lamp', methods=['GET'])
def lamp_index():
    """Return a listing of all lamps statuses."""
    # Not implemented yet - always answers 501.
    return '', 501
@app.route('/lamp/<int:lamp_id>', methods=['GET', 'PUT'])
def lamp_status(lamp_id):
    """Read or update the status of a single lamp.

    Keyword arguments:
    lamp_id -- the id of the desired lamp.
    """
    if not 0 <= lamp_id < LAMP_COUNT:
        flask.abort(404)
    method = flask.request.method
    if method == 'GET':
        # wire protocol: operation ('r'), target ('l'), lamp id byte
        arduino.write(bytes('rl' + chr(lamp_id), 'ASCII'))
        is_on = ord(arduino.read())
        return 'Lamp is ' + ('on' if is_on else 'off'), 200
    elif method == 'PUT':
        arduino.write(bytes('wl' + chr(lamp_id), 'ASCII'))
        return '', 204
@app.route('/door', methods=['GET', 'PUT'])
def door_status():
    """Read the status or update (unlock) the door."""
    method = flask.request.method
    if method == 'GET':
        # wire protocol: operation ('r'), target ('d'), padding byte (ignored)
        arduino.write(bytes('rd\0', 'ASCII'))
        unlocked = ord(arduino.read())
        return 'Door is ' + ('unlocked' if unlocked else 'locked'), 200
    elif method == 'PUT':
        arduino.write(bytes('wd\0', 'ASCII'))
        return '', 204
if __name__ == "__main__":
app.run(host='0.0.0.0')
| {
"repo_name": "ranisalt/ardupet",
"path": "ardupet.py",
"copies": "1",
"size": "1998",
"license": "mit",
"hash": -5813107089365314000,
"line_mean": 26.75,
"line_max": 79,
"alpha_frac": 0.6391391391,
"autogenerated": false,
"ratio": 2.9382352941176473,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8990476604995349,
"avg_score": 0.017379565644459638,
"num_lines": 72
} |
# AREA API Serializer #
from django.utils import timezone
from django.core.urlresolvers import reverse
from rest_framework import serializers
from rest_framework import pagination
from area.models import Region, Location, LocationDirectory, LocationType
from item.serializers import ItemSerializer
class RegionSerializer(serializers.ModelSerializer):
    """Flat Region representation for list endpoints."""
    class Meta:
        model = Region
        fields = ('id', 'name', 'area', 'owner')
class LocationTypeSerializer(serializers.ModelSerializer):
    """Minimal LocationType representation (id and name only)."""
    class Meta:
        model = LocationType
        fields = ('id', 'name')
class LocationDirectorySerializer(serializers.ModelSerializer):
    """Serialize a LocationDirectory entry, exposing its id also as `type_id`."""
    type_id = serializers.SerializerMethodField(read_only=True)

    def get_type_id(self, obj):
        """ Rename parameter. """
        # parameter renamed from `object` - it shadowed the builtin
        return obj.id

    class Meta:
        model = LocationDirectory
        fields = ('id', 'name', 'area', 'type_id')
class LocationSerializer(serializers.ModelSerializer):
    """Location representation with design/type/region lookups flattened to ids.

    Method parameters renamed from `object` (shadowed the builtin) to `obj`.
    """
    design = serializers.SerializerMethodField(read_only=True)
    design_id = serializers.SerializerMethodField(read_only=True)
    type_id = serializers.SerializerMethodField(read_only=True)
    region_id = serializers.SerializerMethodField(read_only=True)

    def get_design(self, obj):
        """ Generate directory for object. """
        return str(obj.get_design())

    def get_design_id(self, obj):
        """ Get design id. """
        return obj.get_design().id

    def get_type_id(self, obj):
        """ Global type of location Design. """
        return obj.get_design().get_type().id

    def get_region_id(self, obj):
        """ Get Region id. """
        return obj.region.id

    class Meta:
        model = Location
        fields = ('id', 'name', 'design', 'design_id', 'type_id', 'region_id')
### DETAILED SERIALIZERS ###
class LocationDetailSerializer(LocationSerializer):
    """Detailed Location representation: adds free area, type, owner and items.

    Method parameters unified to `obj` (previously a mix of `object`, which
    shadowed the builtin, and `location`).
    """
    free_area = serializers.SerializerMethodField(read_only=True)
    type = serializers.SerializerMethodField(read_only=True)
    owner = serializers.SerializerMethodField(read_only=True)
    items = serializers.SerializerMethodField(read_only=True)

    def get_items(self, obj):
        """ List of items. """
        items = obj.get_items().all()
        serializer = ItemSerializer(items, many=True)
        return serializer.data

    def get_free_area(self, obj):
        """ Calculated free area in Location. """
        return int(obj.get_free_area())

    def get_type(self, obj):
        """ Global location type. """
        return str(obj.get_type())

    def get_owner(self, obj):
        """ Owner of Region. """
        return int(obj.get_owner().id)

    class Meta:
        model = Location
        fields = ('id', 'name', 'design', 'design_id', 'type', 'type_id', 'region_id', 'free_area', 'owner', 'items')
class RegionDetailSerializer(serializers.ModelSerializer):
    """Detailed Region serializer embedding its Locations and free area."""
    locations = LocationSerializer(many=True, read_only=True)
    free_area = serializers.SerializerMethodField(read_only=True)

    def get_free_area(self, region):
        """Calculated free area in Region (argument renamed from the
        builtin-shadowing ``object`` to ``region``)."""
        return int(region.get_free_area())

    class Meta:
        model = Region
        fields = ('id', 'name', 'area', 'free_area', 'locations', 'owner')
| {
"repo_name": "ngr/sm_00",
"path": "area/serializers.py",
"copies": "1",
"size": "3422",
"license": "mit",
"hash": 6279190598414505000,
"line_mean": 33.2783505155,
"line_max": 117,
"alpha_frac": 0.6361776739,
"autogenerated": false,
"ratio": 4.188494492044064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01204225579585207,
"num_lines": 97
} |
# SET methods #
# FIXME Use this for debugging only now!
    def set_name(self, name=''):
        """ Set a new name of Region. """
        # Mutates the in-memory instance only; caller must save() to persist.
        self.name = name
    def set_owner(self, owner):
        """ Change owner of Region. """
        # Mutates the in-memory instance only; caller must save() to persist.
        self.owner = owner
class LocationType(models.Model):
    """ This is General type of Locations. """
    # Display name of the type.
    name = models.CharField(max_length=127)

    def __str__(self):
        """Display name used by Django admin and str()."""
        return self.name

    def get_name(self):
        """Return the name of this LocationType."""
        return self.name
class LocationDirectory(models.Model):
    """ This is Location design directory. """
    name = models.CharField(max_length=127)
    # Default area a Location built from this design occupies.
    area = models.PositiveIntegerField(default=1)
    # Global category of the design.
    type = models.ForeignKey('area.LocationType')

    def __str__(self):
        return self.name

    def get_name(self):
        """ Return name of LocationDirectory. """
        return self.name

    def get_type(self):
        """ Global type of this LocationDirectory. """
        return self.type

    def get_area(self):
        """ Default Area of this LocationDirectory. """
        return self.area
class BuildingMaterialRecipe(models.Model):
    """ Recipes of materials required to construct Locations. """
    # Building task this ingredient requirement belongs to.
    task_type = models.ForeignKey('task.BuildingTaskDirectory', related_name='materials')
    # Item consumed by the recipe.
    ingredient = models.ForeignKey('item.ItemDirectory', related_name='building_recipes')
    # Quantity of the ingredient required.
    amount = models.PositiveIntegerField(default=1)

    def __str__(self):
        """ Return the name of Recipe. """
        return "{0} for {1} recipe".format(self.ingredient, self.task_type)
class Location(models.Model):
    """A concrete Location inside a Region, built from a LocationDirectory design."""
    name = models.CharField(max_length=127, blank=True)
    region = models.ForeignKey(Region, related_name='locations')
    design = models.ForeignKey(LocationDirectory)

    def __str__(self):
        """Display name combining the Location name and its design."""
        return "{0} - {1} ".format(self.name, self.get_design())

    def get_name(self):
        """Name of this Location."""
        return self.name

    def get_area(self):
        """Total area of this Location, taken from its design."""
        return self.get_design().get_area()

    def get_region(self):
        """Parent Region of this Location."""
        return self.region

    def get_design(self):
        """LocationDirectory design of this Location."""
        return self.design

    def get_type(self):
        """Global LocationType of this Location's design."""
        return self.get_design().get_type()

    def get_owner(self):
        """Owner of the parent Region."""
        return self.get_region().get_owner()

    def get_tasks(self, running=True):
        """Tasks in this Location; only unfinished ones when running=True."""
        if not running:
            return self.tasks.all()
        return self.tasks.filter(_retrieved=False, _fulfilled__lt=100.0).all()

    def get_free_area(self):
        """Amount of unused area, to determine if a Task can be created here."""
        # FIXME!
        # Area consumed = workers assigned to each running task times the
        # per-worker area requirement of that task's type.
        used = sum(
            task.get_assignments(running=True).count()
            * task.get_type().get_area_per_worker()
            for task in self.get_tasks(running=True)
        )
        return self.get_area() - used

    def get_items(self):
        """Related manager with the Items stored in this Location."""
        return self.items
| {
"repo_name": "ngr/sm_00",
"path": "area/models.py",
"copies": "1",
"size": "6119",
"license": "mit",
"hash": 5042660296678238000,
"line_mean": 32.2554347826,
"line_max": 99,
"alpha_frac": 0.6108841314,
"autogenerated": false,
"ratio": 4.211286992429456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014495791094142833,
"num_lines": 184
} |
# A reaction to: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/552751
from webob import Request, Response
from webob import exc
from simplejson import loads, dumps
import traceback
import sys
class JsonRpcApp(object):
    """
    Serve the given object via json-rpc (http://json-rpc.org/)
    """

    def __init__(self, obj):
        # Object whose public methods are exposed over JSON-RPC.
        self.obj = obj

    def __call__(self, environ, start_response):
        # WSGI entry point: translate validation errors into HTTP responses.
        req = Request(environ)
        try:
            resp = self.process(req)
        except ValueError, e:
            # Malformed body/parameters -> 400 Bad Request.
            resp = exc.HTTPBadRequest(str(e))
        except exc.HTTPException, e:
            # webob HTTP exceptions are themselves WSGI applications.
            resp = e
        return resp(environ, start_response)

    def process(self, req):
        """Validate a JSON-RPC POST request and dispatch it to self.obj."""
        if not req.method == 'POST':
            raise exc.HTTPMethodNotAllowed(
                "Only POST allowed",
                allowed='POST')
        try:
            json = loads(req.body)
        except ValueError, e:
            raise ValueError('Bad JSON: %s' % e)
        try:
            method = json['method']
            params = json['params']
            id = json['id']
        except KeyError, e:
            raise ValueError(
                "JSON body missing parameter: %s" % e)
        if method.startswith('_'):
            # Keep private/dunder attributes unreachable from the wire.
            raise exc.HTTPForbidden(
                "Bad method name %s: must not start with _" % method)
        if not isinstance(params, list):
            raise ValueError(
                "Bad params %r: must be a list" % params)
        try:
            method = getattr(self.obj, method)
        except AttributeError:
            raise ValueError(
                "No such method %s" % method)
        try:
            result = method(*params)
        except:
            # Any failure inside the target method becomes a JSON-RPC error
            # payload with HTTP 500, instead of a WSGI-level crash.
            text = traceback.format_exc()
            exc_value = sys.exc_info()[1]
            error_value = dict(
                name='JSONRPCError',
                code=100,
                message=str(exc_value),
                error=text)
            return Response(
                status=500,
                content_type='application/json',
                body=dumps(dict(result=None,
                                error=error_value,
                                id=id)))
        return Response(
            content_type='application/json',
            body=dumps(dict(result=result,
                            error=None,
                            id=id)))
class ServerProxy(object):
    """
    JSON proxy to a remote service.

    Attribute access produces a callable _Method bound to that name;
    underscore-prefixed names are reserved for the proxy itself.
    """

    def __init__(self, url, proxy=None):
        if proxy is None:
            # Default transport: send the request exactly as built.
            from wsgiproxy.exactproxy import proxy_exact_request
            proxy = proxy_exact_request
        self._url = url
        self.proxy = proxy

    def __getattr__(self, name):
        if name.startswith('_'):
            raise AttributeError(name)
        return _Method(self, name)

    def __repr__(self):
        return '<%s for %s>' % (self.__class__.__name__, self._url)
class _Method(object):
def __init__(self, parent, name):
self.parent = parent
self.name = name
def __call__(self, *args):
json = dict(method=self.name,
id=None,
params=list(args))
req = Request.blank(self.parent._url)
req.method = 'POST'
req.content_type = 'application/json'
req.body = dumps(json)
resp = req.get_response(self.parent.proxy)
if resp.status_int != 200 and not (
resp.status_int == 500
and resp.content_type == 'application/json'):
raise ProxyError(
"Error from JSON-RPC client %s: %s"
% (self.parent._url, resp.status),
resp)
json = loads(resp.body)
if json.get('error') is not None:
e = Fault(
json['error'].get('message'),
json['error'].get('code'),
json['error'].get('error'),
resp)
raise e
return json['result']
class ProxyError(Exception):
    """
    Raised when a request via ServerProxy breaks
    """

    def __init__(self, message, response):
        # Keep the failed webob response available for inspection.
        self.response = response
        Exception.__init__(self, message)
class Fault(Exception):
    """
    Raised when there is a remote error
    """

    def __init__(self, message, code, error, response):
        Exception.__init__(self, message)
        # Error metadata from the remote JSON-RPC error payload.
        self.code = code
        self.error = error
        self.response = response

    def __str__(self):
        return 'Method error calling %s: %s\n%s' % (
            self.response.request.url, self.args[0], self.error)
class DemoObject(object):
    """
    Something interesting to attach to
    """

    def add(self, *args):
        """Sum of all arguments."""
        return sum(args)

    def average(self, *args):
        """Arithmetic mean of the arguments."""
        count = len(args)
        return sum(args) / float(count)

    def divide(self, a, b):
        """Quotient of a and b."""
        return a / b
def make_app(expr):
    """Build a JsonRpcApp from a 'module:expression' spec string."""
    mod_name, expression = expr.split(':', 1)
    __import__(mod_name)
    mod = sys.modules[mod_name]
    target = eval(expression, mod.__dict__)
    return JsonRpcApp(target)
def main(args=None):
import optparse
from wsgiref import simple_server
parser = optparse.OptionParser(
usage='%prog [OPTIONS] MODULE:EXPRESSION')
parser.add_option(
'-p', '--port', default='8080',
help='Port to serve on (default 8080)')
parser.add_option(
'-H', '--host', default='127.0.0.1',
help='Host to serve on (default localhost; 0.0.0.0 to make public)')
options, args = parser.parse_args()
if not args or len(args) > 1:
print 'You must give a single object reference'
parser.print_help()
sys.exit(2)
app = make_app(args[0])
server = simple_server.make_server(options.host, int(options.port), app)
print 'Serving on http://%s:%s' % (options.host, options.port)
server.serve_forever()
# Try python jsonrpc.py 'jsonrpc:DemoObject()'
if __name__ == '__main__':
main()
| {
"repo_name": "bratsche/Neutron-Drive",
"path": "google_appengine/lib/webob_1_1_1/docs/jsonrpc-example-code/jsonrpc.py",
"copies": "5",
"size": "5970",
"license": "bsd-3-clause",
"hash": -4050960155617656000,
"line_mean": 29.932642487,
"line_max": 79,
"alpha_frac": 0.5301507538,
"autogenerated": false,
"ratio": 4.160278745644599,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002712301072350322,
"num_lines": 193
} |
# A read-eval-print-loop for MCEdit.
# Created to allow programmers to quickly experiment with code without worrying
# about destroying their world thanks to MCEdit supplying the ability to quickly
# view and undo changes.
# If there are any issues or enhancement requests please submit a bug report at:
# https://github.com/qqii/mcedit-filters
import __future__
import sys
import traceback
import pymclevel
# coedop.py
# All __future__ features known to this interpreter; used by Compile to
# carry compiler flags forward between successive REPL inputs.
_features = [getattr(__future__, fname)
             for fname in __future__.all_feature_names]

# codeop's PyCF_DONT_IMPLY_DEDENT: makes compile() raise SyntaxError on
# incomplete blocks instead of silently implying a dedent.
PyCF_DONT_IMPLY_DEDENT = 0x200
def _maybe_compile(compiler, source, filename, symbol):
    """Compile ``source`` if it forms a complete statement.

    Mirrors the stdlib ``codeop`` module: returns a code object when the
    input is complete, returns None (falls off the end) when more input
    is expected, and raises SyntaxError when the input can never compile.
    """
    # Input that is only blank lines/comments compiles as a no-op.
    for line in source.split("\n"):
        line = line.strip()
        if line and line[0] != '#':
            break
    else:
        if symbol != "eval":
            source = "pass"
    err = err1 = err2 = None
    code = code1 = code2 = None
    try:
        code = compiler(source, filename, symbol)
    except SyntaxError, err:
        pass
    # Compiling with one and two extra newlines distinguishes "incomplete"
    # from "invalid": if both attempts fail identically, it is invalid.
    try:
        code1 = compiler(source + "\n", filename, symbol)
    except SyntaxError, err1:
        pass
    try:
        code2 = compiler(source + "\n\n", filename, symbol)
    except SyntaxError, err2:
        pass
    if code:
        return code
    if not code1 and repr(err1) == repr(err2):
        raise SyntaxError, err1
def _compile(source, filename, symbol):
    """Compile with PyCF_DONT_IMPLY_DEDENT so incomplete blocks raise."""
    return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT)
def compile_command(source, filename="<input>", symbol="single"):
    """Stateless convenience wrapper: compile one command via _compile."""
    return _maybe_compile(_compile, source, filename, symbol)
class Compile:
    """Compiler callable that remembers __future__ statements already seen."""

    def __init__(self):
        self.flags = PyCF_DONT_IMPLY_DEDENT

    def __call__(self, source, filename, symbol):
        code_object = compile(source, filename, symbol, self.flags, 1)
        # Carry forward any __future__ feature this input enabled so later
        # inputs compile under the same feature set.
        for feature in _features:
            if code_object.co_flags & feature.compiler_flag:
                self.flags |= feature.compiler_flag
        return code_object
class CommandCompiler:
    """Like compile_command(), but keeps __future__ state across calls."""

    def __init__(self):
        self.compiler = Compile()

    def __call__(self, source, filename="<input>", symbol="single"):
        return _maybe_compile(self.compiler, source, filename, symbol)
# code.py
def softspace(file, newvalue):
    """Set ``file.softspace`` to ``newvalue`` and return its old value.

    Missing or read-only attributes are tolerated: reading falls back to
    0, and failed assignment is silently ignored.
    """
    try:
        oldvalue = file.softspace
    except AttributeError:
        oldvalue = 0
    try:
        file.softspace = newvalue
    except (AttributeError, TypeError):
        # "file" may be immutable or reject the attribute entirely.
        pass
    return oldvalue
class InteractiveInterpreter:
    """Compile and run source one statement at a time (py2 ``code`` clone)."""

    def __init__(self, locals=None):
        # Namespace the interpreted code runs in; defaults to a fresh
        # pseudo-module named "__console__".
        if locals is None:
            locals = {"__name__": "__console__", "__doc__": None}
        self.locals = locals
        self.compile = CommandCompiler()

    def runsource(self, source, filename="<input>", symbol="single"):
        """Compile and run ``source``; return True when more input is needed."""
        try:
            code = self.compile(source, filename, symbol)
        except (OverflowError, SyntaxError, ValueError):
            # Input is definitively wrong; report it, don't ask for more.
            self.showsyntaxerror(filename)
            return False
        if code is None:
            # Incomplete statement -- caller should prompt for more lines.
            return True
        self.runcode(code)
        return False

    def runcode(self, code):
        """Execute a code object in self.locals, reporting any traceback."""
        try:
            exec code in self.locals
        except SystemExit:
            raise
        except:
            self.showtraceback()
        else:
            if softspace(sys.stdout, 0):
                print

    def showsyntaxerror(self, filename=None):
        """Print a syntax error, rewriting "<input>" to ``filename``."""
        type, value, sys.last_traceback = sys.exc_info()
        sys.last_type = type
        sys.last_value = value
        if filename and type is SyntaxError:
            # Unpack and rebuild the SyntaxError with the real filename.
            try:
                msg, (dummy_filename, lineno, offset, line) = value
            except:
                pass
            else:
                value = SyntaxError(msg, (filename, lineno, offset, line))
                sys.last_value = value
        # NOTE: "list" and "type" shadow builtins here (kept as-is).
        list = traceback.format_exception_only(type, value)
        map(self.write, list)

    def showtraceback(self):
        """Print the latest traceback, hiding the runcode() frame itself."""
        try:
            type, value, tb = sys.exc_info()
            sys.last_type = type
            sys.last_value = value
            sys.last_traceback = tb
            tblist = traceback.extract_tb(tb)
            # Drop the frame that refers to this interpreter.
            del tblist[:1]
            list = traceback.format_list(tblist)
            if list:
                list.insert(0, "Traceback (most recent call last):\n")
            list[len(list):] = traceback.format_exception_only(type, value)
        finally:
            # Break reference cycles through the traceback object.
            tblist = tb = None
        map(self.write, list)

    def write(self, data):
        """Output hook; default writes to stderr."""
        sys.stderr.write(data)
class InteractiveConsole(InteractiveInterpreter):
    """Line-buffered console on top of InteractiveInterpreter (py2 clone)."""

    def __init__(self, locals=None, filename="<console>"):
        InteractiveInterpreter.__init__(self, locals)
        self.filename = filename
        self.resetbuffer()

    def resetbuffer(self):
        # Lines of the statement currently being entered.
        self.buffer = []

    def interact(self, banner=None):
        """Run the prompt/read/execute loop until "exit" or EOF."""
        try:
            sys.ps1
        except AttributeError:
            sys.ps1 = ">>> "
        try:
            sys.ps2
        except AttributeError:
            sys.ps2 = "... "
        cprt = 'Type "help", "copyright", "credits" or "license" for more information.'
        if banner is None:
            self.write("Python %s on %s\n%s\n(%s)\n" %
                       (sys.version, sys.platform, cprt,
                        self.__class__.__name__))
        else:
            self.write("%s\n" % str(banner))
        more = 0
        while 1:
            try:
                if more:
                    prompt = sys.ps2
                else:
                    prompt = sys.ps1
                try:
                    line = self.raw_input(prompt)
                    encoding = getattr(sys.stdin, "encoding", None)
                    if encoding and not isinstance(line, unicode):
                        line = line.decode(encoding)
                    # Typed "exit": leave the console -- added for MCEdit,
                    # where sending a real EOF is inconvenient.
                    if line.lower().startswith("exit"):
                        self.write("Exiting...\n")
                        break
                except EOFError:
                    self.write("\n")
                    break
                else:
                    more = self.push(line)
            except KeyboardInterrupt:
                self.write("\nKeyboardInterrupt\n")
                self.resetbuffer()
                more = 0

    def push(self, line):
        """Buffer ``line`` and run; return True when more input is needed."""
        self.buffer.append(line)
        source = "\n".join(self.buffer)
        more = self.runsource(source, self.filename)
        if not more:
            self.resetbuffer()
        return more

    def raw_input(self, prompt=""):
        # Overridable input hook.
        return raw_input(prompt)
def interact(banner=None, readfunc=None, local=None):
    """Convenience entry point: run an InteractiveConsole over ``local``.

    When no custom read function is given, try to enable readline-based
    line editing; otherwise install ``readfunc`` as the input hook.
    """
    console = InteractiveConsole(local)
    if readfunc is None:
        try:
            import readline
        except ImportError:
            pass
    else:
        console.raw_input = readfunc
    console.interact(banner)
# REPL.py
# MCEdit filter metadata: label shown in MCEdit's filter list.
displayName = 'Read Evaluate Print Loop'

# MCEdit filter options: one checkbox controlling whether this module's
# globals are also exposed inside the REPL (read in perform()).
inputs = (
    ('Include Globals', False),
)
def perform(level, box, options):
    """MCEdit filter entry point: open an interactive console.

    level, box and options are supplied by MCEdit and are exposed to the
    console user (see the banner text below).
    """
    vars = {"pymclevel": pymclevel}
    # locals() deliberately captures level, box, options (and vars itself)
    # so they are all visible as names inside the console session.
    vars.update(locals())
    if options["Include Globals"]:
        vars.update(globals())
    shell = InteractiveConsole(vars)
    banner = (
        "\n"
        "This is a read-eval-print-loop for MCEdit. Type \"exit\" to exit.\n\n"
        "The variables you have access to are:\n"
        "box - the bounding box of your selection.\n"
        "level - the world you have currently loaded in mcedit\n"
        "pymclevel - the pymclevel module for editing minecraft worlds and data\n\n"
        "Please note that it is normal for the mcedit gui window to become unresponsive, "
        "it will return to normal once you exit repl mode."
    )
    # Blocks until the user exits; MCEdit's GUI is unresponsive meanwhile.
    shell.interact(banner=banner)
| {
"repo_name": "qqii/mcedit-filters",
"path": "REPL.py",
"copies": "1",
"size": "7564",
"license": "mit",
"hash": 3883925688409963000,
"line_mean": 28.546875,
"line_max": 90,
"alpha_frac": 0.5609465891,
"autogenerated": false,
"ratio": 4.16061606160616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5221562650706161,
"avg_score": null,
"num_lines": null
} |
'''A Read-Eval-Print-Loop with "help" support'''
import sys
import traceback
class InteractiveInterpreter:
    '''A simple interpreter with built-in help.

    Glue between a ``parse`` callable, an ``evaluate`` callable and a
    global environment dict, providing a read-eval-print loop with
    "help", "dir" and "parse" meta-commands.
    '''

    def __init__(self, evaluate=None, parse=None, global_env=None):
        self.evaluate = evaluate      # callable: AST -> value
        self.parse = parse            # callable: source string -> AST
        self.global_env = global_env  # dict of interpreter globals
        self.started = False
        self.prompt = 'repl> '
        self.prompt2 = ' ... '

    def repl(self):
        "A read-eval-print loop."
        self.started = True
        print("\n ==== Enter (quit) to end. ====\n")
        while True:
            inp = self.read_expression()
            if not inp:
                # Meta-commands are handled inside read_expression().
                continue
            try:
                val = self.evaluate(self.parse(inp))
                if val is not None:
                    print(self.to_string(val))
            except (KeyboardInterrupt, SystemExit):
                print("\n Goodbye!")
                return
            except Exception as e:
                print(' {}: {}'.format(type(e).__name__, e))
                if self.global_env["_DEBUG"]:
                    traceback.print_exc()

    def read_expression(self):
        '''Reads an expression from a prompt'''
        inp = input(self.prompt)
        # Keep prompting on the continuation prompt until parens balance.
        open_parens = inp.count("(") - inp.count(")")
        while open_parens > 0:
            inp += ' ' + input(self.prompt2)
            open_parens = inp.count("(") - inp.count(")")
        if inp.startswith(("parse", "help", "dir")):
            self.handle_internally(inp)
            return None
        return inp

    def handle_internally(self, inp):
        '''Dispatch the "parse", "help" and "dir" meta-commands.'''
        if inp.startswith("parse "):
            expr = inp[6:]
            print(" {}\n".format(self.parse(expr)))
        elif inp.startswith("help"):
            help = inp.split()
            if len(help) == 1:
                self.show_variables()
            else:
                self.show_variables(help[1])
        elif inp.startswith("dir"):
            keys = [x for x in self.global_env.keys()
                    if not x.startswith("__")]
            print("")
            # Print names in columns, three per row.
            for i, k in enumerate(keys):
                print("{0:25}".format(k), end='')
                if i % 3 == 0:
                    print("")
            print("")

    def start(self):
        '''starts the interpreter if not already running'''
        if self.started:
            return
        # (Removed a no-op ``try: ... except BaseException: raise`` that
        # neither handled nor altered propagation of exceptions.)
        self.repl()
        # do not use print after KeyboardInterrupt
        sys.stdout.write("\n Exiting petit_lisp.")

    def to_string(self, exp):
        "Convert a Python object back into a Lisp-readable string."
        if isinstance(exp, list):
            return '(' + ' '.join(self.to_string(s) for s in exp) + ')'
        if exp is True:
            return '"True"'
        elif exp is False:
            return '"False"'
        elif isinstance(exp, complex):
            # Bug fix: str() parenthesizes a complex only when it has a
            # real part ('(1+2j)' vs '2j'); the previous unconditional
            # [1:-1] slice mangled pure-imaginary values ('2i' -> '').
            text = str(exp).replace('j', 'i')
            if text.startswith('(') and text.endswith(')'):
                text = text[1:-1]
            return text
        return str(exp)

    def show_value(self, var, env):
        '''Displays the value of a variable in a given environment or dict'''
        val = env[var]
        if not isinstance(val, (int, float, complex, str)):
            # For rich objects, show the first lines of their docstring.
            if hasattr(val, '__doc__') and val.__doc__ is not None:
                val = ' '.join(val.__doc__.split('\n')[:3])
        if isinstance(val, str):
            if len(val) > 75:
                val = val[:75].strip() + "..."
            if val.startswith("list()"):
                val = "Empty list"
        print("{0:15}: {1:75}".format(var, self.to_string(val)))

    def show_variables(self, obj=None):
        '''Inspired by Python's help: shows a list of defined names and
        their values or description
        '''
        env = self.global_env
        if obj == "help":
            print("Usage: help, help variable, help globals, "
                  "help user-defined")
        elif obj not in [None, "user-defined", "globals"]:
            if obj not in env:
                print("Unknown variable: ", obj)
            else:
                self.show_value(obj, env)
        else:
            # NOTE(review): both filters compare against self.global_env,
            # which is the same dict as ``env`` -- "user-defined" therefore
            # skips every name. Kept as-is; confirm whether a separate
            # user environment was intended.
            names = sorted(env.keys())
            for var in names:
                if var.startswith('__'):
                    continue
                if obj == "user-defined" and var in self.global_env:
                    continue
                elif obj == "globals" and var not in self.global_env:
                    continue
                self.show_value(var, env)
            print()
| {
"repo_name": "aroberge/lispy-experiments",
"path": "src/repl.py",
"copies": "1",
"size": "4603",
"license": "cc0-1.0",
"hash": 669378691597672100,
"line_mean": 34.1374045802,
"line_max": 82,
"alpha_frac": 0.4799044102,
"autogenerated": false,
"ratio": 4.305893358278765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5285797768478765,
"avg_score": null,
"num_lines": null
} |
"""A readline()-style interface to the parts of a multipart message.
The MultiFile class makes each part of a multipart message "feel" like
an ordinary file, as long as you use fp.readline(). Allows recursive
use, for nested multipart messages. Probably best used together
with module mimetools.
Suggested use:
real_fp = open(...)
fp = MultiFile(real_fp)
"read some lines from fp"
fp.push(separator)
while 1:
"read lines from fp until it returns an empty string" (A)
if not fp.next(): break
fp.pop()
"read remaining lines from fp until it returns an empty string"
The latter sequence may be used recursively at (A).
It is also allowed to use multiple push()...pop() sequences.
If seekable is given as 0, the class code will not do the bookkeeping
it normally attempts in order to make seeks relative to the beginning of the
current file part. This may be useful when using MultiFile with a non-
seekable stream object.
"""
__all__ = ["MultiFile","Error"]
class Error(Exception):
    """Raised for MultiFile framing/usage errors."""
class MultiFile:
    """File-like view over one part of a multipart stream at a time.

    This variant keeps one line of readahead so the newline (or CRLF)
    that terminates the last line before a boundary can be stripped.
    """

    # Class-level default; instances flip it to 1 when seeking is enabled.
    seekable = 0

    def __init__(self, fp, seekable=1):
        self.fp = fp
        self.stack = []  # Grows down
        self.level = 0   # Depth of the boundary we last hit (0 = in data)
        self.last = 0    # True after an end marker ("--sep--") was seen
        self.readahead = ""
        if seekable:
            self.seekable = 1
            self.start = self.fp.tell()
            self.posstack = []  # Grows down

    def tell(self):
        """Position relative to the start of the current part."""
        if self.level > 0:
            return self.lastpos
        return self.fp.tell() - len(self.readahead) - self.start

    def seek(self, pos, whence=0):
        """Seek within the current part (whence=2 only after a boundary)."""
        here = self.tell()
        if whence:
            if whence == 1:
                pos = pos + here
            elif whence == 2:
                if self.level > 0:
                    pos = pos + self.lastpos
                else:
                    raise Error, "can't use whence=2 yet"
        if not 0 <= pos <= here or \
           self.level > 0 and pos > self.lastpos:
            raise Error, 'bad MultiFile.seek() call'
        self.fp.seek(pos + self.start)
        self.level = 0
        self.last = 0
        # Seeking invalidates the buffered readahead line.
        self.readahead = ""

    def readline(self):
        """Return the next line, with the part's final line-ending stripped."""
        if not self.readahead:
            self.readahead = self._readline()
        line = self.readahead
        if line:
            self.readahead = self._readline()
            if not self.readahead:
                # "line" is the last line before a boundary/EOF: drop its
                # line terminator.
                if line[-2:] == "\r\n":
                    line = line[:-2]
                elif line[-1:] == "\n":
                    line = line[:-1]
        return line

    def _readline(self):
        """Raw readline: '' at a boundary; raises on framing errors."""
        if self.level > 0:
            return ''
        line = self.fp.readline()
        # Real EOF?
        if not line:
            self.level = len(self.stack)
            self.last = (self.level > 0)
            if self.last:
                raise Error, 'sudden EOF in MultiFile.readline()'
            return ''
        assert self.level == 0
        # Fast check to see if this is just data
        if self.is_data(line):
            return line
        else:
            # Ignore trailing whitespace on marker lines
            marker = line.rstrip()
        # No? OK, try to match a boundary.
        # Return the line (unstripped) if we don't.
        for i in range(len(self.stack)):
            sep = self.stack[i]
            if marker == self.section_divider(sep):
                self.last = 0
                break
            elif marker == self.end_marker(sep):
                self.last = 1
                break
        else:
            return line
        # We only get here if we see a section divider or EOM line
        if self.seekable:
            self.lastpos = self.tell() - len(line)
        self.level = i+1
        if self.level > 1:
            # The boundary matched an outer separator: an inner end
            # marker was missing.
            raise Error,'Missing endmarker in MultiFile.readline()'
        return ''

    def readlines(self):
        """All remaining lines of the current part."""
        list = []
        while 1:
            line = self.readline()
            if not line: break
            list.append(line)
        return list

    def read(self): # Note: no size argument -- read until EOF only!
        return ''.join(self.readlines())

    def next(self):
        """Skip to the next part; return 0 when this section is exhausted."""
        while self.readline(): pass
        if self.level > 1 or self.last:
            return 0
        self.level = 0
        self.last = 0
        if self.seekable:
            self.start = self.fp.tell()
        return 1

    def push(self, sep):
        """Enter a nested multipart section delimited by ``sep``."""
        if self.level > 0:
            raise Error, 'bad MultiFile.push() call'
        self.stack.insert(0, sep)
        if self.seekable:
            self.posstack.insert(0, self.start)
            self.start = self.fp.tell()

    def pop(self):
        """Leave the innermost multipart section."""
        if self.stack == []:
            raise Error, 'bad MultiFile.pop() call'
        if self.level <= 1:
            self.last = 0
        else:
            abslastpos = self.lastpos + self.start
        self.level = max(0, self.level - 1)
        del self.stack[0]
        if self.seekable:
            self.start = self.posstack[0]
            del self.posstack[0]
            if self.level > 0:
                self.lastpos = abslastpos - self.start

    def is_data(self, line):
        # Cheap pre-filter: boundary lines always start with "--".
        return line[:2] != '--'

    def section_divider(self, str):
        return "--" + str

    def end_marker(self, str):
        return "--" + str + "--"
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-2.2/Lib/multifile.py",
"copies": "1",
"size": "5241",
"license": "mit",
"hash": 1361403083506459600,
"line_mean": 28.9485714286,
"line_max": 76,
"alpha_frac": 0.5310055333,
"autogenerated": false,
"ratio": 4.0007633587786255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0022323850575883286,
"num_lines": 175
} |
"""A readline()-style interface to the parts of a multipart message.
The MultiFile class makes each part of a multipart message "feel" like
an ordinary file, as long as you use fp.readline(). Allows recursive
use, for nested multipart messages. Probably best used together
with module mimetools.
Suggested use:
real_fp = open(...)
fp = MultiFile(real_fp)
"read some lines from fp"
fp.push(separator)
while 1:
"read lines from fp until it returns an empty string" (A)
if not fp.next(): break
fp.pop()
"read remaining lines from fp until it returns an empty string"
The latter sequence may be used recursively at (A).
It is also allowed to use multiple push()...pop() sequences.
If seekable is given as 0, the class code will not do the bookkeeping
it normally attempts in order to make seeks relative to the beginning of the
current file part. This may be useful when using MultiFile with a non-
seekable stream object.
"""
# Warn at import time that this module is deprecated (removed in Python 3);
# stacklevel=2 attributes the warning to the importing module.
from warnings import warn
warn("the multifile module has been deprecated since Python 2.5",
     DeprecationWarning, stacklevel=2)
del warn
__all__ = ["MultiFile","Error"]
class Error(Exception):
    """Module-specific error raised for bad MultiFile usage or framing."""
class MultiFile:
    """File-like view over one part of a multipart stream at a time
    (Python 2.7 stdlib version; boundaries are managed via push()/pop())."""

    # Class-level default; instances flip it to 1 when seeking is enabled.
    seekable = 0

    def __init__(self, fp, seekable=1):
        self.fp = fp
        self.stack = []   # Innermost separator is last
        self.level = 0    # Depth of the boundary we last hit (0 = in data)
        self.last = 0     # True after an end marker ("--sep--") was seen
        if seekable:
            self.seekable = 1
            self.start = self.fp.tell()
            self.posstack = []

    def tell(self):
        """Position relative to the start of the current part."""
        if self.level > 0:
            return self.lastpos
        return self.fp.tell() - self.start

    def seek(self, pos, whence=0):
        """Seek within the current part (whence=2 only after a boundary)."""
        here = self.tell()
        if whence:
            if whence == 1:
                pos = pos + here
            elif whence == 2:
                if self.level > 0:
                    pos = pos + self.lastpos
                else:
                    raise Error, "can't use whence=2 yet"
        if not 0 <= pos <= here or \
           self.level > 0 and pos > self.lastpos:
            raise Error, 'bad MultiFile.seek() call'
        self.fp.seek(pos + self.start)
        self.level = 0
        self.last = 0

    def readline(self):
        """Next line of the current part; '' at a boundary or EOF."""
        if self.level > 0:
            return ''
        line = self.fp.readline()
        # Real EOF?
        if not line:
            self.level = len(self.stack)
            self.last = (self.level > 0)
            if self.last:
                raise Error, 'sudden EOF in MultiFile.readline()'
            return ''
        assert self.level == 0
        # Fast check to see if this is just data
        if self.is_data(line):
            return line
        else:
            # Ignore trailing whitespace on marker lines
            marker = line.rstrip()
        # No? OK, try to match a boundary.
        # Return the line (unstripped) if we don't.
        for i, sep in enumerate(reversed(self.stack)):
            if marker == self.section_divider(sep):
                self.last = 0
                break
            elif marker == self.end_marker(sep):
                self.last = 1
                break
        else:
            return line
        # We only get here if we see a section divider or EOM line
        if self.seekable:
            self.lastpos = self.tell() - len(line)
        self.level = i+1
        if self.level > 1:
            # The boundary matched an outer separator: an inner end
            # marker was missing.
            raise Error,'Missing endmarker in MultiFile.readline()'
        return ''

    def readlines(self):
        """All remaining lines of the current part."""
        list = []
        while 1:
            line = self.readline()
            if not line: break
            list.append(line)
        return list

    def read(self): # Note: no size argument -- read until EOF only!
        return ''.join(self.readlines())

    def next(self):
        """Skip to the next part; return 0 when this section is exhausted."""
        while self.readline(): pass
        if self.level > 1 or self.last:
            return 0
        self.level = 0
        self.last = 0
        if self.seekable:
            self.start = self.fp.tell()
        return 1

    def push(self, sep):
        """Enter a nested multipart section delimited by ``sep``."""
        if self.level > 0:
            raise Error, 'bad MultiFile.push() call'
        self.stack.append(sep)
        if self.seekable:
            self.posstack.append(self.start)
            self.start = self.fp.tell()

    def pop(self):
        """Leave the innermost multipart section."""
        if self.stack == []:
            raise Error, 'bad MultiFile.pop() call'
        if self.level <= 1:
            self.last = 0
        else:
            abslastpos = self.lastpos + self.start
        self.level = max(0, self.level - 1)
        self.stack.pop()
        if self.seekable:
            self.start = self.posstack.pop()
            if self.level > 0:
                self.lastpos = abslastpos - self.start

    def is_data(self, line):
        # Cheap pre-filter: boundary lines always start with "--".
        return line[:2] != '--'

    def section_divider(self, str):
        return "--" + str

    def end_marker(self, str):
        return "--" + str + "--"
| {
"repo_name": "MattDevo/edk2",
"path": "AppPkg/Applications/Python/Python-2.7.2/Lib/multifile.py",
"copies": "67",
"size": "4982",
"license": "bsd-2-clause",
"hash": -6837340273322319000,
"line_mean": 28.7530864198,
"line_max": 76,
"alpha_frac": 0.530710558,
"autogenerated": false,
"ratio": 4.10717230008244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0021855996025508177,
"num_lines": 162
} |
"""Area drawing algorithm"""
import itertools
from gfxlcd.drawing.pixel import Pixel
from PIL import Image
class Area(Pixel):
"""Area drawing algorithm"""
def __init__(self, driver):
self.driver = driver
Pixel.__init__(self, driver)
def init(self):
"""additional initialization"""
pass
def draw_pixel(self, pos_x, pos_y, color=None):
"""draw one pixel"""
if color is None:
color = self.options['color']
self._set_area(pos_x, pos_y, pos_x, pos_y)
self.driver.data(self._convert_color(color), None)
def _draw_vertical_line(self, pos_x, pos_y, length):
"""draw vertical line"""
self._set_area(pos_x, pos_y, pos_x, pos_y + length)
color = self._convert_color(self.options['color'])
for _ in itertools.repeat(None, length):
self.driver.data(color, None)
def _draw_horizontal_line(self, pos_x, pos_y, length):
"""draw horizontal line"""
self._set_area(pos_x, pos_y, pos_x + length, pos_y)
color = self._convert_color(self.options['color'])
for _ in itertools.repeat(None, length):
self.driver.data(color, None)
def draw_line(self, pos_x1, pos_y1, pos_x2, pos_y2):
"""draw diagonal line"""
width = abs(pos_x2 - pos_x1)
height = abs(pos_y2 - pos_y1)
if pos_x1 == pos_x2:
steps = [height+1]
horizontal = False
offset_x = offset_y = 0
elif pos_y1 == pos_y2:
steps = [width+1]
horizontal = True
offset_x = offset_y = 0
elif width > height:
width += 1
if pos_x2 < pos_x1:
pos_x1, pos_x2 = pos_x2, pos_x1
pos_y1, pos_y2 = pos_y2, pos_y1
offset_y = 1 if pos_y2 > pos_y1 else -1
offset_x = 1 if pos_x2 > pos_x1 else -1
horizontal = True
step = height + 1
length = width // step
steps = self._calculate_line_steps(length, step, width)
else:
height += 1
if pos_y2 < pos_y1:
pos_x1, pos_x2 = pos_x2, pos_x1
pos_y1, pos_y2 = pos_y2, pos_y1
offset_y = 1 if pos_y2 > pos_y1 else -1
offset_x = 1 if pos_x2 > pos_x1 else -1
horizontal = False
step = width + 1
length = height // step
steps = self._calculate_line_steps(length, step, height)
delta_y = 0
delta_x = 0
for idx, step in enumerate(steps):
if horizontal:
self._draw_horizontal_line(
int(pos_x1 + delta_x),
int(pos_y1 + (idx * offset_y)),
int(step)
)
delta_x += step * offset_x
else:
self._draw_vertical_line(
int(pos_x1 + (idx * offset_x)),
int(pos_y1 + delta_y),
int(step)
)
delta_y += step * offset_y
def fill_rect(self, pos_x1, pos_y1, pos_x2, pos_y2):
"""fill an area"""
size = (abs(pos_x2 - pos_x1) + 1) * (abs(pos_y2 - pos_y1) + 1)
self._set_area(
min(pos_x1, pos_x2),
min(pos_y1, pos_y2),
max(pos_x1, pos_x2),
max(pos_y1, pos_y2)
)
# color = self._converted_background_color()
color = self._convert_color(self.options['background_color'])
for _ in range(0, size):
self.driver.data(color, None)
def draw_image(self, pos_x, pos_y, image):
"""draw a PIL image"""
if isinstance(image, str):
image = Image.open(image)
image_file = image.convert('RGB')
width, height = image_file.size
self._set_area(
pos_x,
pos_y,
pos_x + width - 1,
pos_y + height - 1
)
row = 0
col = 0
area = None
temporary_area = None
for red, green, blue in list(image_file.getdata()):
if self._is_transparent((red, green, blue)):
area = (
pos_x,
pos_y + row + 1,
pos_x + width - 1,
pos_y + height - 1
)
temporary_area = (
pos_x + col + 1,
pos_y + row,
pos_x + width - 1,
pos_y + row
)
else:
if temporary_area is not None:
self._set_area(*temporary_area)
temporary_area = None
self.color = (red, green, blue)
self.driver.data(
self._convert_color(self.options['color']), None
)
col += 1
if col > width - 1:
col = 0
row += 1
if area is not None:
self._set_area(*area)
area = None
temporary_area = None
def _is_transparent(self, color):
"""check if color is a transparency color"""
if self.options['transparency_color'] is None:
return False
elif type(self.options['transparency_color'][0]) == int \
and color == self.options['transparency_color']:
return True
elif (type(self.options['transparency_color'][0]) == list or
type(self.options['transparency_color'][0]) == tuple) \
and color in self.options['transparency_color']:
return True
return False
def _draw_letter(self, pos_x, pos_y, letter, with_background=False):
"""Draw one glyph at (pos_x, pos_y).

Without a background the parent-class implementation (pixel-by-pixel)
is used; with a background a full glyph-sized window is opened and
every cell is streamed as either foreground or background colour.
"""
if not with_background:
super()._draw_letter(pos_x, pos_y, letter, with_background)
else:
font = self.options['font']
# Window the size of one glyph cell (end coordinates inclusive).
self._set_area(
pos_x,
pos_y,
pos_x + font.size[0] - 1,
pos_y + font.size[1] - 1
)
bits = font.size[0]
color = self._convert_color(self.options['color'])
background_color = self._convert_color(self.options['background_color'])
# Each row of the glyph is a bitmask; the least significant bit is
# emitted first, shifting right once per column.
for row, data in enumerate(font.get(letter)):
for bit in range(bits):
if data & 0x01:
self.driver.data(color, None)
else:
self.driver.data(background_color, None)
data >>= 1
| {
"repo_name": "bkosciow/gfxlcd",
"path": "gfxlcd/drawing/area.py",
"copies": "1",
"size": "6634",
"license": "mit",
"hash": 6985469976448931000,
"line_mean": 34.1005291005,
"line_max": 84,
"alpha_frac": 0.4678926741,
"autogenerated": false,
"ratio": 3.8280438545874205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.979247922591203,
"avg_score": 0.0006914605550781334,
"num_lines": 189
} |
"""Area driver """
from gfxlcd.abstract.driver import Driver
class AreaDriver(Driver):
    """Null communication driver that renders into an in-memory buffer.

    Useful for testing: pixel data "sent" to the display is captured in
    ``self.buffer`` (a width x height matrix) instead of real hardware.
    """

    def __init__(self, width, height):
        """Create a width x height buffer initialised to 0."""
        self.height = height
        self.width = width
        self.buffer = [[0] * self.height for _ in range(self.width)]
        # Active drawing window, inclusive at both ends (the drawing layer
        # sets windows as pos + size - 1).  The original code initialised
        # end_x/end_y to width/height — one past the last valid index —
        # which let the pointer run off the buffer before wrapping when
        # the default (full-screen) area was used.
        self.area = {
            'start_x': 0,
            'start_y': 0,
            'end_x': width - 1,
            'end_y': height - 1
        }
        # (x, y) write position relative to the area's start corner.
        self.pointer = (0, 0)

    def init(self):
        """initialize pins"""
        pass

    def reset(self):
        """reset a display"""
        pass

    def cmd(self, data, enable):
        """send command to display"""
        pass

    def data(self, data, enable):
        """Store *data* at the current pointer position and advance it."""
        app_x, app_y = self.pointer
        self.buffer[
            self.area['start_x'] + app_x][self.area['start_y'] + app_y
        ] = data
        self._inc_pointer()

    def _inc_pointer(self):
        """Advance the pointer left-to-right, wrapping to the next row at
        the right edge of the area and back to the origin at the bottom."""
        app_x, app_y = self.pointer
        app_x += 1
        if self.area['start_x'] + app_x > self.area['end_x']:
            app_x = 0
            app_y += 1
            if self.area['start_y'] + app_y > self.area['end_y']:
                app_x = 0
                app_y = 0
        self.pointer = (app_x, app_y)
| {
"repo_name": "bkosciow/gfxlcd",
"path": "gfxlcd/driver/null/area_driver.py",
"copies": "1",
"size": "1253",
"license": "mit",
"hash": 1117461018700747800,
"line_mean": 24.06,
"line_max": 70,
"alpha_frac": 0.4820430966,
"autogenerated": false,
"ratio": 3.5698005698005697,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9545961313459392,
"avg_score": 0.001176470588235294,
"num_lines": 50
} |
"""A really dump pusher 'clone', for use in testing and running locally."""
import argparse
import collections
import json
import logging
import SimpleHTTPServer
import SocketServer
import sys
import threading
import time
import SimpleWebSocketServer
HTTP_PORT = 8101
WEBSOCKET_PORT = 8102
LOGFMT = '%(asctime)s %(levelname)s %(filename)s:%(lineno)d - %(message)s'
class SocketHandler(SimpleWebSocketServer.WebSocket):
"""Represents one websocket connection and its channel subscriptions."""
# pylint: disable=invalid-name
def __init__(self, sockets, server, sock, address):
super(SocketHandler, self).__init__(server, sock, address)
# Shared channel -> [socket, ...] registry owned by SimplePusher.
self._sockets = sockets
# Channels this particular connection has subscribed to.
self._channels = []
def handleMessage(self):
"""Only message we get is a subscription."""
if self.data is None:
return
try:
# message should be a subscription, of form {channel: 'channel_name'}
logging.info('\'%s\' received', self.data)
data = json.loads(self.data.decode('utf-8'))
self._channels.append(data['channel'])
self._sockets[data['channel']].append(self)
except:
# NOTE(review): deliberately best-effort — a malformed subscription is
# logged and dropped rather than tearing down the connection.
logging.error('Error handling message:', exc_info=sys.exc_info())
def handleConnected(self):
# Connection callback: log only; registration happens on subscription.
logging.info('%s connected', self.address)
def handleClose(self):
logging.info('%s closed', self.address, exc_info=sys.exc_info())
# Remove this socket from every channel it subscribed to so the
# broadcaster no longer tries to write to it.
for channel in self._channels:
self._sockets[channel].remove(self)
class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""Handles HTTP requests; a POST body is broadcast to the websocket
subscribers of the channel named by the last path segment."""
# pylint: disable=invalid-name,too-many-public-methods
def __init__(self, sockets, request, client_address, server):
# Shared channel -> [socket, ...] registry owned by SimplePusher.
self._sockets = sockets
SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(
self, request, client_address, server)
def do_POST(self):
"""Send request body to /channel."""
try:
# The channel is the last component of the request path.
channel = self.path.split('/')[-1]
content_len = int(self.headers.getheader('content-length', 0))
post_body = self.rfile.read(content_len)
#logging.info('Sending \"%s\" to \"%s\"', post_body, channel)
for socket in self._sockets[channel]:
socket.sendMessage(post_body)
# 204: accepted, no response body.
self.send_response(204, '')
except:
# NOTE(review): best-effort by design — a failed broadcast is logged
# and the request dropped rather than crashing the server thread.
logging.error('Error sending message:', exc_info=sys.exc_info())
class SimplePusher(object):
"""A very simple websocket / push service: an HTTP server accepts POSTs
and fans each body out to websocket clients subscribed to the channel."""
def __init__(self, args):
# args provides http_port and websocket_port (from argparse).
self._args = args
# channel name -> list of subscribed SocketHandler instances;
# shared with both request handler classes.
self._sockets = collections.defaultdict(list)
self._httpd = None
self._httpd_thread = None
self._websocket_server = None
self._websocket_server_thread = None
def _http_request_handler(self, request, client_address, server):
# Factory passed to TCPServer so handlers share self._sockets.
return ServerHandler(self._sockets, request, client_address, server)
def _websocket_request_handler(self, server, sock, addr):
# Factory passed to SimpleWebSocketServer, same sharing trick.
return SocketHandler(self._sockets, server, sock, addr)
def start(self):
"""Start the HTTP and websocket servers, each on its own thread."""
logging.info('Starting local websocket server.')
self._httpd = SocketServer.TCPServer(
('', self._args.http_port), self._http_request_handler)
self._httpd_thread = threading.Thread(target=self._httpd.serve_forever)
self._httpd_thread.start()
self._websocket_server = SimpleWebSocketServer.SimpleWebSocketServer(
'', self._args.websocket_port, self._websocket_request_handler)
self._websocket_server_thread = threading.Thread(
target=self._websocket_server.serveforever)
self._websocket_server_thread.start()
def stop(self):
"""Shut both servers down and join their threads."""
logging.info('Stopping local websocket server.')
self._httpd.shutdown()
self._httpd_thread.join()
self._websocket_server.close()
self._websocket_server_thread.join()
def main():
    """Parse flags, run the pusher until interrupted, then shut down.

    Blocks in a sleep loop; Ctrl-C triggers a clean shutdown of both
    server threads.
    """
    logging.basicConfig(format=LOGFMT, level=logging.DEBUG)
    parser = argparse.ArgumentParser()
    parser.add_argument('--http_port',
                        default=HTTP_PORT)
    parser.add_argument('--websocket_port',
                        default=WEBSOCKET_PORT)
    args = parser.parse_args()
    pusher = SimplePusher(args)
    pusher.start()
    try:
        while True:
            time.sleep(100)
    except KeyboardInterrupt:
        # Only Ctrl-C should end the wait loop; the previous bare
        # `except:` also swallowed SystemExit and real errors.
        pass
    pusher.stop()


if __name__ == '__main__':
    main()
| {
"repo_name": "tomwilkie/awesomation",
"path": "src/pi/simple_pusher.py",
"copies": "1",
"size": "4124",
"license": "mit",
"hash": -5561247637940887000,
"line_mean": 28.6690647482,
"line_max": 75,
"alpha_frac": 0.6741028128,
"autogenerated": false,
"ratio": 3.9053030303030303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9978527544321996,
"avg_score": 0.020175659756206863,
"num_lines": 139
} |
"""A really simple 'Hello, World' webapp to test snakeweb."""
from wsgiref.simple_server import make_server
from snakeguice import create_injector
from snakeguice.modules import Module
from snakeguice.extras import snakeweb
class HWController(object):
"""Demo controller: two actions returning snakeweb Responses."""
def index(self, request):
# Default action: greet and echo the client's address.
return snakeweb.Response('Hello, World!<br>I see you are from: {0}'
.format(request.remote_addr))
def hello_name(self, request, name):
# Greets the name captured from the URL route.
return snakeweb.Response('Hello, {0}!'.format(name))
class HWModule(Module):
"""Top-level snake-guice module; installs the route bindings."""
def configure(self, binder):
self.install(binder, HWRoutes())
class HWRoutes(snakeweb.RoutesModule):
"""URL routing: '/' -> HWController.index, '/:name' -> hello_name."""
def configure(self, routes_binder):
routes_binder.connect('/', controller=HWController)
routes_binder.connect('/:name', controller=HWController,
action='hello_name')
if __name__ == '__main__':
# Wire up injection, then serve the WSGI app on port 8000 forever.
injector = create_injector(HWModule())
httpd = make_server('', 8000, snakeweb.Application(injector))
httpd.serve_forever()
| {
"repo_name": "dstanek/snake-guice",
"path": "examples/snakeweb/simple_hello_world.py",
"copies": "2",
"size": "1051",
"license": "mit",
"hash": -3842863588822469000,
"line_mean": 28.1944444444,
"line_max": 75,
"alpha_frac": 0.6593720266,
"autogenerated": false,
"ratio": 3.687719298245614,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 36
} |
""" A really simple internal message bus for PubTal.
Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you make any bug fixes or feature enhancements please let me know!
"""
import logging
class MessageBus:
    """A really simple internal publish/subscribe message bus.

    Listeners are callables taking (eventType, data).  Plain listeners
    receive every event of their type; filter listeners receive only
    events whose data equals the value they registered for.
    """

    def __init__(self):
        # eventType -> {func: func}; a dict is used as an ordered set so
        # double registration of the same function is a no-op.
        self.listeners = {}
        # (eventType, dataValue) -> {func: func}
        self.filterListeners = {}
        self.log = logging.getLogger("MessageBus")

    def registerListener(self, eventType, func):
        """Register *func* to be called for every event of *eventType*."""
        currentListeners = self.listeners.get(eventType, {})
        currentListeners[func] = func
        self.listeners[eventType] = currentListeners
        self.log.info("Function %s registered for event type %s" % (repr(func), eventType))

    def unregisterListener(self, eventType, func):
        """Remove *func* from *eventType*; a warning is logged if absent."""
        currentListeners = self.listeners.get(eventType, {})
        try:
            del currentListeners[func]
            self.log.info("Function %s un-registered for event type %s" % (repr(func), eventType))
        except KeyError:
            # Narrowed from a bare except: only a missing key is expected here.
            self.log.warn("Function %s was not registered for event type %s, but tried to unregister." % (repr(func), eventType))

    def registerFilterListener(self, eventType, dataValue, func):
        """Register *func* for events of *eventType* whose data == *dataValue*."""
        currentListeners = self.filterListeners.get((eventType, dataValue), {})
        # BUG FIX: the original wrote to an undefined name 'currentListenter',
        # raising NameError on every call.
        currentListeners[func] = func
        self.filterListeners[(eventType, dataValue)] = currentListeners

    def unregisterFilterListener(self, eventType, dataValue, func):
        """Remove a filter registration; a warning is logged if absent."""
        currentListeners = self.filterListeners.get((eventType, dataValue), {})
        try:
            del currentListeners[func]
        except KeyError:
            self.log.warn("Function %s was not registered for event type %s, but tried to unregister." % (repr(func), eventType))

    def notifyEvent(self, eventType, data=None):
        """Dispatch an event to plain listeners, then to matching filter listeners."""
        currentListeners = self.listeners.get(eventType, {})
        # BUG FIX: the original iterated the misspelled 'currentListenters',
        # raising NameError whenever an event was published.
        for listener in currentListeners.values():
            listener(eventType, data)
        filterListeners = self.filterListeners.get((eventType, data), None)
        if filterListeners is not None:
            for listener in filterListeners.values():
                listener(eventType, data)
| {
"repo_name": "owlfish/pubtal",
"path": "devtools/MessageBusWithFilter.py",
"copies": "1",
"size": "3420",
"license": "bsd-3-clause",
"hash": 2307706419101166000,
"line_mean": 44.0131578947,
"line_max": 122,
"alpha_frac": 0.7529239766,
"autogenerated": false,
"ratio": 3.944636678200692,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9367772265924438,
"avg_score": 0.1659576777752509,
"num_lines": 76
} |
# A really simple rotating logger
import time
class Logger(object):
    """A really simple rotating file logger.

    Messages at or above the configured level are timestamped and
    appended to *outputfile*; when the file would exceed *max_len*
    bytes it is rotated to ``<outputfile>.1`` (replacing any previous
    backup) and a fresh file is started.  With ``interactive=True``
    each message is also printed to stdout.
    """

    # Severity levels; a message is written when self.level <= its level.
    DEBUG = 10
    INFO = 20
    WARNING = 30
    WARN = WARNING
    ERROR = 40

    def __init__(self, outputfile, max_len, level=INFO,
                 interactive=False):
        # Specify a logfile, the max length of the file before rotating,
        # the minimum level to log, and whether this is being run interactivly.
        # If it is interactive, we also print messages in addition to saving
        # them to the file.
        self.outputfile = outputfile
        self.backupfile = outputfile + '.1'
        self.max_len = max_len
        self.level = level
        self.interactive = interactive
        self.size_written = 0
        import os
        try:
            os.stat(self.outputfile)
        except OSError:
            # No old log file present: start a fresh one.  (Narrowed from a
            # bare except, which also masked errors raised by _rotate().)
            self.fileobj = open(outputfile, 'w')
        else:
            # An old log file is in place; force a rotation.
            self._rotate()

    def _write(self, levelname, msg):
        """Format *msg* with a timestamp and write it, rotating first if needed."""
        lt = time.localtime()
        data = '%d-%02d-%02d %02d:%02d:%02d [%s] %s\n' % \
               (lt[0], lt[1], lt[2], lt[3], lt[4], lt[5], levelname, msg)
        if (len(data) + self.size_written) > self.max_len:
            # This message would push the file past max_len: rotate first.
            self.fileobj.close()
            self._rotate()
        self.fileobj.write(data)
        self.fileobj.flush()
        self.size_written += len(data)
        if self.interactive:
            print(data, end="")

    def _rotate(self):
        """Move the current log to the backup name and open a new file."""
        import os
        try:
            # Remove the previous backup (if any) so the rename cannot fail.
            os.remove(self.backupfile)
        except OSError:
            pass
        # NOTE: the stray debug print("running rotate") was removed here.
        os.rename(self.outputfile, self.backupfile)
        self.size_written = 0
        self.fileobj = open(self.outputfile, 'w')

    def debug(self, msg):
        """Log *msg* at DEBUG severity."""
        if self.level <= Logger.DEBUG:
            self._write('DBG', msg)

    def info(self, msg):
        """Log *msg* at INFO severity."""
        if self.level <= Logger.INFO:
            self._write('INF', msg)

    def warning(self, msg):
        """Log *msg* at WARNING severity."""
        if self.level <= Logger.WARNING:
            self._write('WRN', msg)

    warn = warning  # alias, mirroring the stdlib logging API

    def error(self, msg):
        """Log *msg* at ERROR severity."""
        if self.level <= Logger.ERROR:
            self._write('ERR', msg)

    def set_level(self, level):
        """Change the minimum severity that gets written."""
        self.level = level

    def close(self):
        """Close the underlying file; the logger is unusable afterwards."""
        self.fileobj.close()
        self.fileobj = None
# Process-wide Logger singleton, managed by the three functions below.
_logger = None


def initialize_logging(filename, max_len=32000, level=Logger.INFO,
                       interactive=False):
    """Create the singleton Logger.  Raises if already initialized."""
    global _logger
    # `is not None` instead of `!= None`: identity test is the correct
    # (and conventional) way to compare against the None singleton.
    if _logger is not None:
        raise Exception("Logger was already initialized!")
    _logger = Logger(filename, max_len, level, interactive)


def get_logger():
    """Return the singleton Logger.  Raises if not yet initialized."""
    if _logger is None:
        raise Exception("Logger was not yet initialized!")
    return _logger


def close_logging():
    """Close and discard the singleton Logger; a no-op if uninitialized."""
    global _logger
    if _logger is not None:
        _logger.close()
        _logger = None
| {
"repo_name": "mpi-sws-rse/thingflow-python",
"path": "micropython/logger.py",
"copies": "2",
"size": "2948",
"license": "apache-2.0",
"hash": -9000560593428604000,
"line_mean": 27.9019607843,
"line_max": 79,
"alpha_frac": 0.5576662144,
"autogenerated": false,
"ratio": 3.9359145527369828,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00940996653844951,
"num_lines": 102
} |
"""A really simple url shortener for App Engine."""
from google.appengine.api import users
from snakeguice import inject
from snakeguice.config import Config
from mako.template import Template
import webob
import interfaces
class ShortsController(object):
"""Web controller for the URL shortener: list, create, and redirect."""
@inject(auth_service=interfaces.AuthService,
short_service=interfaces.LinkShorteningService)
def __init__(self, auth_service, short_service):
# Both collaborators are injected by snake-guice.
self.auth_service = auth_service
self.short_service = short_service
def index(self, request):
# Render the landing page with the five most recent links.
t = Template(filename='index.mako')
return webob.Response(t.render(links=self.short_service.last(5),
base_url=request.host_url))
def create(self, request):
# Shorten the POSTed 'url'; requires a logged-in, authorized user.
if not self.auth_service.authorize(users.get_current_user()):
# this could be handled better, but I'm lazy
location = users.create_login_url(request.application_url)
return webob.exc.HTTPFound(location=location)
# I'm purposely not validating yet. I'm not a moron.
link = self.short_service.create_link(request.POST.get('url'))
# Redirect back to the index with the new short URL in the fragment.
location = '/#created:{0}'.format(
link.shortened_url(request.host_url))
return webob.exc.HTTPMovedPermanently(location=location)
def redirect(self, request, code):
# Resolve a short code to its target URL; 404 when unknown.
link = self.short_service.get_link(code=code)
if not link:
return webob.exc.HTTPNotFound()
return webob.exc.HTTPMovedPermanently(location=link.url)
def favicon(self, request):
return webob.exc.HTTPNotFound() # maybe i can implement this later...
| {
"repo_name": "dstanek/shorts",
"path": "controllers.py",
"copies": "1",
"size": "1641",
"license": "mit",
"hash": 5685440687697836000,
"line_mean": 35.4666666667,
"line_max": 77,
"alpha_frac": 0.6660572821,
"autogenerated": false,
"ratio": 4.041871921182266,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002849002849002849,
"num_lines": 45
} |
"""Area/message template importer for CAPCollector project.
Imports area/messages template files to corresponding SQL tables.
File names must end with .xml.
An area template is a single CAP <area> block.
A message template is a single CAP <alert> block with a single <info> block;
<area> blocks are ignored.
Run
$ python manage.py import_templates area /home/user/path/to/templates/
to import area templates or
$ python manage.py import_templates message /home/user/path/to/templates/
to import message templates.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/custom-management-commands/
"""
__author__ = "arcadiy@google.com (Arkadii Yakovets)"
import os
import uuid
from core import models
from core import utils
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
class Command(BaseCommand):
    """Template importer command implementation."""

    args = "<templates_type templates_path>"
    help = "Imports existing area or message template files to SQL tables."

    def handle(self, *args, **options):
        """Import every *.xml file in templates_path into the model chosen
        by templates_type ('area' or 'message').

        Raises CommandError on a wrong argument count or an unknown type.
        """
        if len(args) != 2:
            raise CommandError(
                "Wrong arguments number! Please use python manage.py import_templates"
                " template_type template_path (e.g. python manage.py import_templates"
                " area /home/user/path/to/templates/ or python manage.py"
                " import_templates message /home/user/path/to/templates/")
        templates_type = args[0]
        templates_path = args[1]
        # Pick the destination model once, up front, so an invalid type (or an
        # empty directory) fails fast instead of raising NameError later, as
        # the old per-file selection did.
        if templates_type == "area":
            template_model = models.AreaTemplate
        elif templates_type == "message":
            template_model = models.MessageTemplate
        else:
            raise CommandError("Unknown template type: %s" % templates_type)
        template_objects = []
        for file_name in os.listdir(templates_path):
            if not file_name.endswith(".xml"):
                print("Ignored file: %s" % file_name)
                continue
            file_path = os.path.join(templates_path, file_name)
            with open(file_path, "r") as template_file:
                template_content = template_file.read()
            # Parse to check the template is well-formed (raises on failure).
            template_dict = utils.ParseAlert(template_content, "xml", uuid.uuid4())
            template_obj = template_model()
            # BUG FIX: rstrip(".xml") strips any trailing run of the characters
            # '.', 'x', 'm', 'l' (e.g. "matrix.xml" -> "matri"); slice off the
            # extension instead.
            template_obj.title = file_name[:-len(".xml")].strip()
            template_obj.content = template_content
            template_objects.append(template_obj)
        # Save to DB in a single query.
        template_model.objects.bulk_create(template_objects)
| {
"repo_name": "CAPTools/CAPCollector",
"path": "core/management/commands/import_templates.py",
"copies": "2",
"size": "2354",
"license": "bsd-3-clause",
"hash": 5003252010830021000,
"line_mean": 33.115942029,
"line_max": 80,
"alpha_frac": 0.707306712,
"autogenerated": false,
"ratio": 3.865353037766831,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0052421802512180775,
"num_lines": 69
} |
"""A reanalysis of Rosenbluth measurements of the proton form factors
(main analysis routine)
Copyright (c) Alexander Gramolin, 2016
https://github.com/gramolin/rosenbluth/
"""
import numpy as np
import pandas as pd
import radcorr as rc
# Overall normalization uncertainties of the cross sections (%):
norm_uncert = [1.90, 1.77, 1.77] # For Sets 1, 2, and 3, respectively
# Reading the input data:
data = pd.read_csv('data_input.csv')
# Numbers of data points in each of the three sets:
N1 = len(data[data['set']==1])
N2 = len(data[data['set']==2])
N3 = len(data[data['set']==3])
print('Numbers of data points: N1 = ' + str(N1) + ', N2 = ' + str(N2) + ', N3 = ' + str(N3) + '\n')
# Objects of the Kinematics and FormFactors classes:
kin = rc.Kinematics()
ff = rc.FormFactors('Custom') # Custom parameterization
# Target material (hydrogen):
mat = rc.Material(Z=1, A=1.00794, X0=63.04)
# NOTE: bare triple-quoted strings like the one below are used as section
# markers; at module level they are no-op expression statements.
"""Reapplying radiative corrections"""
for i in range(N1+N2+N3):
# Setting the actual kinematics using the E1 and theta values:
kin.Set_E1_theta(E1=data['E1'][i], theta=rc.DegToRad(data['theta'][i]))
# Calculating the cut parameter, Delta E:
DeltaE = kin.Get_DeltaE(data['WW_cut'][i])
# The standard radiative corrections according to Maximon and Tjon:
data.loc[i,'d_MTj'] = rc.delta_MaximonTjon(kinematics=kin, DeltaE=DeltaE)
# An additional vacuum polarization correction:
data.loc[i,'d_vac'] = rc.delta_vac_mu(kin.Get_QQ()) \
+ rc.delta_vac_tau(kin.Get_QQ()) \
+ rc.delta_vac_q(kin.Get_QQ())
# An additional correction due to internal bremsstrahlung:
data.loc[i,'d_IntBr'] = rc.delta_IntBr(DeltaE=DeltaE, kinematics=kin, ff=ff)
# The external bremsstrahlung correction:
data.loc[i,'d_ExtBr'] = rc.delta_ExtBr(DeltaE=DeltaE, kinematics=kin,
t_i=0.01*data['t_i'][i], t_f=0.01*data['t_f'][i],
mat_i=mat, mat_f=mat, ff=ff)
# The correction factor due to ionization losses:
data.loc[i,'C_L'] = rc.C_Landau(DeltaE=DeltaE, kinematics=kin,
t_i=0.01*data['t_i'][i], t_f=0.01*data['t_f'][i],
mat_i=mat, mat_f=mat)
# Old radiative correction factors:
data['C_old'] = np.exp(data['d_int1'] + data['d_int2'] + data['d_ext'])
# New radiative correction factors:
data['C_new'] = np.exp(data['d_MTj'] + data['d_vac'] + data['d_IntBr'] + data['d_ExtBr'])*data['C_L']
# Applying the new radiative corrections:
# (dividing out the old factor and multiplying in the new one)
data['C_ratio'] = data['C_old']/data['C_new']
data['sigma'] = data['sigma']*data['C_ratio']
# Saving results to the file "results_table.csv":
np.savetxt('results_table.csv',
data[['set','QQ','epsilon','d_MTj','d_vac','d_IntBr','d_ExtBr','C_L','C_new','C_ratio','sigma','estat','esyst','enorm']],
fmt='%i,%.3f,%.3f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.3e,%.2f,%.2f,%.2f',
header='set,QQ,epsilon,d_MTj,d_vac,d_IntBr,d_ExtBr,C_L,C_new,C_ratio,sigma,estat,esyst,enorm', comments='')
"""Reduced cross sections and their uncertainties"""
for i in range(N1+N2+N3):
# Setting the nominal kinematics using the Q^2 and epsilon values:
kin.Set_QQ_epsilon(QQ=data['QQ'][i], epsilon=data['epsilon'][i])
# Reduced cross sections (dimensionless):
data.loc[i,'sigma_red'] = data['sigma'][i]*kin.Get_epsilon()*(1. + kin.Get_tau())/rc.GeVToNb(kin.Get_sigma_Mott())
# Normalization to the dipole form factor:
data.loc[i,'sigma_red'] = data['sigma_red'][i]/(rc.GD(kin.Get_QQ()))**2
# Combined statistical and systematic uncertainties:
data.loc[i,'e_sigma_red'] = 0.01*np.sqrt(data['estat'][i]**2 + data['esyst'][i]**2)*data['sigma_red'][i]
# Values of tau:
data.loc[i,'tau'] = kin.Get_tau()
"""Minimization of the chi-square function using linear algebra.
The vector x of the best-fit parameters is obtained
after solving the matrix equation A*x = b.
"""
# An auxiliary matrix X (its columns correspond to the partial derivatives
# of the chi-square function with respect to n1, n2, n3, a1, a2, a3, b1, b2, and b3):
X = np.zeros((N1+N2+N3,9))
# Columns 0-2: each set's points enter only its own normalization column.
X[:N1,0] = data['sigma_red'][:N1]/data['e_sigma_red'][:N1]
X[N1:N1+N2,1] = data['sigma_red'][N1:N1+N2]/data['e_sigma_red'][N1:N1+N2]
X[N1+N2:,2] = data['sigma_red'][N1+N2:]/data['e_sigma_red'][N1+N2:]
X[:,3] = data['epsilon']*data['tau']/data['e_sigma_red']
X[:,4] = X[:,3]*data['tau']
X[:,5] = X[:,4]*data['tau']
X[:,6] = ((rc.mu*data['tau'])**2)/data['e_sigma_red']
X[:,7] = X[:,6]*data['tau']
X[:,8] = X[:,7]*data['tau']
# The coefficient matrix A:
A = np.dot(X.T, X)
# The norm_uncert terms added to the diagonal (and to b below) act as
# Gaussian priors pinning each normalization factor near 1.
for i in range(3):
A[i,i] = A[i,i] + 1./(0.01*norm_uncert[i])**2
# The covariance matrix (inverse to A):
Cov = np.linalg.inv(A)
# The vector b:
y = (data['epsilon'] + (rc.mu**2)*data['tau'])/data['e_sigma_red']
b = np.dot(X.T, y)
for i in range(3):
b[i] = b[i] + 1./(0.01*norm_uncert[i])**2
# The vector x of the best-fit parameters:
x = np.dot(Cov, b)
# Printing the best-fit parameters found:
print('Best-fit parameters:')
variables = ['n1', 'n2', 'n3', 'a1', 'a2', 'a3', 'b1', 'b2', 'b3']
for i in range(9):
print(variables[i] + ' = {0: .3f} +/- {1:.3f}'.format(x[i], np.sqrt(Cov[i,i])))
print('The best-fit parameters and the covariance matrix')
print('are output to the file "results_fit.csv".\n')
# Saving the best-fit parameters (the first line)
# and the covariance matrix (the next 9 lines) to the file "results_fit.csv":
np.savetxt('results_fit.csv', np.vstack((x, Cov)), delimiter=',',
fmt='%+.3e', header='n1,n2,n3,a1,a2,a3,b1,b2,b3', comments='')
# Calculating and printing the chi-square value achieved:
Chisq = np.dot((y - np.dot(X, x)).T, y - np.dot(X, x))
for i in range(3):
Chisq = Chisq + ((x[i] - 1)/(0.01*norm_uncert[i]))**2
print('Chi-square value: ' + str(round(Chisq, 1)) + ' for ' + str(N1+N2+N3-9) + ' degrees of freedom')
| {
"repo_name": "gramolin/rosenbluth",
"path": "reanalysis.py",
"copies": "1",
"size": "5884",
"license": "mit",
"hash": 5746635078529386000,
"line_mean": 37.9668874172,
"line_max": 132,
"alpha_frac": 0.6169272604,
"autogenerated": false,
"ratio": 2.5560382276281493,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8480754783862692,
"avg_score": 0.03844214083309134,
"num_lines": 151
} |
"""A reanalysis of Rosenbluth measurements of the proton form factors
(script to make Figure 1)
Copyright (c) Alexander Gramolin, 2016
https://github.com/gramolin/rosenbluth/
"""
import numpy as np
import pandas as pd
import radcorr as rc
import matplotlib.pyplot as plt
# Some settings for the figure:
plt.rc('text', usetex=True)
plt.rc('xtick', labelsize=19)
plt.rc('xtick.major', size=9)
plt.rc('ytick', labelsize=19)
plt.rc('ytick.major', size=9)
plt.rc('ytick.minor', size=4)
plt.figure(1, figsize=(7, 5.5))
plt.subplots_adjust(left=0.16, right=0.98, bottom=0.16, top=0.97)
plt.semilogy() # Log vertical scale
# The axis labels:
plt.xlabel(r"$E_3,~\mathrm{GeV}$", fontsize=20, labelpad=12)
plt.ylabel(r"$d^2 \sigma_{\mathrm{int.br.}} / (d \Omega \, d E_3),~\mathrm{GeV}^{-3} \, \mathrm{sr}^{-1}$", fontsize=20)
# Reading the Monte Carlo data obtained using the ESEPP event generator
# (see https://github.com/gramolin/esepp):
esepp = pd.read_csv('data_esepp.csv')
# Plotting the Monte Carlo data:
plt.errorbar(esepp['E3'], esepp['cross_section'], xerr=0.012, fmt='ok', markersize=6, capsize=0, linewidth=1.5)
# An object of the FormFactors class:
ff = rc.FormFactors('Dipole') # The dipole parameterization
# Setting the kinematics (E1 = 1 GeV, theta = 70 deg):
kin = rc.Kinematics(E1=1., theta=rc.DegToRad(70.))
# Radiative tail according to the soft-photon approximation:
# (E3 grid stops just below the elastic peak to avoid the singularity)
xx = np.arange(0.005, kin.Get_E3()-0.001, 0.001)
tail_soft = rc.sigma_IntBr_soft(E3=xx, kinematics=kin, ff=ff)
# More accurate description of the radiative tail:
# (the extra additive term restores the soft-photon contribution near the peak)
tail_hard = rc.sigma_IntBr(E3=xx, kinematics=kin, ff=ff)
tail_hard = tail_hard + (2.*rc.alpha/rc.pi)*(1./(kin.Get_E3() - xx))*(2.*np.log(kin.Get_eta()) + \
kin.Get_E4()*np.log(kin.Get_x())/kin.Get_p4() - 1.)*rc.sigma_Rosenbluth(E1=1., theta=rc.DegToRad(70.), ff=ff)
# Plotting the curves:
plt.plot(xx, tail_hard, '-r', linewidth=2, alpha=0.8) # Red solid line
plt.plot(xx, tail_soft, '--b', linewidth=2) # Blue dashed line
# Saving the figure to pdf and png files:
plt.savefig('fig1.pdf')
plt.savefig('fig1.png')
| {
"repo_name": "gramolin/rosenbluth",
"path": "fig1.py",
"copies": "1",
"size": "2079",
"license": "mit",
"hash": 7955526139429504000,
"line_mean": 35.4736842105,
"line_max": 120,
"alpha_frac": 0.6921596922,
"autogenerated": false,
"ratio": 2.5078407720144753,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8578949688777454,
"avg_score": 0.024210155087404227,
"num_lines": 57
} |
"""A reanalysis of Rosenbluth measurements of the proton form factors
(script to make Figure 2)
Copyright (c) Alexander Gramolin, 2016
https://github.com/gramolin/rosenbluth/
"""
import numpy as np
import pandas as pd
import radcorr as rc
import matplotlib.pyplot as plt
# Some settings for the figure:
plt.rc('text', usetex=True)
plt.rc('xtick', labelsize=19)
plt.rc('xtick.major', size=9)
plt.rc('ytick', labelsize=19)
plt.rc('ytick.major', size=9)
plt.rc('ytick.minor', size=4)
f, axarr = plt.subplots(3, sharex=True, figsize=(7, 15))
f.subplots_adjust(left=0.15, right=0.97, bottom=0.06, top=0.98, hspace=0.07)
# The axis labels:
axarr[0].set_ylabel(r"$G_E / G_D$", fontsize=20)
axarr[1].set_ylabel(r"$G_M / (\mu \, G_D)$", fontsize=20)
axarr[2].set_ylabel(r"$\mu \, G_E / G_M$", fontsize=20)
axarr[2].set_xlabel(r"$Q^2,~\mathrm{GeV}^2$", fontsize=20, labelpad=9)
# The "(a)", "(b)", and "(c)" labels:
axarr[0].text(0.5, 0.1, '(a)', fontsize=22)
axarr[1].text(0.5, 0.9, '(b)', fontsize=22)
axarr[2].text(0.5, 0.1, '(c)', fontsize=22)
# The axis limits:
axarr[0].set_ylim(-0.1,2.5)
axarr[1].set_ylim(0.885,1.10)
axarr[2].set_xlim(0,9.2)
axarr[2].set_ylim(-0.1,2.5)
axarr[2].set_xticks(range(10))
# Initializing some arrays:
QQ1 = np.zeros(100)
QQ2 = np.zeros(100)
fits = np.zeros((3,100))
bands = np.zeros((3,100))
kelly = np.zeros((3,100))
diff = np.zeros((3,6))
sigma = np.zeros((6,6))
"""Plotting the Kelly fit, our fit, and the confidence bands"""
# Reading the best-fit parameters and the covariance matrix:
fit = pd.read_csv('results_fit.csv')
for j in range(100):
# Q^2 values:
QQ1[j] = 1. + 7.83*j/99. # From 1 to 8.83 GeV^2
QQ2[j] = 10*j/99. # From 0 to 10 GeV^2
# Tau value:
tau = QQ1[j]/(2.*rc.m_p)**2
# Kelly fit:
kelly[0,j] = rc.GE_Kelly(QQ2[j])/rc.GD(QQ2[j]) # G_E/G_D
kelly[1,j] = rc.GM_Kelly(QQ2[j])/(rc.mu*rc.GD(QQ2[j])) # G_M/(mu*G_D)
kelly[2,j] = kelly[0,j]/kelly[1,j] # mu*G_E/G_M
# Our fit:
fits[0,j] = rc.GE_fit(QQ1[j], fit['a1'][0], fit['a2'][0], fit['a3'][0])/rc.GD(QQ1[j])
fits[1,j] = rc.GM_fit(QQ1[j], fit['b1'][0], fit['b2'][0], fit['b3'][0])/(rc.mu*rc.GD(QQ1[j]))
fits[2,j] = fits[0,j]/fits[1,j]
# Partial derivatives of G_E/G_D with respect to a1, a2, and a3:
diff[0,0] = -tau/(2.*fits[0,j])
diff[0,1] = diff[0,0] * tau
diff[0,2] = diff[0,0] * tau**2
# Partial derivatives of G_M/(mu*G_D) with respect to b1, b2, and b3:
diff[1,3] = -tau/(2.*fits[1,j])
diff[1,4] = diff[1,3] * tau
diff[1,5] = diff[1,3] * tau**2
# Partial derivatives of mu*G_E/G_M with respect to a1, a2, and a3:
diff[2,0] = -tau/(2.*fits[0,j]*fits[1,j])
diff[2,1] = diff[2,0] * tau
diff[2,2] = diff[2,0] * tau**2
# Partial derivatives of mu*G_E/G_M with respect to b1, b2, and b3:
diff[2,3] = tau*fits[0,j]/(2.*fits[1,j]**3)
diff[2,4] = diff[2,3] * tau
diff[2,5] = diff[2,3] * tau**2
# Elements of the covariance matrix:
sigma[0,:] = fit['a1'][4:]
sigma[1,:] = fit['a2'][4:]
sigma[2,:] = fit['a3'][4:]
sigma[3,:] = fit['b1'][4:]
sigma[4,:] = fit['b2'][4:]
sigma[5,:] = fit['b3'][4:]
# Calculation of the confidence bands:
for i in range(3):
for k in range(6):
for l in range(6):
bands[i,j] = bands[i,j] + diff[i,k]*diff[i,l]*sigma[k,l]
bands[i,j] = np.sqrt(bands[i,j])
# Plotting panel (a):
# NOTE(review): axarr, QQ1, QQ2, fits, kelly, bands and fit are defined
# earlier in this script, above this section.
axarr[0].plot([0,10], [1,1], ':k') # Horizontal line
axarr[0].plot(QQ2, kelly[0], '--k', linewidth=2, alpha=0.7) # Kelly fit
axarr[0].plot(QQ1, fits[0], '-r', linewidth=2, alpha=0.6) # Our fit
axarr[0].fill_between(QQ1, fits[0] - bands[0], fits[0] + bands[0], color='r', alpha=0.4) # confidence band around our fit
# Plotting panel (b):
axarr[1].plot([0,10], [1,1], ':k') # Horizontal line
axarr[1].plot(QQ2, kelly[1], '--k', linewidth=2, alpha=0.7) # Kelly fit
axarr[1].plot(QQ1, fits[1], '-r', linewidth=2, alpha=0.6) # Our fit
axarr[1].fill_between(QQ1, fits[1] - bands[1], fits[1] + bands[1], color='r', alpha=0.4) # confidence band around our fit
# Plotting panel (c):
axarr[2].plot([0,10], [1,1], ':k') # Horizontal line
axarr[2].plot(QQ2, kelly[2], '--k', linewidth=2, alpha=0.7) # Kelly fit
axarr[2].plot(QQ1, fits[2], '-r', linewidth=2, alpha=0.6) # Our fit
axarr[2].fill_between(QQ1, fits[2] - bands[2], fits[2] + bands[2], color='r', alpha=0.4) # confidence band around our fit
"""Plotting the original data points"""
# Reading input data from the file "data_unpolarized.csv":
df1 = pd.read_csv('data_unpolarized.csv')
exp1 = df1[df1['experiment']==1] # Data of Walker et al.
exp2 = df1[df1['experiment']==2] # Data of Andivahis et al.
# Panel (a): open markers = Walker et al., filled markers = Andivahis et al.
axarr[0].errorbar(exp1['QQ'], exp1['GE'], yerr=[exp1['GE_error-'], exp1['GE_error+']], fmt='o', markersize=6, markerfacecolor='white', markeredgecolor='blue', color='blue', capsize=5)
axarr[0].errorbar(exp2['QQ'], exp2['GE'], yerr=[exp2['GE_error-'], exp2['GE_error+']], fmt='o', markersize=6, markeredgecolor='blue', color='blue', capsize=5)
# Panel (b):
axarr[1].errorbar(exp1['QQ'], exp1['GM'], yerr=[exp1['GM_error-'], exp1['GM_error+']], fmt='o', markersize=6, markerfacecolor='white', markeredgecolor='blue', color='blue', capsize=5)
axarr[1].errorbar(exp2['QQ'], exp2['GM'], yerr=[exp2['GM_error-'], exp2['GM_error+']], fmt='o', markersize=6, markeredgecolor='blue', color='blue', capsize=5)
# Panel (c):
axarr[2].errorbar(exp1['QQ'], exp1['GE_GM'], yerr=[exp1['GE_GM_error-'], exp1['GE_GM_error+']], fmt='o', markersize=6, markerfacecolor='white', markeredgecolor='blue', color='blue', capsize=5)
axarr[2].errorbar(exp2['QQ'], exp2['GE_GM'], yerr=[exp2['GE_GM_error-'], exp2['GE_GM_error+']], fmt='o', markersize=6, markeredgecolor='blue', color='blue', capsize=5)
"""Plotting the data of polarized measurements"""
# Reading input data from the file "data_polarized.csv":
df2 = pd.read_csv('data_polarized.csv')
# Combined statistical and systematic uncertainties (added in quadrature):
df2['error'] = np.sqrt(df2['estat']**2 + df2['esyst']**2)
exp1 = df2[df2['experiment']==1] # Data of Punjabi et al. (2005)
exp2 = df2[df2['experiment']==2] # Data of Puckett et al. (2012)
exp3 = df2[df2['experiment']==3] # Data of Puckett et al. (2010)
# Plotting the data points with the corresponding error bars:
axarr[2].errorbar(exp1['QQ'], exp1['GE_GM'], yerr=exp1['error'], fmt='s', markersize=6, capsize=5, color='g')
axarr[2].errorbar(exp2['QQ'], exp2['GE_GM'], yerr=exp2['error'], fmt='^', markersize=8, capsize=5, color='g')
axarr[2].errorbar(exp3['QQ'], exp3['GE_GM'], yerr=exp3['error'], fmt='v', markersize=8, capsize=5, color='g')
"""Rosenbluth separation using the corrected cross sections"""
# Reading input data from the file "results_table.csv":
df3 = pd.read_csv('results_table.csv')
# Combined statistical and systematic uncertainties
# (the 0.01 factor presumably converts percent to fraction — confirm against the data file):
df3['error'] = 0.01*np.sqrt(df3['estat']**2 + df3['esyst']**2)
# Applying the new normalization factors (fit['n1'], fit['n2'], fit['n3'] per data set):
for i in range(len(df3['set'])):
    if df3['set'][i] == 1:
        df3.loc[i,'sigma'] = df3['sigma'][i]*fit['n1'][0] # Set 1
    elif df3['set'][i] == 2:
        df3.loc[i,'sigma'] = df3['sigma'][i]*fit['n2'][0] # Set 2
    else:
        df3.loc[i,'sigma'] = df3['sigma'][i]*fit['n3'][0] # Set 3
# Q^2 values:
QQ_list = [1., 2.003, 2.497, 3.007, 1.75, 2.5, 3.25, 4., 5., 6., 7.]
# An array for storing results (rows: G_E/G_D, G_M/(mu*G_D), mu*G_E/G_M):
new_points = np.zeros((3,len(QQ_list)))
# An object of the Kinematics class:
kin = rc.Kinematics()
for i in range(len(QQ_list)):
    # Q^2 value:
    QQ = QQ_list[i]
    # Tau value: tau = Q^2 / (2*m_p)^2
    tau = QQ/(2.*rc.m_p)**2
    # Epsilon values (arguments to fit); rows selected by float-tolerant Q^2 match:
    x = df3[(df3['QQ'] - QQ)**2 < 1e-6]['epsilon'].values
    # Calculation of the Mott cross sections:
    sigma_Mott = np.zeros(len(x))
    for j in range(len(x)):
        kin.Set_QQ_epsilon(QQ=QQ, epsilon=x[j])
        sigma_Mott[j] = kin.Get_sigma_Mott()
    # Reduced cross sections (values to fit):
    y = rc.NbToGeV(df3[(df3['QQ'] - QQ)**2 < 1e-6]['sigma'])*x*(1. + tau)/sigma_Mott
    # Uncertainties of the reduced cross sections (relative error times value):
    e = df3[(df3['QQ'] - QQ)**2 < 1e-6]['error']*y
    # The slope and the intercept of the linear fit:
    slope, intercept = rc.Linear_fitter(x, y, e)
    # Results of the Rosenbluth separation:
    new_points[0,i] = np.sqrt(slope)/rc.GD(QQ) # G_E/G_D
    new_points[1,i] = np.sqrt(intercept/tau)/(rc.mu*rc.GD(QQ)) # G_M/(mu*G_D)
    new_points[2,i] = new_points[0,i]/new_points[1,i] # mu*G_E/G_M
# Panel (a): first four Q^2 points use one marker, the remainder another.
axarr[0].plot(QQ_list[:4], new_points[0][:4], marker=(4,2,45), color='k', markeredgewidth=1, linewidth=0, markersize=12)
axarr[0].plot(QQ_list[4:], new_points[0][4:], marker=(6,2,90), color='k', markeredgewidth=1, linewidth=0, markersize=12)
# Panel (b):
axarr[1].plot(QQ_list[:4], new_points[1][:4], marker=(4,2,45), color='k', markeredgewidth=1, linewidth=0, markersize=12)
axarr[1].plot(QQ_list[4:], new_points[1][4:], marker=(6,2,90), color='k', markeredgewidth=1, linewidth=0, markersize=12)
# Panel (c):
axarr[2].plot(QQ_list[:4], new_points[2][:4], marker=(4,2,45), color='k', markeredgewidth=1, linewidth=0, markersize=12)
axarr[2].plot(QQ_list[4:], new_points[2][4:], marker=(6,2,90), color='k', markeredgewidth=1, linewidth=0, markersize=12)
# Saving the figure to pdf and png files:
plt.savefig('fig2.pdf')
plt.savefig('fig2.png')
| {
"repo_name": "gramolin/rosenbluth",
"path": "fig2.py",
"copies": "1",
"size": "9148",
"license": "mit",
"hash": -4822253677351526000,
"line_mean": 37.4369747899,
"line_max": 192,
"alpha_frac": 0.6165282029,
"autogenerated": false,
"ratio": 2.349858720780889,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8233090216646624,
"avg_score": 0.04665934140685326,
"num_lines": 238
} |
"""Area of a Rectangular Room"""
import tkcomponents
CONVERSION_FACTOR = 0.09290304
#pylint: disable=no-member
def area_of_rectangle(prompt, units):
    """Prompt for a room's dimensions and print its area in both unit systems.

    Separates calculations from input: all user interaction goes through
    the injected callable, so the function can be driven by a console,
    a GUI, or a test double.

    prompt - a function that takes a message
        to prompt the user and returns a number
    units - either 'feet' or 'meters'

    Raises:
        ValueError: if units is neither 'feet' nor 'meters'.
    """
    # Single lookup table instead of an if/elif chain; mirrors the
    # module-level CONVERSIONS mapping used by the refactored variants.
    conversions = {
        'feet': (CONVERSION_FACTOR, 'meters'),
        'meters': (1 / CONVERSION_FACTOR, 'feet'),
    }
    if units not in conversions:
        raise ValueError("units must be either 'feet' or 'meters'")
    conversion_factor, units_converted = conversions[units]
    length = prompt('What is the length of the room in {units}? '
                    .format(units=units))
    width = prompt('What is the width of the room in {units}? '
                   .format(units=units))
    area = length * width
    area_converted = area * conversion_factor
    print('You entered dimensions of {length} {units} by {width} {units}.'
          .format(length=length, width=width, units=units))
    print('The area is')
    print('{area} square {units}'.format(area=area, units=units))
    print('{area:.3f} square {units}'
          .format(area=area_converted, units=units_converted))
def ex7():
    """Non-numeric inputs throw an exception"""
    def prompt(message):
        """Convert the raw response to int, raising ValueError on bad input."""
        return int(input(message))
    area_of_rectangle(prompt, 'feet')
def input_numeric(prompt):
    """Keep prompting the user until the number can be converted"""
    while True:
        response = input(prompt)
        try:
            return int(response)
        except ValueError:
            print('Value entered must be a number.')
def ex7a():
    """Do not accept non-numeric inputs"""
    # input_numeric re-prompts until the user enters a valid integer.
    area_of_rectangle(prompt=input_numeric, units='feet')
def ex7b():
    """Prompt the user for either feet or meters"""
    # Re-ask until one of the two supported unit systems is entered.
    units = input('Select the unit of measure, feet or meters: ')
    while units not in ('feet', 'meters'):
        units = input('Select the unit of measure, feet or meters: ')
    area_of_rectangle(input_numeric, units)
def ex7c():
    """GUI"""
    # pylint: disable=no-member
    from tkcomponents import create, input_stream, radio_stream, output_label
    import rx
    root = create('Area of a Rectangular Room')
    # (conversion factor, converted-unit name) per supported unit system.
    conversions = {
        'feet': (CONVERSION_FACTOR, 'meters'),
        'meters': (1 / CONVERSION_FACTOR, 'feet'),
    }
    options = [(name, name) for name in conversions]
    units = radio_stream(root, options, 0)
    length = input_stream(root, units.map('What is the length of the room in {0}?'.format), 1)
    width = input_stream(root, units.map('What is the width of the room in {0}?'.format), 2)
    def describe(unit, raw_length, raw_width):
        """Render the output text whenever any of the input streams changes."""
        try:
            room_length = int(raw_length)
            room_width = int(raw_width)
        except ValueError:
            return ''
        factor, other_unit = conversions[unit]
        area = room_length * room_width
        lines = [
            'You entered dimensions of {length} {units} by {width} {units}.'
            .format(length=room_length, width=room_width, units=unit),
            'The area is',
            '{area} square {units}'.format(area=area, units=unit),
            '{area:.3f} square {units}'
            .format(area=area * factor, units=other_unit),
        ]
        return '\n'.join(lines)
    output_label(root, rx.Observable.combine_latest(units, length, width, describe), 3)
    root.mainloop()
# Shared unit-conversion table: maps each supported unit of measure to
# (conversion factor to the other system, name of the converted unit).
CONVERSIONS = {
    'feet': (CONVERSION_FACTOR, 'meters'),
    'meters': (1 / CONVERSION_FACTOR, 'feet'),
}
def attempted_refactor(component):
    """attempt to refactor the above implementations into something that utilizes observables"""
    # pylint: disable=no-member
    import rx
    options = [(choice, choice) for choice in CONVERSIONS]
    units = component.radio_stream(options, 'feet')
    length = component.input_stream(units.map('What is the length of the room in {0}?'.format))
    width = component.input_stream(units.map('What is the width of the room in {0}?'.format))
    def describe(unit, raw_length, raw_width):
        """Render the output text whenever any of the input streams changes."""
        try:
            room_length = int(raw_length)
            room_width = int(raw_width)
        except ValueError:
            return ''
        factor, other_unit = CONVERSIONS[unit]
        area = room_length * room_width
        lines = [
            'You entered dimensions of {length} {units} by {width} {units}.'
            .format(length=room_length, width=room_width, units=unit),
            'The area is',
            '{area} square {units}'.format(area=area, units=unit),
            '{area:.3f} square {units}'
            .format(area=area * factor, units=other_unit),
        ]
        return '\n'.join(lines)
    component.output_label(rx.Observable.combine_latest(units, length, width, describe))
class Gui():
    """Tk-backed component provider: each widget helper claims the next grid row."""
    def __init__(self, title):
        """Create a tk GUI with helper methods"""
        self.root = tkcomponents.create(title)
        # Row counter; pre-incremented by __inc before each widget is placed.
        self.row = 0
    def run(self):
        """Run the UI"""
        self.root.mainloop()
    def input_stream(self, stream):
        """Returns an input box with a label
        whose text is given by the input stream
        """
        return tkcomponents.input_stream(self.root, stream, self.__inc())
    def radio_stream(self, options, default):
        """Returns a radio button with the options provided"""
        return tkcomponents.radio_stream(self.root, options, self.__inc(), default)
    def output_label(self, stream):
        """Create an output label of results given by the input stream"""
        tkcomponents.output_label(self.root, stream, self.__inc())
    def __inc(self):
        """Advance the row counter and return the new row index."""
        self.row = self.row + 1
        return self.row
class Cli():
    """Console-backed component provider mirroring the Gui interface."""
    # pylint: disable=no-self-use
    def input_stream(self, stream):
        """Returns an input with a label
        whose text is given by the input stream
        """
        def ask(prompt):
            """Show the prompt on the console and parse the reply as int."""
            return int(input(prompt))
        return stream.map(ask)
    def radio_stream(self, _options, default):
        """Returns a radio button with the options provided"""
        from rx import Observable
        return Observable.just(default)
    def output_label(self, stream):
        """Create an output label of results given by the input stream"""
        stream.subscribe(print)
def ex7refactor():
    """implementation of the attempted cli refactor"""
    console = Cli()
    attempted_refactor(console)
def ex7refactorgui():
    """implementation of the attempted gui refactor"""
    window = Gui('Area of a rectangular room')
    attempted_refactor(window)
    window.run()
| {
"repo_name": "brad-h/expy",
"path": "ExPy/ExPy/module07.py",
"copies": "1",
"size": "6830",
"license": "mit",
"hash": 1346294664807402000,
"line_mean": 31.2169811321,
"line_max": 96,
"alpha_frac": 0.6162518302,
"autogenerated": false,
"ratio": 3.9988290398126463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004225662761105669,
"num_lines": 212
} |
""" areaSelect.py"""
import pygame
import dinosInSpace
import scroller56
import os
import dataStorage56
import spriteBasic
import infoGraphic56
import sector
import groupMods56
import gfx56
import screenWipe
import fpsSwitch
import soundFx56
import sparkleTrail
# ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| ADD PORTAL |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| LOOK FOR THIS ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
# --- Basic RGB color constants ---
BLACK = (0,0,0)
WHITE = (255,255,255)
LTGREY2 = (200,200,200)
LTGREY = (150,150,150)
GREY = (100,100,100)
DGREY = (60,60,60)
ORANGE = (255,125,0)
GREEN = (0,255,0)
RED = (255,0,0)
DARK_RED = (150,0,0)
BLUE = (0,0,255)
YELLOW = (255,255,0)
TEAL = (0,50,50)
# --- Selection-field layout ---
FIELDSIZE = (8,8) # 12
XGRIDSTEP = 100
YGRIDSTEP = 100
MAX_SNAX = 24 # last puzzle unlocks at...
# info bar
INFOBAR_WIDTH = 200
INFOBAR_YSTEP = 25
INFOBAR_BOX_UNIT_SIZE = (INFOBAR_WIDTH, 2*INFOBAR_YSTEP)
LOCKED_FONTSIZE = 20
LOCKED_BKGCOLOR = WHITE
DEFAULT_FONTSIZE = 20
TITLE_FONTSIZE = 18
SMALL_FONTSIZE = 16
XTRA_SMALL_FONTSIZE = 12
IB_ALPHA = None
SNAX_COLLECTED_TEXT_COLOR = ORANGE
SNAX_GFX_SPACING = 30
SNAX_GFX_ALPHA = 100
SNAX_BOX_COLOR = ORANGE
# --- Portal number / label / glyph rendering ---
PORTAL_NUMSIZE = 14
PORTAL_NUMCOLOR = (255,255,255)
PORTAL_NUMBLIT = (5,5)
PORTAL_SCALEBY = 20
PORTAL_LABEL_OFFSET = (0,-80)
PORTAL_LABEL_ALPHA_STD = 250
PORTAL_LABEL_ALPHA_BLINK = 150
PORTAL_LABEL_BLINK_DELAY = 5 # *2 for 60 fps
PORTAL_SGLYPH_SIZE = 16
PORTAL_SGLYPH_COLOR = DARK_RED
PORTAL_SGLYPH_BLIT = (5,25)
PORTAL_SGLYPH_TEXT = "s!"
# Completed-puzzle counts required to unlock each sector; the trailing
# comments list the puzzle numbers contained in that sector (see the
# NEW_PROFILE field reference below: index 5 is the unlock threshold).
SECTORS = [0, # 1
           1, # 2 - 3 - 4
           4, # 5 - 6 - 7 - 8 - 9 - 10
           8, # 11 - 12 - 13 - 14 - 15 - 16
           14,# 17 - 18 - 19 - 20 - 21 - 22
           19,# 23 - 24 - 25 - 26 - 27 - 28
           24,# 29 - 30 - 31 - 32 - 33 - 34
           30,# 35 - 36 - 37 - 38 - 39 - 40
           40]# 41
# Profile-dictionary keys for the non-puzzle bookkeeping entries.
SNAX_ARCHIVE = "_snaxArchive"
SNAX_MILESTONE = "_snaxMilestone"
NON_PUZZLE_ENTRY = [SNAX_ARCHIVE, SNAX_MILESTONE, "quit to title", "interstellar snax ship"]
TUTORIAL_LENGTH = 7 # for totals reference, plz sync with actual number of TUT portals / puzzles %%%%%%%%%%%%%%%%%%%%%%%% TUTORIAL_LENGTH %%%%%%%%
# Disabled sparkle-trail effect configuration (see the commented-out
# sparkle code in SelectScreen).
##WITH_SPARKLE = True
##SPARK_SIZE = (6,6)
##SPARK_COLOR = WHITE
##SPARK_BOUNDS = (800,600)
##SPARK_FREQUENCY = 15 #1)--15
##SPARK_FADESPEED = 5 #1)--2
##SPARK_CENTER = (400,300)
##SPARK_VELOCITY = (0,0)
# Template for a freshly created player profile.
NEW_PROFILE = {
    # puzzle name : [file name, locked, complete, difficulty, snacks collected, unlocked after...]
    #
    # -0 (string) _file name_ : passed as 'dest' to map selector (level)
    # -1 (bool) _locked_ : controls player access / preview
    # -2 (bool) _complete_ : displays if complete, adds to global profile completed count
    # -3 (int) _difficulty_ : displays difficulty level
    # -4 (list) _snacks_ : displays how many snacks collected as list (0 or 1), pass 'None' if n/a
    # -5 (int) _lock number_ : portal is unlocked after this number of total completed puzzles
    #
    #
    # example:
    #
    # "roundabout" : ["2_ROUNDABOUT", True, False, 0, [0,0], SECTORS[1]],
    #
    # ********************
    # _snaxArchive : [snackName1, snackName2, snackName3...]
    # ********************
    # misc data
    "_snaxArchive" : [], # list of snax collected for snax screen to access
    "_snaxMilestone" : [1,3,20], # num of snax required to unlock stuff, changes to 0 when achieved
    # non-puzzle portals
    "quit to title" : ["_EXIT", False, False, 0, None, SECTORS[0]],
    "interstellar snax ship" : ["_SNAX", False, False, 0, None, SECTORS[0]],
    #puzzle portals -- don't forget to sync tutorial length constant
    "tutorial" : ["1_TUT1", False, False, 0, None, SECTORS[0]],
    "tut2" : ["1_TUT2", False, False, 0, None, SECTORS[0]],
    "tut3" : ["1_TUT3", False, False, 0, None, SECTORS[0]],
    "tut4" : ["1_TUT4", False, False, 0, None, SECTORS[0]],
    "tut5" : ["1_TUT5", False, False, 0, None, SECTORS[0]],
    "tut6" : ["1_TUT6", False, False, 0, None, SECTORS[0]],
    "tut7" : ["1_TUT7", False, False, 0, None, SECTORS[0]],
    "odd color out" : ["2_ODD_COLOR_OUT", True, False, 0, None, SECTORS[1]], # 1
    "small detour" : ["3_SMALL_DETOUR", True, False, 0, None, SECTORS[1]],
    "go for it" : ["4_GO_FOR_IT", True, False, 0, [0], SECTORS[1]],
    "switches" : ["5_SWITCHES", True, False, 0, None, SECTORS[2]], # 2
    "space walk" : ["6_SPACE_WALK", True, False, 0, None, SECTORS[2]],
    "wait for me" : ["7_WAIT_FOR_ME", True, False, 0, None, SECTORS[2]],
    "crowded crew 1" : ["8_CROWDED_CREW_1", True, False, 0, None, SECTORS[2]],
    "cross paths" : ["9_CROSS_PATHS", True, False, 0, [0,0], SECTORS[2]],
    "loop" : ["10_LOOP", True, False, 0, [0,0], SECTORS[2]],
    "island" : ["11_ISLAND", True, False, 0, None, SECTORS[3]], # 3
    "sardines 1" : ["12_SARDINES_1", True, False, 0, [0], SECTORS[3]],
    "current" : ["13_CURRENT", True, False, 0, None, SECTORS[3]],
    "try the hard way" : ["14_TRY_THE_HARD_WAY", True, False, 0, [0], SECTORS[3]],
    "double back" : ["15_DOUBLE_BACK", True, False, 0, None, SECTORS[3]],
    "teamwork" : ["16_TEAMWORK", True, False, 0, None, SECTORS[3]],
    "over and out" : ["17_OVER_AND_OUT", True, False, 0, None, SECTORS[4]], # 4
    "symmetry 1" : ["18_SYMMETRY_1", True, False, 0, None, SECTORS[4]],
    "corral" : ["19_CORRAL", True, False, 0, [0,0], SECTORS[4]],
    "crowded crew 2" : ["20_CROWDED_CREW_2", True, False, 0, None, SECTORS[4]],
    "split up" : ["21_SPLIT_UP", True, False, 0, [0,0], SECTORS[4]],
    "asteroid field" : ["22_ASTEROID_FIELD", True, False, 0, None, SECTORS[4]],
    "jump the fence" : ["23_JUMP_THE_FENCE", True, False, 0, None, SECTORS[5]], # 5
    "symmetry 2" : ["24_SYMMETRY_2", True, False, 0, None, SECTORS[5]],
    "separate" : ["25_SEPARATE", True, False, 0, None, SECTORS[5]],
    "under your nose" : ["26_UNDER_YOUR_NOSE", True, False, 0, [0,0], SECTORS[5]],
    "so close yet so far" : ["27_SO_CLOSE_YET_SO_FAR", True, False, 0, None, SECTORS[5]],
    "sections" : ["28_SECTIONS", True, False, 0, None, SECTORS[5]],
    "sardines 2" : ["29_SARDINES_2", True, False, 0, None, SECTORS[6]], # 6
    "tasty trove" : ["30_TASTY_TROVE", True, False, 0, [0,0], SECTORS[6]],
    "sandbar" : ["31_SANDBAR", True, False, 0, [0,0], SECTORS[6]],
    "hopscotch" : ["32_HOPSCOTCH", True, False, 0, [0], SECTORS[6]],
    "corners" : ["33_CORNERS", True, False, 0, [0], SECTORS[6]],
    "copy cat" : ["34_COPY_CAT", True, False, 0, [0], SECTORS[6]],
    "small loop" : ["35_SMALL_LOOP", True, False, 0, None, SECTORS[7]], # 7
    "outpost" : ["36_OUTPOST", True, False, 0, None, SECTORS[7]],
    "take a hike" : ["37_TAKE_A_HIKE", True, False, 0, None, SECTORS[7]],
    "not so fast" : ["38_NOT_SO_FAST", True, False, 0, [0,0,0], SECTORS[7]],
    "really crowded crew" : ["39_REALLY_CROWDED_CREW", True, False, 0, None, SECTORS[7]],
    "rescue" : ["40_RESCUE", True, False, 0, [0], SECTORS[7]],
    "gateway" : ["41_GATEWAY", True, False, 0, None, SECTORS[8]] # 8
    } # ||||| ADD PORTAL marker -- register new portals here |||||
# ||||| ADD PORTAL marker (see also ImgLib preview images below) |||||
# ||||| add snax: new snack entries go in the per-puzzle lists above |||||
# |||||
class ImgLib(object):
    """ image library to load and access local images """
    # Class-level cache shared by all instances; filled once on first init.
    imgDict = None
    def __init__(self):
        """ lazily build the shared image dictionary on first instantiation """
        if not ImgLib.imgDict:
            ImgLib.imgDict = {
            "CURSOR" : dinosInSpace.loadImage("controlCursor.png", "2X", (21,21)),
            "PUZZLE_PREVIEW" : dinosInSpace.loadImage("portalPrevDefault.png", "2X"),
##            "TEST1" : dinosInSpace.loadImage("rock2.png", "2X", (0,0)),
##            "TEST2" : dinosInSpace.loadImage("rock3.png", "2X", (0,0)),
##            "TESTPREV" : dinosInSpace.loadImage("testPreview.png", (200,150), None),
##            "TESTPREV_E" : dinosInSpace.loadImage("testPreviewEmpty.png", (200,150), None),
            "LOCKED" : infoGraphic56.TextObject("locked", LOCKED_FONTSIZE, DARK_RED, None, True, False, LOCKED_BKGCOLOR).image.copy(),
            "SNAX" : dinosInSpace.loadImage("hamburger.png", "2X", (0,0)),
            "SNAX_LOCKED" : dinosInSpace.loadImage("hamburgerLocked.png", "2X", (0,0)),
            "PORT_LOCK" : dinosInSpace.loadImage("portalLocked.png", "2X", (0,0)),
            "PORT_OPEN" : dinosInSpace.loadImage("portalOpen.png", "2X", (0,0)),
            "PORT_COMP" : dinosInSpace.loadImage("portalComplete.png", "2X", (0,0)),
            "PORT_FLAG" : dinosInSpace.loadImage("flagBasic.png", "2X", (0,0)),
            "PORT_FLAGPLUS" : dinosInSpace.loadImage("flagComplete.png", "2X", (0,0)),
            "BACK_ARROW" : dinosInSpace.loadImage("backArrow.png", "2X", (0,0)),
            "SNAX_BOX" : pygame.Surface(INFOBAR_BOX_UNIT_SIZE),
            "SNAX_SUB" : dinosInSpace.loadImage("snaxSub.png", "2X", (0,0)),
            "SNAX_SUB_PARTY" : dinosInSpace.loadImage("snaxSubParty.png", "2X", (0,0)),
            "PORTAL_LABEL" : dinosInSpace.loadImage("portalLabel.png", "2X", (0,75)),
            # map previews - requested by portals during init
            # (keys match the puzzle names in NEW_PROFILE)
            "quit to title" : dinosInSpace.loadImage("pp_title.png"),
            "interstellar snax ship" : dinosInSpace.loadImage("pp_snax.png"),
            "tutorial" : dinosInSpace.loadImage("pp_tutorial.png"),
            "go for it" : dinosInSpace.loadImage("pp_go_for_it.png"),
            "odd color out" : dinosInSpace.loadImage("pp_odd_color_out.png"),
            "small detour" : dinosInSpace.loadImage("pp_small_detour.png"),
            "crowded crew 1" : dinosInSpace.loadImage("pp_crowded_crew_1.png"),
            "switches" : dinosInSpace.loadImage("pp_switches.png"),
            "loop" : dinosInSpace.loadImage("pp_loop.png"),
            "sardines 1" : dinosInSpace.loadImage("pp_sardines_1.png"),
            "corral" : dinosInSpace.loadImage("pp_corral.png"),
            "cross paths" : dinosInSpace.loadImage("pp_cross_paths.png"),
            "current" : dinosInSpace.loadImage("pp_current.png"),
            "over and out" : dinosInSpace.loadImage("pp_over_and_out.png"),
            "hopscotch" : dinosInSpace.loadImage("pp_hopscotch.png"),
            "outpost" : dinosInSpace.loadImage("pp_outpost.png"),
            "island" : dinosInSpace.loadImage("pp_island.png"),
            "symmetry 1" : dinosInSpace.loadImage("pp_symmetry_1.png"),
            "double back" : dinosInSpace.loadImage("pp_double_back.png"),
            "try the hard way" : dinosInSpace.loadImage("pp_try_the_hard_way.png"),
            "crowded crew 2" : dinosInSpace.loadImage("pp_crowded_crew_2.png"),
            "small loop" : dinosInSpace.loadImage("pp_small_loop.png"),
            "wait for me" : dinosInSpace.loadImage("pp_wait_for_me.png"),
            "separate" : dinosInSpace.loadImage("pp_separate.png"),
            "symmetry 2" : dinosInSpace.loadImage("pp_symmetry_2.png"),
            "teamwork" : dinosInSpace.loadImage("pp_teamwork.png"),
            "rescue" : dinosInSpace.loadImage("pp_rescue.png"),
            "copy cat" : dinosInSpace.loadImage("pp_copy_cat.png"),
            "jump the fence" : dinosInSpace.loadImage("pp_jump_the_fence.png"),
            "space walk" : dinosInSpace.loadImage("pp_space_walk.png"),
            "so close yet so far" : dinosInSpace.loadImage("pp_so_close_yet_so_far.png"),
            "under your nose" : dinosInSpace.loadImage("pp_under_your_nose.png"),
            "corners" : dinosInSpace.loadImage("pp_corners.png"),
            "tasty trove" : dinosInSpace.loadImage("pp_tasty_trove.png"),
            "sections" : dinosInSpace.loadImage("pp_sections.png"),
            "sardines 2" : dinosInSpace.loadImage("pp_sardines_2.png"),
            "really crowded crew" : dinosInSpace.loadImage("pp_really_crowded_crew.png"),
            "sandbar" : dinosInSpace.loadImage("pp_sandbar.png"),
            "asteroid field" : dinosInSpace.loadImage("pp_asteroid_field.png"),
            "take a hike" : dinosInSpace.loadImage("pp_take_a_hike.png"),
            "split up" : dinosInSpace.loadImage("pp_split_up.png"),
            "not so fast" : dinosInSpace.loadImage("pp_not_so_fast.png"),
            "gateway" : dinosInSpace.loadImage("pp_gateway.png"),
            # locked-state previews ("<name>_L" keys, "pp_<name>_L.png" files)
            "quit to title_L" : dinosInSpace.loadImage("pp_title_L.png"),
            "interstellar snax ship_L" : dinosInSpace.loadImage("pp_snax_L.png"),
            "tutorial_L" : dinosInSpace.loadImage("pp_tutorial_L.png"),
            "go for it_L" : dinosInSpace.loadImage("pp_go_for_it_L.png"),
            "odd color out_L" : dinosInSpace.loadImage("pp_odd_color_out_L.png"),
            "small detour_L" : dinosInSpace.loadImage("pp_small_detour_L.png"),
            "crowded crew 1_L" : dinosInSpace.loadImage("pp_crowded_crew_1_L.png"),
            "switches_L" : dinosInSpace.loadImage("pp_switches_L.png"),
            "loop_L" : dinosInSpace.loadImage("pp_loop_L.png"),
            "sardines 1_L" : dinosInSpace.loadImage("pp_sardines_1_L.png"),
            "corral_L" : dinosInSpace.loadImage("pp_corral_L.png"),
            "cross paths_L" : dinosInSpace.loadImage("pp_cross_paths_L.png"),
            "current_L" : dinosInSpace.loadImage("pp_current_L.png"),
            "over and out_L" : dinosInSpace.loadImage("pp_over_and_out_L.png"),
            "hopscotch_L" : dinosInSpace.loadImage("pp_hopscotch_L.png"),
            "outpost_L" : dinosInSpace.loadImage("pp_outpost_L.png"),
            "island_L" : dinosInSpace.loadImage("pp_island_L.png"),
            "symmetry 1_L" : dinosInSpace.loadImage("pp_symmetry_1_L.png"),
            "double back_L" : dinosInSpace.loadImage("pp_double_back_L.png"),
            "try the hard way_L" : dinosInSpace.loadImage("pp_try_the_hard_way_L.png"),
            "crowded crew 2_L" : dinosInSpace.loadImage("pp_crowded_crew_2_L.png"),
            "small loop_L" : dinosInSpace.loadImage("pp_small_loop_L.png"),
            "wait for me_L" : dinosInSpace.loadImage("pp_wait_for_me_L.png"),
            "separate_L" : dinosInSpace.loadImage("pp_separate_L.png"),
            "symmetry 2_L" : dinosInSpace.loadImage("pp_symmetry_2_L.png"),
            "teamwork_L" : dinosInSpace.loadImage("pp_teamwork_L.png"),
            "rescue_L" : dinosInSpace.loadImage("pp_rescue_L.png"),
            "copy cat_L" : dinosInSpace.loadImage("pp_copy_cat_L.png"),
            "jump the fence_L" : dinosInSpace.loadImage("pp_jump_the_fence_L.png"),
            "space walk_L" : dinosInSpace.loadImage("pp_space_walk_L.png"),
            "so close yet so far_L" : dinosInSpace.loadImage("pp_so_close_yet_so_far_L.png"),
            "under your nose_L" : dinosInSpace.loadImage("pp_under_your_nose_L.png"),
            "corners_L" : dinosInSpace.loadImage("pp_corners_L.png"),
            "tasty trove_L" : dinosInSpace.loadImage("pp_tasty_trove_L.png"),
            "sections_L" : dinosInSpace.loadImage("pp_sections_L.png"),
            "sardines 2_L" : dinosInSpace.loadImage("pp_sardines_2_L.png"),
            "really crowded crew_L" : dinosInSpace.loadImage("pp_really_crowded_crew_L.png"),
            "sandbar_L" : dinosInSpace.loadImage("pp_sandbar_L.png"),
            "asteroid field_L" : dinosInSpace.loadImage("pp_asteroid_field_L.png"),
            "take a hike_L" : dinosInSpace.loadImage("pp_take_a_hike_L.png"),
            "split up_L" : dinosInSpace.loadImage("pp_split_up_L.png"),
            "not so fast_L" : dinosInSpace.loadImage("pp_not_so_fast_L.png"),
            "gateway_L" : dinosInSpace.loadImage("pp_gateway_L.png"),
            } # ||||| ADD PORTAL marker -- add preview images for new portals here |||||
            ImgLib.imgDict["SNAX_BOX"].fill(SNAX_BOX_COLOR)
    @staticmethod
    def getImage(name):
        """ return a copy of a cached image, or None (with a console note) if missing """
        if name in ImgLib.imgDict:
            return ImgLib.imgDict[name].copy()
        else:
            print "image, " + name + " not found"
class SelectScreen(object):
    """ screen to select stages in story mode """
    # Singleton reference to the live select screen.
    me = None
    def __init__(self):
        SelectScreen.me = self
        # Sprite groups, in draw order (clear/update/draw in runMe follows
        # the same order so later groups render on top).
        self.scrollerGroup = pygame.sprite.GroupSingle() # scroller updates first
        self.bkgGroup1 = pygame.sprite.RenderUpdates() # bottom level gfx
##        self.sparkleGroup = pygame.sprite.OrderedUpdates()
        self.portalGroup = pygame.sprite.OrderedUpdates() # level portals
        self.sectorGroup = groupMods56.SR_OrderedUpdates() # sectors and sector counters
        self.infoGroup = pygame.sprite.OrderedUpdates() # portal info right side of screen
        self.portLabelGroup = pygame.sprite.Group() # labels over portals as prompts
        self.cursorGroup = pygame.sprite.RenderUpdates() # cursor
        self.currentProfile = None
        self.keepGoing = None
        # 'dest' is the file-name destination returned to the caller when a
        # portal is clicked (see getInput / runMe).
        self.dest = None
        self.puzzleSelected = None
        self.minSpeed = dinosInSpace.Game.getMinSpeed()
        self.screen = pygame.display.get_surface()
        self.background = pygame.Surface(self.screen.get_size())
        self.background.fill((0,0,0))
        self.screen.blit(self.background, (0,0))
##        gfx56.drawBorder(self)
##        pygame.display.update()
##        # create sparkle trail #
##        ########################
##        self.withSparkle = WITH_SPARKLE
##        self.sparkleEffect = None
##        if self.withSparkle:
##            self.sparkleEffect = sparkleTrail.SparkleTrail(SPARK_SIZE, SPARK_COLOR, SPARK_BOUNDS, SPARK_FREQUENCY, SPARK_FADESPEED, self, SPARK_CENTER, SPARK_VELOCITY)
##        ########################
##        ########################
    @staticmethod
    def wipe():
        """ drop the singleton reference """
        SelectScreen.me = None
    def setCurrentProfile(self, currentProfile):
        self.currentProfile = currentProfile
    def getMinSpeed(self):
        return self.minSpeed
    def getPortalGroup(self):
        return self.portalGroup
    def addSpriteToGroup(self, sprite, group):
        """ add a sprite to one of the named sprite groups (see __init__) """
        if group == "SCROLLER":
            self.scrollerGroup.add(sprite)
        elif group == "BKG1":
            self.bkgGroup1.add(sprite)
        elif group == "PORTAL":
            self.portalGroup.add(sprite)
        elif group == "SECTOR":
            self.sectorGroup.add(sprite)
        elif group == "INFO":
            self.infoGroup.add(sprite)
        elif group == "LABEL":
            self.portLabelGroup.add(sprite)
        elif group == "CURSOR":
            self.cursorGroup.add(sprite)
    def addSpriteListToGroup(self, spriteList, group):
        """ add every sprite in the list to the named group """
        for s in spriteList:
            self.addSpriteToGroup(s, group)
    def getInput(self):
        """ poll pygame events; escape exits, a click asks the cursor for a destination """
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    self.dest = "_EXIT"
            if event.type == pygame.MOUSEBUTTONDOWN:
                self.dest, self.puzzleSelected = AreaCursor.cursor.click()
        if self.dest:
            self.keepGoing = False
    def runMe(self, _fps, imageFrom, swipeDirection):
        """ main loop; returns (dest, puzzleSelected, snapshot of the screen) """
        clock = pygame.time.Clock()
        self.clock = clock # bugfix for fps [infographic] access (rest of code uses local variable 'clock')
        self.keepGoing = True
        self.dest = None
        while self.keepGoing:
            clock.tick(_fps)
            self.getInput()
            # Clear all groups before updating/redrawing this frame.
            self.bkgGroup1.clear(self.screen, self.background)
##  self.sparkleGroup.clear(self.screen, self.background)
            self.portalGroup.clear(self.screen, self.background)
            self.sectorGroup.clear(self.screen, self.background)
            self.infoGroup.clear(self.screen, self.background)
            self.portLabelGroup.clear(self.screen, self.background)
            self.cursorGroup.clear(self.screen, self.background)
            # While the entry transition image is still up, the scroller and
            # sector groups are not updated.
            if not imageFrom:
                self.scrollerGroup.update()
            self.bkgGroup1.update()
##  self.sparkleGroup.update()
            self.portalGroup.update()
            if not imageFrom:
                self.sectorGroup.update()
            self.infoGroup.update()
            self.portLabelGroup.update()
            self.cursorGroup.update()
##            # sparkle update cycle #
##            ########################
##            if self.withSparkle:
##                newUnit = self.sparkleEffect.update()
##                if newUnit:
##                    self.sparkleGroup.add(newUnit)
##                    print self.sparkleGroup
##            ########################
##            ########################
            self.bkgGroup1.draw(self.screen)
##  self.sparkleGroup.draw(self.screen)
            self.portalGroup.draw(self.screen)
            self.sectorGroup.draw(self.screen)
            self.infoGroup.draw(self.screen)
            self.portLabelGroup.draw(self.screen)
            self.cursorGroup.draw(self.screen)
            gfx56.drawBorder(self)
            # ************************************************* #
            # *** screen transition / destroy covered image *** #
            # ************************************************* #
            screenWipe.wipe(_fps, imageFrom, self.screen.copy(), swipeDirection)
            imageFrom = None
            # ************************************************* #
            # ************************************************* #
            # ************************************************* #
            pygame.display.update()
        # ************************************ #
        # *** clear cursor / take snapshot *** #
        # ************************************ #
        # Redraw everything except the cursor so the outgoing snapshot used
        # by the next screen's transition does not include it.
        self.cursorGroup.clear(self.screen, self.background)
        self.bkgGroup1.draw(self.screen)
        self.portalGroup.draw(self.screen)
        self.sectorGroup.draw(self.screen)
        self.infoGroup.draw(self.screen)
        self.portLabelGroup.draw(self.screen)
        gfx56.drawBorder(self)
        snapshot = self.screen.copy()
        # ************************************ #
        # ************************************ #
        # ************************************ #
        if self.dest == "_SNAX":
            soundFx56.SoundPlayer.requestSound("snaxScreen")
        else:
            soundFx56.SoundPlayer.requestSound("woosh_b")
        return self.dest, self.puzzleSelected, snapshot
class ProfileDelegate(object):
    """ container for players saved data
        - holds access and scroll over data
        - portal gets data by request
        - only one instance exists available thru class attribute 'curProfile'
        - fetches data from saved file (via dataStorage module) when initialized
    """
    curProfile = None
    def __init__(self, profileName):
        """ load the named profile from disk and register self as the singleton """
        ProfileDelegate.curProfile = self
        self.dataDict = dataStorage56.getProfile(profileName)
        self.profileName = profileName
    @staticmethod
    def wipe():
        """ drop the singleton reference """
        ProfileDelegate.curProfile = None
    def checkForUnlockedAndSyncData(self):
        """ log the next snax milestone once enough snax have been archived """
        # Milestone entries change to 0 when achieved (see NEW_PROFILE), so
        # the first truthy entry is the next target still to reach.
        nextMilestone = None
        for milestone in self.dataDict[SNAX_MILESTONE]:
            if milestone:
                nextMilestone = milestone
                break
        if nextMilestone and len(self.dataDict[SNAX_ARCHIVE]) >= nextMilestone:
            dataStorage56.logNextMilestone(self.profileName)
    def getPortalData(self, portal):
        """ returns portal data from profile data - called by portals during init """
        return self.dataDict[portal]
    def getTotalsData(self):
        """ returns totals data from current profile - called by InfoBar during init"""
        # Reuse the dedicated counters below instead of duplicating their loops.
        mapsComplete = self.getMapsComplete()
        # The tutorial maps count as a single puzzle in the displayed total,
        # hence (TUTORIAL_LENGTH - 1) is subtracted along with the
        # non-puzzle entries.
        totalMaps = len(self.dataDict) - len(NON_PUZZLE_ENTRY) - (TUTORIAL_LENGTH - 1)
        snaxConsumed, totalSnax = self.getNumberSnaxCollected()
        mapsRep = "PUZZLES SOLVED : " + str(mapsComplete) + " / " + str(totalMaps)
        snaxRep = "TOTAL SNAX : " + str(snaxConsumed) + " / " + str(totalSnax)
        return mapsRep, snaxRep
    def getNumberSnaxCollected(self):
        """ return the number of snax consumed and the total number of snax """
        totalSnax = 0
        snaxConsumed = 0
        for portal in self.dataDict:
            if portal not in NON_PUZZLE_ENTRY:
                snax = self.dataDict[portal][4] # index 4: per-snack 0/1 flags, or None
                if snax:
                    for s in snax:
                        totalSnax += 1
                        if s:
                            snaxConsumed += 1
        return snaxConsumed, totalSnax
    def getMapsComplete(self):
        """ return how many puzzles are marked complete in this profile """
        mapsComplete = 0
        for portal in self.dataDict:
            if portal not in NON_PUZZLE_ENTRY:
                if self.dataDict[portal][2]: # index 2: 'complete' flag
                    mapsComplete += 1
        return mapsComplete
    def getNumTilUnlock(self):
        """ puzzles still needed to open the next locked sector (0 if none locked) """
        # SECTORS is ordered by required completion count; the first entry
        # above the current count is the next sector to unlock. The count is
        # computed once here instead of on every loop iteration.
        mapsComplete = self.getMapsComplete()
        for required in SECTORS:
            if required > mapsComplete:
                return required - mapsComplete
        return 0
    def getProfilePackage(self):
        """ returns profileData in one piece - requested at launch to pass to level """
        return self.dataDict
class Portal(pygame.sprite.Sprite):
    """
    portal to puzzle
    - responds to Scroller
    - unique data accessed by request thru ProfileData
    - scrolling over gives information
    - instance var 'locked' controls user access
    """
    # profile data reference (see top) - tuple layout returned by getPortalData:
    #
    # -0 (string) _file name_ : passed as 'dest' to map selector (level)
    # -1 (bool) _locked_ : controls player access / preview
    # -2 (bool) _complete_ : displays if complete, adds to global profile completed count
    # -3 (int) _difficulty_ : displays difficulty level
    # -4 (list) _snacks_ : displays how many snacks collected as fraction, pass 'None' if n/a
    # -5 (int) _lock number_ : portal is unlocked after this number of total completed puzzles
    def __init__(self, name, imgLocked, imgUnlocked, imgComplete, location):
        """ build the portal sprite from saved profile state and register it
        (plus its optional flag/label) with the active SelectScreen """
        pygame.sprite.Sprite.__init__(self)
##        self.SCALEFRAC = PORTAL_SCALEFRAC
##        self.SCALE = imgLocked.get_width()/self.SCALEFRAC
        self.SCALE = PORTAL_SCALEBY
        self.name = name
        # fetch stored profile data
        (self.dest,
         self.locked,
         self.complete,
         self.difficulty,
         self.snax,
         self.lockNum) = ProfileDelegate.curProfile.getPortalData(self.name)
##        self.puzzleNumber = None
##        if self.dest[0] != "_": # exclude _EXIT and _SNAX
##            self.puzzleNumber = self.dest[:self.dest.index("_")]
        # a portal saved as locked may have been earned since: re-check before
        # choosing the base image below
        if self.locked:
            self.checkLock()
        if self.locked:
            self.original = imgLocked
        elif self.complete:
            self.original = imgComplete
        else:
            self.original = imgUnlocked
        # if snax ship, check for all snax collected
        if self.name == "interstellar snax ship":
            snaxConsumed, totalSnax = ProfileDelegate.curProfile.getNumberSnaxCollected()
            if snaxConsumed == totalSnax: # total snax should be 24
                self.original = ImgLib.getImage("SNAX_SUB_PARTY")
        # add puzzle number
        if self.dest[0] != "_": # exclude _EXIT and _SNAX
            numSurf = infoGraphic56.TextObject(self.dest[:self.dest.index("_")], PORTAL_NUMSIZE, PORTAL_NUMCOLOR).image
            # NOTE(review): blits directly onto the image surface fetched above -
            # assumes ImgLib hands out per-call copies; verify to avoid shared mutation
            self.original.blit(numSurf, (PORTAL_NUMBLIT))
        # add s glyph if snax left
        if self.snax:
            for s in self.snax:
                if not s:
                    sGlyph = infoGraphic56.TextObject(PORTAL_SGLYPH_TEXT, PORTAL_SGLYPH_SIZE, PORTAL_SGLYPH_COLOR).image
                    self.original.blit(sGlyph, (PORTAL_SGLYPH_BLIT))
                    break
        # pre-render the enlarged mouse-over image once
        self.imgOver = pygame.transform.scale(self.original, (self.original.get_width() + self.SCALE, self.original.get_height() + self.SCALE))
        self.image = self.original
        self.rect = self.image.get_rect()
        self.ready = False  # True while the cursor overlaps this portal
        self.minSpeed = SelectScreen.me.getMinSpeed()
        self.previewImg = ImgLib.getImage(self.name)
        self.rect.center = location
        SelectScreen.me.addSpriteToGroup(self, "PORTAL")
        # flag
        self.flag = PortalFlag(self) if self.complete else None
        if self.flag:
            SelectScreen.me.addSpriteToGroup(self.flag, "PORTAL")
        # label
        self.label = None
        if self.name == "tutorial" and not self.complete:
            self.label = PortalLabel(self, ImgLib.getImage("PORTAL_LABEL"), "start here")
            SelectScreen.me.addSpriteToGroup(self.label, "LABEL")
        self.firstCycle = True

    def update(self):
        """ per-frame update; the very first cycle is skipped so all sprites
        are registered before collision/scroll processing starts """
        if not self.firstCycle:
            self.checkCollision()
            self.setSpeed()
        else:
            self.firstCycle = False

    def checkLock(self):
        """ unlock this portal when the profile has enough completed puzzles;
        the final 'gateway' portal additionally requires MAX_SNAX snacks """
        mapsComplete = ProfileDelegate.curProfile.getMapsComplete()
        if mapsComplete >= self.lockNum:
            # handle last puzzle
            if self.name == "gateway":
                collectedSnax, totalSnax = ProfileDelegate.curProfile.getNumberSnaxCollected()
                #if collectedSnax == totalSnax:
                if collectedSnax == MAX_SNAX:
                    self.locked = False
            # normal case
            else:
                self.locked = False

    def getPuzzleName(self):
        return self.name

    def setSpeed(self):
        """ move with the scroller, dragging the flag/label sprites along """
        xSpeedRatio, ySpeedRatio = scroller56.Scroller.speedData
        dx = xSpeedRatio * self.minSpeed
        dy = ySpeedRatio * self.minSpeed
        self.rect.centerx += dx
        self.rect.centery += dy
        if self.flag:
            self.flag.mimic(dx, dy)
        if self.label:
            self.label.mimic(dx, dy)

    def checkCollision(self):
        """ toggle the ready state as the cursor enters/leaves this portal """
        if pygame.sprite.collide_rect(self, AreaCursor.cursor):
            if not self.ready:
                self.setReady(True)
        elif self.ready:
            self.setReady(False)

    def setReady(self, ready):
        """ swap between normal and enlarged image and (de)register this
        portal with the cursor and the info bar """
        self.ready = ready
        curCenter = self.rect.center
        if self.ready:
            self.image = self.imgOver
            self.rect = self.image.get_rect()
            self.rect.center = curCenter
            if self.flag:
                # offset the flag so it tracks the enlarged image's center
                self.flag.mimic(-self.SCALE/2, -self.SCALE/2)
                self.flag.scaleUp()
        else:
            self.image = self.original
            self.rect = self.image.get_rect()
            self.rect.center = curCenter
            if self.flag:
                self.flag.mimic(self.SCALE/2, self.SCALE/2)
                self.flag.scaleDown()
        # conditional expressions used as statements purely for their side effects
        AreaCursor.cursor.addReadyPortal(self) if self.ready else AreaCursor.cursor.removeReadyPortal(self)
        InfoBar.me.display(self) if self.ready else InfoBar.me.clearDisplay(self)

    def isLocked(self):
        return self.locked

    def getDest(self):
        return self.dest

    # def difToString(self):
    #     if self.difficulty == 0:
    #         rep = "easy"
    #     elif self.difficulty == 1:
    #         rep = "medium"
    #     elif self.difficulty == 2:
    #         rep = "hard"
    #     elif self.difficulty == 3:
    #         rep = "xtra hard"
    #     else:
    #         rep = "not recognized"
    #
    #     return rep

    def solvedToString(self):
        """ display string for the solved status; n/a for non-puzzle portals """
        if self.name == "quit to title" or self.name == "interstellar snax ship":
            rep = "- - -"
        else:
            if self.complete:
                rep = "Solved"
            else:
                rep = "Unsolved"
        return rep

    def snaxToString(self):
        """ display string 'consumed / total' for this puzzle's snacks, or N/A """
        if self.snax:
            total = len(self.snax)
            consumed = 0
            for s in self.snax:
                if s:
                    consumed += 1
            rep = str(consumed) + " / " + str(total)
        else:
            rep = "N/A"
        return rep
class PortalFlag(pygame.sprite.Sprite):
    """ flag that indicates full or partial completion """
    def __init__(self, portal):
        # a flag is only ever attached to a completed portal
        assert(portal.complete)
        pygame.sprite.Sprite.__init__(self)
        self.portal = portal
        self.original = self.chooseFlag()
##        self.SCALE = self.original.get_width()/portal.SCALEFRAC
        self.SCALE = PORTAL_SCALEBY
        # pre-render the enlarged mouse-over variant once
        self.imgOver = pygame.transform.scale(self.original,
                                              (self.original.get_width() + self.SCALE,
                                               self.original.get_height() + self.SCALE))
        self.image = self.original
        self.rect = self.image.get_rect()
        self.rect.center = self.portal.rect.center
##        self.rect.top = self.portal.rect.top - 45
##        self.rect.left = self.portal.rect.left

    def chooseFlag(self):
        """ return the 'plus' flag if every snack was collected,
        the plain flag if any snack was missed """
        image = ImgLib.getImage("PORT_FLAGPLUS")
        if self.portal.snax:
            for s in self.portal.snax:
                if not s:
                    image = ImgLib.getImage("PORT_FLAG")
                    break  # one missed snack settles it - no need to keep scanning
        return image

    def mimic(self, dx, dy):
        """ follow the owning portal's scroll movement """
        self.rect.centerx += dx
        self.rect.centery += dy

    def scaleUp(self):
        self.image = self.imgOver

    def scaleDown(self):
        self.image = self.original
class PortalLabel(pygame.sprite.Sprite):
    """ blinking text label attached to a portal (e.g. 'start here' on the tutorial) """
    def __init__(self, portal, image, text, textSize=XTRA_SMALL_FONTSIZE, textColor=BLACK, centerRelativeToPortal=PORTAL_LABEL_OFFSET):
        pygame.sprite.Sprite.__init__(self)
        self.masterPortal = portal
        # compose the label: text rendered and centered on the background image
        rendered = infoGraphic56.TextObject(text, textSize, textColor).image
        self.image = gfx56.centerBlit(image, rendered)
        self.rect = self.image.get_rect()
        anchorX, anchorY = self.masterPortal.rect.center
        self.rect.center = (anchorX + centerRelativeToPortal[0], anchorY + centerRelativeToPortal[1])
        # two alpha levels to alternate between while blinking
        self.alphaStandard = PORTAL_LABEL_ALPHA_STD
        self.alphaBlink = PORTAL_LABEL_ALPHA_BLINK
        self.blinkDelay = PORTAL_LABEL_BLINK_DELAY
        if fpsSwitch.FPSSwitch._fps == 60:
            # double the frame rate needs double the ticks for the same wall-clock delay
            self.blinkDelay *= 2
        self.blinkTick = self.blinkDelay
        self.image.set_alpha(self.alphaStandard, pygame.RLEACCEL)

    def mimic(self, dx, dy):
        """ follow the owning portal's scroll movement and advance the blink cycle """
        self.rect.centerx += dx
        self.rect.centery += dy
        self.blink()

    def blink(self):
        """ toggle the label's alpha each time the tick counter runs out """
        self.blinkTick -= 1
        if self.blinkTick >= 1:
            return
        self.blinkTick = self.blinkDelay
        onStandard = self.image.get_alpha() == self.alphaStandard
        nextAlpha = self.alphaBlink if onStandard else self.alphaStandard
        self.image.set_alpha(nextAlpha, pygame.RLEACCEL)
class AreaCursor(pygame.sprite.Sprite):
    """
    cursor to select portals to puzzles
    - only one instance exists available thru class attribute 'cursor'
    """
    cursor = None

    def __init__(self, img):
        pygame.sprite.Sprite.__init__(self)
        AreaCursor.cursor = self
        self.readyPortals = []  # portals currently under the cursor
        self.image = img
        self.rect = pygame.rect.Rect(0, 0, 1, 1,)
        self.firstCycle = True
        SelectScreen.me.addSpriteToGroup(self, "CURSOR")

    def update(self):
        """ track the mouse; park off-screen on the very first frame """
        if self.firstCycle:
            self.rect.center = (2000, 2000)
            self.firstCycle = False
        else:
            self.rect.topleft = pygame.mouse.get_pos()

    @staticmethod
    def wipe():
        """ clear the singleton reference - call before leaving the module """
        AreaCursor.cursor = None

    def addReadyPortal(self, portal):
        self.readyPortals.append(portal)

    def removeReadyPortal(self, portal):
        self.readyPortals.remove(portal)

    def click(self):
        """ return (dest, puzzleName) for the first unlocked ready portal,
        or (None, None) when nothing selectable is under the cursor """
        if not self.readyPortals:
            return None, None
        target = self.readyPortals[0]
        if target.isLocked():
            return None, None
        return target.getDest(), target.getPuzzleName()
class InfoBar(spriteBasic.BasicRect):
    """
    subclass of BasicRect manages and displays info for portals
    - takes topRight instead of topleft for init positioning
    - contains methods to control "features" which must be sprite objects
    - only one instance exists, available thru class attribute 'me'
    """
    me = None
    def __init__(self, profileName, size, color, topRight, edgeWidth=None, alpha=None):
        """ build the bar plus all constant and variable display features,
        position them relative to the bar, and add everything to the INFO group """
        # convert the given top-right anchor to the topLeft BasicRect expects
        topLeft = (topRight[0] - size[0], topRight[1])
        spriteBasic.BasicRect.__init__(self, size, color, topLeft, edgeWidth, alpha)
        InfoBar.me = self
        # layout constants, all relative to the bar's own surface
        WIDTH = self.rect.width
        HEIGHT = self.rect.height
        LEFT = 0
        RIGHT = size[0]
        TOP = 0
        BOTTOM = size[1]
        XMID = size[0]/2
        YMID = size[1]/2
        YSTEP = INFOBAR_YSTEP
        FONTSIZE_S = 16
        PROFBOX_HEIGHT = 50
        PROFBOX_COLOR = LTGREY
        PROFNAME_COLOR = BLACK
        # colors/sizes shared by the variable features below
        self.featureColorOn = (255, 255, 255)
        self.featureColorOff = (100, 100, 100)
        self.featureFontSize = DEFAULT_FONTSIZE
        self.smallFontSize = SMALL_FONTSIZE
        self.featureDefaultText = "..."
        self.extrasFontColorOn = (0, 0, 255)
        # ---- make prof name, labels and totals (constant features)
        # profile box
        profBox_pos = (XMID, YSTEP)
        profBox_genPt = "CENTER"
        profBox = spriteBasic.BasicRect((WIDTH, PROFBOX_HEIGHT), PROFBOX_COLOR) # size, color, topLeft, edgeWidth,
        # profile name (at top)
        profName_pos = (XMID, YSTEP)
        profName_genPt = "CENTER"
        profName = infoGraphic56.TextObject(profileName, TITLE_FONTSIZE, PROFNAME_COLOR)
        # labels
        snaxBox_pos = (XMID, 13*YSTEP)
        snaxBox_genPt = "CENTER"
        snaxBox = spriteBasic.BasicImg(ImgLib.getImage("SNAX_BOX"))
        snaxFrame_pos = (XMID, 15*YSTEP)
        snaxFrame_genPt = "CENTER"
        snaxFrame = spriteBasic.BasicRect(INFOBAR_BOX_UNIT_SIZE, ORANGE, None, 2)
        snaxLabel_pos = (XMID, 13*YSTEP)
        snaxLabel_genPt = "CENTER"
        snaxLabel = infoGraphic56.TextObject("SNAX COLLECTED", self.smallFontSize, BLACK)
        gTotalsLabel_pos = (XMID, 30*YSTEP)
        gTotalsLabel_genPt = "CENTER"
        gTotalsLabel = infoGraphic56.TextObject("Game Totals:", self.featureFontSize, self.featureColorOff)
        # totals (at bottom)
        ################### get totals data ## ****************
        (mapTotalsRep,
         snaxTotalsRep) = ProfileDelegate.curProfile.getTotalsData()
        numTilNextSet = ProfileDelegate.curProfile.getNumTilUnlock()
        # NOTE(review): shows the "get every snack" hint when the snack count
        # EQUALS MAX_SNAX - confirm this condition is intended (not inverted)
        if ProfileDelegate.curProfile.getNumberSnaxCollected()[0] == MAX_SNAX:
            unlockText1 = "get every snack"
            unlockText2 = "to unlock last puzzle!"
        else:
            unlockText1 = "solve " + str(numTilNextSet) + " more puzzle";
            if numTilNextSet > 1: unlockText1 += "s"
            unlockText2 = "to unlock next set"
        ################### ------------------ ****************
        mapTotals_pos = (XMID, 600 - 7*YSTEP)
        mapTotals_genPt = "CENTER"
        mapTotals = infoGraphic56.TextObject(mapTotalsRep, XTRA_SMALL_FONTSIZE, LTGREY, None, True) # last tag antialias
        snaxTotals_pos = (XMID, 600 - 5*YSTEP)
        snaxTotals_genPt = "CENTER"
        snaxTotals = infoGraphic56.TextObject(snaxTotalsRep, XTRA_SMALL_FONTSIZE, LTGREY, None, True)
        bottomMask_pos = (LEFT, 16*YSTEP)
        bottomMask_genPt = "TOPLEFT"
        bottomMask = spriteBasic.BasicRect((INFOBAR_WIDTH, 200), BLACK)
        puzzleSolvedFrame_pos = (LEFT, 600 - 8*YSTEP)
        puzzleSolvedFrame_genPt = "TOPLEFT"
        puzzleSolvedFrame = spriteBasic.BasicRect(INFOBAR_BOX_UNIT_SIZE, LTGREY, None, 2)
        snaxTotalsFrame_pos = (LEFT, 600 - 6*YSTEP)
        snaxTotalsFrame_genPt = "TOPLEFT"
        snaxTotalsFrame = spriteBasic.BasicRect(INFOBAR_BOX_UNIT_SIZE, LTGREY, None, 2)
        nextUnlockT1_pos = (XMID, 600 - 3*YSTEP)
        nextUnlockT1_genPt = "CENTER"
        nextUnlockT1 = infoGraphic56.TextObject(str(unlockText1), XTRA_SMALL_FONTSIZE, BLACK, None, True)
        nextUnlockT2_pos = (XMID, 600 - 1*YSTEP)
        nextUnlockT2_genPt = "CENTER"
        nextUnlockT2 = infoGraphic56.TextObject(str(unlockText2), XTRA_SMALL_FONTSIZE, BLACK, None, True)
        nextUnlockBox_pos = (LEFT, 600 - 4*YSTEP)
        nextUnlockBox_genPt = "TOPLEFT"
        nextUnlockBox = spriteBasic.BasicRect((INFOBAR_WIDTH, 4*YSTEP), LTGREY)
        # ---- make variable features (updated by display()/clearDisplay()):
        # portal name
        self.pName_pos = (XMID, 3*YSTEP)
        self.pName_genPt = "CENTER"
        self.pName_defaultText = "PUZZLE"
        self.pName = infoGraphic56.TextObject(self.pName_defaultText, XTRA_SMALL_FONTSIZE, self.featureColorOff)
        # portal preview graphic
        self.pGraphic_pos = (XMID, 7*YSTEP)
        self.pGraphic_genPt = "CENTER"
        self.pGraphic_defaultImg = ImgLib.getImage("PUZZLE_PREVIEW")
        self.pGraphic = spriteBasic.BasicImg(self.pGraphic_defaultImg)
        # # portal difficulty
        # self.pDif_pos = (XMID, 8*YSTEP)
        # self.pDif_genPt = "CENTER"
        # self.pDif_defaultText = "Difficulty"
        # self.pDif = infoGraphic56.TextObject(self.pDif_defaultText, self.featureFontSize, self.featureColorOff)
        # puzzle status solved
        self.pSol_pos = (XMID, 11*YSTEP)
        self.pSol_genPt = "CENTER"
        self.pSol_defaultText = "Solved?"
        self.pSol = infoGraphic56.TextObject(self.pSol_defaultText, self.featureFontSize, self.featureColorOff)
        # puzzle snax text
        self.pSnaxText_pos = (XMID, 15*YSTEP)
        self.pSnaxText_posOn = (XMID, 15*YSTEP)
        self.pSnaxText_genPt = "CENTER"
        self.pSnaxText_defaultText = "- - -"
        self.pSnaxText = infoGraphic56.TextObject(self.pSnaxText_defaultText, self.featureFontSize, self.featureColorOff)
        # puzzle snax graphic (two icons flanking the snax text)
        self.pSnax_pos = (2*WIDTH/3 + SNAX_GFX_SPACING, 15*YSTEP)
        self.pSnax_genPt = "CENTER"
        self.pSnax_defaultImg = ImgLib.getImage("SNAX"); self.pSnax_defaultImg.set_alpha(SNAX_GFX_ALPHA, pygame.RLEACCEL)
        self.pSnax_liveImg = ImgLib.getImage("SNAX")
        self.pSnax_lockedImg = ImgLib.getImage("SNAX_LOCKED")
        self.pSnax = spriteBasic.BasicImg(self.pSnax_defaultImg)
        self.pSnax2_pos = (WIDTH/3 - SNAX_GFX_SPACING, 15*YSTEP)
        self.pSnax2_genPt = "CENTER"
        self.pSnax2_defaultImg = ImgLib.getImage("SNAX"); self.pSnax2_defaultImg.set_alpha(SNAX_GFX_ALPHA, pygame.RLEACCEL)
        self.pSnax2_liveImg = ImgLib.getImage("SNAX")
        self.pSnax2_lockedImg = ImgLib.getImage("SNAX_LOCKED")
        self.pSnax2 = spriteBasic.BasicImg(self.pSnax2_defaultImg)
        # ---- initialize position of features and add to group:
        self.setFeaturePosition(profBox, profBox_pos, profBox_genPt)
        self.setFeaturePosition(profName, profName_pos, profName_genPt)
        self.setFeaturePosition(snaxBox, snaxBox_pos, snaxBox_genPt)
        self.setFeaturePosition(snaxFrame, snaxFrame_pos, snaxFrame_genPt)
        self.setFeaturePosition(snaxLabel, snaxLabel_pos, snaxLabel_genPt)
        self.setFeaturePosition(gTotalsLabel, gTotalsLabel_pos, gTotalsLabel_genPt)
        self.setFeaturePosition(bottomMask, bottomMask_pos, bottomMask_genPt)
        self.setFeaturePosition(puzzleSolvedFrame, puzzleSolvedFrame_pos, puzzleSolvedFrame_genPt)
        self.setFeaturePosition(snaxTotalsFrame, snaxTotalsFrame_pos, snaxTotalsFrame_genPt)
        self.setFeaturePosition(mapTotals, mapTotals_pos, mapTotals_genPt)
        self.setFeaturePosition(snaxTotals, snaxTotals_pos, snaxTotals_genPt)
        self.setFeaturePosition(nextUnlockBox, nextUnlockBox_pos, nextUnlockBox_genPt)
        self.setFeaturePosition(nextUnlockT1, nextUnlockT1_pos, nextUnlockT1_genPt)
        self.setFeaturePosition(nextUnlockT2, nextUnlockT2_pos, nextUnlockT2_genPt)
        self.setFeaturePosition(self.pName, self.pName_pos, self.pName_genPt)
        self.setFeaturePosition(self.pGraphic, self.pGraphic_pos, self.pGraphic_genPt)
        self.setFeaturePosition(self.pSol, self.pSol_pos, self.pSol_genPt)
        self.setFeaturePosition(self.pSnaxText, self.pSnaxText_pos, self.pSnaxText_genPt)
        self.setFeaturePosition(self.pSnax, self.pSnax_pos, self.pSnax_genPt)
        self.setFeaturePosition(self.pSnax2, self.pSnax2_pos, self.pSnax2_genPt)
        InfoBar.addToInfoGroup([self,
                                profBox,
                                profName,
                                snaxBox,
                                snaxFrame,
                                snaxLabel,
                                gTotalsLabel,
                                bottomMask,
                                puzzleSolvedFrame,
                                snaxTotalsFrame,
                                mapTotals,
                                snaxTotals,
                                nextUnlockBox,
                                nextUnlockT1,
                                nextUnlockT2,
                                self.pName,
                                self.pGraphic,
                                self.pSol,
                                self.pSnaxText,
                                self.pSnax,
                                self.pSnax2])
    @staticmethod
    def wipe():
        """ clear the singleton reference - call before leaving the module """
        InfoBar.me = None
    @staticmethod
    def addToInfoGroup(spriteList):
        """ register each sprite with the select screen's INFO draw group """
        for s in spriteList:
            SelectScreen.me.addSpriteToGroup(s, "INFO")
    def setFeaturePosition(self, feature, pos, genPoint):
        """
        overlays feature on info bar
        -feature must be sprite w/ rect
        -x and y are position relative to InfoBar's top left
        -genPoint can be "TOPLEFT" or "TOPRIGHT" or "CENTER"
        """
        if genPoint == "TOPLEFT":
            feature.rect.topleft = (self.rect.left + pos[0], self.rect.top + pos[1])
        elif genPoint == "TOPRIGHT":
            feature.rect.topright = (self.rect.left + pos[0], self.rect.top + pos[1])
        elif genPoint == "CENTER":
            feature.rect.center = (self.rect.left + pos[0], self.rect.top + pos[1])
    def display(self, portal):
        """ displays portal info when mouse over; locked portals use the
        dimmed 'off' color and the locked preview/snack images """
        # portal name
        if not portal.locked:
            self.pName.rerender(portal.name, self.featureColorOn)
        else:
            self.pName.rerender(portal.name, self.featureColorOff)
        self.setFeaturePosition(self.pName, self.pName_pos, self.pName_genPt)
        # portal preview
        if not portal.locked:
            self.pGraphic.setImage(ImgLib.getImage(portal.name))
        else:
            # locked previews use the "_L" image variant with a LOCKED overlay
            imgKey = portal.name + "_L"
            self.pGraphic.setImage(ImgLib.getImage(imgKey))
            self.pGraphic.superImpose(ImgLib.getImage("LOCKED"), None, True)
        # NOTE(review): passes pName_genPt rather than pGraphic_genPt here -
        # harmless since both are "CENTER", but looks like a copy/paste slip
        self.setFeaturePosition(self.pGraphic, self.pGraphic_pos, self.pName_genPt)
        # # portal difficulty
        # if not portal.locked:
        #     self.pDif.rerender(portal.difToString(), self.featureColorOn)
        # else:
        #     self.pDif.rerender(portal.difToString(), self.featureColorOff)
        # self.setFeaturePosition(self.pDif, self.pDif_pos, self.pDif_genPt)
        # puzzle status solved
        if not portal.locked:
            self.pSol.rerender(portal.solvedToString(), self.featureColorOn)
        else:
            self.pSol.rerender(portal.solvedToString(), self.featureColorOff)
        self.setFeaturePosition(self.pSol, self.pSol_pos, self.pSol_genPt)
        # snax text
        if not portal.locked:
            self.pSnaxText.rerender(portal.snaxToString(), SNAX_COLLECTED_TEXT_COLOR)
        else:
            self.pSnaxText.rerender(portal.snaxToString(), self.featureColorOff)
        self.setFeaturePosition(self.pSnaxText, self.pSnaxText_posOn, self.pSnaxText_genPt)
        # snax graphic
        if not portal.locked:
            self.pSnax.setImage(self.pSnax_liveImg)
            self.pSnax2.setImage(self.pSnax2_liveImg)
        else:
            self.pSnax.setImage(self.pSnax_lockedImg)
            self.pSnax2.setImage(self.pSnax2_lockedImg)
        self.setFeaturePosition(self.pSnax, self.pSnax_pos, self.pSnax_genPt)
        self.setFeaturePosition(self.pSnax2, self.pSnax2_pos, self.pSnax2_genPt)
    def clearDisplay(self, portal):
        """ clear display when mouse leaves portal bounds; the name check
        avoids clobbering a display another portal has already taken over """
        if self.pName.text == portal.name:
            # portal name
            self.pName.rerender(self.pName_defaultText, self.featureColorOff)
            self.setFeaturePosition(self.pName, self.pName_pos, self.pName_genPt)
            # portal preview
            self.pGraphic.setImage(self.pGraphic_defaultImg)
            self.setFeaturePosition(self.pGraphic, self.pGraphic_pos, self.pGraphic_genPt)
            # # portal difficulty
            # self.pDif.rerender(self.pDif_defaultText, self.featureColorOff)
            # self.setFeaturePosition(self.pDif, self.pDif_pos, self.pDif_genPt)
            # puzzle solved status
            self.pSol.rerender(self.pSol_defaultText, self.featureColorOff)
            self.setFeaturePosition(self.pSol, self.pSol_pos, self.pSol_genPt)
            # snax text
            self.pSnaxText.rerender(self.pSnaxText_defaultText, self.featureColorOff)
            self.setFeaturePosition(self.pSnaxText, self.pSnaxText_pos, self.pSnaxText_genPt)
            # snax graphic
            self.pSnax.setImage(self.pSnax_defaultImg)
            self.pSnax2.setImage(self.pSnax2_defaultImg)
            self.setFeaturePosition(self.pSnax, self.pSnax_pos, self.pSnax_genPt)
            self.setFeaturePosition(self.pSnax2, self.pSnax2_pos, self.pSnax2_genPt)
# ----------------------------------------------- functions
def launch(profileName, newGame, _fps, snapshot, swipeDirection):
    """
    build and run the select screen, returning its exit state

    - profileName is name of player's file
    - set newGame to True to write new file
    - _fps is the target frame rate, passed to the scroller and run loop
    - snapshot / swipeDirection drive the entry transition
    returns (dest, puzzleName, snapshot) from the screen's run loop
    """
    SCREEN_W = pygame.display.get_surface().get_width()
    SCREEN_H = pygame.display.get_surface().get_height()
    # info bar occupies the right quarter of the screen
    IB_W = SCREEN_W/4
    IB_H = SCREEN_H
    IB_COL = (40,40,40)
    IB_TR = (SCREEN_W, 0)
    ImgLib()
    img = ImgLib.getImage
    if newGame:
        dataStorage56.writeProfile(profileName, NEW_PROFILE)
    profileDelegate = ProfileDelegate(profileName)
    # sync unlock milestones only when coming from the load-profile screen
    # (not when returning from a puzzle, which swipes left)
    if swipeDirection != "left":
        profileDelegate.checkForUnlockedAndSyncData()
    selectScreen = SelectScreen()
    selectScreen.addSpriteToGroup(scroller56.Scroller(selectScreen, FIELDSIZE, "BL", _fps), "SCROLLER")
    AreaCursor(img("CURSOR"))
    InfoBar(profileName, (IB_W, IB_H), IB_COL, IB_TR, None, IB_ALPHA)
    # Portal(name, imgLocked, imgUnlocked, imgComplete, location)
    Portal("quit to title", img("BACK_ARROW"), img("BACK_ARROW"), img("BACK_ARROW"), grd(1,6))
    Portal("interstellar snax ship", img("SNAX_SUB"), img("SNAX_SUB"), img("SNAX_SUB"), grd(2,6))
    Portal("tutorial", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(3,6)) # needs label added
    Portal("odd color out", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(4,6))
    Portal("small detour", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(5,6))
    Portal("go for it", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(6,6))
    Portal("switches", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(6,5))
    Portal("space walk", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(5,5))
    Portal("wait for me", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(4,5))
    Portal("crowded crew 1", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(3,5))
    Portal("cross paths", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(2,5))
    Portal("loop", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(1,5))
    Portal("island", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(1,4))
    Portal("sardines 1", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(2,4))
    Portal("current", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(3,4))
    Portal("try the hard way", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(4,4))
    Portal("double back", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(5,4))
    Portal("teamwork", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(6,4))
    Portal("over and out", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(6,3))
    Portal("symmetry 1", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(5,3))
    Portal("corral", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(4,3))
    Portal("crowded crew 2", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(3,3))
    Portal("split up", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(2,3))
    Portal("asteroid field", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(1,3))
    Portal("jump the fence", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(1,2))
    Portal("symmetry 2", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(2,2))
    Portal("separate", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(3,2))
    Portal("under your nose", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(4,2))
    Portal("so close yet so far", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(5,2))
    Portal("sections", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(6,2))
    Portal("sardines 2", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(6,1))
    Portal("tasty trove", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(5,1))
    Portal("sandbar", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(4,1))
    Portal("hopscotch", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(3,1))
    Portal("corners", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(2,1))
    Portal("copy cat", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(1,1))
    Portal("small loop", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(1,0))
    Portal("outpost", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(2,0))
    Portal("take a hike", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(3,0))
    Portal("not so fast", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(4,0))
    Portal("really crowded crew", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(5,0))
    Portal("rescue", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), grd(6,0))
    Portal("gateway", img("PORT_LOCK"), img("PORT_OPEN"), img("PORT_COMP"), (300, grd(3,-1)[1]))
    # run the screen's main loop, then tear down module singletons
    dest, puzzleName, snapshot = SelectScreen.me.runMe(_fps, snapshot, swipeDirection)
    wipe()
    return dest, puzzleName, snapshot
# ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| ADD PORTAL |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
def grd(x, y):
    """ return absolute location from grid """
    # cell centers sit half a step back from each grid multiple
    halfX = XGRIDSTEP / 2
    halfY = YGRIDSTEP / 2
    return (x * XGRIDSTEP - halfX, y * YGRIDSTEP - halfY)
def wipe():
    """ reset class attributes to none - call before leaving module """
    for moduleSingleton in (SelectScreen, ProfileDelegate, AreaCursor, InfoBar):
        moduleSingleton.wipe()
# FPS ---- ****
# fps = infoGraphic56.FPS(selectScreen)
# SelectScreen.me.addSpriteToGroup(fps, "BKG1")
# **** ---- ~~~~
# # sectors
# secRadius = 500
# secColor = (50,100,200)
# secCenter = (400,700)
# secWidth = 1
# B = SCREEN_H
# relCountPos = (secRadius, -20)
# hideBelow = 1
#
# # curProfile, color, center, radius, width=0, toUnlock=0, counterPos=(0,0), hideBelow=0
# testSector1 = sector.Sector(profileDelegate, secColor, secCenter, secRadius, secWidth, SECTOR1, relCountPos, hideBelow)
# testSector2 = sector.Sector(profileDelegate, (100, 20, 200), grd(5,2), 600, secWidth, SECTOR2, (600, -20))
#
# sectorList = [testSector1, testSector2]
# counterList = []
# for s in sectorList:
# if s.counter:
# counterList.append(s.counter)
#
# SelectScreen.me.addSpriteListToGroup(sectorList, "SECTOR")
# SelectScreen.me.addSpriteListToGroup(counterList, "SECTOR")
#
# # satellites
# satRadius = 2
# spd = .042 # default slow .1 ; creepSnake .02
# satCount = 145
# shadeStep = 12
# tailSize = 2
#
# if testSector1.unlocked:
# satList1 = testSector1.makeSatellites(satRadius, spd, satCount, shadeStep, tailSize)
# SelectScreen.me.addSpriteListToGroup(satList1, "SECTOR")
# if testSector2.unlocked:
# satList2 = testSector2.makeSatellites(satRadius, spd, satCount, shadeStep, tailSize)
# SelectScreen.me.addSpriteListToGroup(satList2, "SECTOR")
#
## *********************************
| {
"repo_name": "sabajt/Dinos-In-Space",
"path": "areaSelect.py",
"copies": "1",
"size": "61305",
"license": "mit",
"hash": 790700624899668200,
"line_mean": 44.0440852314,
"line_max": 594,
"alpha_frac": 0.5464970231,
"autogenerated": false,
"ratio": 3.269425630633033,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43159226537330325,
"avg_score": null,
"num_lines": null
} |
""" AreaServer supplies a tiny image server for use with TileStache providers
that implement renderArea() (http://tilestache.org/doc/#custom-providers).
The built-in Mapnik provider (http://tilestache.org/doc/#mapnik-provider)
is one example.
There are no tiles here, just a quick & dirty way of getting variously-sized
images out of a codebase that's ordinarily oriented toward tile generation.
Example usage, with gunicorn (http://gunicorn.org):
gunicorn --bind localhost:8888 "TileStache.Goodies.AreaServer:WSGIServer('tilestache.cfg')"
AreaServer URLs are compatible with the built-in URL Template provider
(http://tilestache.org/doc/#url-template-provider) and implement a generic
kind of WMS (http://en.wikipedia.org/wiki/Web_Map_Service).
All six URL parameters shown in this example are required; any other
URL parameter is ignored:
http://localhost:8888/layer-name?width=600&height=600&xmin=-100&ymin=-100&xmax=100&ymax=100
"""
from datetime import timedelta
from datetime import datetime
from io import BytesIO
from TileStache.py3_compat import parse_qsl
from TileStache import WSGITileServer
from TileStache.Core import KnownUnknown
class WSGIServer (WSGITileServer):
    """ WSGI Application that can handle WMS-style requests for static images.
        Inherits the constructor from TileStache WSGI, which just loads
        a TileStache configuration file into self.config.
        WSGITileServer autoreload argument is ignored, though. For now.
    """
    def __call__(self, environ, start_response):
        """ Handle a request, using PATH_INFO and QUERY_STRING from environ.
            There are six required query string parameters: width, height,
            xmin, ymin, xmax and ymax. Layer name must be supplied in PATH_INFO.
        """
        try:
            for var in 'QUERY_STRING PATH_INFO'.split():
                if var not in environ:
                    raise KnownUnknown('Missing "%s" environment variable' % var)
            query = dict(parse_qsl(environ['QUERY_STRING']))
            for param in 'width height xmin ymin xmax ymax'.split():
                if param not in query:
                    raise KnownUnknown('Missing "%s" parameter' % param)
            layer = environ['PATH_INFO'].strip('/')
            layer = self.config.layers[layer]
            provider = layer.provider
            if not hasattr(provider, 'renderArea'):
                raise KnownUnknown('Layer "%s" provider %s has no renderArea() method' % (layer.name(), provider.__class__))
            width, height = [int(query[p]) for p in 'width height'.split()]
            xmin, ymin, xmax, ymax = [float(query[p]) for p in 'xmin ymin xmax ymax'.split()]
            #
            # Don't supply srs or zoom parameters, which may cause problems for
            # some providers. TODO: add optional support for these two parameters.
            #
            output = BytesIO()
            image = provider.renderArea(width, height, None, xmin, ymin, xmax, ymax, None)
            image.save(output, format='PNG')
            headers = [('Content-Type', 'image/png')]
            if layer.allowed_origin:
                headers.append(('Access-Control-Allow-Origin', layer.allowed_origin))
            if layer.max_cache_age is not None:
                expires = datetime.utcnow() + timedelta(seconds=layer.max_cache_age)
                headers.append(('Expires', expires.strftime('%a %d %b %Y %H:%M:%S GMT')))
                headers.append(('Cache-Control', 'public, max-age=%d' % layer.max_cache_age))
            start_response('200 OK', headers)
            # WSGI (PEP 3333) requires an iterable of bytestrings, not a bare
            # bytes object (which would be iterated byte-by-byte, or as ints on py3)
            return [output.getvalue()]
        # "except X, e" is Python-2-only syntax and a SyntaxError on Python 3;
        # "as" works on both and matches the py3_compat intent of this module
        except KnownUnknown as e:
            start_response('400 Bad Request', [('Content-Type', 'text/plain')])
            return [str(e).encode('utf-8')]
| {
"repo_name": "TileStache/TileStache",
"path": "TileStache/Goodies/AreaServer.py",
"copies": "1",
"size": "3832",
"license": "bsd-3-clause",
"hash": -6199680684828739000,
"line_mean": 41.1098901099,
"line_max": 124,
"alpha_frac": 0.6393528184,
"autogenerated": false,
"ratio": 4.187978142076503,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017854891610437024,
"num_lines": 91
} |
# AREA Views #
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import generics
from rest_framework import permissions
from area.models import Region, Location, LocationDirectory
from area.serializers import RegionSerializer, RegionDetailSerializer, LocationSerializer, LocationDetailSerializer, LocationDirectorySerializer
########
# RESTful API views.
class API_RegionList(generics.ListAPIView):
    """ List the Regions owned by the requesting user.

    Optional 'limit' / 'offset' query parameters slice the result set.
    """
    permission_classes = (permissions.IsAuthenticated,)
    serializer_class = RegionSerializer

    def get_queryset(self):
        """ Return Regions of the current user. """
        # Authorization check.
        # We assume later that region_list is already
        # filtered with authorized regions only so we simply
        # add some more filters.
        region_list = Region.objects.filter(owner=self.request.user)
        # Paginate.
        # FIXME The built-in "LimitOffsetPagination" didn't work;
        # this hand-rolled limit/offset slicing should eventually be
        # replaced with it (NEED TO DRY THIS with API_LocationList).
        # The previous version keyed off "'limit' in locals()", which is
        # fragile; explicit values express the same behavior directly.
        limit = self.request.query_params.get('limit')
        offset = int(self.request.query_params.get('offset', 0))
        if limit is not None:
            region_list = region_list[offset:offset + int(limit)]
        elif 'offset' in self.request.query_params:
            region_list = region_list[offset:]
        return region_list
class API_LocationList(generics.ListAPIView):
    """ List Locations of the current user.

    Supports optional ?region=<id> filtering and manual
    'limit'/'offset' pagination (see FIXME below).
    """
    permission_classes = (permissions.IsAuthenticated,)
    serializer_class = LocationSerializer

    def get_queryset(self):
        """ Return Locations of the current user.
            You may specify Location in GET request. """
        # Authorization check.
        # We assume later that location_list is already
        # filtered with authorized locations only so we simply
        # add some more filters.
        location_list = Location.objects.filter(region__owner=self.request.user)
        # Region filtering.
        # There is filtering locations by Region available.
        if 'region' in self.request.query_params:
            try:
                location_list = location_list.filter(
                    region=int(self.request.query_params['region']))
            except (ValueError, TypeError):
                # Best-effort: a non-numeric 'region' value is ignored
                # rather than failing the whole request. (Was a bare
                # 'except:' which could hide unrelated errors.)
                pass
        # Paginate.
        # FIXME The built-in "LimitOffsetPagination" didn't work.
        # Had to write directly in the view. NEED TO DRY THIS!
        # BUG FIX: offset-only requests used to raise NameError
        # because 'offset' was bound only together with 'limit'.
        try:
            limit = int(self.request.query_params['limit'])
        except (KeyError, ValueError):
            limit = None
        try:
            offset = int(self.request.query_params['offset'])
        except (KeyError, ValueError):
            offset = 0
        if limit is not None:
            location_list = location_list[offset:offset + limit]
        elif offset:
            location_list = location_list[offset:]
        return location_list
class API_LocationDirectoryList(generics.ListAPIView):
    """ List every available LocationDirectory. """
    serializer_class = LocationDirectorySerializer

    def get_queryset(self):
        """ Return all LocationDirectories. """
        # All location designs are currently visible to every
        # player, so no per-user filtering is applied here.
        return LocationDirectory.objects.all()
class API_RegionDetail(APIView):
    """ Details of Region object. """
    # FIXME! Should DRY this. RetrieveAPIView makes it easy!
    permission_classes = (permissions.IsAuthenticated,)
    serializer_class = RegionDetailSerializer

    def get_object(self, pk):
        """ Get already authorized Region.

        Ownership is enforced by the lookup itself: a Region
        belonging to another user raises Region.DoesNotExist
        exactly like a nonexistent pk, so callers cannot tell
        the two cases apart.
        """
        return Region.objects.get(pk=pk, owner=self.request.user)

    def get(self, request, pk, format=None):
        """ Return serialized details of one authorized Region. """
        try:
            region = self.get_object(pk)
        except Region.DoesNotExist:
            return Response("Authorization error or wrong Region id.",
                status=status.HTTP_404_NOT_FOUND)
        return Response(self.serializer_class(region).data)

    def put(self, request, pk):
        """ We allow to modify only the name of the Region,
            or call built-in methods. """
        # Get authorized Region to deal with.
        # Although user can try to specify a different 'pk' in his JSON,
        # everything should be OK and only the one from URL will be used.
        # Maybe check this possible security issue one day.
        # NOTE(review): GET answers 404 for this failure while PUT
        # answers 403 - confirm whether that asymmetry is intended.
        try:
            region = self.get_object(pk)
        except Region.DoesNotExist:
            return Response("Authorization error or wrong Region id.",
                status=status.HTTP_403_FORBIDDEN)
        # BUG FIX: removed a leftover debug print of request.data,
        # which leaked client payloads into server logs.
        # We do not have any built-in methods now, so this is just a stub.
        return Response(self.serializer_class(region).data)
class API_LocationDetail(APIView):
    """ Detail view for a single Location object. """
    # FIXME! Should DRY this. RetrieveAPIView makes it easy!
    permission_classes = (permissions.IsAuthenticated,)
    serializer_class = LocationDetailSerializer

    def get_object(self, pk):
        """ Get already authorized Location."""
        # Ownership check happens inside the lookup itself.
        return Location.objects.get(pk=pk, region__owner=self.request.user)

    def get(self, request, pk, format=None):
        """ Serialize one Location, or answer 404 when it is
            missing or owned by another user. """
        try:
            loc = self.get_object(pk)
        except Location.DoesNotExist:
            pass
        else:
            return Response(self.serializer_class(loc).data)
        return Response("Authorization error or wrong Location id.",
            status=status.HTTP_404_NOT_FOUND)
| {
"repo_name": "ngr/sm_00",
"path": "area/views.py",
"copies": "1",
"size": "5847",
"license": "mit",
"hash": 2998464170895763500,
"line_mean": 38.2416107383,
"line_max": 144,
"alpha_frac": 0.638276039,
"autogenerated": false,
"ratio": 4.511574074074074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.017067875947198902,
"num_lines": 149
} |
# A recursive descent parser that implements an integer calculator
# with variables and conditional statements.
#
# This parser implements exactly the same grammar as
# rd_parser_ebnf, but it evaluates expressions using a different
# technique. Instead of recursively evaluating them following the
# EBNF grammar, it uses an embedded infix expression evaluator
# based on the Shunting Yard algorithm.
#
#-----------------------------------------------
# Eli Bendersky (eliben@gmail.com)
# License: this code is in the public domain
# Last modified: March 2009
#-----------------------------------------------
#
from __future__ import with_statement
from contextlib import contextmanager
import operator
try:
import eblib.lexer as lexer
except ImportError:
import lexer
class ParseError(Exception):
    """ Raised for any lexical or syntactic error in the input. """
    pass
class CalcParser(object):
    """ The calculator statement parser. Evaluates statements
    and expressions on the fly, returning a numeric result
    for all calc() calls.

    NOTE(review): Python 2 only - relies on the `except E, e`
    syntax and on operator.div, both removed in Python 3.
    """
    def __init__(self):
        # Lexer rules. Order matters: keywords come before the
        # IDENTIFIER pattern, and two-character operators (e.g.
        # '**', '>=', '<<') before their one-character prefixes.
        lex_rules = [
            ('set', 'SET'),
            ('if', 'IF'),
            ('then', 'THEN'),
            ('else', 'ELSE'),
            ('\d+', 'NUMBER'),
            ('[a-zA-Z_]\w*', 'IDENTIFIER'),
            ('\*\*', '**'),
            ('!=', '!='),
            ('==', '=='),
            ('>=', '>='),
            ('<=', '<='),
            ('>>', '>>'),
            ('<<', '<<'),
            ('&', '&'),
            ('\^', '^'),
            ('\|', '|'),
            ('<', '<'),
            ('>', '>'),
            ('\+', '+'),
            ('\-', '-'),
            ('\*', '*'),
            ('\/', '/'),
            ('\(', '('),
            ('\)', ')'),
            ('=', '='),
        ]
        self.lexer = lexer.Lexer(lex_rules, skip_whitespace=True)
        self._clear()
    def calc(self, line):
        """ Parse a new line of input and return its result.

            Variables defined in previous calls to calc can be
            used in following ones.
            ParseError can be raised in case of errors.
        """
        self.lexer.input(line)
        self._get_next_token()
        val = self._stmt()
        # A leftover token means the statement did not consume the
        # whole line - report it as a syntax error.
        if self.cur_token.type != None:
            self._error('Unexpected token %s (at #%s)' % (
                self.cur_token.val, self.cur_token.pos))
        return val
    def _clear(self):
        """ Reset all parser state: current token, variable table
            and the syntax-check-only flag (see comment below).
        """
        self.cur_token = None
        self.var_table = {}
        self.only_syntax_check = False
    # Some rules are parsed with the self.only_syntax_check flag
    # turned on. This means that the syntactic structure of the
    # rules has to be checked, but no side effects are to be
    # executed. Example side effect: assignment to a variable.
    #
    # This is used, for example, when a branch of an if statement
    # is not taken (e.g. the 'else' branch of a true condition),
    # but we should still verify that the syntax is correct.
    #
    # To implement this, the syntax_check context manager can be
    # used. When a rule wants to parse some sub-rule with
    # self.only_syntax_check turned on, it can do it as follows:
    #
    # with self._syntax_check():
    # ... parse sub-rules
    #
    # This will ensure that the only_syntax_check flag is set
    # before the sub-rules are parsed and turned off after.
    #
    @contextmanager
    def _syntax_check(self):
        # We must catch and reraise exceptions (for example,
        # ParseError can happen), but turn off the flag anyway,
        # so that subsequent statements won't be affected.
        #
        try:
            self.only_syntax_check = True
            yield
        except:
            raise
        finally:
            self.only_syntax_check = False
    def _error(self, msg):
        """ Report a parse error by raising ParseError. """
        raise ParseError(msg)
    def _get_next_token(self):
        """ Advance self.cur_token to the next input token.
            End of input is represented by a Token whose fields
            are all None, so callers can test cur_token.type.
        """
        try:
            self.cur_token = self.lexer.token()
            if self.cur_token is None:
                self.cur_token = lexer.Token(None, None, None)
        except lexer.LexerError, e:
            self._error('Lexer error at position %d' % e.pos)
    def _match(self, type):
        """ The 'match' primitive of RD parsers.

            * Verifies that the current token is of the given type
            * Returns the value of the current token
            * Reads in the next token
        """
        if self.cur_token.type == type:
            val = self.cur_token.val
            self._get_next_token()
            return val
        else:
            self._error('Unmatched %s (found %s)' % (
                type, self.cur_token.type))
    # The toplevel rule of the parser.
    #
    # <stmt> : <assign_stmt>
    # | <if_stmt>
    # | <infix_expr>
    #
    def _stmt(self):
        # Dispatch on the first token; empty input yields ''.
        if self.cur_token.type is None:
            return ''
        elif self.cur_token.type == 'SET':
            return self._assign_stmt()
        elif self.cur_token.type == 'IF':
            return self._if_stmt()
        else:
            return self._infix_eval()
    # <if_stmt> : if <infix_expr> then <stmt> [else <stmt>]
    #
    def _if_stmt(self):
        self._match('IF')
        condition = self._infix_eval()
        self._match('THEN')
        if condition:
            # The condition is true, so we'll evaluate the 'then'
            # clause, and only syntax check the 'else' clause,
            # if there is one.
            #
            result = self._stmt()
            if self.cur_token.type == 'ELSE':
                self._match('ELSE')
                with self._syntax_check():
                    self._stmt()
            return result
        else:
            # The condition is false, so we'll only syntax check
            # the 'then' clause, and evaluate the 'else' clause,
            # if there is one.
            #
            with self._syntax_check():
                self._stmt()
            if self.cur_token.type == 'ELSE':
                self._match('ELSE')
                return self._stmt()
            else:
                return None
    # <assign_stmt> : set <id> = <infix_expr>
    #
    def _assign_stmt(self):
        self._match('SET')
        id_name = self._match('IDENTIFIER')
        self._match('=')
        expr_val = self._infix_eval()
        # When syntax checking, don't actually do the assignment
        #
        if not self.only_syntax_check:
            self.var_table[id_name] = expr_val
        return expr_val
    ##
    ## The infix expression evaluator.
    ## Returns the value of the evaluated expression.
    ##
    ## Infix expressions are numbers and identifiers separated by
    ## binary (and unary) operators, possibly with parts delimited
    ## by parentheses. The operators supported by this evaluator
    ## and their precedences are controlled through the _ops
    ## table.
    ##
    ## Internally, uses two stacks. One for keeping the operations
    ## that still await results, and another for keeping the
    ## results.
    ##
    ##
    def _infix_eval(self):
        """ Run the infix evaluator and return the result.
        """
        self.op_stack = []
        self.res_stack = []
        self.op_stack.append(self._sentinel)
        self._infix_eval_expr()
        return self.res_stack[-1]
    class Op(object):
        """ Represents an operator recognized by the infix
            evaluator. Each operator has a numeric precedence,
            and flags specifying whether it's unary/binary and
            right/left associative.
        """
        def __init__( self, name, op, prec,
                unary=False, right_assoc=False):
            self.name = name
            self.op = op
            self.prec = prec
            self.unary = unary
            self.binary = not self.unary
            self.right_assoc = right_assoc
            self.left_assoc = not self.right_assoc
        def apply(self, *args):
            # Delegate to the wrapped callable (from operator module).
            return self.op(*args)
        def precedes(self, other):
            """ The '>' operator from the Shunting Yard algorithm.
                I don't call it '>' on purpose, as its semantics
                are unusual (i.e. this is not the familiar
                algebraic '>')
            """
            if self.binary and other.binary:
                if self.prec > other.prec:
                    return True
                elif self.left_assoc and (self.prec == other.prec):
                    return True
            elif self.unary and other.binary:
                return self.prec >= other.prec
            return False
        def __repr__(self):
            return '<%s(%s)>' % (self.name, self.prec)
    # The operators recognized by the evaluator.
    # NOTE(review): operator.div is Python 2 integer/classic
    # division; it does not exist in Python 3.
    #
    _ops = {
        'u-': Op('unary -', operator.neg, 90, unary=True),
        '**': Op('**', operator.pow, 70, right_assoc=True),
        '*': Op('*', operator.mul, 50),
        '/': Op('/', operator.div, 50),
        '+': Op('+', operator.add, 40),
        '-': Op('-', operator.sub, 40),
        '<<': Op('<<', operator.lshift, 35),
        '>>': Op('>>', operator.rshift, 35),
        '&': Op('&', operator.and_, 30),
        '^': Op('^', operator.xor, 29),
        '|': Op('|', operator.or_, 28),
        '>': Op('>', operator.gt, 20),
        '>=': Op('>=', operator.ge, 20),
        '<': Op('<', operator.lt, 20),
        '<=': Op('<=', operator.le, 20),
        '==': Op('==', operator.eq, 15),
        '!=': Op('!=', operator.ne, 15),
    }
    # A set of operators that can be unary. If such an operator
    # is found, 'u' is prepended to its symbol for finding it in
    # the _ops table
    #
    _unaries = set(['-'])
    # Dummy operator with the lowest possible precedence (the
    # Sentinel value in the Shunting Yard algorithm)
    #
    _sentinel = Op(None, None, 0)
    def _infix_eval_expr(self):
        """ Evaluates an 'expression' - atoms separated by binary
            operators.
        """
        self._infix_eval_atom()
        while ( self.cur_token.type in self._ops and
                self._ops[self.cur_token.type].binary):
            self._push_op(self._ops[self.cur_token.type])
            self._get_next_token()
            self._infix_eval_atom()
        # Flush all pending operators down to this expression's
        # sentinel marker.
        while self.op_stack[-1] != self._sentinel:
            self._pop_op()
    def _infix_eval_atom(self):
        """ Evaluates an 'atom' - either an identifier/number, or
            an atom prefixed by a unary operation, or a full
            expression inside parentheses.
        """
        if self.cur_token.type in ['IDENTIFIER', 'NUMBER']:
            self.res_stack.append(self._compute_val(self.cur_token))
            self._get_next_token()
        elif self.cur_token.type == '(':
            self._get_next_token()
            # A fresh sentinel isolates the parenthesized
            # sub-expression from operators outside it.
            self.op_stack.append(self._sentinel)
            self._infix_eval_expr()
            self._match(')')
            self.op_stack.pop()
        elif self.cur_token.type in self._unaries:
            self._push_op(self._ops['u' + self.cur_token.type])
            self._get_next_token()
            self._infix_eval_atom()
    def _push_op(self, op):
        """ Pushes an operation onto the op stack.
            But first computes and removes all higher-precedence
            operators from it.
        """
        #~ print 'push_op: stack =', self.op_stack
        #~ print ' ...', op
        while self.op_stack[-1].precedes(op):
            self._pop_op()
        self.op_stack.append(op)
        #~ print ' ... =>', self.op_stack
    def _pop_op(self):
        """ Pops an operation from the op stack, computing its
            result and storing it on the result stack.
        """
        #~ print 'pop_op: op_stack =', self.op_stack
        #~ print ' ... res_stack =', self.res_stack
        top_op = self.op_stack.pop()
        if top_op.unary:
            self.res_stack.append(top_op.apply(self.res_stack.pop()))
        else:
            if len(self.res_stack) < 2:
                self._error('Not enough arguments for operator %s' % top_op.name)
            # Pop order matters for non-commutative operators:
            # t0 is the left operand, t1 the right.
            t1 = self.res_stack.pop()
            t0 = self.res_stack.pop()
            self.res_stack.append(top_op.apply(t0, t1))
        #~ print ' ... => res_stack =', self.res_stack
    def _compute_val(self, tok):
        """ Compute the value of a number or an identifier.
        """
        if tok.type == 'NUMBER':
            return int(tok.val)
        elif tok.type == 'IDENTIFIER':
            # When only syntax checking, identifiers need not be
            # defined; any placeholder value will do.
            if self.only_syntax_check:
                return 0
            else:
                try:
                    val = self.var_table[tok.val]
                except KeyError:
                    self._error('Unknown identifier `%s`' % tok.val)
                return val
        else:
            assert 0
# Quick smoke test when run as a script (Python 2 print statement).
if __name__ == '__main__':
    p = CalcParser()
    # Expected output: -112 (= 2 + 4 - 13 * 9 - 1)
    print p.calc('2 + 4 - (8 + 5) * 3 ** 2 - 1')
    #~ p.calc('set p = 1')
    #~ p.calc('set p = p * 2')
    #~ p.calc('if 5 > 5 then set p = p * 2 else set p = 0')
| {
"repo_name": "evandrix/Splat",
"path": "doc/parser/rd_parser_infix_expr.py",
"copies": "1",
"size": "13588",
"license": "mit",
"hash": 8896937764137831000,
"line_mean": 32.6336633663,
"line_max": 81,
"alpha_frac": 0.4853547248,
"autogenerated": false,
"ratio": 4.1873651771956855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016613567362649496,
"num_lines": 404
} |
# A recursive descent parser that implements an integer calculator
# with variables and conditional statements.
#
# This parser implements exactly the same grammar as
# rd_parser_ebnf, but it evaluates expressions using a different
# technique. Instead of recursively evaluating them following the
# EBNF grammar, it uses an embedded infix expression evaluator
# based on the Shunting Yard algorithm.
#
#-----------------------------------------------
# Eli Bendersky (eliben@gmail.com)
# License: this code is in the public domain
# Last modified: March 2009
#-----------------------------------------------
#
from __future__ import with_statement
from contextlib import contextmanager
import operator
try:
import eblib.lexer as lexer
except ImportError:
import lexer
class ParseError(Exception):
    """ Raised for any lexical or syntactic error in the input. """
    pass
class CalcParser(object):
    """ The calculator statement parser. Evaluates statements
    and expressions on the fly, returning a numeric result
    for all calc() calls.

    NOTE(review): Python 2 only - relies on the `except E, e`
    syntax and on operator.div, both removed in Python 3.
    """
    def __init__(self):
        # Lexer rules. Order matters: keywords come before the
        # IDENTIFIER pattern, and two-character operators (e.g.
        # '**', '>=', '<<') before their one-character prefixes.
        lex_rules = [
            ('set', 'SET'),
            ('if', 'IF'),
            ('then', 'THEN'),
            ('else', 'ELSE'),
            ('\d+', 'NUMBER'),
            ('[a-zA-Z_]\w*', 'IDENTIFIER'),
            ('\*\*', '**'),
            ('!=', '!='),
            ('==', '=='),
            ('>=', '>='),
            ('<=', '<='),
            ('>>', '>>'),
            ('<<', '<<'),
            ('&', '&'),
            ('\^', '^'),
            ('\|', '|'),
            ('<', '<'),
            ('>', '>'),
            ('\+', '+'),
            ('\-', '-'),
            ('\*', '*'),
            ('\/', '/'),
            ('\(', '('),
            ('\)', ')'),
            ('=', '='),
        ]
        self.lexer = lexer.Lexer(lex_rules, skip_whitespace=True)
        self._clear()
    def calc(self, line):
        """ Parse a new line of input and return its result.

            Variables defined in previous calls to calc can be
            used in following ones.
            ParseError can be raised in case of errors.
        """
        self.lexer.input(line)
        self._get_next_token()
        val = self._stmt()
        # A leftover token means the statement did not consume the
        # whole line - report it as a syntax error.
        if self.cur_token.type != None:
            self._error('Unexpected token %s (at #%s)' % (
                self.cur_token.val, self.cur_token.pos))
        return val
    def _clear(self):
        """ Reset all parser state: current token, variable table
            and the syntax-check-only flag (see comment below).
        """
        self.cur_token = None
        self.var_table = {}
        self.only_syntax_check = False
    # Some rules are parsed with the self.only_syntax_check flag
    # turned on. This means that the syntactic structure of the
    # rules has to be checked, but no side effects are to be
    # executed. Example side effect: assignment to a variable.
    #
    # This is used, for example, when a branch of an if statement
    # is not taken (e.g. the 'else' branch of a true condition),
    # but we should still verify that the syntax is correct.
    #
    # To implement this, the syntax_check context manager can be
    # used. When a rule wants to parse some sub-rule with
    # self.only_syntax_check turned on, it can do it as follows:
    #
    # with self._syntax_check():
    # ... parse sub-rules
    #
    # This will ensure that the only_syntax_check flag is set
    # before the sub-rules are parsed and turned off after.
    #
    @contextmanager
    def _syntax_check(self):
        # We must catch and reraise exceptions (for example,
        # ParseError can happen), but turn off the flag anyway,
        # so that subsequent statements won't be affected.
        #
        try:
            self.only_syntax_check = True
            yield
        except:
            raise
        finally:
            self.only_syntax_check = False
    def _error(self, msg):
        """ Report a parse error by raising ParseError. """
        raise ParseError(msg)
    def _get_next_token(self):
        """ Advance self.cur_token to the next input token.
            End of input is represented by a Token whose fields
            are all None, so callers can test cur_token.type.
        """
        try:
            self.cur_token = self.lexer.token()
            if self.cur_token is None:
                self.cur_token = lexer.Token(None, None, None)
        except lexer.LexerError, e:
            self._error('Lexer error at position %d' % e.pos)
    def _match(self, type):
        """ The 'match' primitive of RD parsers.

            * Verifies that the current token is of the given type
            * Returns the value of the current token
            * Reads in the next token
        """
        if self.cur_token.type == type:
            val = self.cur_token.val
            self._get_next_token()
            return val
        else:
            self._error('Unmatched %s (found %s)' % (
                type, self.cur_token.type))
    # The toplevel rule of the parser.
    #
    # <stmt> : <assign_stmt>
    # | <if_stmt>
    # | <infix_expr>
    #
    def _stmt(self):
        # Dispatch on the first token; empty input yields ''.
        if self.cur_token.type is None:
            return ''
        elif self.cur_token.type == 'SET':
            return self._assign_stmt()
        elif self.cur_token.type == 'IF':
            return self._if_stmt()
        else:
            return self._infix_eval()
    # <if_stmt> : if <infix_expr> then <stmt> [else <stmt>]
    #
    def _if_stmt(self):
        self._match('IF')
        condition = self._infix_eval()
        self._match('THEN')
        if condition:
            # The condition is true, so we'll evaluate the 'then'
            # clause, and only syntax check the 'else' clause,
            # if there is one.
            #
            result = self._stmt()
            if self.cur_token.type == 'ELSE':
                self._match('ELSE')
                with self._syntax_check():
                    self._stmt()
            return result
        else:
            # The condition is false, so we'll only syntax check
            # the 'then' clause, and evaluate the 'else' clause,
            # if there is one.
            #
            with self._syntax_check():
                self._stmt()
            if self.cur_token.type == 'ELSE':
                self._match('ELSE')
                return self._stmt()
            else:
                return None
    # <assign_stmt> : set <id> = <infix_expr>
    #
    def _assign_stmt(self):
        self._match('SET')
        id_name = self._match('IDENTIFIER')
        self._match('=')
        expr_val = self._infix_eval()
        # When syntax checking, don't actually do the assignment
        #
        if not self.only_syntax_check:
            self.var_table[id_name] = expr_val
        return expr_val
    ##
    ## The infix expression evaluator.
    ## Returns the value of the evaluated expression.
    ##
    ## Infix expressions are numbers and identifiers separated by
    ## binary (and unary) operators, possibly with parts delimited
    ## by parentheses. The operators supported by this evaluator
    ## and their precedences are controlled through the _ops
    ## table.
    ##
    ## Internally, uses two stacks. One for keeping the operations
    ## that still await results, and another for keeping the
    ## results.
    ##
    ##
    def _infix_eval(self):
        """ Run the infix evaluator and return the result.
        """
        self.op_stack = []
        self.res_stack = []
        self.op_stack.append(self._sentinel)
        self._infix_eval_expr()
        return self.res_stack[-1]
    class Op(object):
        """ Represents an operator recognized by the infix
            evaluator. Each operator has a numeric precedence,
            and flags specifying whether it's unary/binary and
            right/left associative.
        """
        def __init__( self, name, op, prec,
                unary=False, right_assoc=False):
            self.name = name
            self.op = op
            self.prec = prec
            self.unary = unary
            self.binary = not self.unary
            self.right_assoc = right_assoc
            self.left_assoc = not self.right_assoc
        def apply(self, *args):
            # Delegate to the wrapped callable (from operator module).
            return self.op(*args)
        def precedes(self, other):
            """ The '>' operator from the Shunting Yard algorithm.
                I don't call it '>' on purpose, as its semantics
                are unusual (i.e. this is not the familiar
                algebraic '>')
            """
            if self.binary and other.binary:
                if self.prec > other.prec:
                    return True
                elif self.left_assoc and (self.prec == other.prec):
                    return True
            elif self.unary and other.binary:
                return self.prec >= other.prec
            return False
        def __repr__(self):
            return '<%s(%s)>' % (self.name, self.prec)
    # The operators recognized by the evaluator.
    # NOTE(review): operator.div is Python 2 integer/classic
    # division; it does not exist in Python 3.
    #
    _ops = {
        'u-': Op('unary -', operator.neg, 90, unary=True),
        '**': Op('**', operator.pow, 70, right_assoc=True),
        '*': Op('*', operator.mul, 50),
        '/': Op('/', operator.div, 50),
        '+': Op('+', operator.add, 40),
        '-': Op('-', operator.sub, 40),
        '<<': Op('<<', operator.lshift, 35),
        '>>': Op('>>', operator.rshift, 35),
        '&': Op('&', operator.and_, 30),
        '^': Op('^', operator.xor, 29),
        '|': Op('|', operator.or_, 28),
        '>': Op('>', operator.gt, 20),
        '>=': Op('>=', operator.ge, 20),
        '<': Op('<', operator.lt, 20),
        '<=': Op('<=', operator.le, 20),
        '==': Op('==', operator.eq, 15),
        '!=': Op('!=', operator.ne, 15),
    }
    # A set of operators that can be unary. If such an operator
    # is found, 'u' is prepended to its symbol for finding it in
    # the _ops table
    #
    _unaries = set(['-'])
    # Dummy operator with the lowest possible precedence (the
    # Sentinel value in the Shunting Yard algorithm)
    #
    _sentinel = Op(None, None, 0)
    def _infix_eval_expr(self):
        """ Evaluates an 'expression' - atoms separated by binary
            operators.
        """
        self._infix_eval_atom()
        while ( self.cur_token.type in self._ops and
                self._ops[self.cur_token.type].binary):
            self._push_op(self._ops[self.cur_token.type])
            self._get_next_token()
            self._infix_eval_atom()
        # Flush all pending operators down to this expression's
        # sentinel marker.
        while self.op_stack[-1] != self._sentinel:
            self._pop_op()
    def _infix_eval_atom(self):
        """ Evaluates an 'atom' - either an identifier/number, or
            an atom prefixed by a unary operation, or a full
            expression inside parentheses.
        """
        if self.cur_token.type in ['IDENTIFIER', 'NUMBER']:
            self.res_stack.append(self._compute_val(self.cur_token))
            self._get_next_token()
        elif self.cur_token.type == '(':
            self._get_next_token()
            # A fresh sentinel isolates the parenthesized
            # sub-expression from operators outside it.
            self.op_stack.append(self._sentinel)
            self._infix_eval_expr()
            self._match(')')
            self.op_stack.pop()
        elif self.cur_token.type in self._unaries:
            self._push_op(self._ops['u' + self.cur_token.type])
            self._get_next_token()
            self._infix_eval_atom()
    def _push_op(self, op):
        """ Pushes an operation onto the op stack.
            But first computes and removes all higher-precedence
            operators from it.
        """
        #~ print 'push_op: stack =', self.op_stack
        #~ print ' ...', op
        while self.op_stack[-1].precedes(op):
            self._pop_op()
        self.op_stack.append(op)
        #~ print ' ... =>', self.op_stack
    def _pop_op(self):
        """ Pops an operation from the op stack, computing its
            result and storing it on the result stack.
        """
        #~ print 'pop_op: op_stack =', self.op_stack
        #~ print ' ... res_stack =', self.res_stack
        top_op = self.op_stack.pop()
        if top_op.unary:
            self.res_stack.append(top_op.apply(self.res_stack.pop()))
        else:
            if len(self.res_stack) < 2:
                self._error('Not enough arguments for operator %s' % top_op.name)
            # Pop order matters for non-commutative operators:
            # t0 is the left operand, t1 the right.
            t1 = self.res_stack.pop()
            t0 = self.res_stack.pop()
            self.res_stack.append(top_op.apply(t0, t1))
        #~ print ' ... => res_stack =', self.res_stack
    def _compute_val(self, tok):
        """ Compute the value of a number or an identifier.
        """
        if tok.type == 'NUMBER':
            return int(tok.val)
        elif tok.type == 'IDENTIFIER':
            # When only syntax checking, identifiers need not be
            # defined; any placeholder value will do.
            if self.only_syntax_check:
                return 0
            else:
                try:
                    val = self.var_table[tok.val]
                except KeyError:
                    self._error('Unknown identifier `%s`' % tok.val)
                return val
        else:
            assert 0
# Quick smoke test when run as a script (Python 2 print statement).
if __name__ == '__main__':
    p = CalcParser()
    # Expected output: -112 (= 2 + 4 - 13 * 9 - 1)
    print p.calc('2 + 4 - (8 + 5) * 3 ** 2 - 1')
    #~ p.calc('set p = 1')
    #~ p.calc('set p = p * 2')
    #~ p.calc('if 5 > 5 then set p = p * 2 else set p = 0')
| {
"repo_name": "eliben/code-for-blog",
"path": "2009/py_rd_parser_example/rd_parser_infix_expr.py",
"copies": "1",
"size": "13290",
"license": "unlicense",
"hash": 6218698606599477000,
"line_mean": 32.1421446384,
"line_max": 81,
"alpha_frac": 0.4962377728,
"autogenerated": false,
"ratio": 4.1043854231006796,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.510062319590068,
"avg_score": null,
"num_lines": null
} |
# A recursive descent parser that implements an integer calculator
# with variables and conditional statements.
# The grammar is LL(1), suitable for predictive parsing.
#
# EBNF:
#
# <stmt> : <assign_stmt>
# | <if_stmt>
# | <cmp_expr>
#
# <assign_stmt> : set <id> = <cmp_expr>
#
## Note 'else' binds to the innermost 'if', like in C
#
# <if_stmt> : if <cmp_expr> then <stmt> [else <stmt>]
#
# <cmp_expr> : <bitor_expr> [== <bitor_expr>]
# | <bitor_expr> [!= <bitor_expr>]
# | <bitor_expr> [> <bitor_expr>]
# | <bitor_expr> [< <bitor_expr>]
# | <bitor_expr> [>= <bitor_expr>]
# | <bitor_expr> [<= <bitor_expr>]
#
# <bitor_expr> | <bitxor_expr> {| <bitxor_expr>}
#
# <bitxor_expr> | <bitand_expr> {^ <bitand_expr>}
#
# <bitand_expr> | <shift_expr> {& <shift_expr>}
#
# <shift_expr> | <arith_expr> {<< <arith_expr>}
# : <arith_expr> {>> <arith_expr>}
#
# <arith_expr> : <term> {+ <term>}
# | <term> {- <term>}
#
# <term> : <power> {* <power>}
# | <power> {/ <power>}
#
# <power> : <factor> ** <power>
# | <factor>
#
# <factor> : <id>
# | <number>
# | - <factor>
# | ( <cmp_expr> )
#
# <id> : [a-zA-Z_]\w+
# <number> : \d+
#
# Employs EBNF and looping to solve the associativity problem in
# <term> and <arith_expr>.
# Note that <power> is defined recursively and not using EBNF
# grouping {** <factor>}. This is on purpose - as it makes the
# right-associativity of exponentation naturally expressed in
# the recursion.
#
#-----------------------------------------------
# Eli Bendersky (eliben@gmail.com)
# License: this code is in the public domain
# Last modified: March 2009
#-----------------------------------------------
#
from __future__ import with_statement
from contextlib import contextmanager
import operator
try:
import eblib.lexer as lexer
except ImportError:
import lexer
class ParseError(Exception):
    """ Raised for any lexical or syntactic error in the input. """
    pass
class CalcParser(object):
""" The calculator statement parser. Evaluates statements
and expressions on the fly, returning a numeric result
for all calc() calls.
"""
def __init__(self):
lex_rules = [
('set', 'SET'),
('if', 'IF'),
('then', 'THEN'),
('else', 'ELSE'),
('\d+', 'NUMBER'),
('[a-zA-Z_]\w*', 'IDENTIFIER'),
('\*\*', '**'),
('!=', '!='),
('==', '=='),
('>=', '>='),
('<=', '<='),
('>>', '>>'),
('<<', '<<'),
('&', '&'),
('\^', '^'),
('\|', '|'),
('<', '<'),
('>', '>'),
('\+', '+'),
('\-', '-'),
('\*', '*'),
('\/', '/'),
('\(', '('),
('\)', ')'),
('=', '='),
]
self.lexer = lexer.Lexer(lex_rules, skip_whitespace=True)
self._clear()
def calc(self, line):
""" Parse a new line of input and return its result.
Variables defined in previous calls to calc can be
used in following ones.
ParseError can be raised in case of errors.
"""
self.lexer.input(line)
self._get_next_token()
val = self._stmt()
if self.cur_token.type != None:
self._error('Unexpected token %s (at #%s)' % (
self.cur_token.val, self.cur_token.pos))
return val
def _clear(self):
self.cur_token = None
self.var_table = {}
self.only_syntax_check = False
# Some rules are parsed with the self.only_syntax_check flag
# turned on. This means that the syntactic structure of the
# rules has to be checked, but no side effects are to be
# executed. Example side effect: assignment to a variable.
#
# This is used, for example, when a branch of an if statement
# is not taken (e.g. the 'else' branch of a true condition),
# but we should still verify that the syntax is correct.
#
# To implement this, the syntax_check context manager can be
# used. When a rule wants to parse some sub-rule with
# self.only_syntax_check turned on, it can do it as follows:
#
# with self._syntax_check():
# ... parse sub-rules
#
# This will ensure that the only_syntax_check flag is set
# before the sub-rules are parsed and turned off after.
#
@contextmanager
def _syntax_check(self):
# We must catch and reraise exceptions (for example,
# ParseError can happen), but turn off the flag anyway,
# so that subsequent statements won't be affected.
#
try:
self.only_syntax_check = True
yield
except:
raise
finally:
self.only_syntax_check = False
def _error(self, msg):
raise ParseError(msg)
def _get_next_token(self):
try:
self.cur_token = self.lexer.token()
if self.cur_token is None:
self.cur_token = lexer.Token(None, None, None)
except lexer.LexerError, e:
self._error('Lexer error at position %d' % e.pos)
def _match(self, type):
""" The 'match' primitive of RD parsers.
* Verifies that the current token is of the given type
* Returns the value of the current token
* Reads in the next token
"""
if self.cur_token.type == type:
val = self.cur_token.val
self._get_next_token()
return val
else:
self._error('Unmatched %s (found %s)' % (
type, self.cur_token.type))
# The toplevel rule of the parser.
#
# <stmt> : <assign_stmt>
# | <if_stmt>
# | <cmp_expr>
#
def _stmt(self):
if self.cur_token.type is None:
return ''
elif self.cur_token.type == 'SET':
return self._assign_stmt()
elif self.cur_token.type == 'IF':
return self._if_stmt()
else:
return self._cmp_expr()
# <if_stmt> : if <cmd_expr> then <stmt> [else <stmt>]
#
def _if_stmt(self):
self._match('IF')
condition = self._cmp_expr()
self._match('THEN')
if condition:
# The condition is true, so we'll evaluate the 'then'
# clause, and only syntax check the 'else' clause,
# if there is one.
#
result = self._stmt()
if self.cur_token.type == 'ELSE':
self._match('ELSE')
with self._syntax_check():
self._stmt()
return result
else:
# The condition is false, so we'll only syntax check
# the 'then' clause, and evaluate the 'else' clause,
# if there is one.
#
with self._syntax_check():
self._stmt()
if self.cur_token.type == 'ELSE':
self._match('ELSE')
return self._stmt()
else:
return None
# <assign_stmt> : set <id> = <cmp_expr>
#
def _assign_stmt(self):
self._match('SET')
id_name = self._match('IDENTIFIER')
self._match('=')
expr_val = self._cmp_expr()
# When syntax checking, don't actually do the assignment
#
if not self.only_syntax_check:
self.var_table[id_name] = expr_val
return expr_val
# <cmp_expr> : <bitor_expr> [== <bitor_expr>]
# | <bitor_expr> [!= <bitor_expr>]
# | <bitor_expr> [> <bitor_expr>]
# | <bitor_expr> [< <bitor_expr>]
# | <bitor_expr> [>= <bitor_expr>]
# | <bitor_expr> [<= <bitor_expr>]
#
_cmp_op_map = {
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
'<=': operator.le,
'<': operator.lt,
}
def _cmp_expr(self):
lval = self._bitor_expr()
for op_name, op in self._cmp_op_map.iteritems():
if self.cur_token.type == op_name:
self._match(op_name)
return apply(op, [lval, self._bitor_expr()])
# No known comparison op matched...
#
return lval
# <bitor_expr> | <bitxor_expr> {| <bitxor_expr>}
#
def _bitor_expr(self):
lval = self._bitxor_expr()
while self.cur_token.type == '|':
self._match('|')
lval |= self._bitxor_expr()
return lval
# <bitxor_expr> | <bitand_expr> {^ <bitand_expr>}
#
def _bitxor_expr(self):
    """Parse and evaluate:

    <bitxor_expr> : <bitand_expr> {^ <bitand_expr>}
    """
    result = self._bitand_expr()
    while True:
        if self.cur_token.type != '^':
            return result
        self._match('^')
        result = result ^ self._bitand_expr()
# <bitand_expr> | <shift_expr> {& <shift_expr>}
#
def _bitand_expr(self):
    """Parse and evaluate:

    <bitand_expr> : <shift_expr> {& <shift_expr>}
    """
    result = self._shift_expr()
    while True:
        if self.cur_token.type != '&':
            return result
        self._match('&')
        result = result & self._shift_expr()
# <shift_expr> | <arith_expr> {<< <arith_expr>}
# : <arith_expr> {>> <arith_expr>}
#
def _shift_expr(self):
    """Parse and evaluate (left-associative):

    <shift_expr> : <arith_expr> {<< <arith_expr>}
                 | <arith_expr> {>> <arith_expr>}
    """
    result = self._arith_expr()
    while True:
        tok = self.cur_token.type
        if tok == '>>':
            self._match('>>')
            result = result >> self._arith_expr()
        elif tok == '<<':
            self._match('<<')
            result = result << self._arith_expr()
        else:
            return result
# <arith_expr> : <term> {+ <term>}
# | <term> {- <term>}
#
def _arith_expr(self):
    """Parse and evaluate (left-associative):

    <arith_expr> : <term> {+ <term>}
                 | <term> {- <term>}
    """
    result = self._term()
    while True:
        tok = self.cur_token.type
        if tok == '+':
            self._match('+')
            result = result + self._term()
        elif tok == '-':
            self._match('-')
            result = result - self._term()
        else:
            return result
# <term> : <power> {* <power>}
# | <power> {/ <power>}
#
def _term(self):
    """Parse and evaluate (left-associative):

    <term> : <power> {* <power>}
           | <power> {/ <power>}
    """
    result = self._power()
    while True:
        tok = self.cur_token.type
        if tok == '*':
            self._match('*')
            result = result * self._power()
        elif tok == '/':
            # Note: plain '/' is kept deliberately, preserving the
            # original division semantics for integer operands.
            self._match('/')
            result = result / self._power()
        else:
            return result
# <power> : <factor> ** <power>
# | <factor>
#
def _power(self):
    """Parse and evaluate (right-associative, via recursion):

    <power> : <factor> [** <power>]
    """
    base = self._factor()
    if self.cur_token.type != '**':
        return base
    self._match('**')
    return base ** self._power()
# <factor> : <id>
# | <number>
# | - <factor>
# | ( <cmp_expr> )
#
def _factor(self):
    """Parse and evaluate:

    <factor> : <id>
             | <number>
             | - <factor>
             | ( <cmp_expr> )
    """
    tok_type = self.cur_token.type
    if tok_type == 'NUMBER':
        return int(self._match('NUMBER'))
    if tok_type == '-':
        # Unary minus applies to the following factor.
        self._match('-')
        return -self._factor()
    if tok_type == '(':
        self._match('(')
        inner = self._cmp_expr()
        self._match(')')
        return inner
    if tok_type == 'IDENTIFIER':
        name = self._match('IDENTIFIER')
        # In syntax-check mode an undefined variable is not an error,
        # so any placeholder value will do.
        if self.only_syntax_check:
            return 0
        try:
            val = self.var_table[name]
        except KeyError:
            self._error('Unknown identifier `%s`' % name)
        return val
    self._error('Invalid factor `%s`' % self.cur_token.val)
def calculator_prompt():
""" A toy calculator prompt for interactive computations.
"""
print 'Welcome to the calculator. Press Ctrl+C to exit.'
cp = CalcParser()
try:
while True:
try:
line = raw_input('--> ')
print cp.calc(line)
except ParseError, err:
print 'Error:', err
except KeyboardInterrupt:
print '... Thanks for using the calculator.'
if __name__ == '__main__':
    import sys
    # '-p' launches the interactive REPL; otherwise run the scripted
    # demo below.
    if len(sys.argv) > 1 and sys.argv[1] == '-p':
        calculator_prompt()
        sys.exit()
    p = CalcParser()
    #
    # Demo / smoke test. Only the first two lines are active; with the
    # '#~' lines re-enabled, the final line is expected to print 42.
    #
    p.calc('set joe = 4 - 5 - 1') # NOTE(review): annotated '# 0' originally, but the while-loop in _arith_expr is left-associative, giving -2 — confirm intent
    print p.calc('joe')
    #~ p.calc('set mar = joe + 2 ** 4 * -3') # -48
    #~ p.calc('set pie = 2 ** 3 ** 2') # 512
    #~ p.calc('if joe != 0 then set pie = 3') # pie stays 512
    #~ p.calc('if 1 == 1 then set k = 10 else set k = 20') # 10
    #~ p.calc('if k > 20 then set k = 12') # k stays 10
    #~ p.calc('if k <= 11 then set t = 0 else set t = 2') # 0
    #~ print p.calc('pie - (k * -mar) + k + t') # 42
| {
"repo_name": "eliben/code-for-blog",
"path": "2009/py_rd_parser_example/rd_parser_ebnf.py",
"copies": "1",
"size": "13524",
"license": "unlicense",
"hash": 5532351703273447000,
"line_mean": 28.5929978118,
"line_max": 68,
"alpha_frac": 0.4549689441,
"autogenerated": false,
"ratio": 3.7472984206151287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9695726482285452,
"avg_score": 0.0013081764859354237,
"num_lines": 457
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.