text stringlengths 0 1.05M | meta dict |
|---|---|
#add your custom response handler class to this module
import sys,json,csv,io
from pysnmp.entity.rfc3413 import mibvar
#the default handler , does nothing , just passes the raw output directly to STDOUT
class DefaultResponseHandler:
    """Default handler: renders SNMP query/trap varbinds as plain
    ``name = value`` text and writes them to STDOUT as Splunk XML stream
    events via print_xml_single_instance_mode().
    """
    def __init__(self,**args):
        # The default handler takes no configuration options.
        pass
    def __call__(self, response_object,destination,table=False,from_trap=False,trap_metadata=None,split_bulk_output=False,mibView=None):
        # response_object: iterable of (oid, value) pairs, or rows of them
        # for table (bulk) responses. destination: host field of the event.
        splunkevent =""
        #handle traps
        if from_trap:
            for oid, val in response_object:
                try:
                    # Resolve the numeric OID to MODULE::symbol.index form.
                    (symName, modName), indices = mibvar.oidToMibName(mibView, oid)
                    splunkevent +='%s::%s.%s = ' % (modName, symName,'.'.join([ v.prettyPrint() for v in indices]))
                except: # catch *all* exceptions
                    e = sys.exc_info()[1]
                    # Resolution failed: fall back to the raw OID.
                    splunkevent +='%s = ' % (oid)
                try:
                    # NOTE(review): if oidToMibName failed above, modName/symName
                    # are unbound here; the resulting NameError is absorbed by
                    # the bare except below and the raw value is printed.
                    decodedVal = mibvar.cloneFromMibValue(mibView,modName,symName,val)
                    splunkevent +='%s ' % (decodedVal.prettyPrint())
                except: # catch *all* exceptions
                    e = sys.exc_info()[1]
                    splunkevent +='%s ' % (val.prettyPrint())
            # Prefix the caller-supplied trap metadata before emitting.
            splunkevent = trap_metadata + splunkevent
            print_xml_single_instance_mode(destination, splunkevent)
        #handle tables
        elif table:
            for varBindTableRow in response_object:
                for name, val in varBindTableRow:
                    output_element = '%s = "%s" ' % (name.prettyPrint(), val.prettyPrint())
                    if split_bulk_output:
                        # One Splunk event per varbind.
                        print_xml_single_instance_mode(destination, output_element)
                    else:
                        # Accumulate the whole table into a single event.
                        splunkevent += output_element
            # NOTE(review): when split_bulk_output is True this still emits a
            # final (empty) accumulated event -- confirm intended.
            print_xml_single_instance_mode(destination, splunkevent)
        #handle scalars
        else:
            for name, val in response_object:
                splunkevent += '%s = "%s" ' % (name.prettyPrint(), val.prettyPrint())
            print_xml_single_instance_mode(destination, splunkevent)
class JSONFormatterResponseHandler:
    """Render SNMP responses as JSON wrapped in Splunk XML stream events.

    Table responses become a JSON list with one object per row; scalar
    responses become a single JSON object of name/value pairs.
    """
    def __init__(self,**args):
        # No configuration options are used.
        pass
    def __call__(self, response_object,destination,table=False,from_trap=False,trap_metadata=None,split_bulk_output=False,mibView=None):
        if table:
            # One dict per table row, keyed by the pretty-printed OID name.
            rows = [{name.prettyPrint(): val.prettyPrint() for name, val in varBindTableRow}
                    for varBindTableRow in response_object]
            print_xml_single_instance_mode(destination, json.dumps(rows))
        else:
            # Scalar response: a single flat mapping.
            scalars = {name.prettyPrint(): val.prettyPrint() for name, val in response_object}
            print_xml_single_instance_mode(destination, json.dumps(scalars))
# prints XML stream
def print_xml_single_instance_mode(server, event):
    """Write one Splunk single-instance XML stream event to STDOUT.

    `server` is reported as the event's host; `event` is XML-escaped first.
    """
    print "<stream><event><data>%s</data><host>%s</host></event></stream>" % (
        encodeXMLText(event), server)
# prints XML stream
def print_xml_multi_instance_mode(server, event, stanza):
print "<stream><event stanza=""%s""><data>%s</data><host>%s</host></event></stream>" % (
stanza, encodeXMLText(event), server)
# prints simple stream
def print_simple(s):
    """Write `s` followed by a blank line to STDOUT (non-XML output mode)."""
    print "%s\n" % s
#HELPER FUNCTIONS
# prints XML stream
def print_xml_stream(s):
    """Write one unbroken Splunk XML stream event (with <done/>) to STDOUT."""
    print "<stream><event unbroken=\"1\"><data>%s</data><done/></event></stream>" % encodeXMLText(s)
def encodeXMLText(text):
    """Escape XML special characters in `text` and strip newlines.

    The original's replacements were identity no-ops (the entity strings
    were mangled away); this restores the intended XML entity escaping.
    """
    # '&' must be escaped first so it does not re-escape the other entities.
    text = text.replace("&", "&amp;")
    text = text.replace("\"", "&quot;")
    text = text.replace("'", "&apos;")
    text = text.replace("<", "&lt;")
    text = text.replace(">", "&gt;")
    text = text.replace("\n", "")
    return text
"repo_name": "Jaykul/SplunkModularInputsPythonFramework",
"path": "implementations/snmp/bin/responsehandlers.py",
"copies": "3",
"size": "4127",
"license": "apache-2.0",
"hash": 1244122111152573000,
"line_mean": 39.4705882353,
"line_max": 144,
"alpha_frac": 0.5350133269,
"autogenerated": false,
"ratio": 4.0740375123395856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6109050839239586,
"avg_score": null,
"num_lines": null
} |
# Add your metrics here
import pdb
import math
import scipy.stats
from itertools import chain
from parse import parse_sbm_results, parse_chip_seq, parse_merlin
from plots import plot_p_vals, plot_p_vals_both
def score_gene_weights(test_vals, true_vals, genes, tfs_to_genes):
    """Score predicted TF->gene sets against a gold standard.

    For every key present in both mappings, compute the hypergeometric
    survival-function p-value of the overlap between the predicted and true
    gene lists (universe size = len(genes)). Non-NaN p-values are collected
    and returned; for every strictly positive p-value, log10(p) is also
    appended to tfs_to_genes[key].

    Returns:
        list: the non-NaN p-values, one per shared key.
    """
    universe_size = len(genes)
    p_vals = []
    for key in set(test_vals.keys()) & set(true_vals.keys()):
        predicted = test_vals[key]
        actual = true_vals[key]
        overlap = len(set(predicted) & set(actual))
        p_val = scipy.stats.hypergeom.sf(overlap, universe_size,
                                         len(actual), len(predicted))
        if math.isnan(p_val):
            continue
        p_vals.append(p_val)
        if p_val == 0.0:
            # log10 of zero is undefined; skip the per-TF record.
            continue
        tfs_to_genes.setdefault(key, []).append(math.log(p_val, 10))
    return p_vals
def bin_results(p_vals):
    """Histogram p-values into decades.

    bins[i] counts values in [10**-i, 10**-(i-1)); bins[0] counts values
    that are 0 or >= 1. The original indexed past the end of the ten bins
    (IndexError) for any p-value below 1e-9; such values are now clamped
    into the last bin.

    Returns:
        list: ten integer counts.
    """
    bins = [0] * 10
    for p_val in p_vals:
        i = 0
        # Stop at i == 9 so extremely small p-values land in the last bin.
        while p_val < 1 and p_val != 0 and i < 9:
            i += 1
            p_val *= 10
        bins[i] += 1
    return bins
# This method makes the 'universe' of both the SBM and ChIP-seq results the same
def remove_unique_genes(sbm_results, chip_results):
    """Restrict both result mappings to the genes they share.

    The shared universe is the intersection of all genes appearing in the
    values of both mappings. Entries whose key is itself a member of the
    universe are kept with their gene lists intersected against it; all
    other entries are dropped.

    Returns:
        (dict, dict, set): filtered SBM results, filtered ChIP results,
        and the shared gene universe.
    """
    universe = set(chain(*sbm_results.values())) & set(chain(*chip_results.values()))

    def _restrict(results):
        # Keep only keys inside the universe; intersect each value with it.
        return {key: list(set(val) & universe)
                for key, val in results.items()
                if key in universe}

    return _restrict(sbm_results), _restrict(chip_results), universe
def evaluate_network():
    """Compare SBM and MERLIN network predictions against ChIP-seq data.

    Scores each method's TF->gene predictions with hypergeometric p-values
    (after restricting each comparison to a shared gene universe), writes
    the per-method p-value plots, then plots SBM vs MERLIN log-p-values for
    every TF scored by both methods.
    """
    tfs, genes, sbm_results, sbm_module_to_gene, \
        sbm_gene_to_module = parse_sbm_results()
    chip_results = parse_chip_seq()
    merlin_results = parse_merlin()
    # Compare to SBM
    sbm_results, chip_results, genes = \
        remove_unique_genes(sbm_results, chip_results)
    tfs_to_genes = {}
    p_vals = score_gene_weights(sbm_results, chip_results, genes, tfs_to_genes)
    plot_p_vals(p_vals, 'Stochastic Block Model')
    # Compare to MERLIN
    # NOTE(review): chip_results was already filtered against the SBM
    # universe above, so the MERLIN comparison uses the reduced ChIP-seq
    # set -- confirm this is intended.
    merlin_results, chip_results, genes = \
        remove_unique_genes(merlin_results, chip_results)
    p_vals = score_gene_weights(merlin_results, chip_results, genes, tfs_to_genes)
    plot_p_vals(p_vals, 'MERLIN')
    sbm_p_vals = []
    merlin_p_vals = []
    # tfs_to_genes holds [sbm_log_p, merlin_log_p] for TFs scored by both
    # runs. (iteritems: this module targets Python 2.)
    for tf, value in tfs_to_genes.iteritems():
        if len(value) == 2:
            sbm_p_vals.append(value[0])
            merlin_p_vals.append(value[1])
    plot_p_vals_both(sbm_p_vals, merlin_p_vals, 'p values of merlin vs sbm')
def main():
    """Script entry point: run the full network evaluation."""
    evaluate_network()
if __name__ == '__main__':
    main()
| {
"repo_name": "kevintee/Predicting-Gene-Networks",
"path": "src/metrics.py",
"copies": "1",
"size": "3027",
"license": "mit",
"hash": 2830796900812586500,
"line_mean": 30.53125,
"line_max": 82,
"alpha_frac": 0.5996035679,
"autogenerated": false,
"ratio": 3.1109969167523124,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4210600484652312,
"avg_score": null,
"num_lines": null
} |
# Add your Python code here. E.g.
from microbit import *
from bitbot import *
from neopixel import *
def dark_left():
    """Report darkness on the left side: True while button B is released.

    NOTE(review): buttons stand in for light sensors here -- confirm wiring.
    """
    pressed = button_b.is_pressed()
    return not pressed

def dark_right():
    """Report darkness on the right side: True while button A is released."""
    pressed = button_a.is_pressed()
    return not pressed
# Twelve-LED NeoPixel strip on pin 13; start with a warm amber glow.
np = NeoPixel(pin13,12)
for i in range(0,12):
    np[i] = (30,10,0)
np.show()
# Main drive loop: steer toward whichever side reports dark, mirroring the
# state on the display and the NeoPixel strip.
# NOTE(review): set_speed(left, right) semantics assumed from the bitbot
# library -- confirm.
while True:
    if dark_left() and dark_right():
        # Both sides dark: drive straight ("B" = both), LEDs white.
        for i in range(0,12):
            np[i] = (100,100,100)
        np.show()
        display.show("B")
        set_speed(40,40)
        sleep(10)
    elif dark_right():
        # Only right side dark: LEDs blue, show "R".
        for i in range(0,12):
            np[i] = (0,0,100)
        np.show()
        display.show("R")
        set_speed(40,0)
        sleep(10)
    elif dark_left():
        # Only left side dark: LEDs red, show "L".
        for i in range(0,12):
            np[i] = (30,0,0)
        np.show()
        set_speed(0,40)
        sleep(10)
        display.show("L")
    else:
        # Neither side dark: stop ("N" = none), LEDs yellow.
        display.show("N")
        for i in range(0,12):
            np[i] = (30,30,0)
        np.show()
        set_speed(0,0)
        sleep(10)
| {
"repo_name": "mathisgerdes/microbit-macau",
"path": "Baptist/Jason/car_driving_with_movement_changing_color.py",
"copies": "1",
"size": "1029",
"license": "mit",
"hash": 794072955607178800,
"line_mean": 18.8076923077,
"line_max": 36,
"alpha_frac": 0.4742468416,
"autogenerated": false,
"ratio": 3.0534124629080117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40276593045080117,
"avg_score": null,
"num_lines": null
} |
# Add your Python code here. E.g.
from microbit import *
from bitbot import *
from neopixel import *
# Twelve-LED NeoPixel strip on pin 13; start with a purple glow.
np = NeoPixel(pin13,12)
for i in range(0,12):
    np[i] = (40,0,40)
np.show()
def dark_left():
    """Report darkness on the left side: True while button B is released.

    NOTE(review): buttons stand in for light sensors here -- confirm wiring.
    """
    pressed = button_b.is_pressed()
    return not pressed

def dark_right():
    """Report darkness on the right side: True while button A is released."""
    pressed = button_a.is_pressed()
    return not pressed
# Main drive loop: steer toward whichever side reports dark, mirroring the
# state on the display and the NeoPixel strip.
# NOTE(review): set_speed(left, right) semantics assumed from the bitbot
# library -- confirm.
while True:
    if dark_left() and dark_right() :
        # Both sides dark: drive straight ("B" = both), LEDs purple.
        for i in range(0,12):
            np[i] = (40,0,40)
        np.show()
        display.show("B")
        set_speed(20,20)
        sleep(10)
    elif dark_right():
        # Only right side dark: LEDs yellow, show "R".
        for i in range(0,12):
            np[i] = (30,30,0)
        np.show()
        display.show("R")
        set_speed(60,10)
        sleep(10)
    elif dark_left():
        # Only left side dark: LEDs blue, show "L".
        for i in range(0,12):
            np[i] = (0,0,40)
        np.show()
        display.show("L")
        set_speed(15,75)
        sleep(10)
    else:
        # Neither side dark: stop ("N" = none), LEDs yellow-green.
        for i in range(0,12):
            np[i] = (20,30,0)
        np.show()
        display.show("N")
        set_speed(0,0)
        sleep(10)
| {
"repo_name": "mathisgerdes/microbit-macau",
"path": "Baptist/Hannah/car_driving_with_movement.py",
"copies": "1",
"size": "1029",
"license": "mit",
"hash": -3683501386880767000,
"line_mean": 17.375,
"line_max": 37,
"alpha_frac": 0.472303207,
"autogenerated": false,
"ratio": 3.0353982300884956,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40077014370884956,
"avg_score": null,
"num_lines": null
} |
"""A decision forest toy example.
Trains and evaluates a decision forest classifier on a 2-D point cloud.
"""
import argparse
import datetime
import os
import re
import sys
import numpy as np
from PIL import Image, ImageDraw
import sklearn.ensemble as sk_ensemble
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), '..')) # append the MIALab root directory to Python path
# fixes the ModuleNotFoundError when executing main.py in the console after code changes (e.g. git pull)
# somehow pip install does not keep track of packages
class DataCollection:
    """Represents a collection of data (features) with associated labels (if any)."""

    UNKNOWN_LABEL = -1  # sentinel label meaning "no label supplied"

    def __init__(self, dimension: int):
        """Initializes a new instance of the DataCollection class.

        Args:
            dimension (int): The dimension of the data (number of features).
        """
        self.dimension = dimension
        self.data = None    # becomes an (n, dimension) float32 array
        self.labels = None  # becomes an (n,) int32 array for labeled data

    def add_data(self, data, label: int = UNKNOWN_LABEL):
        """Adds data to the collection.

        Args:
            data (list of float): The data, e.g. [0.1, 0.2].
            label (int): The data's associated label.

        Raises:
            ValueError: If the data's dimension does not match the
                DataCollection's dimension.
        """
        if len(data) != self.dimension:
            raise ValueError('Data has not expected dimensionality')

        if self.data is not None:
            self.data = np.vstack([self.data, data]).astype(np.float32, copy=False)
        else:
            self.data = np.array(data, dtype=np.float32)

        if label != DataCollection.UNKNOWN_LABEL and self.labels is not None:
            self.labels = np.append(self.labels, [label]).astype(np.int32, copy=False)
        elif label != DataCollection.UNKNOWN_LABEL:
            self.labels = np.array([label], np.int32)

    def has_labels(self) -> bool:
        """Determines whether the data have labels associated.

        Returns:
            bool: True if the data have labels associated; otherwise, False.
        """
        return self.label_count() != 0

    def label_count(self) -> int:
        """Determines the number of distinct labels.

        Returns:
            int: The number of distinct labels (0 if no labeled data exists).
        """
        # The original called np.unique(None) when no labels were added,
        # which yields a one-element object array -- an empty collection
        # wrongly reported one label and has_labels() returned True.
        if self.labels is None:
            return 0
        return int(np.unique(self.labels).size)
class Reader:
    """Represents a point reader, which reads a list of points from a text file.

    The text file needs to have the following tab-separated format:

        1	231.293210	201.938881
        1	164.756169	162.208593
        2	859.625948	765.342651
        3	839.740553	228.076223

    Where the first column is the label y of the point. The second and third
    columns are x1 and x2, i.e. the features one and two (or in other words
    the (x, y) coordinates of the 2-D point).
    """

    @staticmethod
    def load(file_path) -> DataCollection:
        """Loads a file that contains 2-dimensional labeled data.

        Args:
            file_path: Path to the tab-separated data file.

        Returns:
            DataCollection: A data collection with points.
        """
        data = DataCollection(2)
        # The original leaked the file handle; a context manager closes it
        # even if a line fails to parse.
        with open(file_path, 'r') as file:
            for line in file:
                values = re.split(r'\t+', line)
                # File labels are one-based; store them zero-based.
                data.add_data([float(values[1]), float(values[2])], int(values[0]) - 1)
        return data
class Generator:
    """Generates a 2-D point grid with integer spacing and no labels."""

    @staticmethod
    def get_test_data(grid_size: int):
        """Gets testing data.

        Args:
            grid_size (int): The point cloud grid size.

        Returns:
            np.ndarray: A (grid_size**2, 2) float32 array of grid coordinates.
        """
        axis = np.linspace(0, grid_size - 1, grid_size)
        xs, ys = np.meshgrid(axis, axis)
        # Flatten the two coordinate planes into an (n, 2) point list.
        stacked = np.append(xs.reshape(-1, 1), ys.reshape(-1, 1), axis=1)
        return stacked.astype(np.float32, copy=False)

    @staticmethod
    def get_test_data_with_label(grid_size: int):
        """Gets testing data together with all-zero labels.

        Args:
            grid_size (int): The point cloud grid size.

        Returns:
            np.ndarray, np.ndarray: Arrays of test data and int32 zero labels.
        """
        points = Generator.get_test_data(grid_size)
        zero_labels = np.zeros((points.shape[0], 1)).astype(np.int32)
        return points, zero_labels
class Plotter:
    """Represents a point plotter, which plots a list of points as image."""

    def __init__(self):
        """Initializes a new Plotter with a white 1000x1000 RGB canvas.

        The plotter supports as many class labels as there are entries in
        label_colors (four by default).
        """
        self.image = Image.new('RGB', (1000, 1000), (255, 255, 255))
        self.draw = ImageDraw.Draw(self.image)
        # default label colors (add more colors to support more labels)
        self.label_colors = [(183, 170, 8),
                             (194, 32, 14),
                             (4, 154, 10),
                             (13, 26, 188)]

    def save(self, file_name: str):
        """Saves the plot as PNG image.

        Args:
            file_name (str): The file name; '.png' is appended if missing.
        """
        if not file_name.lower().endswith('.png'):
            file_name += '.png'
        self.image.save(file_name, 'PNG')

    def plot_points(self, data, labels, radius=3):
        """Plots labeled points as filled, outlined circles.

        Args:
            data (np.ndarray): The data (point coordinates) to plot.
            labels (np.ndarray): The data's associated labels.
            radius (int): The point radius in pixels.
        """
        it = np.nditer(labels, flags=['f_index'])
        while not it.finished:
            value = data[it.index]
            x = int(value[0])
            y = int(value[1])
            label = labels[it.index]
            fill = self.label_colors[label]
            self.draw.ellipse((x - radius, y - radius, x + radius, y + radius), fill=fill, outline=(0, 0, 0))
            it.iternext()

    def plot_pixels_proba(self, data, probabilities):
        """Plots per-point class probabilities as blended single pixels.

        Args:
            data (np.ndarray): The data (probability coordinates) to plot.
            probabilities (np.ndarray): The data's associated probabilities.
        """
        # The original also created an np.nditer here and advanced it once
        # per pixel, but never read from it; that dead iterator was removed.
        for idx in range(probabilities.shape[0]):
            value = data[idx]
            x = int(value[0])
            y = int(value[1])
            fill = self.get_color(probabilities[idx])
            self.draw.point((x, y), fill=fill)

    def get_color(self, label_probabilities: np.ndarray):
        """Gets the color for a probability vector.

        Args:
            label_probabilities (np.ndarray): The per-class probabilities.

        Returns:
            (int, int, int): An RGB color code: the probability-weighted
            blend of the class colors.
        """
        color = np.array([0.0, 0.0, 0.0])
        for i in range(label_probabilities.size):
            color += np.multiply(np.array(self.label_colors[i]), label_probabilities[i])
        return int(color[0]), int(color[1]), int(color[2])
def main(result_dir: str, input_file: str):
    """Trains a decision forest classifier on a two-dimensional point cloud.

    Args:
        result_dir (str): Directory where the rendered result PNG is written.
        input_file (str): Text file with labeled 2-D training points.
    """
    # generate result directory
    os.makedirs(result_dir, exist_ok=True)
    # read file with training data
    data = Reader.load(input_file)
    # generate testing data: one point per pixel of the 1000x1000 canvas
    test_data = Generator.get_test_data(1000)
    # initialize the forest
    forest = sk_ensemble.RandomForestClassifier(max_features=data.dimension,
                                                n_estimators=10,
                                                max_depth=10)
    # train the forest
    print('Decision forest training...')
    forest.fit(data.data, data.labels)
    # apply the forest to test data
    print('Decision forest testing...')
    # NOTE(review): `predictions` is never used below -- kept for parity.
    predictions = forest.predict(test_data)
    probabilities = forest.predict_proba(test_data)
    # let's have a look at the feature importance
    print(forest.feature_importances_)
    # plot the result: probability shading plus the training points,
    # saved under a timestamped file name
    print('Plotting...')
    t = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    plotter = Plotter()
    plotter.plot_pixels_proba(test_data, np.array(probabilities))
    plotter.plot_points(data.data, data.labels)
    plotter.save(os.path.join(result_dir, 'result_{}.png'.format(t)))
if __name__ == "__main__":
"""The program's entry point."""
script_dir = os.path.dirname(sys.argv[0])
parser = argparse.ArgumentParser(description='2-dimensional point classification with decision forests')
parser.add_argument(
'--result_dir',
type=str,
default=os.path.normpath(os.path.join(script_dir, 'toy-example-result')),
help='Directory for results.'
)
parser.add_argument(
'--input_file',
type=str,
default=os.path.normpath(os.path.join(script_dir, '../data/exp1_n4.txt')),
help='Input file with 2-dimensional coordinates and corresponding label.'
)
args = parser.parse_args()
main(args.result_dir, args.input_file)
| {
"repo_name": "istb-mia/MIALab",
"path": "bin/toy_example.py",
"copies": "1",
"size": "9158",
"license": "apache-2.0",
"hash": 3607636775924592600,
"line_mean": 31.020979021,
"line_max": 119,
"alpha_frac": 0.5958724612,
"autogenerated": false,
"ratio": 3.9474137931034483,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5043286254303448,
"avg_score": null,
"num_lines": null
} |
# A deck with 26 red and 26 black.
# Payoff: Red = +1, Black = -1
# Can stop any time.
# Find the best strategy
# Stop when payoff reach k and remaining cards only x left
import random
class Deck:
    """A deck of payoff cards: "plus" cards pay +1, "minus" cards pay -1."""

    def __init__(self, plus, minus):
        self.plus_cards = plus    # +1 cards remaining
        self.minus_cards = minus  # -1 cards remaining

    def draw(self):
        """Draw one card uniformly at random and return its payoff.

        Decrements the matching card count and returns +1 or -1; returns 0
        when the deck is empty.
        """
        remaining = self.plus_cards + self.minus_cards
        if remaining <= 0:
            return 0
        plus_probability = self.plus_cards / remaining
        if random.uniform(0, 1) < plus_probability:
            self.plus_cards -= 1
            return 1
        self.minus_cards -= 1
        return -1
def terminate_condition(deck, payoff):
    """Decide whether to stop playing.

    Stop when the deck is exhausted or the accumulated payoff exceeds 3.

    Args:
        deck: object with plus_cards and minus_cards counts.
        payoff (int): accumulated payoff so far.

    Returns:
        bool: True when the game should stop.
    """
    deck_count = deck.plus_cards + deck.minus_cards
    if deck_count == 0:
        return True
    elif payoff > 3:
        return True
    else:
        # The original had a bare `False` expression here (no `return`),
        # falling through to an implicit None; make the return explicit.
        return False
def play_game(terminate_condition_method, *args):
    """Play one game with a fresh 26/26 deck.

    Draws cards until terminate_condition_method(deck, payoff, *args) is
    truthy, re-evaluating the condition against the live deck and payoff on
    every iteration. (The original evaluated a pre-computed truth value
    once, so the loop could never react to the game state.)

    Args:
        terminate_condition_method: callable(deck, payoff, *args) -> bool.
        *args: extra arguments forwarded to the condition.

    Returns:
        int: the final payoff.
    """
    payoff = 0
    deck = Deck(26, 26)
    while not terminate_condition_method(deck, payoff, *args):
        payoff += deck.draw()
    return payoff
def play_many_games(count):
    """Play `count` games and return the average payoff.

    The original passed `terminate_condition(deck, payoff)` -- calling the
    condition with names that do not exist in this scope (NameError). Pass
    the condition function itself; play_game invokes it with the live deck
    and payoff.

    Returns:
        float: mean payoff over `count` games.
    """
    total_payoff = 0
    for _ in range(count):
        total_payoff += play_game(terminate_condition)
    return total_payoff / count
if __name__ == '__main__':
    # Estimate the expected payoff of the "stop once payoff exceeds 3"
    # strategy by Monte Carlo simulation.
    print(play_many_games(10000))
| {
"repo_name": "laichunpongben/machine_learning",
"path": "deck.py",
"copies": "1",
"size": "1280",
"license": "apache-2.0",
"hash": 513666909234187600,
"line_mean": 26.2340425532,
"line_max": 68,
"alpha_frac": 0.5796875,
"autogenerated": false,
"ratio": 3.506849315068493,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4586536815068493,
"avg_score": null,
"num_lines": null
} |
"""A decoder for LZPT compressed image files"""
# Kernel source: arch/arm/include/asm/mach/warmboot.h
# Kernel source: arch/arm/mach-cxd90014/include/mach/cmpr.h
import io
from stat import *
from . import *
from .. import lz77
from ..io import *
from ..util import *
# struct wbi_lzp_hdr
LzptHeader = Struct('LzptHeader', [
('magic', Struct.STR % 4),
('blockSize', Struct.INT32),
('tocOffset', Struct.INT32),
('tocSize', Struct.INT32),
])
# CMPR_LZPART_MAGIC
lzptHeaderMagic = b'TPZL'
# struct wbi_lzp_entry
LzptTocEntry = Struct('LzptTocEntry', [
('offset', Struct.INT32),
('size', Struct.INT32),
])
def isLzpt(file):
    """Checks whether the LZPT header magic is present at the start of *file*."""
    header = LzptHeader.unpack(file)
    if not header:
        return header
    return header.magic == lzptHeaderMagic
def readLzpt(file):
    """Decodes an LZPT image and yields its contents as a single file entry."""
    header = LzptHeader.unpack(file)
    if header.magic != lzptHeaderMagic:
        raise Exception('Wrong magic')
    # Read every TOC entry; each one describes one compressed block.
    tocEntries = [LzptTocEntry.unpack(file, header.tocOffset + offset) for offset in range(0, header.tocSize, LzptTocEntry.size)]
    def generateChunks():
        # Lazily decompress: each block holds consecutive LZ77 streams that
        # together expand to 2**blockSize bytes.
        for entry in tocEntries:
            file.seek(entry.offset)
            block = io.BytesIO(file.read(entry.size))
            read = 0
            while read < 2 ** header.blockSize:
                contents = lz77.inflateLz77(block)
                yield contents
                read += len(contents)
    # Expose the decompressed image as one regular file with unknown size.
    yield UnixFile(
        path = '',
        size = -1,
        mtime = 0,
        mode = S_IFREG,
        uid = 0,
        gid = 0,
        contents = ChunkedFile(generateChunks),
    )
| {
"repo_name": "ma1co/fwtool.py",
"path": "fwtool/archive/lzpt.py",
"copies": "1",
"size": "1468",
"license": "mit",
"hash": 8148032675510572000,
"line_mean": 22.6774193548,
"line_max": 126,
"alpha_frac": 0.6927792916,
"autogenerated": false,
"ratio": 2.906930693069307,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40997099846693064,
"avg_score": null,
"num_lines": null
} |
''' A decorator-based method of constructing IPython magics with `argparse`
option handling.
New magic functions can be defined like so::
from IPython.core.magic_arguments import (argument, magic_arguments,
parse_argstring)
@magic_arguments()
@argument('-o', '--option', help='An optional argument.')
@argument('arg', type=int, help='An integer positional argument.')
def magic_cool(self, arg):
""" A really cool magic command.
"""
args = parse_argstring(magic_cool, arg)
...
The `@magic_arguments` decorator marks the function as having argparse arguments.
The `@argument` decorator adds an argument using the same syntax as argparse's
`add_argument()` method. More sophisticated uses may also require the
`@argument_group` or `@kwds` decorator to customize the formatting and the
parsing.
Help text for the magic is automatically generated from the docstring and the
arguments::
In[1]: %cool?
%cool [-o OPTION] arg
A really cool magic command.
positional arguments:
arg An integer positional argument.
optional arguments:
-o OPTION, --option OPTION
An optional argument.
Inheritance diagram:
.. inheritance-diagram:: IPython.core.magic_arguments
:parts: 3
'''
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011, IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# Our own imports
from IPython.external import argparse
from IPython.core.error import UsageError
from IPython.utils.process import arg_split
from IPython.utils.text import dedent
class MagicHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """ A HelpFormatter which dedents but otherwise preserves indentation.
    """
    def _fill_text(self, text, width, indent):
        # Dedent the (docstring-derived) description before the default fill.
        return argparse.RawDescriptionHelpFormatter._fill_text(self, dedent(text), width, indent)
class MagicArgumentParser(argparse.ArgumentParser):
    """ An ArgumentParser tweaked for use by IPython magics.

    Differences from a plain ArgumentParser: errors raise UsageError instead
    of exiting the process, and -h/--help is not added automatically (the
    help text is attached to the magic's docstring by construct_parser).
    """
    def __init__(self,
                 prog=None,
                 usage=None,
                 description=None,
                 epilog=None,
                 parents=None,
                 formatter_class=MagicHelpFormatter,
                 prefix_chars='-',
                 argument_default=None,
                 conflict_handler='error',
                 add_help=False):
        # Avoid a mutable default argument for `parents`.
        if parents is None:
            parents = []
        super(MagicArgumentParser, self).__init__(prog=prog, usage=usage,
            description=description, epilog=epilog,
            parents=parents, formatter_class=formatter_class,
            prefix_chars=prefix_chars, argument_default=argument_default,
            conflict_handler=conflict_handler, add_help=add_help)

    def error(self, message):
        """ Raise a catchable error instead of exiting.
        """
        raise UsageError(message)

    def parse_argstring(self, argstring):
        """ Split a string into an argument list and parse that argument list.
        """
        argv = arg_split(argstring)
        return self.parse_args(argv)
def construct_parser(magic_func):
    """ Construct an argument parser using the function decorations.

    Replays the @argument/@argument_group/@defaults decorations recorded on
    `magic_func` onto a new MagicArgumentParser, then replaces the magic's
    docstring with the generated help text.
    """
    kwds = getattr(magic_func, 'argcmd_kwds', {})
    if 'description' not in kwds:
        kwds['description'] = getattr(magic_func, '__doc__', None)
    arg_name = real_name(magic_func)
    parser = MagicArgumentParser(arg_name, **kwds)
    # Reverse the list of decorators in order to apply them in the
    # order in which they appear in the source.
    group = None
    for deco in magic_func.decorators[::-1]:
        # argument_group decorators return the new group; later arguments
        # are then added to it.
        result = deco.add_to_parser(parser, group)
        if result is not None:
            group = result
    # Replace the starting 'usage: ' with IPython's %.
    help_text = parser.format_help()
    if help_text.startswith('usage: '):
        help_text = help_text.replace('usage: ', '%', 1)
    else:
        help_text = '%' + help_text
    # Replace the magic function's docstring with the full help text.
    magic_func.__doc__ = help_text
    return parser
def parse_argstring(magic_func, argstring):
    """ Parse the string of arguments for the given magic function.

    Delegates to the MagicArgumentParser attached to the magic by the
    @magic_arguments decorator.
    """
    parser = magic_func.parser
    return parser.parse_argstring(argstring)
def real_name(magic_func):
    """ Find the real name of the magic.

    An explicit `argcmd_name` (set by @magic_arguments(name=...)) wins;
    otherwise the function name is used, with any 'magic_' prefix removed.
    """
    name = magic_func.__name__
    prefix = 'magic_'
    if name.startswith(prefix):
        name = name[len(prefix):]
    return getattr(magic_func, 'argcmd_name', name)
class ArgDecorator(object):
    """ Base class for decorators that attach ArgumentParser information to
    a method.
    """
    def __call__(self, func):
        # Lazily initialize the bookkeeping attributes on first decoration,
        # then record this decorator for later replay by construct_parser.
        if not getattr(func, 'has_arguments', False):
            func.has_arguments = True
            func.decorators = []
        func.decorators.append(self)
        return func

    def add_to_parser(self, parser, group):
        """ Add this object's information to the parser, if necessary.
        """
        # The base class contributes nothing; subclasses override this.
        return None
class magic_arguments(ArgDecorator):
    """ Mark the magic as having argparse arguments and possibly adjust the
    name.
    """
    def __init__(self, name=None):
        # Optional override for the magic's name (stored as argcmd_name).
        self.name = name

    def __call__(self, func):
        if not getattr(func, 'has_arguments', False):
            func.has_arguments = True
            func.decorators = []
        if self.name is not None:
            func.argcmd_name = self.name
        # This should be the first decorator in the list of decorators, thus the
        # last to execute. Build the parser.
        func.parser = construct_parser(func)
        return func
class ArgMethodWrapper(ArgDecorator):
    """
    Base class to define a wrapper for ArgumentParser method.

    Child class must define either `_method_name` or `add_to_parser`.
    """
    _method_name = None

    def __init__(self, *args, **kwds):
        # Record the call so it can be replayed on the parser later.
        self.args = args
        self.kwds = kwds

    def add_to_parser(self, parser, group):
        """ Add this object's information to the parser.
        """
        # When an argument group is open, add to it instead of the parser.
        target = group if group is not None else parser
        getattr(target, self._method_name)(*self.args, **self.kwds)
        return None
class argument(ArgMethodWrapper):
    """ Store arguments and keywords to pass to add_argument().

    Instances also serve to decorate command methods.
    """
    # Replayed as parser.add_argument(*args, **kwds) by ArgMethodWrapper.
    _method_name = 'add_argument'
class defaults(ArgMethodWrapper):
    """ Store arguments and keywords to pass to set_defaults().

    Instances also serve to decorate command methods.
    """
    # Replayed as parser.set_defaults(*args, **kwds) by ArgMethodWrapper.
    _method_name = 'set_defaults'
class argument_group(ArgMethodWrapper):
    """ Store arguments and keywords to pass to add_argument_group().

    Instances also serve to decorate command methods.
    """
    def add_to_parser(self, parser, group):
        """ Add this object's information to the parser and return the new
        group, so subsequent arguments are added inside it.
        """
        new_group = parser.add_argument_group(*self.args, **self.kwds)
        return new_group
class kwds(ArgDecorator):
    """ Provide other keywords to the sub-parser constructor.
    """
    def __init__(self, **kwds):
        # Stored on the magic as argcmd_kwds; consumed by construct_parser.
        self.kwds = kwds

    def __call__(self, func):
        decorated = super(kwds, self).__call__(func)
        decorated.argcmd_kwds = self.kwds
        return decorated
# Names exported via `from IPython.core.magic_arguments import *`.
# NOTE(review): the `defaults` decorator is not exported here -- confirm
# whether that omission is intentional.
__all__ = ['magic_arguments', 'argument', 'argument_group', 'kwds',
           'parse_argstring']
| {
"repo_name": "marcoantoniooliveira/labweb",
"path": "oscar/lib/python2.7/site-packages/IPython/core/magic_arguments.py",
"copies": "2",
"size": "7640",
"license": "bsd-3-clause",
"hash": -7192829777159592000,
"line_mean": 30.0569105691,
"line_max": 97,
"alpha_frac": 0.6189790576,
"autogenerated": false,
"ratio": 4.3359818388195235,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5954960896419523,
"avg_score": null,
"num_lines": null
} |
# A decorator that lets you require HTTP basic authentication from visitors.
# Kevin Kelley <kelleyk@kelleyk.net> 2011
# Use however makes you happy, but if it breaks, you get to keep both pieces.
# Post with explanation, commentary, etc.:
# http://kelleyk.com/post/7362319243/easy-basic-http-authentication-with-tornado
import base64, logging
import tornado.web
import twilio # From https://github.com/twilio/twilio-python
def require_basic_auth(handler_class):
    """Class decorator enforcing HTTP Basic auth on a Tornado handler.

    Wraps handler_class._execute so requests without a Basic
    ``Authorization`` header are answered with 401 + WWW-Authenticate and
    never reach the handler methods. On success the decoded credentials are
    passed to the handler methods as ``basicauth_user``/``basicauth_pass``
    keyword arguments.
    """
    def wrap_execute(handler_execute):
        def require_basic_auth(handler, kwargs):
            auth_header = handler.request.headers.get('Authorization')
            if auth_header is None or not auth_header.startswith('Basic '):
                # Challenge the client and finish the request immediately.
                handler.set_status(401)
                handler.set_header('WWW-Authenticate', 'Basic realm=Restricted')
                handler._transforms = []
                handler.finish()
                return False
            # NOTE: base64.decodestring is the Python 2 spelling (removed in
            # Python 3.9); this module targets Python 2.
            auth_decoded = base64.decodestring(auth_header[6:])
            # Split on the FIRST colon only: Basic-auth passwords may contain
            # colons, and the original maxsplit of 2 made such credentials
            # raise ValueError on unpacking.
            kwargs['basicauth_user'], kwargs['basicauth_pass'] = auth_decoded.split(':', 1)
            return True

        def _execute(self, transforms, *args, **kwargs):
            if not require_basic_auth(self, kwargs):
                return False
            return handler_execute(self, transforms, *args, **kwargs)
        return _execute

    handler_class._execute = wrap_execute(handler_class._execute)
    return handler_class
# Twilio account credentials (from https://www.twilio.com/user/account);
# the token is the shared secret used to validate request signatures below.
twilio_account_sid = 'INSERT YOUR ACCOUNT ID HERE'
twilio_account_token = 'INSERT YOUR ACCOUNT TOKEN HERE'
@require_basic_auth
class TwilioRequestHandler(tornado.web.RequestHandler):
    def post(self, basicauth_user, basicauth_pass):
        """
        Receive a Twilio request, return a TwiML response.

        basicauth_user/basicauth_pass are injected by @require_basic_auth
        from the request's Basic Authorization header.
        """
        # We check in two ways that it's really Twilio POSTing to this URL:
        # 1. Check that Twilio is sending the username and password we specified
        # for it at https://www.twilio.com/user/account/phone-numbers/incoming
        # 2. Check that Twilio has signed its request with our secret account token
        username = 'CONFIGURE USERNAME AT TWILIO.COM AND ENTER IT HERE'
        password = 'CONFIGURE PASSWORD AT TWILIO.COM AND ENTER IT HERE'
        if basicauth_user != username or basicauth_pass != password:
            raise tornado.web.HTTPError(401, "Invalid username and password for HTTP basic authentication")
        # Construct the URL to this handler.
        # self.request.full_url() doesn't work, because Twilio sort of has a bug:
        # We tell it to POST to our URL with HTTP Authentication like this:
        # http://username:password@b-date.me/api/twilio_request_handler
        # ... and Twilio uses *that* URL, with username and password included, as
        # part of its signature.
        # Also, if we're proxied by Nginx, then Nginx handles the HTTPS protocol and
        # connects to Tornado over HTTP
        protocol = 'https' if self.request.headers.get('X-Twilio-Ssl') == 'Enabled' else self.request.protocol
        url = '%s://%s:%s@%s%s' % (
            protocol, username, password, self.request.host, self.request.path,
        )
        # NOTE(review): twilio.Utils(...).validateRequest is the pre-3.x
        # twilio-python signature-validation API -- confirm library version.
        if not twilio.Utils(twilio_account_sid, twilio_account_token).validateRequest(
            url,
            # arguments has lists like { 'key': [ 'value', ... ] }, so flatten them
            {
                k: self.request.arguments[k][0]
                for k in self.request.arguments
            },
            self.request.headers.get('X-Twilio-Signature'),
        ):
            logging.error("Invalid Twilio signature to %s: %s" % (
                self.request.full_url(), self.request
            ))
            raise tornado.web.HTTPError(401, "Invalid Twilio signature")
        # Do your actual processing of Twilio's POST here, using self.get_argument()
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/577893_Securely_processing_Twilio_requests/recipe-577893.py",
"copies": "1",
"size": "3838",
"license": "mit",
"hash": -3116494462635759000,
"line_mean": 46.3827160494,
"line_max": 110,
"alpha_frac": 0.6406982804,
"autogenerated": false,
"ratio": 3.9607843137254903,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.510148259412549,
"avg_score": null,
"num_lines": null
} |
""" A decorator that solves the problem of default parameter values that become global data structures.
:Author: Arthur Goldberg <Arthur.Goldberg@mssm.edu>
:Date: 2016-10-01
:Copyright: 2016-2018, Karr Lab
:License: MIT
"""
from functools import wraps
# a map from parameter name prefix or suffix to data type
prefix_suffix_types = { 'list':'list', 'dict':'dict', 'set':'set'}
def typed(param):
    ''' Indicate whether `param` names its own data type

    Args:
        param (:obj:`str`): a variable name whose prefix or suffix might indicate its data type,
            which would be one of 'list', 'dict', or 'set'

    Returns:
        :obj:`boolean`: True if `param` indicates a data type
    '''
    type_names = prefix_suffix_types.keys()
    # str.endswith/startswith accept a tuple of candidates.
    suffixes = tuple('_' + name for name in type_names)
    prefixes = tuple(name + '_' for name in type_names)
    return param.endswith(suffixes) or param.startswith(prefixes)
def none_to_empty(param, value):
    ''' If value is None, return an empty data structure whose type is indicated by param

    Args:
        param (:obj:`str`): a variable name whose prefix or suffix indicates its data type
        value (:obj:`obj`): a value, which might be None

    Returns:
        :obj:`obj`: value unmodified, or if value is None, an empty data structure whose
            type is indicated by param
    '''
    # Map the type-name strings in prefix_suffix_types directly to their
    # constructors; the original built a string like "list()" and eval'ed it,
    # which is both slower and needlessly risky.
    constructors = {'list': list, 'dict': dict, 'set': set}
    if value is None:
        for key in prefix_suffix_types.keys():
            if param.endswith('_' + key) or param.startswith(key + '_'):
                return constructors[prefix_suffix_types[key]]()
    return value
def default_mutable_params(mutable_args):
    """A function or method decorator that handles mutable optional parameters.

    Optional parameters with mutable default values like d and l in "def f( d={}, l=[])" have
    the awkward behavior that a global mutable data structure is created when the function (or
    method) is defined, that references to the parameter access this data structure, and that
    all calls to the function which do not provide the parameter refer to this data
    structure. This differs from the semantics naive Python programmers expect, which is that
    calls that don't provide the parameter initialize it as an empty data structure.

    Somewhat surprisingly, the Python Language Reference
    recommends (https://docs.python.org/3.5/reference/compound_stmts.html#function-definitions)
    that this behavior be fixed by defining the
    default value for such optional parameters as None, and setting the parameter as empty
    data structure if it is not provided (or is provided as None). However, this is cumbersome,
    especially if the function contains a large number of such parameters.

    This decorator transforms optional parameters whose default values are None into mutable
    data structures of the appropriate type. The parameters must have names whose prefix or
    suffix indicates their data type (as in so-called Hungarian or rudder notation). The mutable
    parameters are provided as a list to the decorator. The decorated function uses None as
    default values for these parameters. Calls to the decorated function replace optional
    parameters whose value is None with the appropriate empty data structure. For example,
    consider::

        @default_mutable_params( ['d_dict', 'list_l', 's_set'] )
        def test3( a, d_dict=None, list_l=None, s_set=None, l2=[4] )

    The call::

        test3( 1, d_dict={3}, list_l=None, s_set=None, l2=None )

    will be transformed into::

        test3( 1, d_dict={3}, list_l=[], s_set=set(), l2=None )

    where the values of ``list_l`` and ``s_set`` are local variables.

    Args:
        mutable_args (:obj:`list`): list of optional parameters whose default values are mutable
            data structures.

    Returns:
        :obj:`type`: the decorator

    Raises:
        :obj:`ValueError`: if an argument to @default_mutable_params does not indicate
            the type of its aggregate data structure; raised at decoration time.

    TODO(Arthur): An alternative way to define default_params_decorator and avoid the need to
    add the type to the name of each parameter and select parameters for the decorator,
    would be to copy the target function's signature as the decorator's argument, parse the
    signature with compile(), and then use the parse's AST to determine the optional parameters
    with default datastructures, and their data types.
    """
    def default_params_decorator(func):
        # Validate the decorator's arguments once, at decoration time, instead
        # of on every call: a name that does not encode its type is a
        # programming error and should fail as early as possible.
        for mutable in mutable_args:
            if not typed(mutable):
                raise ValueError("Arguments to @default_mutable_params must indicate their type in "
                    "the name prefix or suffix, but '{}' does not.".format(mutable))

        @wraps(func)
        def func_wrapper(*args, **kwargs):
            # NOTE(review): as in the original, the handled parameters must be
            # passed by keyword (or omitted); passing one positionally would
            # make func receive it twice.
            for mutable in mutable_args:
                # Replace an absent or None-valued keyword argument with an
                # empty data structure of the type encoded in its name.
                kwargs[mutable] = none_to_empty(mutable, kwargs.get(mutable))
            return func(*args, **kwargs)
        return func_wrapper
    return default_params_decorator
| {
"repo_name": "KarrLab/wc_utils",
"path": "wc_utils/util/decorate_default_data_struct.py",
"copies": "1",
"size": "5088",
"license": "mit",
"hash": 3963795341919177700,
"line_mean": 44.4285714286,
"line_max": 104,
"alpha_frac": 0.6778694969,
"autogenerated": false,
"ratio": 4.330212765957447,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5508082262857448,
"avg_score": null,
"num_lines": null
} |
"""A dedicated command line parser.
Remark: We do not use the parsers from the standard
library because they do not fit our needs
(especially passing arbitrary arguments to the
processed documents).
"""
class CmdLineArguments:
    """Holds every setting gathered from the command line."""

    def __init__(self):
        self.input_filename = None
        self.output_directory = "tango_output"
        self.output_type = None
        self.modes = set()
        self.code_active = False
        self.safe_mode = False
        self.banner = False
        self.help = False
        self.extra_options = dict()

    def __str__(self):
        return \
"""Banner = {}
Help = {}
Output type = {}
Modes = {}
Code Active = {}
Safe Mode = {}
Input file name = {}
Output directory = {}
Extra options = {}
""".format(self.banner,
           self.help,
           self.output_type,
           self.modes,
           self.code_active,
           self.safe_mode,
           self.input_filename,
           self.output_directory,
           self.extra_options)


class CmdLineError(Exception):
    """Raised when the command line cannot be parsed."""
    pass


class CmdLineParser:
    """Parses an argv-style list into a :class:`CmdLineArguments`.

    Unknown ``--xyz`` options are accepted and recorded in ``extra_options``
    (taking the following token as their value when it does not look like
    another option), so arbitrary arguments can be passed on to the
    processed documents.
    """

    def __init__(self, argv):
        self.argv = argv
        self.cmd_args = CmdLineArguments()

    def parse(self):
        """Consume every token of argv and return the populated arguments."""
        self.tango_cmd = self.argv[0]
        cmd_args = self.argv[1:]
        while cmd_args:
            cmd_args = self.parse_next(cmd_args)
        return self.cmd_args

    def _option_value(self, cmd_args, what):
        """Return the value token expected at cmd_args[0], or raise.

        `what` names the expected value in error messages
        (e.g. "mode", "output directory").
        """
        if not cmd_args:
            raise CmdLineError("Missing {}".format(what))
        if cmd_args[0].startswith("-"):
            raise CmdLineError("Missing {} before {}".format(what, cmd_args[0]))
        return cmd_args[0]

    def parse_next(self, cmd_args):
        """Handle the first token of cmd_args; return the remaining tokens."""
        next_opt = cmd_args[0]
        if next_opt == "--latex" or next_opt == "-l":
            if self.cmd_args.output_type is not None:
                raise CmdLineError("Mismatch {} option : output type already set".format(next_opt))
            self.cmd_args.output_type = "latex"
            return cmd_args[1:]
        elif next_opt == "--codeactive":
            self.cmd_args.code_active = True
            return cmd_args[1:]
        elif next_opt == "--banner":
            self.cmd_args.banner = True
            return cmd_args[1:]
        elif next_opt == "--help" or next_opt == "-h":
            self.cmd_args.help = True
            return cmd_args[1:]
        elif next_opt == "--mode" or next_opt == "-m":
            cmd_args = cmd_args[1:]
            modes_str = self._option_value(cmd_args, "mode")
            # Several modes may be given at once, comma-separated.
            self.cmd_args.modes = self.cmd_args.modes.union(set(modes_str.split(",")))
            return cmd_args[1:]
        elif next_opt == "--output" or next_opt == "-o":
            cmd_args = cmd_args[1:]
            self.cmd_args.output_directory = self._option_value(cmd_args, "output directory")
            return cmd_args[1:]
        elif next_opt == "--safe-mode" or next_opt == "-s":
            self.cmd_args.safe_mode = True
            return cmd_args[1:]
        elif not next_opt.startswith("-"):
            # A bare token is the (single) input file.
            if self.cmd_args.input_filename is not None:
                raise CmdLineError("Cannot handle '{}': input file already set".format(next_opt))
            self.cmd_args.input_filename = next_opt
            return cmd_args[1:]
        else:
            # Unknown option: record it, taking the next token as its value
            # unless that token looks like another option.
            next_opt = next_opt.lstrip('-')
            cmd_args = cmd_args[1:]
            if cmd_args and not cmd_args[0].startswith("-"):
                self.cmd_args.extra_options[next_opt] = cmd_args[0]
                return cmd_args[1:]
            self.cmd_args.extra_options[next_opt] = True
            return cmd_args
# Module-level holder for the parsed command line arguments, so any part of
# the program can reach them without threading a parameter through.
GLOBAL_COMMAND_LINE_ARGUMENTS = None


def set_global_command_line_arguments(args):
    """Record *args* as the program-wide parsed command line arguments."""
    global GLOBAL_COMMAND_LINE_ARGUMENTS
    GLOBAL_COMMAND_LINE_ARGUMENTS = args


def get_global_command_line_arguments():
    """Return the program-wide parsed command line arguments (or None)."""
    # Reading a module-level name needs no `global` declaration.
    return GLOBAL_COMMAND_LINE_ARGUMENTS
if __name__ == "__main__":
    # Manual smoke test: parse this process's own command line and dump it.
    import sys
    print("command line = {}".format(sys.argv))
    cmdline_parser = CmdLineParser(sys.argv)
    cmd_args = cmdline_parser.parse()
    print(cmd_args)
| {
"repo_name": "fredokun/tango",
"path": "src/tangolib/cmdparse.py",
"copies": "1",
"size": "4309",
"license": "mit",
"hash": 575910177794071040,
"line_mean": 27.9194630872,
"line_max": 99,
"alpha_frac": 0.5595265723,
"autogenerated": false,
"ratio": 3.8542039355992843,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49137305078992843,
"avg_score": null,
"num_lines": null
} |
"""A deep MNIST classifier using convolutional layers.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/pros
"""
# Disable linter warnings to maintain consistency with tutorial.
# pylint: disable=invalid-name
# pylint: disable=g-bad-import-order
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import util
from ps import ParameterServer
from util.log import log
FLAGS = None
def deepnn(x):
    """deepnn builds the graph for a deep net for classifying digits.

    Args:
        x: an input tensor with the dimensions (N_examples, 784), where 784 is the
            number of pixels in a standard MNIST image.

    Returns:
        A tuple (y_conv, keep_prob, vs). y_conv is a tensor of shape
        (N_examples, 10), with values equal to the logits of classifying the
        digit into one of 10 classes (the digits 0-9). keep_prob is a scalar
        placeholder for the probability of dropout. vs is the list of
        tf.Variable objects created for the layers, in creation order
        (collected by weight_variable/bias_variable for the parameter server).
    """
    # Every variable created below is appended to this list.
    vs = []
    # Reshape to use within a convolutional neural net.
    # Last dimension is for "features" - there is only one here, since images
    # are grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
    x_image = tf.reshape(x, [-1, 28, 28, 1])

    # First convolutional layer - maps one grayscale image to 32 feature maps.
    W_conv1 = weight_variable([5, 5, 1, 32], vs)
    b_conv1 = bias_variable([32], vs)
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)

    # Pooling layer - downsamples by 2X.
    h_pool1 = max_pool_2x2(h_conv1)

    # Second convolutional layer -- maps 32 feature maps to 64.
    W_conv2 = weight_variable([5, 5, 32, 64], vs)
    b_conv2 = bias_variable([64], vs)
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)

    # Second pooling layer.
    h_pool2 = max_pool_2x2(h_conv2)

    # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
    # is down to 7x7x64 feature maps -- maps this to 1024 features.
    W_fc1 = weight_variable([7 * 7 * 64, 1024], vs)
    b_fc1 = bias_variable([1024], vs)
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    # Dropout - controls the complexity of the model, prevents co-adaptation of
    # features.
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # Map the 1024 features to 10 classes, one for each digit
    W_fc2 = weight_variable([1024, 10], vs)
    b_fc2 = bias_variable([10], vs)
    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    return y_conv, keep_prob, vs
def conv2d(x, W):
    """conv2d returns a 2d convolution layer with full stride (1x1, SAME padding)."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """max_pool_2x2 downsamples a feature map by 2X (2x2 window, stride 2)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
def weight_variable(shape, vs):
    """weight_variable generates a weight variable of a given shape.

    The created variable is appended to *vs* so callers can collect every
    trainable variable of the network.
    """
    # Truncated normal with a small stddev keeps initial activations sane.
    var = tf.Variable(tf.truncated_normal(shape, stddev=0.1))
    vs.append(var)
    return var
def bias_variable(shape, vs):
    """bias_variable generates a bias variable of a given shape.

    The created variable is appended to *vs* so callers can collect every
    trainable variable of the network.
    """
    # A slightly positive bias avoids dead ReLU units at the start.
    var = tf.Variable(tf.constant(0.1, shape=shape))
    vs.append(var)
    return var
# Number of minibatch training steps performed per call to run().
iter_total = 10
# Number of MNIST examples per minibatch.
batch_size = 200
def run(raw_data):
    """Train the CNN for `iter_total` minibatches as one worker iteration.

    Args:
        raw_data: a 3-tuple (message, worker_id, iteration_id) where message
            is a dict carrying at least 'train_id' and 'parallel_count'.
            NOTE(review): exact message schema comes from the caller — confirm.

    Returns:
        True on completion.
    """
    # Drop any graph left over from a previous call in this process.
    tf.reset_default_graph()
    message = raw_data[0]
    worker_id = raw_data[1]
    iteration_id = raw_data[2]
    train_id = message['train_id']
    parallel_count = message['parallel_count']
    # Global step offset so TensorBoard summaries from successive iterations
    # line up instead of overwriting each other.
    offset = int(iteration_id) * iter_total
    logs_path = '/tmp/tensorflow_logs/%s/%s/%d' % (
        util.yymmdd(), 'cnn_mnist', parallel_count)
    log.warn('Run cnn_mnist(%s, %s)' % (train_id, worker_id))
    mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
    x = tf.placeholder(tf.float32, [None, 784])
    y_ = tf.placeholder(tf.float32, [None, 10])
    y_conv, keep_prob, variables = deepnn(x)
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar("accuracy", accuracy)
    tf.summary.scalar('cross_entropy', cross_entropy)
    merged = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        summary_writer = tf.summary.FileWriter(
            logs_path, graph=tf.get_default_graph())
        # Pull the current shared weights from the parameter server before
        # training, and push them back afterwards.
        ps_conn = ParameterServer(
            sess, train_id, worker_id, iteration_id, variables, None,
            parallel_count)
        ps_conn.load_variables()
        for i in range(0, iter_total):
            batch = mnist.train.next_batch(batch_size)
            summary, acc, ce, _ = sess.run(
                [merged, accuracy, cross_entropy, train_step],
                feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
            summary_writer.add_summary(summary, offset + i)
        summary_writer.flush()
        summary_writer.close()
        ps_conn.save_variables()
        # summary, acc = sess.run(
        #     [merged, accuracy],
        #     feed_dict={
        #         x: mnist.test.images,
        #         y_: mnist.test.labels,
        #         keep_prob: 1.0})
        # print('acc: %s' % acc)
    return True
| {
"repo_name": "jclee81/sktacc",
"path": "sktps/ml/cnn_mnist.py",
"copies": "1",
"size": "5585",
"license": "apache-2.0",
"hash": 2219649487831803100,
"line_mean": 32.4431137725,
"line_max": 80,
"alpha_frac": 0.630438675,
"autogenerated": false,
"ratio": 3.213463751438435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.934353279533028,
"avg_score": 0.00007392622163081244,
"num_lines": 167
} |
# A demo for the IDsObjectPicker interface.
import win32clipboard
import pythoncom
from win32com.adsi import adsi
from win32com.adsi.adsicon import *
cf_objectpicker = win32clipboard.RegisterClipboardFormat(CFSTR_DSOP_DS_SELECTION_LIST)
def main():
    # Show the directory-service object picker dialog and print every
    # selected object. (Python 2 source: print statements are intentional.)
    hwnd = 0
    # Create an instance of the object picker.
    picker = pythoncom.CoCreateInstance(adsi.CLSID_DsObjectPicker,
                                        None,
                                        pythoncom.CLSCTX_INPROC_SERVER,
                                        adsi.IID_IDsObjectPicker)

    # Create our scope init info.
    siis = adsi.DSOP_SCOPE_INIT_INFOs(1)
    sii = siis[0]

    # Combine multiple scope types in a single array entry.
    sii.type = DSOP_SCOPE_TYPE_UPLEVEL_JOINED_DOMAIN | \
               DSOP_SCOPE_TYPE_DOWNLEVEL_JOINED_DOMAIN

    # Set uplevel and downlevel filters to include only computer objects.
    # Uplevel filters apply to both mixed and native modes.
    # Notice that the uplevel and downlevel flags are different.
    sii.filterFlags.uplevel.bothModes = DSOP_FILTER_COMPUTERS
    sii.filterFlags.downlevel = DSOP_DOWNLEVEL_FILTER_COMPUTERS

    # Initialize the interface.
    picker.Initialize(
        None, # Target is the local computer.
        siis, # scope infos
        DSOP_FLAG_MULTISELECT, # options
        ('objectGUID','displayName') ) # attributes to fetch

    do = picker.InvokeDialog(hwnd)
    # Extract the data from the IDataObject using the registered
    # CFSTR_DSOP_DS_SELECTION_LIST clipboard format.
    format_etc = (cf_objectpicker, None,
                  pythoncom.DVASPECT_CONTENT, -1,
                  pythoncom.TYMED_HGLOBAL)
    medium = do.GetData(format_etc)
    data = adsi.StringAsDS_SELECTION_LIST(medium.data)
    for item in data:
        name, klass, adspath, upn, attrs, flags = item
        print "Item", name
        print " Class:", klass
        print " AdsPath:", adspath
        print " UPN:", upn
        print " Attrs:", attrs
        print " Flags:", flags

if __name__=='__main__':
    main()
| {
"repo_name": "fernandoacorreia/DjangoWAWSLogging",
"path": "DjangoWAWSLogging/env/Lib/site-packages/pywin32-218-py2.7-win32.egg/win32comext/adsi/demos/objectPicker.py",
"copies": "35",
"size": "1981",
"license": "mit",
"hash": 939214841134219300,
"line_mean": 33.1551724138,
"line_max": 86,
"alpha_frac": 0.6370519939,
"autogenerated": false,
"ratio": 3.634862385321101,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# A demonstration of bubble sort and merge sort; p is a list of integers.
def bubblesort(p):
    """Sort p in place with plain bubble sort and return it."""
    for _ in range(len(p)):
        for i in range(len(p) - 1):
            if p[i] > p[i + 1]:
                # Pythonic tuple swap instead of a temp variable.
                p[i], p[i + 1] = p[i + 1], p[i]
    return p
# We can do better: as soon as a full pass makes no swap, the list is sorted.
def bubblesort2(p):
    """Sort p in place with early-exit bubble sort and return it."""
    swapped = True
    while swapped:
        swapped = False
        for i in range(len(p) - 1):
            if p[i] > p[i + 1]:
                p[i], p[i + 1] = p[i + 1], p[i]
                swapped = True
    return p
# Divide-and-conquer merge sort: split the list in half, recurse until the
# sublists are trivially sorted, then merge them back together.
def mergesort(p):
    """Return a new sorted copy of p; p itself is not modified.

    Uses floor division (//) for the midpoint so the index is an int under
    both Python 2 and Python 3 — plain / would yield a float on Python 3
    and break the slicing.
    """
    if len(p) < 2:
        # Base case: zero or one element is already sorted; return a copy.
        return p[:]
    middle = len(p) // 2
    # The beauty of recursion: keep breaking the list into smaller lists.
    left = mergesort(p[:middle])
    right = mergesort(p[middle:])
    return merge(left, right)


def merge(left, right):
    """Merge two sorted lists into a single sorted list."""
    result = []
    i, j = 0, 0
    # Take the smaller head element until one list is exhausted; <= keeps
    # the sort stable.
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    # Append whatever remains of the non-exhausted list.
    result.extend(left[i:])
    result.extend(right[j:])
    return result
| {
"repo_name": "crazyhottommy/some-unorganized-old-scripts",
"path": "python_scripts/bubble_and_merge_sort.py",
"copies": "1",
"size": "1817",
"license": "mit",
"hash": 2407485614822675500,
"line_mean": 26.9538461538,
"line_max": 123,
"alpha_frac": 0.456796918,
"autogenerated": false,
"ratio": 3.7775467775467777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47343436955467777,
"avg_score": null,
"num_lines": null
} |
"""A demonstration of LKD that display all files opened by hooking nt!NtCreateFile"""
import sys
import time
import ctypes
import os
if os.getcwd().endswith("example"):
sys.path.append(os.path.realpath(".."))
else:
sys.path.append(os.path.realpath("."))
import windows
import windows.native_exec.simple_x86 as x86
import windows.native_exec.simple_x64 as x64
from dbginterface import LocalKernelDebugger
# Attach a local kernel debugger (requires booting with kernel debugging
# enabled).
kdbg = LocalKernelDebugger()

# The shellcode below is 32-bit x86 and patches the 32-bit NtCreateFile
# prologue, so a 32-bit kernel is required.
if windows.system.bitness != 32:
    raise ValueError("Test for kernel32 only")
def hook_ntcreatefile(kdbg, ignore_jump_space_check=False):
    """Hook NtCreateFile, the hook write the filename to a shared memory page

    Args:
        kdbg: the LocalKernelDebugger used to read/write kernel memory.
        ignore_jump_space_check: skip the prologue checks (use when the hook
            is already installed).

    Returns:
        the userland address of the shared kernel<->user buffer.
    """
    nt_create_file = kdbg.resolve_symbol("nt!NtCreateFile")
    if not ignore_jump_space_check:
        # Check that function begin with mov edi, edi for the hook short jump
        # (the standard Microsoft hot-patch prologue).
        if kdbg.read_word(nt_create_file) != 0xff8b: # mov edi, edi
            print(hex(kdbg.read_word(nt_create_file)))
            raise ValueError("Cannot hook fonction that doest not begin with <mov edi,edi> (/f to force if hook already in place)")
        # Check there is 5 bytes free before for the hook long jump
        if kdbg.read_virtual_memory(nt_create_file - 5, 5) not in ["\x90" * 5, "\xCC" * 5]: #NOP * 5 ; INT 3 * 5
            print(kdbg.read_virtual_memory(nt_create_file - 5, 5))
            raise ValueError("Cannot hook fonction that is not prefixed with 5 nop/int 3")
    # Allocate memory for the shared buffer kernel<->user
    # the format is:
    # [addr] -> size of size already taken
    # then:
    #     DWORD string_size
    #     char[string_size] filename
    data_kernel_addr = kdbg.alloc_memory(0x1000)
    kdbg.write_pfv_memory(data_kernel_addr, "\x00" * 0x1000)
    # Map the shared buffer to userland
    data_user_addr = kdbg.map_page_to_userland(data_kernel_addr, 0x1000)
    # Allocate memory for the hook
    shellcode_addr = kdbg.alloc_memory(0x1000)
    # shellcode
    shellcode = x86.MultipleInstr()
    # Save register
    shellcode += x86.Push('EAX')
    shellcode += x86.Push('ECX')
    shellcode += x86.Push('EDI')
    shellcode += x86.Push('ESI')
    # Check that there is space remaining, else don't write it
    shellcode += x86.Cmp(x86.deref(data_kernel_addr), 0x900)
    shellcode += x86.Jnb(":END")
    # Get 3rd arg (POBJECT_ATTRIBUTES ObjectAttributes)
    shellcode += x86.Mov('EAX', x86.mem('[ESP + 0x1c]')) # 0xc + 0x10 for push
    # Get POBJECT_ATTRIBUTES.ObjectName (PUNICODE_STRING)
    shellcode += x86.Mov('EAX', x86.mem('[EAX + 0x8]'))
    shellcode += x86.Xor('ECX', 'ECX')
    # Get PUNICODE_STRING.Length
    shellcode += x86.Mov('CX', x86.mem('[EAX + 0]'))
    # Get PUNICODE_STRING.Buffer
    shellcode += x86.Mov('ESI', x86.mem('[EAX + 4]'))
    # Get the next free bytes in shared buffer
    shellcode += x86.Mov('EDI', data_kernel_addr + 4)
    shellcode += x86.Add('EDI', x86.deref(data_kernel_addr))
    # Write (DWORD string_size) in our 'struct'
    shellcode += x86.Mov(x86.mem('[EDI]'), 'ECX')
    # update size taken in shared buffer
    shellcode += x86.Add(x86.deref(data_kernel_addr), 'ECX')
    shellcode += x86.Add(x86.deref(data_kernel_addr), 4)
    # Write (char[string_size] filename) in our 'struct'
    shellcode += x86.Add('EDI', 4)
    shellcode += x86.Rep + x86.Movsb()
    shellcode += x86.Label(":END")
    # Restore registers saved above.
    shellcode += x86.Pop('ESI')
    shellcode += x86.Pop('EDI')
    shellcode += x86.Pop('ECX')
    shellcode += x86.Pop('EAX')
    # Jump to NtCreateFile (past the 2-byte <mov edi,edi> we overwrote)
    shellcode += x86.JmpAt(nt_create_file + 2)
    # Write shellcode
    kdbg.write_pfv_memory(shellcode_addr, shellcode.get_code())
    long_jump = x86.Jmp(shellcode_addr - (nt_create_file - 5))
    # Write longjump to shellcode
    kdbg.write_pfv_memory(nt_create_file - 5, long_jump.get_code())
    # Write shortjump NtCreateFile -> longjump
    short_jmp = x86.Jmp(-5)
    kdbg.write_pfv_memory(nt_create_file, short_jmp.get_code())
    # Return address of shared buffer in userland
    return data_user_addr
class FilenameReader(object):
def __init__(self, data_addr):
self.data_addr = data_addr
self.current_data = data_addr + 4
def get_current_filenames(self):
res = []
while True:
t = self.get_one_filename()
if t is None:
break
res.append(t)
return res
def get_one_filename(self):
# Read string size
size = ctypes.c_uint.from_address(self.current_data).value
if size == 0:
return None
# Read the string
filename = (ctypes.c_char * size).from_address(self.current_data + 4)[:]
try:
filename = filename.decode('utf16')
except Exception as e:
import pdb;pdb.set_trace()
#ignore decode error
pass
self.current_data += (size + 4)
return filename
def reset_buffer(self):
ctypes.memmove(self.data_addr, "\x00" * 0x1000, 0x1000)
self.current_data = data_addr + 4
# Pass /f on the command line to skip the "is the prologue hookable" checks
# (useful when the hook is already installed from a previous run).
try:
    bypass = sys.argv[1] == "/f"
except:
    bypass = False
data_addr = hook_ntcreatefile(kdbg, bypass)
fr = FilenameReader(data_addr)
# Poll the shared buffer forever, printing every file opened system-wide.
while True:
    time.sleep(0.1)
    x = fr.get_current_filenames()
    fr.reset_buffer()
    for f in x:
        try:
            print(f)
        except BaseException as e:
            # bypass encode error: fall back to the repr of the name
            print(repr(f))
| {
"repo_name": "sogeti-esec-lab/LKD",
"path": "example/hook_ntcreatefile.py",
"copies": "1",
"size": "5443",
"license": "bsd-3-clause",
"hash": -7455503920256696000,
"line_mean": 35.2866666667,
"line_max": 131,
"alpha_frac": 0.6288811317,
"autogenerated": false,
"ratio": 3.2476133651551313,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4376494496855131,
"avg_score": null,
"num_lines": null
} |
""" A demonstration of relationship """
import yarom as Y
import xlrd
Y.connect({'rdf.namespace':'http://example.com/cherries/'})
class SciName(Y.DataObject):
    """A scientific (binomial) name: genus plus species."""
    datatypeProperties = ['genus', 'species']

    def identifier_augment(self):
        # Derive a stable identifier from the name's two components.
        return self.make_identifier_from_properties('genus','species')

    def defined_augment(self):
        # A name counts as defined when either component has a value.
        return (len(self.genus.values) > 0) or (len(self.species.values) > 0)
class Ref(Y.DataObject):
    """A bibliographic reference (url + entry) that can assert relationships."""
    _ = ['url', 'refentry', 'asserts']

    def identifier_augment(self):
        return self.make_identifier_from_properties('url','refentry')

    def defined_augment(self):
        # Unlike SciName, a Ref needs BOTH url and refentry to be defined.
        return (len(self.url.values) > 0) and (len(self.refentry.values) > 0)
class Kind(Y.DataObject):
    """ Sort-of like a class """
    # subkind_of links a Kind to its more general Kind (e.g. Cherry -> Drupe).
    objectProperties = [{'name':'subkind_of'}]
class CherryCultivar(Y.DataObject):
    """A named cherry cultivar with its height and spread measurements."""
    _ = ['name','height','spread']
Y.remap()

# Build the kind hierarchy: a cherry is a drupe, which is a fruit.
fruitkind = Kind(key='Fruit')
drupekind = Kind(key='Drupe', subkind_of=fruitkind)
cherrykind = Kind(key='Cherry', subkind_of=drupekind)
cherrykind.relate('scientific_name', SciName(genus="Prunus"))

# First sheet of the workbook; row 0 is assumed to be a header row — confirm.
s = xlrd.open_workbook('cherry.xls').sheets()[0]
for row in range(1, s.nrows):
    print("ROW",row)
    # Columns: 0=name, 1=height, 2=spread, 3=reference entry, 4=reference URL.
    name = s.cell(row, 0).value
    height = s.cell(row, 1).value
    try:
        # Parse the height into a Quantity; fall back to the raw cell value.
        height = Y.Quantity.parse(height)
    except:
        height = height
    spread = s.cell(row, 2).value
    ref = s.cell(row, 3).value
    refurl = s.cell(row, 4).value
    # Sanitize the display name for use as an identifier key.
    name_key = name.replace(' ', '_').replace('(',';')
    cult = CherryCultivar(key=name_key, name=name, height=height, spread=spread)
    # Record the cultivar relationship and attach its supporting reference.
    prop = cherrykind.relate('cultivar', cult)
    Ref(url=refurl, refentry=ref).asserts(prop.rel())
Y.remap()
Y.print_graph(cherrykind.get_defined_component())
| {
"repo_name": "mwatts15/YAROM",
"path": "examples/cherry/cherry.py",
"copies": "1",
"size": "1743",
"license": "bsd-3-clause",
"hash": -5317273855962437000,
"line_mean": 29.5789473684,
"line_max": 80,
"alpha_frac": 0.6546184739,
"autogenerated": false,
"ratio": 2.9897084048027445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41443268787027443,
"avg_score": null,
"num_lines": null
} |
''' A demonstration of the neural network machine learning algorithm. '''
import random
import nlplib
# Neural network connection weights are initialized pseudorandomly; seeding
# makes the demo's results deterministic. This isn't necessary but can be
# useful for testing.
random.seed(0)
if __name__ == '__main__' :
    # Topology: inputs 'a','b','c'; one hidden layer of 4 nodes;
    # outputs 'd','e','f'.
    nn = nlplib.NeuralNetwork(['a', 'b', 'c'], 4, ['d', 'e', 'f'],
                              name='some neural network')

    # These scores are pretty worthless because the network hasn't been trained yet.
    print('before training')
    for score in nn.predict(('a', 'b')) :
        print(score)
    print()

    # Do some training!
    rate = 0.2
    for _ in range(100) :
        nn.train(('a', 'b'), ('f',), rate=rate)
        # NOTE(review): ('b') is just the string 'b', not a 1-tuple; it is
        # kept as-is — presumably nlplib accepts any iterable of node ids.
        nn.train(('b'), ('e',), rate=rate)

    # "f" gets the highest score here, as expected.
    print('testing a and b')
    for score in nlplib.Scored(nn.predict(('a', 'b'))).sorted() :
        print(score)
    print()

    # "e" gets the highest score here.
    print('testing only b')
    for score in nlplib.Scored(nn.predict(('b',))).sorted() :
        print(score)
    print()

    # "f" is a reasonable guess, seeing as the network has never seen "a" on its own before.
    print('testing only a')
    for score in nlplib.Scored(nn.predict(('a',))).sorted() :
        print(score)
    print()

    # Storing the network in the database is pretty straight forward.
    db = nlplib.Database()
    with db as session :
        session.add(nn)

    with db as session :
        # Here we retrieve the network from the database. The shortened form <session.access.nn> can also be used here.
        nn_from_db = session.access.neural_network('some neural network')

    print('testing a and b, again')
    for score in nlplib.Scored(nn_from_db.predict(('a', 'b'))).sorted() :
        print(score)
| {
"repo_name": "rectangletangle/nlplib",
"path": "src/nlplib/demos/neuralnetworkusage.py",
"copies": "1",
"size": "1866",
"license": "bsd-2-clause",
"hash": 2036678680077529300,
"line_mean": 28.619047619,
"line_max": 119,
"alpha_frac": 0.6066452304,
"autogenerated": false,
"ratio": 3.6023166023166024,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9607527294869758,
"avg_score": 0.02028690756936894,
"num_lines": 63
} |
"""A demonstration of the ``Session`` API."""
from __future__ import print_function
import oraide
# Sample dialogue (Oscar Wilde, "The Importance of Being Earnest") that the
# demo types into the editor line by line.
EARNESTNESS = """\
ALGERNON: Well, that is exactly what dentists always do. Now, go on!
Tell me the whole thing. I may mention that I have always
suspected you of being a confirmed and secret Bunburyist; and
I am quite sure of it now.
JACK: Bunburyist? What on earth do you mean by a Bunburyist?
ALGERNON: I'll reveal to you the meaning of that incomparable expression
as soon as you are kind enough to inform me why you are Ernest
in town and Jack in the country.
JACK: Well, produce my cigarette case first.
ALGERNON: Here it is. Now produce your explanation, and pray make it
improbable.
"""
def main():
    """Drive a terminal session: type the sample dialogue into vim, then quit."""
    # Target the session named 'oraide-example'.
    s = oraide.Session('oraide-example')
    s.enter("vim")
    s.enter('i')  # enter insert mode
    # auto_advance sends the keystrokes without waiting for manual prompts.
    with s.auto_advance():
        print("Typing {} characters".format(len(EARNESTNESS)))
        for line in EARNESTNESS.splitlines():
            s.enter(line)
    # Leave insert mode, quit vim without saving, then end the shell.
    s.send_keys(oraide.keys.escape, literal=False)
    s.enter(':q!')
    s.enter('exit')

if __name__ == '__main__':
    main()
| {
"repo_name": "ddbeck/oraide",
"path": "examples/session.py",
"copies": "1",
"size": "1145",
"license": "bsd-3-clause",
"hash": 4943809093727507000,
"line_mean": 26.2619047619,
"line_max": 72,
"alpha_frac": 0.6489082969,
"autogenerated": false,
"ratio": 3.347953216374269,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9492452342198432,
"avg_score": 0.0008818342151675485,
"num_lines": 42
} |
# A demo of a fairly complex dialog.
#
# Features:
# * Uses a "dynamic dialog resource" to build the dialog.
# * Uses a ListView control.
# * Dynamically resizes content.
# * Uses a second worker thread to fill the list.
# * Demostrates support for windows XP themes.
# If you are on Windows XP, and specify a '--noxp' argument, you will see:
# * alpha-blend issues with icons
# * The buttons are "old" style, rather than based on the XP theme.
# Hence, using:
# import winxpgui as win32gui
# is recommened.
# Please report any problems.
import sys
if "--noxp" in sys.argv:
import win32gui
else:
import winxpgui as win32gui
import win32api
import win32con, winerror
import struct, array
import commctrl
import Queue
import os
# Dialog control IDs.
IDC_SEARCHTEXT = 1024
IDC_BUTTON_SEARCH = 1025
IDC_BUTTON_DISPLAY = 1026
IDC_LISTBOX = 1027

# Custom messages posted by the worker thread back to the dialog.
WM_SEARCH_RESULT = win32con.WM_USER + 512
WM_SEARCH_FINISHED = win32con.WM_USER + 513
class _WIN32MASKEDSTRUCT:
    # Base for Win32 structs whose fields are gated by a 'mask' member.
    # Subclasses define _struct_items_ as (name, struct_fmt, default, mask_bit)
    # tuples; fmt "z" denotes a counted string packed as pointer + length.
    # (Python 2 source: old-style raise/has_key/apply are intentional.)
    def __init__(self, **kw):
        full_fmt = ""
        for name, fmt, default, mask in self._struct_items_:
            self.__dict__[name] = None
            if fmt == "z":
                full_fmt += "pi"
            else:
                full_fmt += fmt
        for name, val in kw.items():
            if not self.__dict__.has_key(name):
                raise ValueError, "LVITEM structures do not have an item '%s'" % (name,)
            self.__dict__[name] = val

    def __setattr__(self, attr, val):
        # Reject attribute names not declared in _struct_items_ (privates allowed).
        if not attr.startswith("_") and not self.__dict__.has_key(attr):
            raise AttributeError, attr
        self.__dict__[attr] = val

    def toparam(self):
        # Pack the struct into a buffer suitable for passing to the Win32 API.
        self._buffs = []
        full_fmt = ""
        vals = []
        mask = 0
        # calc the mask: set the bit of every field that was assigned a value
        for name, fmt, default, this_mask in self._struct_items_:
            if this_mask is not None and self.__dict__.get(name) is not None:
                mask |= this_mask
        self.mask = mask
        for name, fmt, default, this_mask in self._struct_items_:
            val = self.__dict__[name]
            if fmt == "z":
                # Counted string: pack as (pointer, length).
                fmt = "Pi"
                if val is None:
                    vals.append(0)
                    vals.append(0)
                else:
                    str_buf = array.array("c", val+'\0')
                    vals.append(str_buf.buffer_info()[0])
                    vals.append(len(val))
                    self._buffs.append(str_buf) # keep alive during the call.
            else:
                if val is None:
                    val = default
                vals.append(val)
            full_fmt += fmt
        return apply(struct.pack, (full_fmt,) + tuple(vals) )
# NOTE: See the win32gui_struct module for an alternative way of dealing
# with these structures
class LVITEM(_WIN32MASKEDSTRUCT):
    # List-view item: (name, pack format, default, LVIF_* mask bit).
    _struct_items_ = [
        ("mask", "I", 0, None),
        ("iItem", "i", 0, None),
        ("iSubItem", "i", 0, None),
        ("state", "I", 0, commctrl.LVIF_STATE),
        ("stateMask", "I", 0, None),
        ("text", "z", None, commctrl.LVIF_TEXT),
        ("iImage", "i", 0, commctrl.LVIF_IMAGE),
        ("lParam", "i", 0, commctrl.LVIF_PARAM),
        ("iIdent", "i", 0, None),
    ]
class LVCOLUMN(_WIN32MASKEDSTRUCT):
    # List-view column: (name, pack format, default, LVCF_* mask bit).
    _struct_items_ = [
        ("mask", "I", 0, None),
        ("fmt", "i", 0, commctrl.LVCF_FMT),
        ("cx", "i", 0, commctrl.LVCF_WIDTH),
        ("text", "z", None, commctrl.LVCF_TEXT),
        ("iSubItem", "i", 0, commctrl.LVCF_SUBITEM),
        ("iImage", "i", 0, commctrl.LVCF_IMAGE),
        ("iOrder", "i", 0, commctrl.LVCF_ORDER),
    ]
class DemoWindowBase:
    def __init__(self):
        # Ensure the common controls DLL (list views etc.) is initialized.
        win32gui.InitCommonControls()
        self.hinst = win32gui.dllhandle
        # Per-row data for the list control; filled elsewhere in the class.
        self.list_data = {}
    def _RegisterWndClass(self):
        # Register the dialog's window class (idempotent) and return its name.
        className = "PythonDocSearch"
        message_map = {}
        wc = win32gui.WNDCLASS()
        wc.SetDialogProc() # Make it a dialog class.
        wc.hInstance = self.hinst
        wc.lpszClassName = className
        wc.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW
        wc.hCursor = win32gui.LoadCursor( 0, win32con.IDC_ARROW )
        wc.hbrBackground = win32con.COLOR_WINDOW + 1
        wc.lpfnWndProc = message_map # could also specify a wndproc.
        # C code: wc.cbWndExtra = DLGWINDOWEXTRA + sizeof(HBRUSH) + (sizeof(COLORREF));
        wc.cbWndExtra = win32con.DLGWINDOWEXTRA + struct.calcsize("Pi")
        icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE

        ## py.ico went away in python 2.5, load from executable instead
        this_app=win32api.GetModuleHandle(None)
        try:
            wc.hIcon=win32gui.LoadIcon(this_app, 1) ## python.exe and pythonw.exe
        except win32gui.error:
            wc.hIcon=win32gui.LoadIcon(this_app, 135) ## pythonwin's icon
        try:
            classAtom = win32gui.RegisterClass(wc)
        except win32gui.error, err_info:
            # Re-registering an already-registered class is fine; anything
            # else is a real failure.
            if err_info[0]!=winerror.ERROR_CLASS_ALREADY_EXISTS:
                raise
        return className
def _GetDialogTemplate(self, dlgClassName):
style = win32con.WS_THICKFRAME | win32con.WS_POPUP | win32con.WS_VISIBLE | win32con.WS_CAPTION | win32con.WS_SYSMENU | win32con.DS_SETFONT | win32con.WS_MINIMIZEBOX
cs = win32con.WS_CHILD | win32con.WS_VISIBLE
title = "Dynamic Dialog Demo"
# Window frame and title
dlg = [ [title, (0, 0, 210, 250), style, None, (8, "MS Sans Serif"), None, dlgClassName], ]
# ID label and text box
dlg.append([130, "Enter something", -1, (5, 5, 200, 9), cs | win32con.SS_LEFT])
s = cs | win32con.WS_TABSTOP | win32con.WS_BORDER
dlg.append(['EDIT', None, IDC_SEARCHTEXT, (5, 15, 200, 12), s])
# Search/Display Buttons
# (x positions don't matter here)
s = cs | win32con.WS_TABSTOP
dlg.append([128, "Fill List", IDC_BUTTON_SEARCH, (5, 35, 50, 14), s | win32con.BS_DEFPUSHBUTTON])
s = win32con.BS_PUSHBUTTON | s
dlg.append([128, "Display", IDC_BUTTON_DISPLAY, (100, 35, 50, 14), s])
# List control.
# Can't make this work :(
## s = cs | win32con.WS_TABSTOP
## dlg.append(['SysListView32', "Title", IDC_LISTBOX, (5, 505, 200, 200), s])
return dlg
def _DoCreate(self, fn):
message_map = {
win32con.WM_SIZE: self.OnSize,
win32con.WM_COMMAND: self.OnCommand,
win32con.WM_NOTIFY: self.OnNotify,
win32con.WM_INITDIALOG: self.OnInitDialog,
win32con.WM_CLOSE: self.OnClose,
win32con.WM_DESTROY: self.OnDestroy,
WM_SEARCH_RESULT: self.OnSearchResult,
WM_SEARCH_FINISHED: self.OnSearchFinished,
}
dlgClassName = self._RegisterWndClass()
template = self._GetDialogTemplate(dlgClassName)
return fn(self.hinst, template, 0, message_map)
def _SetupList(self):
child_style = win32con.WS_CHILD | win32con.WS_VISIBLE | win32con.WS_BORDER | win32con.WS_HSCROLL | win32con.WS_VSCROLL
child_style |= commctrl.LVS_SINGLESEL | commctrl.LVS_SHOWSELALWAYS | commctrl.LVS_REPORT
self.hwndList = win32gui.CreateWindow("SysListView32", None, child_style, 0, 0, 100, 100, self.hwnd, IDC_LISTBOX, self.hinst, None)
child_ex_style = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETEXTENDEDLISTVIEWSTYLE, 0, 0)
child_ex_style |= commctrl.LVS_EX_FULLROWSELECT
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETEXTENDEDLISTVIEWSTYLE, 0, child_ex_style)
# Add an image list - use the builtin shell folder icon - this
# demonstrates the problem with alpha-blending of icons on XP if
# winxpgui is not used in place of win32gui.
il = win32gui.ImageList_Create(
win32api.GetSystemMetrics(win32con.SM_CXSMICON),
win32api.GetSystemMetrics(win32con.SM_CYSMICON),
commctrl.ILC_COLOR32 | commctrl.ILC_MASK,
1, # initial size
0) # cGrow
shell_dll = os.path.join(win32api.GetSystemDirectory(), "shell32.dll")
large, small = win32gui.ExtractIconEx(shell_dll, 4, 1)
win32gui.ImageList_ReplaceIcon(il, -1, small[0])
win32gui.DestroyIcon(small[0])
win32gui.DestroyIcon(large[0])
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETIMAGELIST,
commctrl.LVSIL_SMALL, il)
# Setup the list control columns.
lvc = LVCOLUMN(mask = commctrl.LVCF_FMT | commctrl.LVCF_WIDTH | commctrl.LVCF_TEXT | commctrl.LVCF_SUBITEM)
lvc.fmt = commctrl.LVCFMT_LEFT
lvc.iSubItem = 1
lvc.text = "Title"
lvc.cx = 200
win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTCOLUMN, 0, lvc.toparam())
lvc.iSubItem = 0
lvc.text = "Order"
lvc.cx = 50
win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTCOLUMN, 0, lvc.toparam())
win32gui.UpdateWindow(self.hwnd)
def ClearListItems(self):
win32gui.SendMessage(self.hwndList, commctrl.LVM_DELETEALLITEMS)
self.list_data = {}
def AddListItem(self, data, *columns):
num_items = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETITEMCOUNT)
item = LVITEM(text=columns[0], iItem = num_items)
new_index = win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTITEM, 0, item.toparam())
col_no = 1
for col in columns[1:]:
item = LVITEM(text=col, iItem = new_index, iSubItem = col_no)
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETITEM, 0, item.toparam())
col_no += 1
self.list_data[new_index] = data
def OnInitDialog(self, hwnd, msg, wparam, lparam):
self.hwnd = hwnd
# centre the dialog
desktop = win32gui.GetDesktopWindow()
l,t,r,b = win32gui.GetWindowRect(self.hwnd)
dt_l, dt_t, dt_r, dt_b = win32gui.GetWindowRect(desktop)
centre_x, centre_y = win32gui.ClientToScreen( desktop, ( (dt_r-dt_l)/2, (dt_b-dt_t)/2) )
win32gui.MoveWindow(hwnd, centre_x-(r/2), centre_y-(b/2), r-l, b-t, 0)
self._SetupList()
l,t,r,b = win32gui.GetClientRect(self.hwnd)
self._DoSize(r-l,b-t, 1)
def _DoSize(self, cx, cy, repaint = 1):
# right-justify the textbox.
ctrl = win32gui.GetDlgItem(self.hwnd, IDC_SEARCHTEXT)
l, t, r, b = win32gui.GetWindowRect(ctrl)
l, t = win32gui.ScreenToClient(self.hwnd, (l,t) )
r, b = win32gui.ScreenToClient(self.hwnd, (r,b) )
win32gui.MoveWindow(ctrl, l, t, cx-l-5, b-t, repaint)
# The button.
ctrl = win32gui.GetDlgItem(self.hwnd, IDC_BUTTON_DISPLAY)
l, t, r, b = win32gui.GetWindowRect(ctrl)
l, t = win32gui.ScreenToClient(self.hwnd, (l,t) )
r, b = win32gui.ScreenToClient(self.hwnd, (r,b) )
list_y = b + 10
w = r - l
win32gui.MoveWindow(ctrl, cx - 5 - w, t, w, b-t, repaint)
# The list control
win32gui.MoveWindow(self.hwndList, 0, list_y, cx, cy-list_y, repaint)
# The last column of the list control.
new_width = cx - win32gui.SendMessage(self.hwndList, commctrl.LVM_GETCOLUMNWIDTH, 0)
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETCOLUMNWIDTH, 1, new_width)
def OnSize(self, hwnd, msg, wparam, lparam):
x = win32api.LOWORD(lparam)
y = win32api.HIWORD(lparam)
self._DoSize(x,y)
return 1
def OnSearchResult(self, hwnd, msg, wparam, lparam):
try:
while 1:
params = self.result_queue.get(0)
apply(self.AddListItem, params)
except Queue.Empty:
pass
def OnSearchFinished(self, hwnd, msg, wparam, lparam):
print "OnSearchFinished"
def OnNotify(self, hwnd, msg, wparam, lparam):
format = "iiiiiiiiiii"
buf = win32gui.PyMakeBuffer(struct.calcsize(format), lparam)
hwndFrom, idFrom, code, iItem, iSubItem, uNewState, uOldState, uChanged, actionx, actiony, lParam \
= struct.unpack(format, buf)
# *sigh* - work around a problem with old commctrl modules, which had a
# bad value for PY_OU, which therefore cause most "control notification"
# messages to be wrong.
# Code that needs to work with both pre and post pywin32-204 must do
# this too.
code += commctrl.PY_0U
if code == commctrl.NM_DBLCLK:
print "Double click on item", iItem+1
return 1
def OnCommand(self, hwnd, msg, wparam, lparam):
id = win32api.LOWORD(wparam)
if id == IDC_BUTTON_SEARCH:
self.ClearListItems()
def fill_slowly(q, hwnd):
import time
for i in range(20):
q.put(("whatever", str(i+1), "Search result " + str(i) ))
win32gui.PostMessage(hwnd, WM_SEARCH_RESULT, 0, 0)
time.sleep(.25)
win32gui.PostMessage(hwnd, WM_SEARCH_FINISHED, 0, 0)
import threading
self.result_queue = Queue.Queue()
thread = threading.Thread(target = fill_slowly, args=(self.result_queue, self.hwnd) )
thread.start()
elif id == IDC_BUTTON_DISPLAY:
print "Display button selected"
sel = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETNEXTITEM, -1, commctrl.LVNI_SELECTED)
print "The selected item is", sel+1
# These function differ based on how the window is used, so may be overridden
def OnClose(self, hwnd, msg, wparam, lparam):
raise NotImplementedError
def OnDestroy(self, hwnd, msg, wparam, lparam):
pass
# An implementation suitable for use with the Win32 Window functions (ie, not
# a true dialog)
class DemoWindow(DemoWindowBase):
    """Modeless variant: the demo hosted in a normal (non-dialog) window."""
    def CreateWindow(self):
        # Create the window via CreateDialogBoxIndirect - it can then
        # work as a "normal" window, once a message loop is established.
        self._DoCreate(win32gui.CreateDialogIndirect)
    def OnClose(self, hwnd, msg, wparam, lparam):
        win32gui.DestroyWindow(hwnd)
    # We need to arrange for a WM_QUIT message to be sent to our
    # PumpMessages() loop.
    def OnDestroy(self, hwnd, msg, wparam, lparam):
        win32gui.PostQuitMessage(0) # Terminate the app.
# An implementation suitable for use with the Win32 Dialog functions.
class DemoDialog(DemoWindowBase):
    """Modal variant: the demo run through the Win32 dialog functions."""
    def DoModal(self):
        return self._DoCreate(win32gui.DialogBoxIndirect)
    def OnClose(self, hwnd, msg, wparam, lparam):
        win32gui.EndDialog(hwnd, 0)
def DemoModal():
    """Run the demo as a modal dialog; returns when the dialog is closed."""
    DemoDialog().DoModal()
def DemoCreateWindow():
    """Run the demo as a modeless window with its own message loop."""
    window = DemoWindow()
    window.CreateWindow()
    # The loop only returns once PostQuitMessage() runs (see OnDestroy).
    win32gui.PumpMessages()
if __name__=='__main__':
    # Show the modal flavour first, then the modeless one.
    DemoModal()
    DemoCreateWindow()
| {
"repo_name": "nvoron23/arangodb",
"path": "3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/win32/Demos/win32gui_dialog.py",
"copies": "17",
"size": "14934",
"license": "apache-2.0",
"hash": 292307085896390500,
"line_mean": 38.9304812834,
"line_max": 172,
"alpha_frac": 0.6026516673,
"autogenerated": false,
"ratio": 3.2901520158625246,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# A demo of a fairly complex dialog.
#
# Features:
# * Uses a "dynamic dialog resource" to build the dialog.
# * Uses a ListView control.
# * Dynamically resizes content.
# * Uses a second worker thread to fill the list.
# * Demonstrates support for windows XP themes.
# If you are on Windows XP, and specify a '--noxp' argument, you will see:
# * alpha-blend issues with icons
# * The buttons are "old" style, rather than based on the XP theme.
# Hence, using:
# import winxpgui as win32gui
# is recommended.
# Please report any problems.
import sys
if "--noxp" in sys.argv:
import win32gui
else:
import winxpgui as win32gui
import win32gui_struct
import win32api
import win32con, winerror
import struct, array
import commctrl
import Queue
import os
# Control IDs used in the dialog template below.
IDC_SEARCHTEXT = 1024
IDC_BUTTON_SEARCH = 1025
IDC_BUTTON_DISPLAY = 1026
IDC_LISTBOX = 1027
# Private messages posted by the worker thread back to the UI thread.
WM_SEARCH_RESULT = win32con.WM_USER + 512
WM_SEARCH_FINISHED = win32con.WM_USER + 513
class _WIN32MASKEDSTRUCT:
    """Base for LVITEM/LVCOLUMN: packs named fields into the binary
    layout described by the subclass's _struct_items_ list, building
    the LVIF_*/LVCF_* 'mask' automatically from the fields that were
    actually assigned.  Format code "z" means a string field packed as
    a (pointer, length) pair.
    """
    def __init__(self, **kw):
        full_fmt = ""
        # Declare every known field (initially unset).
        for name, fmt, default, mask in self._struct_items_:
            self.__dict__[name] = None
            if fmt == "z":
                full_fmt += "pi"
            else:
                full_fmt += fmt
        for name, val in kw.iteritems():
            if name not in self.__dict__:
                raise ValueError("LVITEM structures do not have an item '%s'" % (name,))
            self.__dict__[name] = val
    def __setattr__(self, attr, val):
        # Only field names declared in _struct_items_ (or privates) may
        # be assigned - catches typos at the call site.
        if not attr.startswith("_") and attr not in self.__dict__:
            raise AttributeError(attr)
        self.__dict__[attr] = val
    def toparam(self):
        """Return the packed binary struct, suitable as a message lParam.

        String buffers are kept alive on self._buffs so the pointers
        packed into the result stay valid for the duration of the call.
        """
        self._buffs = []
        full_fmt = ""
        vals = []
        mask = 0
        # calc the mask
        for name, fmt, default, this_mask in self._struct_items_:
            if this_mask is not None and self.__dict__.get(name) is not None:
                mask |= this_mask
        self.mask = mask
        for name, fmt, default, this_mask in self._struct_items_:
            val = self.__dict__[name]
            if fmt == "z":
                fmt = "Pi"
                if val is None:
                    vals.append(0)
                    vals.append(0)
                else:
                    # Note this demo still works with byte strings. An
                    # alternate strategy would be to use unicode natively
                    # and use the 'W' version of the messages - eg,
                    # LVM_SETITEMW etc.
                    val = val + "\0"
                    if isinstance(val, unicode):
                        val = val.encode("mbcs")
                    str_buf = array.array("b", val)
                    vals.append(str_buf.buffer_info()[0])
                    vals.append(len(val))
                    self._buffs.append(str_buf) # keep alive during the call.
            else:
                if val is None:
                    val = default
                vals.append(val)
            full_fmt += fmt
        return struct.pack(*(full_fmt,) + tuple(vals))
# NOTE: See the win32gui_struct module for an alternative way of dealing
# with these structures
class LVITEM(_WIN32MASKEDSTRUCT):
    """Python-side mirror of the Win32 LVITEM structure.

    Each entry of _struct_items_ is (name, struct-format, default,
    LVIF_* mask bit); a field with a mask bit contributes that bit to
    'mask' when it has been assigned a value.  Format "z" marks a string
    packed as a (pointer, length) pair - see _WIN32MASKEDSTRUCT.toparam().
    """
    _struct_items_ = [
        ("mask", "I", 0, None),
        ("iItem", "i", 0, None),
        ("iSubItem", "i", 0, None),
        ("state", "I", 0, commctrl.LVIF_STATE),
        ("stateMask", "I", 0, None),
        ("text", "z", None, commctrl.LVIF_TEXT),
        ("iImage", "i", 0, commctrl.LVIF_IMAGE),
        ("lParam", "i", 0, commctrl.LVIF_PARAM),
        ("iIdent", "i", 0, None),
    ]
class LVCOLUMN(_WIN32MASKEDSTRUCT):
    """Python-side mirror of the Win32 LVCOLUMN structure.

    Same field layout convention as LVITEM: (name, struct-format,
    default, LVCF_* mask bit) per entry of _struct_items_.
    """
    _struct_items_ = [
        ("mask", "I", 0, None),
        ("fmt", "i", 0, commctrl.LVCF_FMT),
        ("cx", "i", 0, commctrl.LVCF_WIDTH),
        ("text", "z", None, commctrl.LVCF_TEXT),
        ("iSubItem", "i", 0, commctrl.LVCF_SUBITEM),
        ("iImage", "i", 0, commctrl.LVCF_IMAGE),
        ("iOrder", "i", 0, commctrl.LVCF_ORDER),
    ]
class DemoWindowBase:
    """Shared implementation of the demo dialog.

    Builds the window from an in-memory dialog template, hosts a
    SysListView32 filled asynchronously by a worker thread, and resizes
    its children on WM_SIZE.  Subclasses (DemoWindow / DemoDialog)
    decide whether it runs as a normal window or a modal dialog.
    """
    def __init__(self):
        win32gui.InitCommonControls()
        self.hinst = win32gui.dllhandle
        self.list_data = {}  # list-view item index -> caller-supplied data
    def _RegisterWndClass(self):
        """Register the dialog's window class; returns the class name."""
        className = "PythonDocSearch"
        message_map = {}
        wc = win32gui.WNDCLASS()
        wc.SetDialogProc() # Make it a dialog class.
        wc.hInstance = self.hinst
        wc.lpszClassName = className
        wc.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW
        wc.hCursor = win32gui.LoadCursor( 0, win32con.IDC_ARROW )
        wc.hbrBackground = win32con.COLOR_WINDOW + 1
        wc.lpfnWndProc = message_map # could also specify a wndproc.
        # C code: wc.cbWndExtra = DLGWINDOWEXTRA + sizeof(HBRUSH) + (sizeof(COLORREF));
        wc.cbWndExtra = win32con.DLGWINDOWEXTRA + struct.calcsize("Pi")
        icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
        ## py.ico went away in python 2.5, load from executable instead
        this_app=win32api.GetModuleHandle(None)
        try:
            wc.hIcon=win32gui.LoadIcon(this_app, 1) ## python.exe and pythonw.exe
        except win32gui.error:
            wc.hIcon=win32gui.LoadIcon(this_app, 135) ## pythonwin's icon
        try:
            classAtom = win32gui.RegisterClass(wc)
        except win32gui.error, err_info:
            # Re-registering in the same process is fine; anything else is fatal.
            if err_info.winerror!=winerror.ERROR_CLASS_ALREADY_EXISTS:
                raise
        return className
    def _GetDialogTemplate(self, dlgClassName):
        """Build the in-memory dialog template: caption, a label, an
        edit box and two buttons (positions are fixed up in _DoSize)."""
        style = win32con.WS_THICKFRAME | win32con.WS_POPUP | win32con.WS_VISIBLE | win32con.WS_CAPTION | win32con.WS_SYSMENU | win32con.DS_SETFONT | win32con.WS_MINIMIZEBOX
        cs = win32con.WS_CHILD | win32con.WS_VISIBLE
        title = "Dynamic Dialog Demo"
        # Window frame and title
        dlg = [ [title, (0, 0, 210, 250), style, None, (8, "MS Sans Serif"), None, dlgClassName], ]
        # ID label and text box
        dlg.append([130, "Enter something", -1, (5, 5, 200, 9), cs | win32con.SS_LEFT])
        s = cs | win32con.WS_TABSTOP | win32con.WS_BORDER
        dlg.append(['EDIT', None, IDC_SEARCHTEXT, (5, 15, 200, 12), s])
        # Search/Display Buttons
        # (x positions don't matter here)
        s = cs | win32con.WS_TABSTOP
        dlg.append([128, "Fill List", IDC_BUTTON_SEARCH, (5, 35, 50, 14), s | win32con.BS_DEFPUSHBUTTON])
        s = win32con.BS_PUSHBUTTON | s
        dlg.append([128, "Display", IDC_BUTTON_DISPLAY, (100, 35, 50, 14), s])
        # List control.
        # Can't make this work :(
        ## s = cs | win32con.WS_TABSTOP
        ## dlg.append(['SysListView32', "Title", IDC_LISTBOX, (5, 505, 200, 200), s])
        return dlg
    def _DoCreate(self, fn):
        """Create the window via *fn* (CreateDialogIndirect or
        DialogBoxIndirect), wiring up the message map below."""
        message_map = {
            win32con.WM_SIZE: self.OnSize,
            win32con.WM_COMMAND: self.OnCommand,
            win32con.WM_NOTIFY: self.OnNotify,
            win32con.WM_INITDIALOG: self.OnInitDialog,
            win32con.WM_CLOSE: self.OnClose,
            win32con.WM_DESTROY: self.OnDestroy,
            WM_SEARCH_RESULT: self.OnSearchResult,
            WM_SEARCH_FINISHED: self.OnSearchFinished,
        }
        dlgClassName = self._RegisterWndClass()
        template = self._GetDialogTemplate(dlgClassName)
        return fn(self.hinst, template, 0, message_map)
    def _SetupList(self):
        """Create the SysListView32 child, give it an image list and
        two report-view columns."""
        child_style = win32con.WS_CHILD | win32con.WS_VISIBLE | win32con.WS_BORDER | win32con.WS_HSCROLL | win32con.WS_VSCROLL
        child_style |= commctrl.LVS_SINGLESEL | commctrl.LVS_SHOWSELALWAYS | commctrl.LVS_REPORT
        self.hwndList = win32gui.CreateWindow("SysListView32", None, child_style, 0, 0, 100, 100, self.hwnd, IDC_LISTBOX, self.hinst, None)
        child_ex_style = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETEXTENDEDLISTVIEWSTYLE, 0, 0)
        child_ex_style |= commctrl.LVS_EX_FULLROWSELECT
        win32gui.SendMessage(self.hwndList, commctrl.LVM_SETEXTENDEDLISTVIEWSTYLE, 0, child_ex_style)
        # Add an image list - use the builtin shell folder icon - this
        # demonstrates the problem with alpha-blending of icons on XP if
        # winxpgui is not used in place of win32gui.
        il = win32gui.ImageList_Create(
                    win32api.GetSystemMetrics(win32con.SM_CXSMICON),
                    win32api.GetSystemMetrics(win32con.SM_CYSMICON),
                    commctrl.ILC_COLOR32 | commctrl.ILC_MASK,
                    1, # initial size
                    0) # cGrow
        shell_dll = os.path.join(win32api.GetSystemDirectory(), "shell32.dll")
        large, small = win32gui.ExtractIconEx(shell_dll, 4, 1)
        win32gui.ImageList_ReplaceIcon(il, -1, small[0])
        # The image list copied the icons, so release our handles.
        win32gui.DestroyIcon(small[0])
        win32gui.DestroyIcon(large[0])
        win32gui.SendMessage(self.hwndList, commctrl.LVM_SETIMAGELIST,
                             commctrl.LVSIL_SMALL, il)
        # Setup the list control columns.
        lvc = LVCOLUMN(mask = commctrl.LVCF_FMT | commctrl.LVCF_WIDTH | commctrl.LVCF_TEXT | commctrl.LVCF_SUBITEM)
        lvc.fmt = commctrl.LVCFMT_LEFT
        lvc.iSubItem = 1
        lvc.text = "Title"
        lvc.cx = 200
        win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTCOLUMN, 0, lvc.toparam())
        lvc.iSubItem = 0
        lvc.text = "Order"
        lvc.cx = 50
        # Inserting at index 0 again pushes "Title" right, so the final
        # column order is "Order", "Title".
        win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTCOLUMN, 0, lvc.toparam())
        win32gui.UpdateWindow(self.hwnd)
    def ClearListItems(self):
        """Remove all rows from the list view and drop their data."""
        win32gui.SendMessage(self.hwndList, commctrl.LVM_DELETEALLITEMS)
        self.list_data = {}
    def AddListItem(self, data, *columns):
        """Append one row; *columns* are the cell texts, *data* is an
        arbitrary object remembered for the new row's index."""
        num_items = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETITEMCOUNT)
        item = LVITEM(text=columns[0], iItem = num_items)
        new_index = win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTITEM, 0, item.toparam())
        col_no = 1
        for col in columns[1:]:
            item = LVITEM(text=col, iItem = new_index, iSubItem = col_no)
            win32gui.SendMessage(self.hwndList, commctrl.LVM_SETITEM, 0, item.toparam())
            col_no += 1
        self.list_data[new_index] = data
    def OnInitDialog(self, hwnd, msg, wparam, lparam):
        """Centre the window on the desktop and create the list view."""
        self.hwnd = hwnd
        # centre the dialog
        desktop = win32gui.GetDesktopWindow()
        l,t,r,b = win32gui.GetWindowRect(self.hwnd)
        dt_l, dt_t, dt_r, dt_b = win32gui.GetWindowRect(desktop)
        centre_x, centre_y = win32gui.ClientToScreen( desktop, ( (dt_r-dt_l)//2, (dt_b-dt_t)//2) )
        win32gui.MoveWindow(hwnd, centre_x-(r//2), centre_y-(b//2), r-l, b-t, 0)
        self._SetupList()
        l,t,r,b = win32gui.GetClientRect(self.hwnd)
        self._DoSize(r-l,b-t, 1)
    def _DoSize(self, cx, cy, repaint = 1):
        """Lay the children out for a client area of cx x cy pixels."""
        # right-justify the textbox.
        ctrl = win32gui.GetDlgItem(self.hwnd, IDC_SEARCHTEXT)
        l, t, r, b = win32gui.GetWindowRect(ctrl)
        l, t = win32gui.ScreenToClient(self.hwnd, (l,t) )
        r, b = win32gui.ScreenToClient(self.hwnd, (r,b) )
        win32gui.MoveWindow(ctrl, l, t, cx-l-5, b-t, repaint)
        # The button.
        ctrl = win32gui.GetDlgItem(self.hwnd, IDC_BUTTON_DISPLAY)
        l, t, r, b = win32gui.GetWindowRect(ctrl)
        l, t = win32gui.ScreenToClient(self.hwnd, (l,t) )
        r, b = win32gui.ScreenToClient(self.hwnd, (r,b) )
        list_y = b + 10
        w = r - l
        win32gui.MoveWindow(ctrl, cx - 5 - w, t, w, b-t, repaint)
        # The list control
        win32gui.MoveWindow(self.hwndList, 0, list_y, cx, cy-list_y, repaint)
        # The last column of the list control.
        new_width = cx - win32gui.SendMessage(self.hwndList, commctrl.LVM_GETCOLUMNWIDTH, 0)
        win32gui.SendMessage(self.hwndList, commctrl.LVM_SETCOLUMNWIDTH, 1, new_width)
    def OnSize(self, hwnd, msg, wparam, lparam):
        # lparam packs the new client width/height in its low/high words.
        x = win32api.LOWORD(lparam)
        y = win32api.HIWORD(lparam)
        self._DoSize(x,y)
        return 1
    def OnSearchResult(self, hwnd, msg, wparam, lparam):
        """Drain the worker thread's queue into the list view."""
        try:
            while 1:
                # get(0) == non-blocking get; Queue.Empty ends the drain.
                params = self.result_queue.get(0)
                self.AddListItem(*params)
        except Queue.Empty:
            pass
    def OnSearchFinished(self, hwnd, msg, wparam, lparam):
        print "OnSearchFinished"
    def OnNotify(self, hwnd, msg, wparam, lparam):
        """Report double-clicks on list items."""
        info = win32gui_struct.UnpackNMITEMACTIVATE(lparam)
        if info.code == commctrl.NM_DBLCLK:
            print "Double click on item", info.iItem+1
        return 1
    def OnCommand(self, hwnd, msg, wparam, lparam):
        """Handle the two buttons: start the background fill, or print
        the current selection."""
        id = win32api.LOWORD(wparam)
        if id == IDC_BUTTON_SEARCH:
            self.ClearListItems()
            def fill_slowly(q, hwnd):
                # Worker thread: feed results through the queue, poking
                # the window with WM_SEARCH_RESULT after each one.
                import time
                for i in range(20):
                    q.put(("whatever", str(i+1), "Search result " + str(i) ))
                    win32gui.PostMessage(hwnd, WM_SEARCH_RESULT, 0, 0)
                    time.sleep(.25)
                win32gui.PostMessage(hwnd, WM_SEARCH_FINISHED, 0, 0)
            import threading
            self.result_queue = Queue.Queue()
            thread = threading.Thread(target = fill_slowly, args=(self.result_queue, self.hwnd) )
            thread.start()
        elif id == IDC_BUTTON_DISPLAY:
            print "Display button selected"
            sel = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETNEXTITEM, -1, commctrl.LVNI_SELECTED)
            print "The selected item is", sel+1
    # These function differ based on how the window is used, so may be overridden
    def OnClose(self, hwnd, msg, wparam, lparam):
        raise NotImplementedError
    def OnDestroy(self, hwnd, msg, wparam, lparam):
        pass
# An implementation suitable for use with the Win32 Window functions (ie, not
# a true dialog)
class DemoWindow(DemoWindowBase):
    """Modeless variant: the demo hosted in a normal (non-dialog) window."""
    def CreateWindow(self):
        # Create the window via CreateDialogBoxIndirect - it can then
        # work as a "normal" window, once a message loop is established.
        self._DoCreate(win32gui.CreateDialogIndirect)
    def OnClose(self, hwnd, msg, wparam, lparam):
        win32gui.DestroyWindow(hwnd)
    # We need to arrange for a WM_QUIT message to be sent to our
    # PumpMessages() loop.
    def OnDestroy(self, hwnd, msg, wparam, lparam):
        win32gui.PostQuitMessage(0) # Terminate the app.
# An implementation suitable for use with the Win32 Dialog functions.
class DemoDialog(DemoWindowBase):
    """Modal variant: the demo run through the Win32 dialog functions."""
    def DoModal(self):
        return self._DoCreate(win32gui.DialogBoxIndirect)
    def OnClose(self, hwnd, msg, wparam, lparam):
        win32gui.EndDialog(hwnd, 0)
def DemoModal():
    """Run the demo as a modal dialog; returns when the dialog is closed."""
    DemoDialog().DoModal()
def DemoCreateWindow():
    """Run the demo as a modeless window with its own message loop."""
    window = DemoWindow()
    window.CreateWindow()
    # The loop only returns once PostQuitMessage() runs (see OnDestroy).
    win32gui.PumpMessages()
if __name__=='__main__':
    # Show the modal flavour first, then the modeless one.
    DemoModal()
    DemoCreateWindow()
| {
"repo_name": "zhanqxun/cv_fish",
"path": "win32/Demos/win32gui_dialog.py",
"copies": "4",
"size": "15197",
"license": "apache-2.0",
"hash": -3355804236979198000,
"line_mean": 38.7426273458,
"line_max": 172,
"alpha_frac": 0.5837336316,
"autogenerated": false,
"ratio": 3.37111801242236,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.595485164402236,
"avg_score": null,
"num_lines": null
} |
# A demo of an Application object that has some custom print functionality.
# If you desire, you can also run this from inside Pythonwin, in which
# case it will do the demo inside the Pythonwin environment.
# This sample was contributed by Roger Burnham.
from pywin.mfc import docview, dialog, afxres
from pywin.framework import app
import win32con
import win32ui
import win32api
# Resource ID of the standard print dialog template, and the ID of the
# extra magnification edit control used by ImagePrintDialog below.
PRINTDLGORD = 1538
IDC_PRINT_MAG_EDIT = 1010
class PrintDemoTemplate(docview.DocTemplate):
    """Doc template that opts out of the shared-menu setup."""
    def _SetupSharedMenu_(self):
        pass
class PrintDemoView(docview.ScrollView):
    """Scroll view that draws nested coloured rectangles and supports
    printing them (with a user-selected magnification) and print preview."""
    def OnInitialUpdate(self):
        """Build the pens, set the scale, and hook the print commands."""
        ret = self._obj_.OnInitialUpdate()
        # COLORREF values are 0x00BBGGRR - hence the shifts below.
        self.colors = {'Black' : (0x00<<0) + (0x00<<8) + (0x00<<16),
                       'Red' : (0xff<<0) + (0x00<<8) + (0x00<<16),
                       'Green' : (0x00<<0) + (0xff<<8) + (0x00<<16),
                       'Blue' : (0x00<<0) + (0x00<<8) + (0xff<<16),
                       'Cyan' : (0x00<<0) + (0xff<<8) + (0xff<<16),
                       'Magenta': (0xff<<0) + (0x00<<8) + (0xff<<16),
                       'Yellow' : (0xff<<0) + (0xff<<8) + (0x00<<16),
                       }
        self.pens = {}
        for name, color in self.colors.items():
            self.pens[name] = win32ui.CreatePen(win32con.PS_SOLID,
                                                5, color)
        self.pen = None
        self.size = (128,128)
        self.SetScaleToFitSize(self.size)
        self.HookCommand(self.OnFilePrint, afxres.ID_FILE_PRINT)
        self.HookCommand(self.OnFilePrintPreview,
                         win32ui.ID_FILE_PRINT_PREVIEW)
        return ret
    def OnDraw(self, dc):
        """Draw concentric rectangles, cycling through the pen colours."""
        oldPen = None
        x,y = self.size
        delta = 2
        colors = self.colors.keys()
        colors.sort()
        colors = colors*2
        for color in colors:
            if oldPen is None:
                # Remember the DC's original pen so it can be restored.
                oldPen = dc.SelectObject(self.pens[color])
            else:
                dc.SelectObject(self.pens[color])
            dc.MoveTo(( delta, delta))
            dc.LineTo((x-delta, delta))
            dc.LineTo((x-delta, y-delta))
            dc.LineTo(( delta, y-delta))
            dc.LineTo(( delta, delta))
            delta = delta + 4
            if x-delta <= 0 or y-delta <= 0:
                break
        dc.SelectObject(oldPen)
    def OnPrepareDC (self, dc, pInfo):
        """When printing, scale the output by the dialog's magnification."""
        if dc.IsPrinting():
            mag = self.prtDlg['mag']
            dc.SetMapMode(win32con.MM_ANISOTROPIC);
            dc.SetWindowOrg((0, 0))
            dc.SetWindowExt((1, 1))
            dc.SetViewportOrg((0, 0))
            dc.SetViewportExt((mag, mag))
    def OnPreparePrinting(self, pInfo):
        """Install our custom print dialog and restrict to one page."""
        flags = (win32ui.PD_USEDEVMODECOPIES|
                 win32ui.PD_PAGENUMS|
                 win32ui.PD_NOPAGENUMS|
                 win32ui.PD_NOSELECTION)
        self.prtDlg = ImagePrintDialog(pInfo, PRINTDLGORD, flags)
        pInfo.SetPrintDialog(self.prtDlg)
        pInfo.SetMinPage(1)
        pInfo.SetMaxPage(1)
        pInfo.SetFromPage(1)
        pInfo.SetToPage(1)
        ret = self.DoPreparePrinting(pInfo)
        return ret
    def OnBeginPrinting(self, dc, pInfo):
        return self._obj_.OnBeginPrinting(dc, pInfo)
    def OnEndPrinting(self, dc, pInfo):
        # Drop the dialog created in OnPreparePrinting.
        del self.prtDlg
        return self._obj_.OnEndPrinting(dc, pInfo)
    def OnFilePrintPreview(self, *arg):
        self._obj_.OnFilePrintPreview()
    def OnFilePrint(self, *arg):
        self._obj_.OnFilePrint()
    def OnPrint(self, dc, pInfo):
        """Print one page: title, a rule, the drawing, then a footer."""
        doc = self.GetDocument()
        metrics = dc.GetTextMetrics()
        cxChar = metrics['tmAveCharWidth']
        cyChar = metrics['tmHeight']
        left, top, right, bottom = pInfo.GetDraw()
        dc.TextOut(0, 2*cyChar, doc.GetTitle())
        top = top + (7*cyChar)/2
        dc.MoveTo(left, top)
        dc.LineTo(right, top)
        top = top + cyChar
        # this seems to have not effect...
        # get what I want with the dc.SetWindowOrg calls
        pInfo.SetDraw((left, top, right, bottom))
        dc.SetWindowOrg((0, -top))
        self.OnDraw(dc)
        dc.SetTextAlign(win32con.TA_LEFT|win32con.TA_BOTTOM)
        rect = self.GetWindowRect()
        rect = self.ScreenToClient(rect)
        height = (rect[3]-rect[1])
        dc.SetWindowOrg((0, -(top+height+cyChar)))
        dc.MoveTo(left, 0)
        dc.LineTo(right, 0)
        x = 0
        y = (3*cyChar)/2
        dc.TextOut(x, y, doc.GetTitle())
        y = y + cyChar
class PrintDemoApp(app.CApp):
    """Stand-alone application wrapper: creates the template, main frame
    and one document titled 'Custom Print Document'."""
    def __init__(self):
        app.CApp.__init__(self)
    def InitInstance(self):
        template = PrintDemoTemplate(None, None,
                                     None, PrintDemoView)
        self.AddDocTemplate(template)
        self._obj_.InitMDIInstance()
        self.LoadMainFrame()
        doc = template.OpenDocumentFile(None)
        doc.SetTitle('Custom Print Document')
class ImagePrintDialog(dialog.PrintDialog):
    """Print dialog with an extra 'magnification' edit control.

    The magnification is persisted via the win32ui profile (registry)
    under *sectionPos* so it survives between runs, and is exposed to
    the view as self['mag'].
    """
    sectionPos = 'Image Print Demo'
    def __init__(self, pInfo, dlgID, flags=win32ui.PD_USEDEVMODECOPIES):
        dialog.PrintDialog.__init__(self, pInfo, dlgID, flags=flags)
        mag = win32ui.GetProfileVal(self.sectionPos,
                                    'Document Magnification',
                                    0)
        if mag <= 0:
            # No (or invalid) saved value - fall back to a sane default
            # and persist it for next time.
            mag = 2
            win32ui.WriteProfileVal(self.sectionPos,
                                    'Document Magnification',
                                    mag)
        self['mag'] = mag
    def OnInitDialog(self):
        self.magCtl = self.GetDlgItem(IDC_PRINT_MAG_EDIT)
        # str() replaces the py2-only backtick repr - identical text for ints.
        self.magCtl.SetWindowText(str(self['mag']))
        return dialog.PrintDialog.OnInitDialog(self)
    def OnOK(self):
        dialog.PrintDialog.OnOK(self)
        strMag = self.magCtl.GetWindowText()
        try:
            # BUG FIX: this used string.atoi(), but the 'string' module was
            # never imported, so the resulting NameError was silently
            # swallowed by a bare 'except' and the user's edited value was
            # always discarded.  int() is equivalent and always available.
            self['mag'] = int(strMag)
        except ValueError:
            # Non-numeric input: keep the previous magnification.
            pass
        win32ui.WriteProfileVal(self.sectionPos,
                                'Document Magnification',
                                self['mag'])
if __name__=='__main__':
    # Running under Pythonwin
    def test():
        # Reuse Pythonwin's running app: just register the template and
        # open a document in it.
        template = PrintDemoTemplate(None, None,
                                     None, PrintDemoView)
        template.OpenDocumentFile(None)
    test()
else:
    # Imported as a stand-alone app by the framework bootstrap.
    # NOTE(review): this rebinds the imported 'app' module name to the
    # app instance - apparently the framework's expected convention.
    app = PrintDemoApp()
| {
"repo_name": "espadrine/opera",
"path": "chromium/src/third_party/python_26/Lib/site-packages/pythonwin/pywin/Demos/app/customprint.py",
"copies": "17",
"size": "6401",
"license": "bsd-3-clause",
"hash": 464001732823876400,
"line_mean": 31.9948453608,
"line_max": 75,
"alpha_frac": 0.5366349008,
"autogenerated": false,
"ratio": 3.5423353624792475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# A Demo of a service that takes advantage of the additional notifications
# available in later Windows versions.
# Note that all output is written as event log entries - so you must install
# and start the service, then look at the event log for messages as events
# are generated.
# Events are generated for USB device insertion and removal, power state
# changes and hardware profile events - so try putting your computer to
# sleep and waking it, inserting a memory stick, etc then check the event log
import win32serviceutil, win32service
import win32event
import servicemanager
# Most event notification support lives around win32gui
import win32gui, win32gui_struct, win32con
GUID_DEVINTERFACE_USB_DEVICE = "{A5DCBF10-6530-11D2-901F-00C04FB951ED}"
class EventDemoService(win32serviceutil.ServiceFramework):
    """Demo service that logs extended service notifications (device,
    power, hardware-profile and session events) to the event log."""
    _svc_name_ = "PyServiceEventDemo"
    _svc_display_name_ = "Python Service Event Demo"
    _svc_description_ = "Demonstrates a Python service which takes advantage of the extra notifications"
    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)
        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
        # register for a device notification - we pass our service handle
        # instead of a window handle.
        filter = win32gui_struct.PackDEV_BROADCAST_DEVICEINTERFACE(
                                        GUID_DEVINTERFACE_USB_DEVICE)
        self.hdn = win32gui.RegisterDeviceNotification(self.ssh, filter,
                                win32con.DEVICE_NOTIFY_SERVICE_HANDLE)
    # Override the base class so we can accept additional events.
    def GetAcceptedControls(self):
        # say we accept them all.
        # NOTE(review): SERVICE_CONTROL_DEVICEEVENT is a control code
        # mixed in among SERVICE_ACCEPT_* flags here - confirm against
        # the pywin32 serviceEvents sample / SERVICE_STATUS docs.
        rc = win32serviceutil.ServiceFramework.GetAcceptedControls(self)
        rc |= win32service.SERVICE_ACCEPT_PARAMCHANGE \
              | win32service.SERVICE_ACCEPT_NETBINDCHANGE \
              | win32service.SERVICE_CONTROL_DEVICEEVENT \
              | win32service.SERVICE_ACCEPT_HARDWAREPROFILECHANGE \
              | win32service.SERVICE_ACCEPT_POWEREVENT \
              | win32service.SERVICE_ACCEPT_SESSIONCHANGE
        return rc
    # All extra events are sent via SvcOtherEx (SvcOther remains as a
    # function taking only the first args for backwards compat)
    def SvcOtherEx(self, control, event_type, data):
        """Format the extended notification and write it to the event log."""
        # This is only showing a few of the extra events - see the MSDN
        # docs for "HandlerEx callback" for more info.
        if control == win32service.SERVICE_CONTROL_DEVICEEVENT:
            info = win32gui_struct.UnpackDEV_BROADCAST(data)
            msg = "A device event occurred: %x - %s" % (event_type, info)
        elif control == win32service.SERVICE_CONTROL_HARDWAREPROFILECHANGE:
            msg = "A hardware profile changed: type=%s, data=%s" % (event_type, data)
        elif control == win32service.SERVICE_CONTROL_POWEREVENT:
            msg = "A power event: setting %s" % data
        elif control == win32service.SERVICE_CONTROL_SESSIONCHANGE:
            # data is a single elt tuple, but this could potentially grow
            # in the future if the win32 struct does
            msg = "Session event: type=%s, data=%s" % (event_type, data)
        else:
            msg = "Other event: code=%d, type=%s, data=%s" \
                  % (control, event_type, data)
        servicemanager.LogMsg(
                servicemanager.EVENTLOG_INFORMATION_TYPE,
                0xF000, #  generic message
                (msg, '')
                )
    def SvcStop(self):
        """Signal SvcDoRun to exit."""
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        win32event.SetEvent(self.hWaitStop)
    def SvcDoRun(self):
        # do nothing at all - just wait to be stopped
        win32event.WaitForSingleObject(self.hWaitStop, win32event.INFINITE)
        # Write a stop message.
        servicemanager.LogMsg(
                servicemanager.EVENTLOG_INFORMATION_TYPE,
                servicemanager.PYS_SERVICE_STOPPED,
                (self._svc_name_, '')
                )
if __name__=='__main__':
    # Standard pywin32 service command line (install/start/stop/remove...).
    win32serviceutil.HandleCommandLine(EventDemoService)
| {
"repo_name": "zhanqxun/cv_fish",
"path": "win32/Demos/service/serviceEvents.py",
"copies": "4",
"size": "4215",
"license": "apache-2.0",
"hash": 5142379637609959000,
"line_mean": 45.8977272727,
"line_max": 104,
"alpha_frac": 0.6491103203,
"autogenerated": false,
"ratio": 4.1486220472440944,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6797732367544095,
"avg_score": null,
"num_lines": null
} |
# A demo of basic SSPI authentication.
# There is a 'client' context and a 'server' context - typically these will
# be on different machines (here they are in the same process, but the same
# concepts apply)
import sspi
import win32security, sspicon, win32api
def lookup_ret_code(err):
    """Map an SSPI status value to its symbolic SEC_I_*/SEC_E_* name.

    Returns the constant's name as a string, or None if *err* matches no
    known SSPI status constant in sspicon.
    """
    # Iterate the module dict directly - wrapping it in list() was an
    # unnecessary copy; startswith() with a tuple replaces the slice compare.
    for name, value in sspicon.__dict__.items():
        if name.startswith(('SEC_I_', 'SEC_E_')) and value == err:
            return name
    return None
"""
pkg_name='Kerberos'
sspiclient=SSPIClient(pkg_name, win32api.GetUserName(), ## target spn is ourself
None, None, ## use none for client name and authentication information for current context
## u'username', (u'username',u'domain.com',u'passwd'),
sspicon.ISC_REQ_INTEGRITY|sspicon.ISC_REQ_SEQUENCE_DETECT|sspicon.ISC_REQ_REPLAY_DETECT| \
sspicon.ISC_REQ_DELEGATE|sspicon.ISC_REQ_CONFIDENTIALITY|sspicon.ISC_REQ_USE_SESSION_KEY)
sspiserver=SSPIServer(pkg_name, None,
sspicon.ASC_REQ_INTEGRITY|sspicon.ASC_REQ_SEQUENCE_DETECT|sspicon.ASC_REQ_REPLAY_DETECT| \
sspicon.ASC_REQ_DELEGATE|sspicon.ASC_REQ_CONFIDENTIALITY|sspicon.ASC_REQ_STREAM|sspicon.ASC_REQ_USE_SESSION_KEY)
"""
pkg_name='NTLM'

# Setup the 2 contexts.
sspiclient=sspi.ClientAuth(pkg_name)
sspiserver=sspi.ServerAuth(pkg_name)

# Perform the authentication dance, each loop exchanging more information
# on the way to completing authentication.
sec_buffer=None
while 1:
    # Each side consumes the token produced by the other on the last pass.
    err, sec_buffer = sspiclient.authorize(sec_buffer)
    err, sec_buffer = sspiserver.authorize(sec_buffer)
    if err==0:
        break

# The server can now impersonate the client. In this demo the 2 users will
# always be the same.
sspiserver.ctxt.ImpersonateSecurityContext()
print('Impersonated user: ',win32api.GetUserNameEx(win32api.NameSamCompatible))
sspiserver.ctxt.RevertSecurityContext()
print('Reverted to self: ',win32api.GetUserName())

# Ask the package for the buffer sizes this context needs.
pkg_size_info=sspiclient.ctxt.QueryContextAttributes(sspicon.SECPKG_ATTR_SIZES)
# Now sign some data
msg='some data to be encrypted ......'

sigsize=pkg_size_info['MaxSignature']
# A signature needs a DATA buffer plus a TOKEN buffer for the signature bytes.
sigbuf=win32security.PySecBufferDescType()
sigbuf.append(win32security.PySecBufferType(len(msg), sspicon.SECBUFFER_DATA))
sigbuf.append(win32security.PySecBufferType(sigsize, sspicon.SECBUFFER_TOKEN))
sigbuf[0].Buffer=msg
sspiclient.ctxt.MakeSignature(0,sigbuf,1)
sspiserver.ctxt.VerifySignature(sigbuf,1)

# And finally encrypt some.
trailersize=pkg_size_info['SecurityTrailer']
encbuf=win32security.PySecBufferDescType()
encbuf.append(win32security.PySecBufferType(len(msg), sspicon.SECBUFFER_DATA))
encbuf.append(win32security.PySecBufferType(trailersize, sspicon.SECBUFFER_TOKEN))
encbuf[0].Buffer=msg
# EncryptMessage replaces the DATA buffer contents in place.
sspiclient.ctxt.EncryptMessage(0,encbuf,1)
print('Encrypted data:',repr(encbuf[0].Buffer))
sspiserver.ctxt.DecryptMessage(encbuf,1)
print('Unencrypted data:',encbuf[0].Buffer)
| {
"repo_name": "zooba/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32/Demos/security/sspi/simple_auth.py",
"copies": "6",
"size": "2806",
"license": "apache-2.0",
"hash": 2881978227339506700,
"line_mean": 38.5211267606,
"line_max": 120,
"alpha_frac": 0.7647897363,
"autogenerated": false,
"ratio": 2.941299790356394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.026333638168413855,
"num_lines": 71
} |
# A demo of gradients through scipy.integrate.odeint,
# estimating the dynamics of a system given a trajectory.
from __future__ import absolute_import
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as npo
import autograd.numpy as np
from autograd import grad
from autograd.scipy.integrate import odeint
from autograd.builtins import tuple
from autograd.misc.optimizers import adam
import autograd.numpy.random as npr
N = 30  # Dataset size
D = 2  # Data dimension
max_T = 1.5  # time horizon of the training trajectory
# Two-dimensional damped oscillator
def func(y, t0, A):
    """True dynamics: dy/dt = (y ** 3) . A for the damped oscillator."""
    cubed = np.power(y, 3)
    return np.dot(cubed, A)
def nn_predict(inputs, t, params):
    """Run a ReLU MLP over *inputs*; the final layer is left linear.

    params is a list of (weights, bias) pairs; t is ignored (odeint API).
    """
    layer_in = inputs
    for weights, bias in params:
        pre_act = np.dot(layer_in, weights) + bias
        layer_in = np.maximum(0, pre_act)
    # Return the last pre-activation, i.e. no ReLU on the output layer.
    return pre_act
def init_nn_params(scale, layer_sizes, rs=None):
    """Build a list of (weights, biases) tuples, one for each layer.

    scale: std-dev multiplier for the Gaussian initialisation.
    layer_sizes: sizes of consecutive layers, e.g. [D, 150, D].
    rs: optional RandomState.  Defaults to a fresh RandomState(0) per call.
        (The old default ``rs=npr.RandomState(0)`` was evaluated once at
        def time and mutated by every call - the mutable-default-argument
        pitfall - so repeated calls silently produced different params.)
    """
    if rs is None:
        rs = npr.RandomState(0)
    return [(rs.randn(insize, outsize) * scale,   # weight matrix
             rs.randn(outsize) * scale)           # bias vector
            for insize, outsize in zip(layer_sizes[:-1], layer_sizes[1:])]
# Define neural ODE model.
def ode_pred(params, y0, t):
return odeint(nn_predict, y0, t, tuple((params,)), rtol=0.01)
def L1_loss(pred, targets):
    """Mean absolute error between predictions and targets."""
    diff = pred - targets
    return np.abs(diff).mean()
if __name__ == '__main__':

    # Generate data from true dynamics.
    true_y0 = np.array([2., 0.]).T
    t = np.linspace(0., max_T, N)
    true_A = np.array([[-0.1, 2.0], [-2.0, -0.1]])
    # Ground-truth trajectory from the damped oscillator defined in func().
    true_y = odeint(func, true_y0, t, args=(true_A,))

    def train_loss(params, iter):
        # L1 distance between the neural-ODE rollout and the true trajectory.
        pred = ode_pred(params, true_y0, t)
        return L1_loss(pred, true_y)

    # Set up figure
    fig = plt.figure(figsize=(12, 4), facecolor='white')
    ax_traj = fig.add_subplot(131, frameon=False)
    ax_phase = fig.add_subplot(132, frameon=False)
    ax_vecfield = fig.add_subplot(133, frameon=False)
    plt.show(block=False)

    # Plots data and learned dynamics.
    def callback(params, iter, g):
        pred = ode_pred(params, true_y0, t)
        print("Iteration {:d} train loss {:.6f}".format(
            iter, L1_loss(pred, true_y)))

        # Trajectory panel: true (green/solid) vs predicted (blue/dashed).
        ax_traj.cla()
        ax_traj.set_title('Trajectories')
        ax_traj.set_xlabel('t')
        ax_traj.set_ylabel('x,y')
        ax_traj.plot(t, true_y[:, 0], '-', t, true_y[:, 1], 'g-')
        ax_traj.plot(t, pred[:, 0], '--', t, pred[:, 1], 'b--')
        ax_traj.set_xlim(t.min(), t.max())
        ax_traj.set_ylim(-2, 2)
        ax_traj.xaxis.set_ticklabels([])
        ax_traj.yaxis.set_ticklabels([])
        ax_traj.legend()

        # Phase-portrait panel: y against x.
        ax_phase.cla()
        ax_phase.set_title('Phase Portrait')
        ax_phase.set_xlabel('x')
        ax_phase.set_ylabel('y')
        ax_phase.plot(true_y[:, 0], true_y[:, 1], 'g-')
        ax_phase.plot(pred[:, 0], pred[:, 1], 'b--')
        ax_phase.set_xlim(-2, 2)
        ax_phase.set_ylim(-2, 2)
        ax_phase.xaxis.set_ticklabels([])
        ax_phase.yaxis.set_ticklabels([])

        ax_vecfield.cla()
        ax_vecfield.set_title('Learned Vector Field')
        ax_vecfield.set_xlabel('x')
        ax_vecfield.set_ylabel('y')
        ax_vecfield.xaxis.set_ticklabels([])
        ax_vecfield.yaxis.set_ticklabels([])

        # vector field plot
        # Evaluate the learned dynamics on a 21x21 grid and normalise each
        # vector so the streamplot shows direction only.
        y, x = npo.mgrid[-2:2:21j, -2:2:21j]
        dydt = nn_predict(np.stack([x, y], -1).reshape(21 * 21, 2), 0,
                          params).reshape(-1, 2)
        mag = np.sqrt(dydt[:, 0]**2 + dydt[:, 1]**2).reshape(-1, 1)
        dydt = (dydt / mag)
        dydt = dydt.reshape(21, 21, 2)

        ax_vecfield.streamplot(x, y, dydt[:, :, 0], dydt[:, :, 1], color="black")
        ax_vecfield.set_xlim(-2, 2)
        ax_vecfield.set_ylim(-2, 2)

        fig.tight_layout()
        plt.draw()
        plt.pause(0.001)

    # Train neural net dynamics to match data.
    init_params = init_nn_params(0.1, layer_sizes=[D, 150, D])
    optimized_params = adam(grad(train_loss), init_params,
                            num_iters=1000, callback=callback)
| {
"repo_name": "hips/autograd",
"path": "examples/ode_net.py",
"copies": "3",
"size": "4001",
"license": "mit",
"hash": -1364545704255379000,
"line_mean": 31.5284552846,
"line_max": 81,
"alpha_frac": 0.583604099,
"autogenerated": false,
"ratio": 3.019622641509434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5103226740509434,
"avg_score": null,
"num_lines": null
} |
# A demo of the win32rcparser module and using win32gui
import win32gui
import win32api
import win32con
import win32rcparser
import commctrl
import sys, os
# Locate the sample .rc file shipped in pywin32's test directory.
this_dir = os.path.abspath(os.path.dirname(__file__))
g_rcname = os.path.abspath(
    os.path.join( this_dir, "..", "test", "win32rcparser", "test.rc"))
if not os.path.isfile(g_rcname):
    raise RuntimeError("Can't locate test.rc (should be at '%s')" % (g_rcname,))
class DemoWindow:
    """Wraps a parsed dialog template and displays it via win32gui."""

    def __init__(self, dlg_template):
        # A dialog definition produced by win32rcparser.Parse().
        self.dlg_template = dlg_template

    def CreateWindow(self):
        # Modeless creation.
        self._DoCreate(win32gui.CreateDialogIndirect)

    def DoModal(self):
        return self._DoCreate(win32gui.DialogBoxIndirect)

    def _DoCreate(self, fn):
        # Route the window messages we care about to the handlers below.
        handlers = {
            win32con.WM_INITDIALOG: self.OnInitDialog,
            win32con.WM_CLOSE: self.OnClose,
            win32con.WM_DESTROY: self.OnDestroy,
            win32con.WM_COMMAND: self.OnCommand,
        }
        return fn(0, self.dlg_template, 0, handlers)

    def OnInitDialog(self, hwnd, msg, wparam, lparam):
        self.hwnd = hwnd
        # Centre the dialog on the desktop.
        desktop = win32gui.GetDesktopWindow()
        left, top, right, bottom = win32gui.GetWindowRect(self.hwnd)
        dt_left, dt_top, dt_right, dt_bottom = win32gui.GetWindowRect(desktop)
        centre_x, centre_y = win32gui.ClientToScreen(
            desktop, ((dt_right - dt_left) // 2, (dt_bottom - dt_top) // 2))
        win32gui.MoveWindow(hwnd, centre_x - (right // 2),
                            centre_y - (bottom // 2),
                            right - left, bottom - top, 0)

    def OnCommand(self, hwnd, msg, wparam, lparam):
        # Only OK/Cancel are handled - either one closes the dialog.
        ctrl_id = win32api.LOWORD(wparam)
        if ctrl_id in (win32con.IDOK, win32con.IDCANCEL):
            win32gui.EndDialog(hwnd, ctrl_id)

    def OnClose(self, hwnd, msg, wparam, lparam):
        win32gui.EndDialog(hwnd, 0)

    def OnDestroy(self, hwnd, msg, wparam, lparam):
        pass
def DemoModal():
    """Parse the .rc file and show each dialog it defines, modally."""
    # Load the .rc file.
    resources = win32rcparser.Parse(g_rcname)
    for id, ddef in resources.dialogs.iteritems():
        print "Displaying dialog", id
        w=DemoWindow(ddef)
        w.DoModal()
if __name__=='__main__':
    flags = 0
    # OR together every common-control class flag so any control used by the
    # test dialogs is registered before they are created.
    for flag in """ICC_DATE_CLASSES ICC_ANIMATE_CLASS ICC_ANIMATE_CLASS
                   ICC_BAR_CLASSES ICC_COOL_CLASSES ICC_DATE_CLASSES
                   ICC_HOTKEY_CLASS ICC_INTERNET_CLASSES ICC_LISTVIEW_CLASSES
                   ICC_PAGESCROLLER_CLASS ICC_PROGRESS_CLASS ICC_TAB_CLASSES
                   ICC_TREEVIEW_CLASSES ICC_UPDOWN_CLASS ICC_USEREX_CLASSES
                   ICC_WIN95_CLASSES """.split():
        flags |= getattr(commctrl, flag)
    win32gui.InitCommonControlsEx(flags)
    # Need to do this go get rich-edit working.
    win32api.LoadLibrary("riched20.dll")
    DemoModal()
| {
"repo_name": "DavidGuben/rcbplayspokemon",
"path": "app/pywin32-220/win32/Demos/win32rcparser_demo.py",
"copies": "9",
"size": "2749",
"license": "mit",
"hash": 5554341244143640000,
"line_mean": 34.7012987013,
"line_max": 98,
"alpha_frac": 0.6329574391,
"autogenerated": false,
"ratio": 3.1274175199089873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005816915042368611,
"num_lines": 77
} |
# A demo of the win32rcparser module and using win32gui
import win32gui
import win32api
import win32con
import win32rcparser
import commctrl
import sys, os
# We use the .rc file in our 'test' directory.
# Pre-2.3 Pythons did not define __file__ for directly-run scripts.
try:
    __file__
except NameError: # pre 2.3
    __file__ = sys.argv[0]

this_dir = os.path.abspath(os.path.dirname(__file__))
g_rcname = os.path.abspath(
    os.path.join( this_dir, "..", "test", "win32rcparser", "test.rc"))
if not os.path.isfile(g_rcname):
    raise RuntimeError, "Can't locate test.rc (should be at '%s')" % (g_rcname,)
class DemoWindow:
    """Displays a dialog template parsed by win32rcparser (Python 2 copy)."""
    def __init__(self, dlg_template):
        # dlg_template: a dialog definition from win32rcparser.Parse().
        self.dlg_template = dlg_template
    def CreateWindow(self):
        # Modeless creation.
        self._DoCreate(win32gui.CreateDialogIndirect)
    def DoModal(self):
        return self._DoCreate(win32gui.DialogBoxIndirect)
    def _DoCreate(self, fn):
        # Map window messages to the handler methods below.
        message_map = {
            win32con.WM_INITDIALOG: self.OnInitDialog,
            win32con.WM_CLOSE: self.OnClose,
            win32con.WM_DESTROY: self.OnDestroy,
            win32con.WM_COMMAND: self.OnCommand,
        }
        return fn(0, self.dlg_template, 0, message_map)
    def OnInitDialog(self, hwnd, msg, wparam, lparam):
        self.hwnd = hwnd
        # centre the dialog
        desktop = win32gui.GetDesktopWindow()
        l,t,r,b = win32gui.GetWindowRect(self.hwnd)
        dt_l, dt_t, dt_r, dt_b = win32gui.GetWindowRect(desktop)
        # Python 2 "/" on ints floors, matching the "//" used in newer copies.
        centre_x, centre_y = win32gui.ClientToScreen( desktop, ( (dt_r-dt_l)/2, (dt_b-dt_t)/2) )
        # NOTE(review): r and b are screen coords of the dialog's right/bottom
        # edges, not its width/height - presumably acceptable for a demo.
        win32gui.MoveWindow(hwnd, centre_x-(r/2), centre_y-(b/2), r-l, b-t, 0)
    def OnCommand(self, hwnd, msg, wparam, lparam):
        # Needed to make OK/Cancel work - no other controls are handled.
        id = win32api.LOWORD(wparam)
        if id in [win32con.IDOK, win32con.IDCANCEL]:
            win32gui.EndDialog(hwnd, id)
    def OnClose(self, hwnd, msg, wparam, lparam):
        win32gui.EndDialog(hwnd, 0)
    def OnDestroy(self, hwnd, msg, wparam, lparam):
        pass
def DemoModal():
    """Parse the .rc file and show each dialog it defines, modally."""
    # Load the .rc file.
    resources = win32rcparser.Parse(g_rcname)
    for id, ddef in resources.dialogs.items():
        print "Displaying dialog", id
        w=DemoWindow(ddef)
        w.DoModal()
if __name__=='__main__':
    flags = 0
    # OR together every common-control class flag so any control used by the
    # test dialogs is registered before they are created.
    for flag in """ICC_DATE_CLASSES ICC_ANIMATE_CLASS ICC_ANIMATE_CLASS
                   ICC_BAR_CLASSES ICC_COOL_CLASSES ICC_DATE_CLASSES
                   ICC_HOTKEY_CLASS ICC_INTERNET_CLASSES ICC_LISTVIEW_CLASSES
                   ICC_PAGESCROLLER_CLASS ICC_PROGRESS_CLASS ICC_TAB_CLASSES
                   ICC_TREEVIEW_CLASSES ICC_UPDOWN_CLASS ICC_USEREX_CLASSES
                   ICC_WIN95_CLASSES """.split():
        flags |= getattr(commctrl, flag)
    win32gui.InitCommonControlsEx(flags)
    # Need to do this go get rich-edit working.
    win32api.LoadLibrary("riched20.dll")
    DemoModal()
| {
"repo_name": "pekeler/arangodb",
"path": "3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/win32/Demos/win32rcparser_demo.py",
"copies": "17",
"size": "2862",
"license": "apache-2.0",
"hash": 951010072097081200,
"line_mean": 33.4819277108,
"line_max": 96,
"alpha_frac": 0.6313766597,
"autogenerated": false,
"ratio": 3.1007583965330445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# A demo of the win32rcparser module and using win32gui
import win32gui
import win32api
import win32con
import win32rcparser
import commctrl
import sys, os
# Locate the sample .rc file shipped in pywin32's test directory.
this_dir = os.path.abspath(os.path.dirname(__file__))
g_rcname = os.path.abspath(
    os.path.join( this_dir, "..", "test", "win32rcparser", "test.rc"))
if not os.path.isfile(g_rcname):
    raise RuntimeError("Can't locate test.rc (should be at '%s')" % (g_rcname,))
class DemoWindow:
    """Displays a dialog template parsed by win32rcparser."""
    def __init__(self, dlg_template):
        # dlg_template: a dialog definition from win32rcparser.Parse().
        self.dlg_template = dlg_template
    def CreateWindow(self):
        # Modeless creation.
        self._DoCreate(win32gui.CreateDialogIndirect)
    def DoModal(self):
        return self._DoCreate(win32gui.DialogBoxIndirect)
    def _DoCreate(self, fn):
        # Map window messages to the handler methods below.
        message_map = {
            win32con.WM_INITDIALOG: self.OnInitDialog,
            win32con.WM_CLOSE: self.OnClose,
            win32con.WM_DESTROY: self.OnDestroy,
            win32con.WM_COMMAND: self.OnCommand,
        }
        return fn(0, self.dlg_template, 0, message_map)
    def OnInitDialog(self, hwnd, msg, wparam, lparam):
        self.hwnd = hwnd
        # centre the dialog
        desktop = win32gui.GetDesktopWindow()
        l,t,r,b = win32gui.GetWindowRect(self.hwnd)
        dt_l, dt_t, dt_r, dt_b = win32gui.GetWindowRect(desktop)
        centre_x, centre_y = win32gui.ClientToScreen( desktop, ( (dt_r-dt_l)//2, (dt_b-dt_t)//2) )
        # NOTE(review): r and b are screen coords of the dialog's right/bottom
        # edges, not its width/height - presumably acceptable for a demo.
        win32gui.MoveWindow(hwnd, centre_x-(r//2), centre_y-(b//2), r-l, b-t, 0)
    def OnCommand(self, hwnd, msg, wparam, lparam):
        # Needed to make OK/Cancel work - no other controls are handled.
        id = win32api.LOWORD(wparam)
        if id in [win32con.IDOK, win32con.IDCANCEL]:
            win32gui.EndDialog(hwnd, id)
    def OnClose(self, hwnd, msg, wparam, lparam):
        win32gui.EndDialog(hwnd, 0)
    def OnDestroy(self, hwnd, msg, wparam, lparam):
        pass
def DemoModal():
    """Parse the .rc file and show each dialog it defines, modally."""
    # Load the .rc file.
    resources = win32rcparser.Parse(g_rcname)
    for id, ddef in resources.dialogs.iteritems():
        print "Displaying dialog", id
        w=DemoWindow(ddef)
        w.DoModal()
if __name__=='__main__':
    flags = 0
    # OR together every common-control class flag so any control used by the
    # test dialogs is registered before they are created.
    for flag in """ICC_DATE_CLASSES ICC_ANIMATE_CLASS ICC_ANIMATE_CLASS
                   ICC_BAR_CLASSES ICC_COOL_CLASSES ICC_DATE_CLASSES
                   ICC_HOTKEY_CLASS ICC_INTERNET_CLASSES ICC_LISTVIEW_CLASSES
                   ICC_PAGESCROLLER_CLASS ICC_PROGRESS_CLASS ICC_TAB_CLASSES
                   ICC_TREEVIEW_CLASSES ICC_UPDOWN_CLASS ICC_USEREX_CLASSES
                   ICC_WIN95_CLASSES """.split():
        flags |= getattr(commctrl, flag)
    win32gui.InitCommonControlsEx(flags)
    # Need to do this go get rich-edit working.
    win32api.LoadLibrary("riched20.dll")
    DemoModal()
| {
"repo_name": "ntuecon/server",
"path": "pyenv/Lib/site-packages/win32/Demos/win32rcparser_demo.py",
"copies": "2",
"size": "2826",
"license": "bsd-3-clause",
"hash": 4633528497174179000,
"line_mean": 34.7012987013,
"line_max": 98,
"alpha_frac": 0.6157112527,
"autogenerated": false,
"ratio": 3.1752808988764043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9763005792947964,
"avg_score": 0.0055972717256881415,
"num_lines": 77
} |
# A demo of using the RAS API from Python
import sys
import string
import win32ras
# The error raised if we can not
class ConnectionError(Exception):
    """Raised when a RAS connection cannot be established or resolved."""
def Connect(rasEntryName, numRetries = 5):
"""Make a connection to the specified RAS entry.
Returns a tuple of (bool, handle) on success.
- bool is 1 if a new connection was established, or 0 is a connection already existed.
- handle is a RAS HANDLE that can be passed to Disconnect() to end the connection.
Raises a ConnectionError if the connection could not be established.
"""
assert numRetries > 0
for info in win32ras.EnumConnections():
if string.lower(info[1])==string.lower(rasEntryName):
print "Already connected to", rasEntryName
return 0, info[0]
dial_params, have_pw = win32ras.GetEntryDialParams(None, rasEntryName)
if not have_pw:
print "Error: The password is not saved for this connection"
print "Please connect manually selecting the 'save password' option and try again"
sys.exit(1)
print "Connecting to", rasEntryName, "..."
retryCount = numRetries
while retryCount > 0:
rasHandle, errCode = win32ras.Dial(None, None, dial_params, None)
if win32ras.IsHandleValid(rasHandle):
bValid = 1
break
print "Retrying..."
win32api.Sleep(5000)
retryCount = retryCount - 1
if errCode:
raise ConnectionError(errCode, win32ras.GetErrorString(errCode))
return 1, rasHandle
def Disconnect(handle):
    """Hang up a RAS connection given a handle or a phonebook entry name."""
    if type(handle)==type(''): # have they passed a connection name?
        # Resolve the name (case-insensitively) against active connections.
        for info in win32ras.EnumConnections():
            if string.lower(info[1])==string.lower(handle):
                handle = info[0]
                break
        else:
            raise ConnectionError(0, "Not connected to entry '%s'" % handle)
    win32ras.HangUp(handle)
usage="""rasutil.py - Utilities for using RAS
Usage:
rasutil [-r retryCount] [-c rasname] [-d rasname]
-r retryCount - Number of times to retry the RAS connection
-c rasname - Connect to the phonebook entry specified by rasname
-d rasname - Disconnect from the phonebook entry specified by rasname
"""
def Usage(why):
    """Print the failure reason plus the usage text, then exit(1)."""
    print why
    print usage
    sys.exit(1)
if __name__=='__main__':
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], "r:c:d:")
except getopt.error, why:
Usage(why)
retries = 5
if len(args) <> 0:
Usage("Invalid argument")
for opt, val in opts:
if opt=='-c':
Connect(val, retries)
if opt=='-d':
Disconnect(val)
if opt=='-r':
retries = int(val)
| {
"repo_name": "lafayette/JBTT",
"path": "framework/python/Lib/site-packages/win32/scripts/rasutil.py",
"copies": "21",
"size": "2417",
"license": "mit",
"hash": 8349554509861688000,
"line_mean": 26.4659090909,
"line_max": 87,
"alpha_frac": 0.7103847745,
"autogenerated": false,
"ratio": 3.1471354166666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# A demo of using the RAS API from Python
import sys
import win32ras
# The error raised if we can not
class ConnectionError(Exception):
    """Raised when a RAS connection cannot be established or resolved."""
    pass
def Connect(rasEntryName, numRetries = 5):
    """Make a connection to the specified RAS entry.

    Returns a tuple of (bool, handle) on success.
    - bool is 1 if a new connection was established, or 0 is a connection already existed.
    - handle is a RAS HANDLE that can be passed to Disconnect() to end the connection.

    Raises a ConnectionError if the connection could not be established.
    """
    # stdlib sleep: the original called win32api.Sleep(5000), but win32api is
    # never imported by this script, so every retry raised NameError.
    import time
    assert numRetries > 0
    # Reuse an existing connection to this entry if one is active.
    for info in win32ras.EnumConnections():
        if info[1].lower()==rasEntryName.lower():
            print("Already connected to", rasEntryName)
            return 0, info[0]

    dial_params, have_pw = win32ras.GetEntryDialParams(None, rasEntryName)
    if not have_pw:
        print("Error: The password is not saved for this connection")
        print("Please connect manually selecting the 'save password' option and try again")
        sys.exit(1)

    print("Connecting to", rasEntryName, "...")
    retryCount = numRetries
    while retryCount > 0:
        rasHandle, errCode = win32ras.Dial(None, None, dial_params, None)
        if win32ras.IsHandleValid(rasHandle):
            break
        print("Retrying...")
        time.sleep(5)  # was win32api.Sleep(5000) (milliseconds)
        retryCount = retryCount - 1
    if errCode:
        raise ConnectionError(errCode, win32ras.GetErrorString(errCode))
    return 1, rasHandle
def Disconnect(handle):
    """Hang up a RAS connection.

    *handle* may be a RAS handle, or a phonebook entry name which is
    resolved (case-insensitively) against the active connections.
    """
    if type(handle) is str:  # a phonebook entry name rather than a handle?
        wanted = handle.lower()
        for conn in win32ras.EnumConnections():
            if conn[1].lower() == wanted:
                handle = conn[0]
                break
        else:
            raise ConnectionError(0, "Not connected to entry '%s'" % handle)
    win32ras.HangUp(handle)
usage="""rasutil.py - Utilities for using RAS
Usage:
rasutil [-r retryCount] [-c rasname] [-d rasname]
-r retryCount - Number of times to retry the RAS connection
-c rasname - Connect to the phonebook entry specified by rasname
-d rasname - Disconnect from the phonebook entry specified by rasname
"""
def Usage(why):
    """Print the failure reason plus the usage text, then exit(1)."""
    for line in (why, usage):
        print(line)
    sys.exit(1)
if __name__=='__main__':
    # Parse and dispatch command-line options; see the usage string above.
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "r:c:d:")
    except getopt.error as why:
        Usage(why)
    retries = 5
    # No positional arguments are accepted - everything is via options.
    if len(args) != 0:
        Usage("Invalid argument")
    # Options are processed in command-line order, so -r only affects a -c
    # appearing after it.
    for opt, val in opts:
        if opt=='-c':
            Connect(val, retries)
        if opt=='-d':
            Disconnect(val)
        if opt=='-r':
            retries = int(val)
| {
"repo_name": "Microsoft/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32/scripts/rasutil.py",
"copies": "7",
"size": "2388",
"license": "apache-2.0",
"hash": -5255958321600893000,
"line_mean": 26.4482758621,
"line_max": 87,
"alpha_frac": 0.7047738693,
"autogenerated": false,
"ratio": 3.1338582677165356,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7338632137016534,
"avg_score": null,
"num_lines": null
} |
# A demo of using the RAS API from Python
import sys
import string
import win32ras
# The error raised if we can not
class ConnectionError(Exception):
    """Raised when a RAS connection cannot be established or resolved."""
    pass
def Connect(rasEntryName, numRetries = 5):
"""Make a connection to the specified RAS entry.
Returns a tuple of (bool, handle) on success.
- bool is 1 if a new connection was established, or 0 is a connection already existed.
- handle is a RAS HANDLE that can be passed to Disconnect() to end the connection.
Raises a ConnectionError if the connection could not be established.
"""
assert numRetries > 0
for info in win32ras.EnumConnections():
if string.lower(info[1])==string.lower(rasEntryName):
print "Already connected to", rasEntryName
return 0, info[0]
dial_params, have_pw = win32ras.GetEntryDialParams(None, rasEntryName)
if not have_pw:
print "Error: The password is not saved for this connection"
print "Please connect manually selecting the 'save password' option and try again"
sys.exit(1)
print "Connecting to", rasEntryName, "..."
retryCount = numRetries
while retryCount > 0:
rasHandle, errCode = win32ras.Dial(None, None, dial_params, None)
if win32ras.IsHandleValid(rasHandle):
bValid = 1
break
print "Retrying..."
win32api.Sleep(5000)
retryCount = retryCount - 1
if errCode:
raise ConnectionError(errCode, win32ras.GetErrorString(errCode))
return 1, rasHandle
def Disconnect(handle):
    """Hang up a RAS connection given a handle or a phonebook entry name."""
    if type(handle)==type(''): # have they passed a connection name?
        # Resolve the name (case-insensitively) against active connections.
        for info in win32ras.EnumConnections():
            if string.lower(info[1])==string.lower(handle):
                handle = info[0]
                break
        else:
            raise ConnectionError(0, "Not connected to entry '%s'" % handle)
    win32ras.HangUp(handle)
usage="""rasutil.py - Utilities for using RAS
Usage:
rasutil [-r retryCount] [-c rasname] [-d rasname]
-r retryCount - Number of times to retry the RAS connection
-c rasname - Connect to the phonebook entry specified by rasname
-d rasname - Disconnect from the phonebook entry specified by rasname
"""
def Usage(why):
    """Print the failure reason plus the usage text, then exit(1)."""
    print why
    print usage
    sys.exit(1)
if __name__=='__main__':
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], "r:c:d:")
except getopt.error, why:
Usage(why)
retries = 5
if len(args) <> 0:
Usage("Invalid argument")
for opt, val in opts:
if opt=='-c':
Connect(val, retries)
if opt=='-d':
Disconnect(val)
if opt=='-r':
retries = int(val)
| {
"repo_name": "chvrga/outdoor-explorer",
"path": "java/play-1.4.4/python/Lib/site-packages/win32/scripts/rasutil.py",
"copies": "4",
"size": "2505",
"license": "mit",
"hash": 5243727107563799000,
"line_mean": 26.4659090909,
"line_max": 87,
"alpha_frac": 0.6854291417,
"autogenerated": false,
"ratio": 3.2280927835051547,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5913521925205155,
"avg_score": null,
"num_lines": null
} |
# A demo of using the RAS API from Python
import sys
import win32ras
# The error raised if we can not
class ConnectionError(Exception):
    """Raised when a RAS connection cannot be established or resolved."""
    pass
def Connect(rasEntryName, numRetries = 5):
"""Make a connection to the specified RAS entry.
Returns a tuple of (bool, handle) on success.
- bool is 1 if a new connection was established, or 0 is a connection already existed.
- handle is a RAS HANDLE that can be passed to Disconnect() to end the connection.
Raises a ConnectionError if the connection could not be established.
"""
assert numRetries > 0
for info in win32ras.EnumConnections():
if info[1].lower()==rasEntryName.lower():
print "Already connected to", rasEntryName
return 0, info[0]
dial_params, have_pw = win32ras.GetEntryDialParams(None, rasEntryName)
if not have_pw:
print "Error: The password is not saved for this connection"
print "Please connect manually selecting the 'save password' option and try again"
sys.exit(1)
print "Connecting to", rasEntryName, "..."
retryCount = numRetries
while retryCount > 0:
rasHandle, errCode = win32ras.Dial(None, None, dial_params, None)
if win32ras.IsHandleValid(rasHandle):
bValid = 1
break
print "Retrying..."
win32api.Sleep(5000)
retryCount = retryCount - 1
if errCode:
raise ConnectionError(errCode, win32ras.GetErrorString(errCode))
return 1, rasHandle
def Disconnect(handle):
    """Hang up a RAS connection given a handle or a phonebook entry name."""
    if type(handle)==type(''): # have they passed a connection name?
        # Resolve the name (case-insensitively) against active connections.
        for info in win32ras.EnumConnections():
            if info[1].lower()==handle.lower():
                handle = info[0]
                break
        else:
            raise ConnectionError(0, "Not connected to entry '%s'" % handle)
    win32ras.HangUp(handle)
usage="""rasutil.py - Utilities for using RAS
Usage:
rasutil [-r retryCount] [-c rasname] [-d rasname]
-r retryCount - Number of times to retry the RAS connection
-c rasname - Connect to the phonebook entry specified by rasname
-d rasname - Disconnect from the phonebook entry specified by rasname
"""
def Usage(why):
    """Print the failure reason plus the usage text, then exit(1)."""
    print why
    print usage
    sys.exit(1)
if __name__=='__main__':
    # Parse and dispatch command-line options; see the usage string above.
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "r:c:d:")
    except getopt.error, why:
        Usage(why)
    retries = 5
    # No positional arguments are accepted - everything is via options.
    if len(args) != 0:
        Usage("Invalid argument")
    # Options are processed in command-line order, so -r only affects a -c
    # appearing after it.
    for opt, val in opts:
        if opt=='-c':
            Connect(val, retries)
        if opt=='-d':
            Disconnect(val)
        if opt=='-r':
            retries = int(val)
| {
"repo_name": "ntuecon/server",
"path": "pyenv/Lib/site-packages/win32/scripts/rasutil.py",
"copies": "4",
"size": "2466",
"license": "bsd-3-clause",
"hash": 1064099792854080600,
"line_mean": 26.3448275862,
"line_max": 87,
"alpha_frac": 0.6816707218,
"autogenerated": false,
"ratio": 3.2277486910994764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5909419412899476,
"avg_score": null,
"num_lines": null
} |
"""A demo of using win32net.NetValidatePasswordPolicy.
Example usage:
% NetValidatePasswordPolicy.py --password=foo change
which might return:
> Result of 'change' validation is 0: The operation completed successfully.
or depending on the policy:
> Result of 'change' validation is 2245: The password does not meet the
> password policy requirements. Check the minimum password length,
> password complexity and password history requirements.
Adding --user doesn't seem to change the output (even the PasswordLastSet seen
when '-f' is used doesn't depend on the username), but theoretically it will
also check the password history for the specified user.
% NetValidatePasswordPolicy.py auth
which always (with and without '-m') seems to return:
> Result of 'auth' validation is 2701: Password must change at next logon
"""
import sys
import win32api
import win32net, win32netcon
import optparse
from pprint import pprint
def main():
parser = optparse.OptionParser("%prog [options] add|change ...",
description="A win32net.NetValidatePasswordPolicy demo.")
parser.add_option("-u", "--username",
action="store",
help="The username to pass to the function (only for the "
"change command")
parser.add_option("-p", "--password",
action="store",
help="The clear-text password to pass to the function "
"(only for the 'change' command)")
parser.add_option("-m", "--password-matched",
action="store_false", default=True,
help="Used to specify the password does NOT match (ie, "
"uses False for the PasswordMatch/PasswordMatched "
"arg, both 'auth' and 'change' commands)")
parser.add_option("-s", "--server",
action="store",
help="The name of the server to execute the command on")
parser.add_option("-f", "--show_fields",
action="store_true", default=False,
help="Print the NET_VALIDATE_PERSISTED_FIELDS returned")
options, args = parser.parse_args()
if not args:
parser.error("You must supply an arg")
for arg in args:
if arg == "auth":
input = {"PasswordMatched": options.password_matched,
}
val_type = win32netcon.NetValidateAuthentication
elif arg == "change":
input = {"ClearPassword": options.password,
"PasswordMatch": options.password_matched,
"UserAccountName": options.username,
}
val_type = win32netcon.NetValidatePasswordChange
else:
parser.error("Invalid arg - must be 'auth' or 'change'")
try:
fields, status = win32net.NetValidatePasswordPolicy(options.server,
None, val_type, input)
except NotImplementedError:
print "NetValidatePasswordPolicy not implemented on this platform."
return 1
except win32net.error, exc:
print "NetValidatePasswordPolicy failed: ", exc
return 1
if options.show_fields:
print "NET_VALIDATE_PERSISTED_FIELDS fields:"
pprint(fields)
print "Result of %r validation is %d: %s" % \
(arg, status, win32api.FormatMessage(status).strip())
return 0
if __name__=='__main__':
    # Exit with main()'s return code.
    sys.exit(main())
| {
"repo_name": "aurelijusb/arangodb",
"path": "3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/win32/Demos/NetValidatePasswordPolicy.py",
"copies": "17",
"size": "3629",
"license": "apache-2.0",
"hash": 6831963372227602000,
"line_mean": 34.9306930693,
"line_max": 92,
"alpha_frac": 0.5905208046,
"autogenerated": false,
"ratio": 4.634738186462324,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A demo of using win32net.NetValidatePasswordPolicy.
Example usage:
% NetValidatePasswordPolicy.py --password=foo change
which might return:
> Result of 'change' validation is 0: The operation completed successfully.
or depending on the policy:
> Result of 'change' validation is 2245: The password does not meet the
> password policy requirements. Check the minimum password length,
> password complexity and password history requirements.
Adding --user doesn't seem to change the output (even the PasswordLastSet seen
when '-f' is used doesn't depend on the username), but theoretically it will
also check the password history for the specified user.
% NetValidatePasswordPolicy.py auth
which always (with and without '-m') seems to return:
> Result of 'auth' validation is 2701: Password must change at next logon
"""
import sys
import win32api
import win32net, win32netcon
import optparse
from pprint import pprint
def main():
    # Parse command-line options, then run one NetValidatePasswordPolicy
    # validation per positional command ('auth' or 'change').
    parser = optparse.OptionParser("%prog [options] auth|change ...",
                                   description="A win32net.NetValidatePasswordPolicy demo.")
    parser.add_option("-u", "--username",
                      action="store",
                      help="The username to pass to the function (only for the "
                      "change command")
    parser.add_option("-p", "--password",
                      action="store",
                      help="The clear-text password to pass to the function "
                      "(only for the 'change' command)")
    parser.add_option("-m", "--password-matched",
                      action="store_false", default=True,
                      help="Used to specify the password does NOT match (ie, "
                      "uses False for the PasswordMatch/PasswordMatched "
                      "arg, both 'auth' and 'change' commands)")
    parser.add_option("-s", "--server",
                      action="store",
                      help="The name of the server to execute the command on")
    parser.add_option("-f", "--show_fields",
                      action="store_true", default=False,
                      help="Print the NET_VALIDATE_PERSISTED_FIELDS returned")
    options, args = parser.parse_args()
    if not args:
        # No commands given: default to a single 'auth' validation.
        args = ["auth"]
    for arg in args:
        # Build the input dict and validation type expected by
        # win32net.NetValidatePasswordPolicy for the requested command.
        if arg == "auth":
            input = {"PasswordMatched": options.password_matched,
                     }
            val_type = win32netcon.NetValidateAuthentication
        elif arg == "change":
            input = {"ClearPassword": options.password,
                     "PasswordMatch": options.password_matched,
                     "UserAccountName": options.username,
                     }
            val_type = win32netcon.NetValidatePasswordChange
        else:
            parser.error("Invalid arg - must be 'auth' or 'change'")
        try:
            fields, status = win32net.NetValidatePasswordPolicy(options.server,
                                                                None, val_type, input)
        except NotImplementedError:
            # The underlying API may be missing on this Windows version.
            print "NetValidatePasswordPolicy not implemented on this platform."
            return 1
        except win32net.error, exc:
            print "NetValidatePasswordPolicy failed: ", exc
            return 1
        if options.show_fields:
            print "NET_VALIDATE_PERSISTED_FIELDS fields:"
            pprint(fields)
        # status is a winerror code; FormatMessage renders it as readable text.
        print "Result of %r validation is %d: %s" % \
              (arg, status, win32api.FormatMessage(status).strip())
    return 0


if __name__ == '__main__':
    # Exit with main()'s status: 0 on success, 1 on failure.
    sys.exit(main())
| {
"repo_name": "zhanqxun/cv_fish",
"path": "win32/Demos/NetValidatePasswordPolicy.py",
"copies": "4",
"size": "3708",
"license": "apache-2.0",
"hash": -6767162657413190000,
"line_mean": 34.7128712871,
"line_max": 92,
"alpha_frac": 0.5725458468,
"autogenerated": false,
"ratio": 4.623441396508728,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0023335833598883775,
"num_lines": 101
} |
# A demo plugin for Microsoft Excel
#
# This addin simply adds a new button to the main Excel toolbar,
# and displays a message box when clicked. Thus, it demonstrates
# how to plug in to Excel itself, and hook Excel events.
#
#
# To register the addin, simply execute:
# excelAddin.py
# This will install the COM server, and write the necessary
# AddIn key to Excel
#
# To unregister completely:
# excelAddin.py --unregister
#
# To debug, execute:
# excelAddin.py --debug
#
# Then open Pythonwin, and select "Tools->Trace Collector Debugging Tool"
# Restart excel, and you should see some output generated.
#
# NOTE: If the AddIn fails with an error, Excel will re-register
# the addin to not automatically load next time Excel starts. To
# correct this, simply re-register the addin (see above)
#
# Author <ekoome@yahoo.com> Eric Koome
# Copyright (c) 2003 Wavecom Inc. All rights reserved
#
# Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions
#are met:
#
#1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ERIC KOOME OR
# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from win32com import universal
from win32com.server.exception import COMException
from win32com.client import gencache, DispatchWithEvents
import winerror
import pythoncom
from win32com.client import constants, Dispatch
import sys
# Support for COM objects we use.
# bForDemand=True defers makepy code generation until the objects are used.
gencache.EnsureModule('{00020813-0000-0000-C000-000000000046}', 0, 1, 3, bForDemand=True) # Excel 9
gencache.EnsureModule('{2DF8D04C-5BFA-101B-BDE5-00AA0044DE52}', 0, 2, 1, bForDemand=True) # Office 9
# The TLB defining the interfaces we implement
universal.RegisterInterfaces('{AC0714F2-3D04-11D1-AE7D-00A0C90F26F4}', 0, 1, 0, ["_IDTExtensibility2"])
class ButtonEvent:
    """Event sink for the toolbar button: pops a message box when clicked."""

    def OnClick(self, button, cancel):
        # Pythonwin's GUI wrappers are optional here, so import them lazily.
        import win32con
        import win32ui
        win32ui.MessageBox("Hello from Python", "Python Test", win32con.MB_OKCANCEL)
        return cancel
class ExcelAddin:
    """A minimal Excel COM addin: adds a "PythonBar" toolbar with one button."""

    _com_interfaces_ = ['_IDTExtensibility2']
    _public_methods_ = []
    _reg_clsctx_ = pythoncom.CLSCTX_INPROC_SERVER
    _reg_clsid_ = "{C5482ECA-F559-45A0-B078-B2036E6F011A}"
    _reg_progid_ = "Python.Test.ExcelAddin"
    _reg_policy_spec_ = "win32com.server.policy.EventHandlerPolicy"

    def __init__(self):
        # Set in OnConnection; holds the Excel Application object.
        self.appHostApp = None

    def OnConnection(self, application, connectMode, addin, custom):
        """Called by Excel when the addin loads; builds the toolbar UI."""
        print("OnConnection", application, connectMode, addin, custom)
        try:
            self.appHostApp = application
            cbcMyBar = self.appHostApp.CommandBars.Add(Name="PythonBar", Position=constants.msoBarTop, MenuBar=constants.msoBarTypeNormal, Temporary=True)
            btnMyButton = cbcMyBar.Controls.Add(Type=constants.msoControlButton, Parameter="Greetings")
            # Keep a reference on self so the event connection stays alive.
            btnMyButton = self.toolbarButton = DispatchWithEvents(btnMyButton, ButtonEvent)
            btnMyButton.Style = constants.msoButtonCaption
            btnMyButton.BeginGroup = True
            btnMyButton.Caption = "&Python"
            btnMyButton.TooltipText = "Python rules the World"
            btnMyButton.Width = "34"
            cbcMyBar.Visible = True
        except pythoncom.com_error as xxx_todo_changeme:
            # Unpack the COM error and report any extended error information.
            (hr, msg, exc, arg) = xxx_todo_changeme.args
            print("The Excel call failed with code %d: %s" % (hr, msg))
            if exc is None:
                print("There is no extended error information")
            else:
                wcode, source, text, helpFile, helpId, scode = exc
                print("The source of the error is", source)
                print("The error message is", text)
                print("More info can be found in %s (id=%d)" % (helpFile, helpId))

    def OnDisconnection(self, mode, custom):
        """Called by Excel when the addin unloads; removes our toolbar."""
        print("OnDisconnection")
        # BUG FIX: the original accessed ".Delete" without calling it, so the
        # toolbar was never actually removed. Invoke the method.
        self.appHostApp.CommandBars("PythonBar").Delete()
        self.appHostApp = None

    def OnAddInsUpdate(self, custom):
        print("OnAddInsUpdate", custom)

    def OnStartupComplete(self, custom):
        print("OnStartupComplete", custom)

    def OnBeginShutdown(self, custom):
        print("OnBeginShutdown", custom)
def RegisterAddin(klass):
    """Write the registry entries that make Excel load this addin.

    LoadBehavior=3 requests loading at Excel startup; CommandLineSafe=0
    marks the addin as potentially showing UI.
    """
    import winreg
    # FIX: close the registry handles deterministically (PyHKEY supports the
    # context-manager protocol) instead of leaking them until GC.
    with winreg.CreateKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Office\\Excel\\Addins") as key:
        with winreg.CreateKey(key, klass._reg_progid_) as subkey:
            winreg.SetValueEx(subkey, "CommandLineSafe", 0, winreg.REG_DWORD, 0)
            winreg.SetValueEx(subkey, "LoadBehavior", 0, winreg.REG_DWORD, 3)
            winreg.SetValueEx(subkey, "Description", 0, winreg.REG_SZ, "Excel Addin")
            winreg.SetValueEx(subkey, "FriendlyName", 0, winreg.REG_SZ, "A Simple Excel Addin")
def UnregisterAddin(klass):
    """Remove this addin's registry entry; quietly ignore a missing key."""
    import winreg
    addin_key = "Software\\Microsoft\\Office\\Excel\\Addins\\" + klass._reg_progid_
    try:
        winreg.DeleteKey(winreg.HKEY_CURRENT_USER, addin_key)
    except WindowsError:
        # Never registered (or already unregistered) - nothing to do.
        pass
if __name__ == '__main__':
    import win32com.server.register
    # Register/unregister the COM server itself, then mirror that action in
    # Excel's Addins registry key.
    win32com.server.register.UseCommandLine(ExcelAddin)
    if "--unregister" in sys.argv:
        UnregisterAddin(ExcelAddin)
    else:
        RegisterAddin(ExcelAddin)
| {
"repo_name": "sserrot/champion_relationships",
"path": "venv/Lib/site-packages/win32com/demos/excelAddin.py",
"copies": "10",
"size": "5943",
"license": "mit",
"hash": -9039546329836360000,
"line_mean": 41.7553956835,
"line_max": 154,
"alpha_frac": 0.7048628639,
"autogenerated": false,
"ratio": 3.6149635036496353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006759547575320768,
"num_lines": 139
} |
# A demo plugin for Microsoft Outlook (NOT Outlook Express)
#
# This addin simply adds a new button to the main Outlook toolbar,
# and displays a message box when clicked. Thus, it demonstrates
# how to plug in to Outlook itself, and hook outlook events.
#
# Additionally, each time a new message arrives in the Inbox, a message
# is printed with the subject of the message.
#
# To register the addin, simply execute:
# outlookAddin.py
# This will install the COM server, and write the necessary
# AddIn key to Outlook
#
# To unregister completely:
# outlookAddin.py --unregister
#
# To debug, execute:
# outlookAddin.py --debug
#
# Then open Pythonwin, and select "Tools->Trace Collector Debugging Tool"
# Restart Outlook, and you should see some output generated.
#
# NOTE: If the AddIn fails with an error, Outlook will re-register
# the addin to not automatically load next time Outlook starts. To
# correct this, simply re-register the addin (see above)
from win32com import universal
from win32com.server.exception import COMException
from win32com.client import gencache, DispatchWithEvents
import winerror
import pythoncom
from win32com.client import constants
import sys
# Support for COM objects we use.
# bForDemand=True defers makepy code generation until the objects are used.
gencache.EnsureModule('{00062FFF-0000-0000-C000-000000000046}', 0, 9, 0, bForDemand=True) # Outlook 9
gencache.EnsureModule('{2DF8D04C-5BFA-101B-BDE5-00AA0044DE52}', 0, 2, 1, bForDemand=True) # Office 9
# The TLB defining the interfaces we implement
universal.RegisterInterfaces('{AC0714F2-3D04-11D1-AE7D-00A0C90F26F4}', 0, 1, 0, ["_IDTExtensibility2"])
class ButtonEvent:
    """Event sink for the toolbar button: shows a greeting when clicked."""

    def OnClick(self, button, cancel):
        # A Pythonwin GUI is handy but not required, so import only when needed.
        import win32ui
        win32ui.MessageBox("Hello from Python")
        return cancel
class FolderEvent:
    """Event sink for an Outlook Items collection; reports new inbox items."""

    def OnItemAdd(self, item):
        # Not every item type exposes a Subject property, so read defensively.
        try:
            subject = item.Subject
        except AttributeError:
            print("An item was added to the inbox, but it has no subject! - ", repr(item))
        else:
            print("An item was added to the inbox with subject:", subject)
class OutlookAddin:
    """A minimal Outlook COM addin: adds a toolbar button and watches the inbox."""

    _com_interfaces_ = ['_IDTExtensibility2']
    _public_methods_ = []
    _reg_clsctx_ = pythoncom.CLSCTX_INPROC_SERVER
    _reg_clsid_ = "{0F47D9F3-598B-4d24-B7E3-92AC15ED27E2}"
    _reg_progid_ = "Python.Test.OutlookAddin"
    _reg_policy_spec_ = "win32com.server.policy.EventHandlerPolicy"

    def OnConnection(self, application, connectMode, addin, custom):
        """Called by Outlook when the addin loads; sets up UI and inbox hook."""
        print("OnConnection", application, connectMode, addin, custom)
        # ActiveExplorer may be none when started without a UI (eg, WinCE synchronisation)
        activeExplorer = application.ActiveExplorer()
        if activeExplorer is not None:
            bars = activeExplorer.CommandBars
            toolbar = bars.Item("Standard")
            item = toolbar.Controls.Add(Type=constants.msoControlButton, Temporary=True)
            # Hook events for the item; keeping the reference on self prevents
            # the event connection from being garbage-collected.
            item = self.toolbarButton = DispatchWithEvents(item, ButtonEvent)
            item.Caption="Python"
            item.TooltipText = "Click for Python"
            item.Enabled = True
        # And now, for the sake of demonstration, setup a hook for all new messages
        inbox = application.Session.GetDefaultFolder(constants.olFolderInbox)
        self.inboxItems = DispatchWithEvents(inbox.Items, FolderEvent)

    def OnDisconnection(self, mode, custom):
        print("OnDisconnection")

    def OnAddInsUpdate(self, custom):
        print("OnAddInsUpdate", custom)

    def OnStartupComplete(self, custom):
        print("OnStartupComplete", custom)

    def OnBeginShutdown(self, custom):
        print("OnBeginShutdown", custom)
def RegisterAddin(klass):
    """Write the registry entries that make Outlook load this addin.

    LoadBehavior=3 requests loading at Outlook startup; CommandLineSafe=0
    marks the addin as potentially showing UI.
    """
    import winreg
    # FIX: close the registry handles deterministically (PyHKEY supports the
    # context-manager protocol) instead of leaking them until GC.
    with winreg.CreateKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Office\\Outlook\\Addins") as key:
        with winreg.CreateKey(key, klass._reg_progid_) as subkey:
            winreg.SetValueEx(subkey, "CommandLineSafe", 0, winreg.REG_DWORD, 0)
            winreg.SetValueEx(subkey, "LoadBehavior", 0, winreg.REG_DWORD, 3)
            winreg.SetValueEx(subkey, "Description", 0, winreg.REG_SZ, klass._reg_progid_)
            winreg.SetValueEx(subkey, "FriendlyName", 0, winreg.REG_SZ, klass._reg_progid_)
def UnregisterAddin(klass):
    """Remove this addin's registry entry; quietly ignore a missing key."""
    import winreg
    addin_key = "Software\\Microsoft\\Office\\Outlook\\Addins\\" + klass._reg_progid_
    try:
        winreg.DeleteKey(winreg.HKEY_CURRENT_USER, addin_key)
    except WindowsError:
        # Never registered (or already unregistered) - nothing to do.
        pass
if __name__ == '__main__':
    import win32com.server.register
    # Register/unregister the COM server itself, then mirror that action in
    # Outlook's Addins registry key.
    win32com.server.register.UseCommandLine(OutlookAddin)
    if "--unregister" in sys.argv:
        UnregisterAddin(OutlookAddin)
    else:
        RegisterAddin(OutlookAddin)
| {
"repo_name": "sserrot/champion_relationships",
"path": "venv/Lib/site-packages/win32com/demos/outlookAddin.py",
"copies": "5",
"size": "4628",
"license": "mit",
"hash": -7694411452974078000,
"line_mean": 39.5964912281,
"line_max": 121,
"alpha_frac": 0.7100259291,
"autogenerated": false,
"ratio": 3.5355233002291824,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6745549229329182,
"avg_score": null,
"num_lines": null
} |
# A demo run on one image
import torch
import os
from skimage import io, transform
import numpy as np
import time
import datetime
from model.model import WSL, load_pretrained
import data_utils.load_voc as load_voc
import argparse
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from spn_codes.models import SPNetWSL
from evaluate.rst_for_corloc import rst_for_corloc
from evaluate.corloc_eval import corloc
from evaluate.get_attention_map import process_one
# --- Configuration: dataset locations and the trained checkpoint ---
data_dir = '/home/zhangyu/data/VOC2007_test/'
imgDir = os.path.join(data_dir, 'JPEGImages')
xml_files = os.path.join(data_dir, 'Annotations')
ck_pt = '/disk3/zhangyu/WeaklyDetection/spn_new/\
checkpt/best_model/best_checkpoint_epoch20.pth.tar'
gpuID = 0

# --- Load and preprocess a single VOC image ---
img_name = '000182'
demo_img = os.path.join(imgDir, '{}.jpg'.format(img_name))
demo_xml = os.path.join(xml_files, '{}.xml'.format(img_name))
img = io.imread(demo_img)
img_sz = np.array(list(img.shape[:2]))  # original (height, width) of the image
trans = transforms.Compose([
    load_voc.Rescale((224,224)),
    load_voc.ToTensor(),
    load_voc.Normalize(mean=[0.485, 0.456, 0.406],
                       std=[0.229, 0.224, 0.225])])  # standard ImageNet mean/std
cls = load_voc.load_class(demo_xml)
sample = {'filename':demo_img, 'sz': img_sz, 'image': img, 'class': cls}
sample = trans(sample)
sample['image'] = sample['image'].unsqueeze(0).float()  # add a batch dimension
# NOTE(review): Variable(volatile=True) is the legacy (pre-0.4) PyTorch
# inference mode; on modern PyTorch wrap the forward pass in torch.no_grad().
input_var = torch.autograd.Variable(sample['image'], volatile=True).cuda(gpuID)

# --- Build the model, load weights, and run one forward pass ---
num_class = 20  # PASCAL VOC object classes
net = WSL(num_class)
load_pretrained(net, ck_pt)
net.eval()
net.cuda(gpuID)
cls_scores, ft = net.get_att_map(input_var)
lr_weigth = net.classifier[1].weight.cpu().data.numpy()  # classifier weights
ft = ft.cpu().data.numpy()
atten_maps = process_one(ft[0,:,:,:], lr_weigth, None)
print('Process finished!')
| {
"repo_name": "zhangyuygss/WSL",
"path": "demo.py",
"copies": "1",
"size": "1719",
"license": "bsd-3-clause",
"hash": -8451970019987063000,
"line_mean": 30.2545454545,
"line_max": 79,
"alpha_frac": 0.7143688191,
"autogenerated": false,
"ratio": 2.7996742671009773,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9005084257565934,
"avg_score": 0.0017917657270085663,
"num_lines": 55
} |
# A demo which creates a view and a frame which displays a PPM format bitmap
#
# This hasn't been run in a while, as I don't have many of that format around!
import win32ui
import win32con
import win32api
import string
class DIBView:
    """A scrolling Pythonwin view that paints a device-independent bitmap."""

    def __init__(self, doc, dib):
        self.dib = dib
        self.view = win32ui.CreateView(doc)
        self.width = 0
        self.height = 0
        # Track client-area size changes (useful for stretch-mode painting).
        self.view.HookMessage(self.OnSize, win32con.WM_SIZE)

    def OnSize(self, params):
        # WM_SIZE packs the new client size into lParam (params[3]).
        lparam = params[3]
        self.width = win32api.LOWORD(lparam)
        self.height = win32api.HIWORD(lparam)

    def OnDraw(self, ob, dc):
        # Scroll sizes are set for "non stretch" painting at the DIB's size.
        self.view.SetScrollSizes(win32con.MM_TEXT, self.dib.GetSize())
        dib_width, dib_height = self.dib.GetSize()
        dib_rect = (0, 0, dib_width, dib_height)
        # Stretch mode would be:
        #   self.dib.Paint(dc, (0, 0, self.width, self.height), dib_rect)
        self.dib.Paint(dc)
class DIBDemo:
    """Loads a bitmap (raw PPM "P6" or Windows BMP) and shows it in an MDI frame."""

    def __init__(self, filename, * bPBM):
        # init data members
        f = open(filename, 'rb')
        dib=win32ui.CreateDIBitmap()
        if len(bPBM)>0:
            # Any extra positional argument selects raw PPM ("P6") parsing.
            magic=f.readline()
            if magic <> "P6\n":
                print "The file is not a PBM format file"
                raise "Failed"  # NOTE(review): string exceptions; Python <= 2.5 era code
            # check magic?
            rowcollist=string.split(f.readline())
            cols=string.atoi(rowcollist[0])
            rows=string.atoi(rowcollist[1])
            f.readline() # whats this one?  (presumably the max-value line - confirm)
            dib.LoadPBMData(f,(cols,rows))
        else:
            dib.LoadWindowsFormatFile(f)
        f.close()
        # create doc/view
        self.doc = win32ui.CreateDoc()
        self.dibView = DIBView( self.doc, dib )
        self.frame = win32ui.CreateMDIFrame()
        self.frame.LoadFrame() # this will force OnCreateClient
        self.doc.SetTitle ('DIB Demo')
        self.frame.ShowWindow()
        # display the sucka
        self.frame.ActivateFrame()

    def OnCreateClient( self, createparams, context ):
        # Attach the DIB view as the frame's client window.
        self.dibView.view.CreateWindow(self.frame)
        return 1
if __name__=='__main__':
import demoutils
demoutils.NotAScript() | {
"repo_name": "abaditsegay/arangodb",
"path": "3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/pythonwin/pywin/Demos/dibdemo.py",
"copies": "17",
"size": "1930",
"license": "apache-2.0",
"hash": 4464716752998944300,
"line_mean": 26.9855072464,
"line_max": 77,
"alpha_frac": 0.6953367876,
"autogenerated": false,
"ratio": 2.665745856353591,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A demo which runs object detection on camera frames.
"""
import argparse
import collections
import colorsys
import itertools
import time
from coral.cloudiot.core import CloudIot
from edgetpu.detection.engine import DetectionEngine
from edgetpuvision import svg
from edgetpuvision import utils
from edgetpuvision.apps import run_app, run_server
# Shared SVG stylesheet: '.back' draws solid black backgrounds behind text,
# '.bbox' draws unfilled detection rectangles.
CSS_STYLES = str(svg.CssStyle({'.back': svg.Style(fill='black',
                                                  stroke='black',
                                                  stroke_width='0.5em'),
                               '.bbox': svg.Style(fill_opacity=0.0,
                                                  stroke_width='0.1em')}))
# Lightweight records for detections. BBox holds the fractional coordinates
# produced by convert(); bbox_flat holds the frame-pixel copy (see convert).
BBox = collections.namedtuple('BBox', ('x', 'y', 'w', 'h'))
BBox.area = lambda self: self.w * self.h
BBox.scale = lambda self, sx, sy: BBox(x=self.x * sx, y=self.y * sy,
                                       w=self.w * sx, h=self.h * sy)
BBox.__str__ = lambda self: 'BBox(x=%.2f y=%.2f w=%.2f h=%.2f)' % self

Object = collections.namedtuple('Object', ('id', 'label', 'score', 'bbox_flat', 'bbox'))
# BUG FIX: the original did `'...%d...%s...%.2f...%s' % self`, formatting the
# 5-field namedtuple against 4 specifiers, which raises TypeError whenever the
# object is printed (print_results does). Select the fields explicitly.
Object.__str__ = lambda self: 'Object(id=%d, label=%s, score=%.2f, %s)' % (
    self.id, self.label, self.score, self.bbox)
def size_em(length):
    """Return a CSS size, in em units, proportional to a text length."""
    return '{}em'.format(0.6 * length)
def color(i, total):
    """Pick the i-th of `total` evenly spaced, fully saturated RGB colors."""
    r, g, b = colorsys.hsv_to_rgb(i / total, 1.0, 1.0)
    return tuple(int(255.0 * channel) for channel in (r, g, b))
def make_palette(keys):
    """Assign a distinct svg color string to every key."""
    key_list = list(keys)
    total = len(key_list)
    return {key: svg.rgb(color(index, total)) for index, key in enumerate(key_list)}
def make_get_color(color, labels):
    """Return a function mapping an object id to its display color.

    A fixed `color` wins; otherwise a per-label palette is built from
    `labels`; failing both, everything is white.
    """
    if color:
        def fixed_color(obj_id):
            return color
        return fixed_color
    if labels:
        palette = make_palette(labels.keys())
        def palette_color(obj_id):
            return palette[obj_id]
        return palette_color
    def default_color(obj_id):
        return 'white'
    return default_color
def overlay(title, objs, get_color, inference_time, inference_rate, layout):
    """Render the detection overlay (boxes, captions, stats) as an SVG string."""
    x0, y0, width, height = layout.window
    font_size = 0.03 * height
    defs = svg.Defs()
    defs += CSS_STYLES
    doc = svg.Svg(width=width, height=height,
                  viewBox='%s %s %s %s' % layout.window,
                  font_size=font_size, font_family='monospace', font_weight=500)
    doc += defs
    for obj in objs:
        percent = int(100 * obj.score)
        if obj.label:
            caption = '%d%% %s' % (percent, obj.label)
        else:
            caption = '%d%%' % percent
        # Object bbox coordinates are fractional; scale to layout pixels.
        x, y, w, h = obj.bbox.scale(*layout.size)
        color = get_color(obj.id)
        doc += svg.Rect(x=x, y=y, width=w, height=h,
                        style='fill:%s;fill-opacity:0.1;stroke-width:3;stroke:%s' % (color, color),
                        _class='bbox')
        # Caption background bar sits just below the box, sized to the text.
        doc += svg.Rect(x=x, y=y + h,
                        width=size_em(len(caption)), height='1.2em', fill=color)
        t = svg.Text(x=x, y=y + h, fill='black')
        t += svg.TSpan(caption, dy='1em')
        doc += t
    ox = x0 + 20
    oy1, oy2 = y0 + 20 + font_size, y0 + height - 20
    # Title
    if title:
        doc += svg.Rect(x=0, y=0, width=size_em(len(title)), height='1em',
                        transform='translate(%s, %s) scale(1,-1)' % (ox, oy1), _class='back')
        doc += svg.Text(title, x=ox, y=oy1, fill='white')
    # Info
    lines = [
        'Objects: %d' % len(objs),
        'Inference time: %.2f ms (%.2f fps)' % (inference_time * 1000, 1.0 / inference_time)
    ]
    # Stack the info lines upward from the bottom-left corner.
    for i, line in enumerate(reversed(lines)):
        y = oy2 - i * 1.7 * font_size
        doc += svg.Rect(x=0, y=0, width=size_em(len(line)), height='1em',
                        transform='translate(%s, %s) scale(1,-1)' % (ox, y),
                        _class='back')
        doc += svg.Text(line, x=ox, y=y, fill='white')
    return str(doc)
def convert(obj, labels):
    """Convert a DetectionEngine result into this module's Object record."""
    # Flatten once (the original flattened the same array twice).
    x0, y0, x1, y1 = obj.bounding_box.flatten().tolist()
    # Pixel-space copy of the box. NOTE(review): hard-codes a 640x360 frame -
    # confirm against the camera/layout configuration.
    bbox_flat = [x0 * 640.0, y0 * 360.0, x1 * 640.0, y1 * 360.0]
    return Object(id=obj.label_id,
                  label=labels[obj.label_id] if labels else None,
                  score=obj.score,
                  bbox_flat=bbox_flat,
                  bbox=BBox(x=x0, y=y0, w=x1 - x0, h=y1 - y0))
def print_results(inference_rate, objs):
    """Print the current inference rate and each detection with its box area."""
    print('\nInference (rate=%.2f fps):' % inference_rate)
    for index, obj in enumerate(objs):
        print(' %d: %s, area=%.2f' % (index, obj, obj.bbox.area()))
def render_gen(args):
    """Generator driving detection and overlay rendering.

    First yields the engine's expected input image size; thereafter each
    send() of (tensor, layout, command) yields an SVG overlay string (or
    None when the overlay is disabled), while publishing detections to
    Cloud IoT on every frame.
    """
    global minimum_backoff_time  # NOTE(review): declared but never assigned here - confirm it is needed
    import json
    import random
    fps_counter = utils.avg_fps_counter(30)
    engines, titles = utils.make_engines(args.model, DetectionEngine)
    assert utils.same_input_image_sizes(engines)
    engines = itertools.cycle(engines)
    engine = next(engines)
    labels = utils.load_labels(args.labels) if args.labels else None
    filtered_labels = set(l.strip() for l in args.filter.split(',')) if args.filter else None
    get_color = make_get_color(args.color, labels)
    draw_overlay = True
    # First yield: tell the caller what input size the engine expects.
    yield utils.input_image_size(engine)
    output = None
    message_count = 0
    # Smooth the reported inference time over the last 30 frames.
    inference_time_window = collections.deque(maxlen=30)
    inference_time = 0.0
    with CloudIot(args.cloud_config) as cloud:
        while True:
            d = {}
            tensor, layout, command = (yield output)
            inference_rate = next(fps_counter)
            if draw_overlay:
                start = time.monotonic()
                objs = engine.detect_with_input_tensor(tensor,
                                                       threshold=args.threshold,
                                                       top_k=args.top_k)
                inference_time_ = time.monotonic() - start
                inference_time_window.append(inference_time_)
                inference_time = sum(inference_time_window) / len(inference_time_window)
                objs = [convert(obj, labels) for obj in objs]
                # Optional label and bounding-box-area filtering.
                if labels and filtered_labels:
                    objs = [obj for obj in objs if obj.label in filtered_labels]
                objs = [obj for obj in objs if args.min_area <= obj.bbox.area() <= args.max_area]
                if args.print:
                    print_results(inference_rate, objs)
                # Collect detections for the Cloud IoT payload.
                for ind, obj in enumerate(objs):
                    tx = obj.label
                    o = {"name": tx, "points": ",".join([str(i) for i in obj.bbox_flat])}
                    d[ind] = o
                title = titles[engine]
                output = overlay(title, objs, get_color, inference_time, inference_rate, layout)
            else:
                output = None
            # Keyboard commands: 'o' toggles the overlay, 'n' cycles engines.
            if command == 'o':
                draw_overlay = not draw_overlay
            elif command == 'n':
                engine = next(engines)
            payload = json.dumps(d)
            # Echo every 10th payload to the console for visibility.
            if message_count > 0 and message_count % 10 == 0:
                print("-" * 20)
                print(d)
                print("-" * 20)
            cloud.publish_message(payload)
            message_count += 1
def add_render_gen_args(parser):
    """Register the command-line options consumed by render_gen()."""
    parser.add_argument('--model', required=True,
                        help='.tflite model path')
    parser.add_argument('--labels',
                        help='labels file path')
    parser.add_argument('--top_k', type=int, default=50,
                        help='Max number of objects to detect')
    parser.add_argument('--threshold', type=float, default=0.1,
                        help='Detection threshold')
    parser.add_argument('--min_area', type=float, default=0.0,
                        help='Min bounding box area')
    parser.add_argument('--max_area', type=float, default=1.0,
                        help='Max bounding box area')
    parser.add_argument('--filter', default=None,
                        help='Comma-separated list of allowed labels')
    parser.add_argument('--color', default=None,
                        help='Bounding box display color')
    parser.add_argument('--print', default=False, action='store_true',
                        help='Print inference results')
    parser.add_argument('--cloud_config', required=True,
                        help='Cloud Config path')
def main():
    # Serve the stream over HTTP; switch to run_app to render via HDMI instead.
    run_server(add_render_gen_args, render_gen)
    # run_app(add_render_gen_args, render_gen)


if __name__ == '__main__':
    main()
| {
"repo_name": "google-coral/project-cloud-monitor",
"path": "edge/detect_cloudiot.py",
"copies": "1",
"size": "8226",
"license": "apache-2.0",
"hash": -535258722781447230,
"line_mean": 38.3588516746,
"line_max": 99,
"alpha_frac": 0.5408460977,
"autogenerated": false,
"ratio": 3.567215958369471,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4608062056069471,
"avg_score": null,
"num_lines": null
} |
""" A Design Of Experiment (DOE) changes one or several component parameters to create a model
"""
import itertools as it
from io import StringIO
from pathlib import Path
from typing import Any, Dict, List, Union
from omegaconf import OmegaConf
from pp.config import CONFIG
sample = """
does:
cutback_taper_te_400:
component: cutback_taper_te
settings:
wg_width: [0.4]
length: [1.5]
n_devices_target: [30, 60, 120]
"""
def load_does(filepath: Union[Path, StringIO]) -> Dict[str, Any]:
    """Parse a does.yml file into a dictionary of DOE descriptions.

    Args:
        filepath: yaml file describing the DOEs

    Returns:
        dict mapping each DOE name to its description, where the
        "settings" entry has been expanded into a list of concrete
        settings dicts (see get_settings_list):

        {
            doe_name1: [(component_factory_name, parameters), ...]
            doe_name2: [(component_factory_name, parameters), ...]
            ...
        }
    """
    raw = OmegaConf.to_container(OmegaConf.load(filepath))
    does = {}
    for doe_name, doe in raw.items():
        # "mask" is mask-level configuration, and templates are not DOEs.
        if doe_name == "mask":
            continue
        if doe.get("type", "") == "template":
            continue
        do_permutation = doe.pop("do_permutation", True)
        if not doe.get("settings"):
            raise ValueError(f"Error, missing settings: for {doe_name}")
        doe_settings = doe.pop("settings", "")
        if doe_settings:
            doe["settings"] = get_settings_list(do_permutation, **doe_settings)
        else:
            raise ValueError(
                f"DOE {doe_name} is not a dictionary",
                f"\n\t got: {doe}\n\t sample: {sample}",
            )
        does[doe_name] = doe
    return does
def get_settings_list(do_permutations: bool = True, **kwargs) -> List[Dict[str, Any]]:
    """Expand parameter sweeps into a list of settings dicts.

    Args:
        do_permutations: if True, take the cartesian product of all value
            lists; if False, zip the lists together element-wise.
        **kwargs: parameter name -> value or list of values to sweep

    Usage:

        import pp
        pp.doe.get_settings_list(length=[30, 40])  # adds different lengths
        pp.doe.get_settings_list(length=[30, 40], width=[4, 8])
        # do_permutations=True -> all combinations (L30W4, L30W8, L40W4, L40W8)
        # do_permutations=False -> zipped arguments (L30W4, L40W8)
    """
    # No parameters to sweep: a single empty settings dict.
    if not kwargs:
        return [dict()]
    # Normalize scalars into single-element lists.
    swept = {name: value if isinstance(value, list) else [value]
             for name, value in kwargs.items()}
    names = list(swept)
    value_lists = list(swept.values())
    combos = it.product(*value_lists) if do_permutations else zip(*value_lists)
    return [dict(zip(names, combo)) for combo in combos]
def test_load_does() -> Dict[str, Any]:
    """Smoke test: the sample mask DOE file defines exactly four DOEs."""
    sample_path = CONFIG["samples_path"] / "mask" / "does.yml"
    loaded = load_does(sample_path)
    assert len(loaded) == 4
    return loaded
if __name__ == "__main__":
    # Run the smoke test when executed directly.
    test_load_does()
    # from pprint import pprint
    # does_path = CONFIG["samples_path"] / "mask" / "does.yml"
    # pprint(load_does(does_path))
| {
"repo_name": "gdsfactory/gdsfactory",
"path": "pp/doe.py",
"copies": "1",
"size": "3974",
"license": "mit",
"hash": -2968411002781651000,
"line_mean": 27.3857142857,
"line_max": 94,
"alpha_frac": 0.590840463,
"autogenerated": false,
"ratio": 3.655933762649494,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4746774225649494,
"avg_score": null,
"num_lines": null
} |
""" A Design Of Experiment (DOE) changes one or several component parameters to create a model
"""
import itertools as it
from omegaconf import OmegaConf
from pp.config import CONFIG
sample = """
does:
cutback_taper_te_400:
component: cutback_taper_te
settings:
wg_width: [0.4]
length: [1.5]
n_devices_target: [30, 60, 120]
"""
def load_does(filepath):
    """Return a dictionary with the information loaded from does.yml

    Args:
        filepath: yaml file describing the DOEs

    Returns:
        dict mapping each DOE name to its description, where the
        "settings" entry has been expanded into a list of concrete
        settings dicts (see get_settings_list):

        {
            doe_name1: [(component_factory_name, parameters), ...]
            doe_name2: [(component_factory_name, parameters), ...]
            ...
        }
    """
    does = {}
    input_does = OmegaConf.load(filepath)
    input_does = OmegaConf.to_container(input_does)
    for doe_name, doe in input_does.items():
        # "mask" is mask-level configuration, not a DOE.
        if doe_name == "mask":
            continue
        if "do_permutation" in doe:
            do_permutation = doe.pop("do_permutation")
        else:
            do_permutation = True
        assert doe.get("settings"), "need to define settings for doe {}".format(
            doe_name
        )
        doe_settings = doe.pop("settings", "")
        if doe_settings:
            doe["settings"] = get_settings_list(do_permutation, **doe_settings)
        else:
            # BUG FIX: the original message was a plain (non-f) string with a
            # broken "{sample" placeholder, so nothing was ever interpolated.
            raise ValueError(
                f"DOE {doe_name} needs to be a dictionary\n\t got: {doe}\n\t sample: {sample}"
            )
        does[doe_name] = doe
    return does
def get_settings_list(do_permutations=True, **kwargs):
    """Return a list of settings dicts expanded from parameter sweeps.

    Args:
        do_permutations: if True, take the cartesian product of the value
            lists; if False, zip them together element-wise.
        **kwargs: parameter name -> value or list of values to sweep

    Usage:

        import pp
        pp.doe.get_settings_list(length=[30, 40])  # adds different lengths
        pp.doe.get_settings_list(length=[30, 40], width=[4, 8])
        # do_permutations=True -> all combinations (L30W4, L30W8, L40W4, L40W8)
        # do_permutations=False -> zipped arguments (L30W4, L40W8)
    """
    # BUG FIX: the original returned {} (a dict) for empty kwargs although
    # every other path (and the contract) returns a list; return one empty
    # settings dict so callers can always iterate the result.
    if not kwargs:
        return [dict()]
    # Accept both scalar values and lists.
    for key, value in list(kwargs.items()):
        if not isinstance(value, list):
            kwargs[key] = [value]
    keys, list_values = zip(*kwargs.items())
    if do_permutations:
        combos = it.product(*list_values)
    else:
        combos = zip(*list_values)
    return [dict(zip(keys, combo)) for combo in combos]
def test_load_does():
    """Smoke test: the sample mask DOE file defines exactly two DOEs."""
    sample_path = CONFIG["samples_path"] / "mask" / "does.yml"
    loaded = load_does(sample_path)
    assert len(loaded) == 2
    return loaded
if __name__ == "__main__":
    # Run the smoke test when executed directly.
    test_load_does()
    # from pprint import pprint
    # does_path = CONFIG["samples_path"] / "mask" / "does.yml"
    # pprint(load_does(does_path))
| {
"repo_name": "psiq/gdsfactory",
"path": "pp/doe.py",
"copies": "1",
"size": "3697",
"license": "mit",
"hash": 553985606263701700,
"line_mean": 26.7969924812,
"line_max": 142,
"alpha_frac": 0.5912902353,
"autogenerated": false,
"ratio": 3.6495557749259624,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4740846010225962,
"avg_score": null,
"num_lines": null
} |
"""A device that can be attached to"""
from Device import *
class Attachable(Device):
    """A device whose backend console can be attached to via ssh + screen."""

    def __init__(self):
        """
        Create a device that can be attached to.
        """
        Device.__init__(self)
        self.menu.addAction("Restart", self.restart)
        self.menu.addAction("Stop", self.terminate)

    def attach(self):
        """
        Attach to corresponding device on backend.
        """
        # Reattach the backend 'screen' session over ssh (-t forces a tty).
        base = "ssh -t " + options["username"] + "@" + options["server"]
        screen = " screen -r "
        if self.device_type == "Wireless_access_point":
            # WAP sessions are named by device id rather than by pid.
            screen += "WAP_%d" % self.getID()
        else:
            name = self.getName()
            pid = mainWidgets["tm"].getPID(name)
            if not pid:
                # Backend process not running; nothing to attach to.
                return
            screen += pid + "." + name
        command = ""
        window_name = str(self.getProperty("Name")) # the strcast is necessary for cloning
        if(self.getName() != window_name):
            window_name += " (" + self.getName() + ")"
        if environ["os"] == "Windows":
            # On Windows, write the screen command to a start file and have
            # putty execute it (-m) after logging in.
            startpath = environ["tmp"] + self.getName() + ".start"
            try:
                outfile = open(startpath, "w")
                outfile.write(screen)
                outfile.close()
            except:
                mainWidgets["log"].append("Failed to write to start file!")
                return
            command += "putty -"
            if options["session"]:
                command += "load " + options["session"] + " -l " + options["username"] + " -t"
            else:
                command += base
            command += " -m \"" + startpath + "\""
        else:
            # Elsewhere, open an rxvt terminal running the ssh/screen command.
            command += "rxvt -T \"" + window_name + "\" -e " + base + screen
        # NOTE(review): shell=True with string-interpolated names; device or
        # user names containing shell metacharacters would be interpreted by
        # the shell. Acceptable for a local GUI tool, but worth confirming.
        self.shell = subprocess.Popen(str(command), shell=True)
"repo_name": "michaelkourlas/gini",
"path": "frontend/src/gbuilder/Core/Attachable.py",
"copies": "1",
"size": "1911",
"license": "mit",
"hash": 783184875960331600,
"line_mean": 31.5614035088,
"line_max": 94,
"alpha_frac": 0.4563055992,
"autogenerated": false,
"ratio": 4.507075471698113,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5463381070898113,
"avg_score": null,
"num_lines": null
} |
"""A device that can have interfaces"""
from Device import *
from Attachable import *
class Interfaceable(Attachable):
    """A device that owns network interfaces and per-interface route tables.

    Each interface is a dict keyed by QtCore.QString: "target" (peer node),
    "ipv4", "mac", and "routing" (a list of route-entry dicts).
    """
    def __init__(self):
        """
        Create a device that can have interfaces.
        """
        Attachable.__init__(self)
        self.adjacentRouterList = []    # [router, interface] pairs for route computation
        self.adjacentSubnetList = []    # subnets directly reachable from this device
        self.con_int = {}  # the connection and interface pair
    def generateToolTip(self):
        """
        Add IP address(es) to the tool tip for easier lookup.
        """
        tooltip = self.getName()
        for interface in self.getInterfaces():
            tooltip += "\n\nTarget: " + interface[QtCore.QString("target")].getName() + "\n"
            tooltip += "IP: " + interface[QtCore.QString("ipv4")]
        self.setToolTip(tooltip)
    def addInterface(self, node):
        """
        Add an interface to the list of interfaces with node as target.

        No-op if an interface targeting node already exists.
        """
        for interface in self.interfaces:
            if interface[QtCore.QString("target")] == node:
                return
        self.interfaces.append({
            QtCore.QString("target"):node,
            QtCore.QString("ipv4"):QtCore.QString(""),
            QtCore.QString("mac"):QtCore.QString(""),
            QtCore.QString("routing"):[]})
    def removeInterface(self, node):
        """
        Remove the interface from the list of interfaces where node is the target.
        """
        interface = None
        for interface in self.interfaces:
            if interface[QtCore.QString("target")] == node:
                break
            # Reset so a non-matching last element is not removed below.
            interface = None
        if interface:
            self.interfaces.remove(interface)
    def getInterfaces(self):
        """
        Return the list of interfaces.
        """
        return self.interfaces
    def getInterface(self, node=None, subnet=None):
        """
        Return an interface from the list of interfaces specified by node or subnet.

        With neither given, returns the first interface.  Returns None when no
        interface matches.
        """
        if not node and not subnet:
            return self.interfaces[0]
        elif subnet:
            for interface in self.interfaces:
                if interface[QtCore.QString("subnet")] == subnet:
                    return interface
        else:
            for interface in self.interfaces:
                if interface[QtCore.QString("target")] == node:
                    return interface
    def getInterfaceProperty(self, propName, node=None, subnet=None, index=0):
        """
        Return an interface property specified by node or subnet.

        With neither given, reads from the interface at position index.
        """
        if not node and not subnet:
            return self.interfaces[index][QtCore.QString(propName)]
        interface = self.getInterface(node, subnet)
        if interface:
            return interface[QtCore.QString(propName)]
    def setInterfaceProperty(self, prop, value, node=None, subnet=None, index=0):
        """
        Set an interface property specified by node or subnet.
        """
        if not node and not subnet:
            self.interfaces[index][QtCore.QString(prop)] = QtCore.QString(value)
        else:
            interface = self.getInterface(node, subnet)
            if not interface:
                return
            interface[QtCore.QString(prop)] = QtCore.QString(value)
        if prop == "ipv4":
            # Keep the tooltip's IP listing in sync.
            self.generateToolTip()
    def getTable(self, node=None):
        """
        Return the route table from the interface specified by node.
        """
        return self.getInterfaceProperty("routing", node)
    def getEntry(self, subnet, target):
        """
        Return an entry from the route table specified by subnet and target.
        """
        table = self.getInterfaceProperty("routing", target)
        for entry in table:
            if entry[QtCore.QString("subnet")] == subnet:
                return entry
    def getEntryProperty(self, prop, subnet, target):
        """
        Return a property from the entry specified by subnet and target.
        """
        entry = self.getEntry(subnet, target)
        return entry[QtCore.QString(prop)]
    def setEntryProperty(self, prop, value, subnet, target):
        """
        Set a property from the entry specified by subnet and target.
        """
        entry = self.getEntry(subnet, target)
        entry[QtCore.QString(prop)] = value
    def addEntry(self, mask, gateway, subnet, target):
        """
        Add an entry to the table specified by subnet and target.
        """
        entry = {QtCore.QString("netmask"):mask, QtCore.QString("gw"):gateway, QtCore.QString("subnet"):subnet}
        table = self.getTable(target)
        table.append(entry)
    def removeEntry(self, entry, target):
        """
        Remove an entry from the table specified by subnet and target.
        """
        table = self.getTable(target)
        table.remove(entry)
    def addAdjacentRouter(self, router, interface):
        """
        Add a router to the list of adjacent ones for route computations.
        """
        self.adjacentRouterList.append([router, interface])
    def getAdjacentRouters(self):
        """
        Return the list of adjacent routers.
        """
        return self.adjacentRouterList
    def addAdjacentSubnet(self, subnet):
        """
        Add a subnet to the list of adjacent ones for route computations.
        """
        self.adjacentSubnetList.append(subnet)
    def getAdjacentSubnets(self):
        """
        Return the list of adjacent subnets.
        """
        return self.adjacentSubnetList
    def emptyAdjacentLists(self):
        """
        Clear the list of adjacent routers and subnets.
        """
        self.adjacentRouterList = []
        self.adjacentSubnetList = []
    def emptyRouteTable(self):
        """
        Clear the route table.
        """
        for interface in self.interfaces:
            interface[QtCore.QString("routing")] = []
    def hasSubnet(self, subnet):
        """
        Check if the specified subnet is in the adjacent list.
        """
        for sub in self.adjacentSubnetList:
            if sub == subnet:
                return True
        return False
    def searchSubnet(self, subnet):
        """
        Search the specified subnet in the whole network.

        Breadth-first search over adjacent routers; returns the (router,
        local interface) pair through which the subnet was first reached, or
        (None, None) when the subnet is unreachable.
        """
        routerList=self.adjacentRouterList[:]
        # Save all found routers in the list, so that we don't visit a router twice
        foundList=[]
        for r in routerList:
            foundList.append(r[0])
        while len(routerList) > 0:
            theOne = routerList.pop(0)
            if theOne[0].hasSubnet(subnet):
                return (theOne[0], theOne[1])
            else:
                # Add its adjacent router list to the list
                for router, interface in theOne[0].getAdjacentRouters():
                    # Check if the router is already visited or is in the to be visited list
                    if not router in foundList:
                        # Keep the original local interface so the caller knows
                        # which first hop leads toward the subnet.
                        newOne = [router, theOne[1]]
                        routerList.append(newOne)
                        foundList.append(router)
        return (None, None)
    def addRoutingEntry(self, subnet):
        """
        Add an entry to the route table.
        """
        if not self.hasSubnet(subnet):
            device, interface = self.searchSubnet(subnet)
            if interface:
                target = interface[QtCore.QString("target")]
                # NOTE(review): "A and B or C" parses as "(A and B) or C", so any
                # REALM device takes this branch regardless of subnet, which makes
                # the elif below unreachable — confirm the intended grouping.
                if interface[QtCore.QString("subnet")] == subnet \
                   and self.device_type == "UML" or self.device_type == "REALM":
                    self.addEntry(interface[QtCore.QString("mask")],
                                  "",
                                  " ",
                                  target)
                elif interface[QtCore.QString("subnet")] == subnet \
                     and self.device_type == "REALM":
                    self.addEntry(interface[QtCore.QString("mask")],
                                  "",
                                  " ",
                                  target)
                else:
                    if target.device_type == "Switch":
                        # interfaceable = target.getTarget(self)
                        # gateway = interfaceable.getInterface(target)[QtCore.QString("ipv4")]
                        gateway = target.getGateway()
                    else:
                        gateway = target.getInterface(self)[QtCore.QString("ipv4")]
                    self.addEntry(interface[QtCore.QString("mask")],
                                  gateway,
                                  subnet,
                                  target)
        else:
            if self.device_type == "Router":
                # Directly attached subnet: route with a null gateway.
                interface = self.getInterface(None, subnet)
                self.addEntry(interface[QtCore.QString("mask")],
                              "0.0.0.0",
                              subnet,
                              interface[QtCore.QString("target")])
    def toString(self):
        """
        Reimplemented to provide route information.
        """
        # Uses Python 2 dict methods (has_key/iteritems).
        devInfo = Device.toString(self)
        interfaceInfo = ""
        for interface in self.interfaces:
            if interface.has_key(QtCore.QString("target")):
                interfaceInfo += "\t\tinterface:" + interface[QtCore.QString("target")].getName() + "\n"
            else:
                interfaceInfo += "\t\twireless interface:\n"
            for prop, value in interface.iteritems():
                if prop == "target":
                    pass
                elif prop == "routing":
                    for route in value:
                        interfaceInfo += "\t\t\t\troute:" + route[QtCore.QString("subnet")] + "\n"
                        for pr, val in route.iteritems():
                            if pr != "subnet":
                                interfaceInfo += "\t\t\t\t\t" + pr + ":" + val + "\n"
                else:
                    interfaceInfo += "\t\t\t" + prop + ":" + value + "\n"
        return devInfo + interfaceInfo
| {
"repo_name": "michaelkourlas/gini",
"path": "frontend/src/gbuilder/Core/Interfaceable.py",
"copies": "1",
"size": "10393",
"license": "mit",
"hash": 4355973467313571300,
"line_mean": 35.3848920863,
"line_max": 111,
"alpha_frac": 0.5122678726,
"autogenerated": false,
"ratio": 4.904672015101463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5916939887701462,
"avg_score": null,
"num_lines": null
} |
"""A device that can have interfaces"""
from Device import *
class Interfaceable(Device):
    """A device that owns network interfaces and per-interface route tables.

    Each interface is a dict keyed by QtCore.QString: "target" (peer node),
    "ipv4", "mac", and "routing" (a list of route-entry dicts).
    """
    def __init__(self):
        """
        Create a device that can have interfaces.
        """
        Device.__init__(self)
        # Context-menu actions for controlling the backend process.
        self.menu.addAction("Restart", self.restart)
        self.menu.addAction("Stop", self.terminate)
        self.adjacentRouterList = []    # [router, interface] pairs for route computation
        self.adjacentSubnetList = []    # subnets directly reachable from this device
        self.con_int = {}  # the connection and interface pair
    def generateToolTip(self):
        """
        Add IP address(es) to the tool tip for easier lookup.
        """
        tooltip = self.getName()
        for interface in self.getInterfaces():
            tooltip += "\n\nTarget: " + interface[QtCore.QString("target")].getName() + "\n"
            tooltip += "IP: " + interface[QtCore.QString("ipv4")]
        self.setToolTip(tooltip)
    def attach(self):
        """
        Attach to corresponding device on backend.

        Builds a shell command that sshs into the backend server and resumes
        the device's screen session, then launches it in a terminal emulator.
        """
        base = "ssh -t " + options["username"] + "@" + options["server"]
        screen = " screen -r "
        if self.device_type == "Wireless_access_point":
            # WAP sessions are named "WAP_<id>".
            screen += "WAP_%d" % self.getID()
        else:
            # Other sessions are named "<pid>.<device name>"; bail out if the
            # task manager has no PID for this device (backend not running).
            name = self.getName()
            pid = mainWidgets["tm"].getPID(name)
            if not pid:
                return
            screen += pid + "." + name
        command = ""
        window_name = str(self.getProperty("name")) # the strcast is necessary for cloning
        if(self.getName() != window_name):
            window_name += " (" + self.getName() + ")"
        if environ["os"] == "Windows":
            # On Windows, write the screen command to a ".start" file and have
            # putty execute it on the remote host (-m flag).
            startpath = environ["tmp"] + self.getName() + ".start"
            try:
                outfile = open(startpath, "w")
                outfile.write(screen)
                outfile.close()
            except: # NOTE(review): bare except; IOError/OSError would be more precise
                mainWidgets["log"].append("Failed to write to start file!")
                return
            command += "putty -"
            if options["session"]:
                # Reuse a saved putty session if one is configured.
                command += "load " + options["session"] + " -l " + options["username"] + " -t"
            else:
                command += base
            command += " -m \"" + startpath + "\""
        else:
            # Elsewhere, open an xterm running ssh + screen directly.
            command += "xterm -T \"" + window_name + "\" -e \"" + base + screen + "\""
        self.shell = subprocess.Popen(str(command), shell=True)#ShellStarter(command)
        #self.shell.start()
    def addInterface(self, node):
        """
        Add an interface to the list of interfaces with node as target.

        No-op if an interface targeting node already exists.
        """
        for interface in self.interfaces:
            if interface[QtCore.QString("target")] == node:
                return
        self.interfaces.append({
            QtCore.QString("target"):node,
            QtCore.QString("ipv4"):QtCore.QString(""),
            QtCore.QString("mac"):QtCore.QString(""),
            QtCore.QString("routing"):[]})
    def removeInterface(self, node):
        """
        Remove the interface from the list of interfaces where node is the target.
        """
        interface = None
        for interface in self.interfaces:
            if interface[QtCore.QString("target")] == node:
                break
            # Reset so a non-matching last element is not removed below.
            interface = None
        if interface:
            self.interfaces.remove(interface)
    def getInterfaces(self):
        """
        Return the list of interfaces.
        """
        return self.interfaces
    def getInterface(self, node=None, subnet=None):
        """
        Return an interface from the list of interfaces specified by node or subnet.

        With neither given, returns the first interface.  Returns None when no
        interface matches.
        """
        if not node and not subnet:
            return self.interfaces[0]
        elif subnet:
            for interface in self.interfaces:
                if interface[QtCore.QString("subnet")] == subnet:
                    return interface
        else:
            for interface in self.interfaces:
                if interface[QtCore.QString("target")] == node:
                    return interface
    def getInterfaceProperty(self, propName, node=None, subnet=None, index=0):
        """
        Return an interface property specified by node or subnet.

        With neither given, reads from the interface at position index.
        """
        if not node and not subnet:
            return self.interfaces[index][QtCore.QString(propName)]
        interface = self.getInterface(node, subnet)
        if interface:
            return interface[QtCore.QString(propName)]
    def setInterfaceProperty(self, prop, value, node=None, subnet=None, index=0):
        """
        Set an interface property specified by node or subnet.
        """
        if not node and not subnet:
            self.interfaces[index][QtCore.QString(prop)] = QtCore.QString(value)
        else:
            interface = self.getInterface(node, subnet)
            if not interface:
                return
            interface[QtCore.QString(prop)] = QtCore.QString(value)
        if prop == "ipv4":
            # Keep the tooltip's IP listing in sync.
            self.generateToolTip()
    def getTable(self, node=None):
        """
        Return the route table from the interface specified by node.
        """
        return self.getInterfaceProperty("routing", node)
    def getEntry(self, subnet, target):
        """
        Return an entry from the route table specified by subnet and target.
        """
        table = self.getInterfaceProperty("routing", target)
        for entry in table:
            if entry[QtCore.QString("subnet")] == subnet:
                return entry
    def getEntryProperty(self, prop, subnet, target):
        """
        Return a property from the entry specified by subnet and target.
        """
        entry = self.getEntry(subnet, target)
        return entry[QtCore.QString(prop)]
    def setEntryProperty(self, prop, value, subnet, target):
        """
        Set a property from the entry specified by subnet and target.
        """
        entry = self.getEntry(subnet, target)
        entry[QtCore.QString(prop)] = value
    def addEntry(self, mask, gateway, subnet, target):
        """
        Add an entry to the table specified by subnet and target.
        """
        entry = {QtCore.QString("netmask"):mask, QtCore.QString("gw"):gateway, QtCore.QString("subnet"):subnet}
        table = self.getTable(target)
        table.append(entry)
    def removeEntry(self, entry, target):
        """
        Remove an entry from the table specified by subnet and target.
        """
        table = self.getTable(target)
        table.remove(entry)
    def addAdjacentRouter(self, router, interface):
        """
        Add a router to the list of adjacent ones for route computations.
        """
        self.adjacentRouterList.append([router, interface])
    def getAdjacentRouters(self):
        """
        Return the list of adjacent routers.
        """
        return self.adjacentRouterList
    def addAdjacentSubnet(self, subnet):
        """
        Add a subnet to the list of adjacent ones for route computations.
        """
        self.adjacentSubnetList.append(subnet)
    def getAdjacentSubnets(self):
        """
        Return the list of adjacent subnets.
        """
        return self.adjacentSubnetList
    def emptyAdjacentLists(self):
        """
        Clear the list of adjacent routers and subnets.
        """
        self.adjacentRouterList = []
        self.adjacentSubnetList = []
    def emptyRouteTable(self):
        """
        Clear the route table.
        """
        for interface in self.interfaces:
            interface[QtCore.QString("routing")] = []
    def hasSubnet(self, subnet):
        """
        Check if the specified subnet is in the adjacent list.
        """
        for sub in self.adjacentSubnetList:
            if sub == subnet:
                return True
        return False
    def searchSubnet(self, subnet):
        """
        Search the specified subnet in the whole network.

        Breadth-first search over adjacent routers; returns the (router,
        local interface) pair through which the subnet was first reached, or
        (None, None) when the subnet is unreachable.
        """
        routerList=self.adjacentRouterList[:]
        # Save all found routers in the list, so that we don't visit a router twice
        foundList=[]
        for r in routerList:
            foundList.append(r[0])
        while len(routerList) > 0:
            theOne = routerList.pop(0)
            if theOne[0].hasSubnet(subnet):
                return (theOne[0], theOne[1])
            else:
                # Add its adjacent router list to the list
                for router, interface in theOne[0].getAdjacentRouters():
                    # Check if the router is already visited or is in the to be visited list
                    if not router in foundList:
                        # Keep the original local interface so the caller knows
                        # which first hop leads toward the subnet.
                        newOne = [router, theOne[1]]
                        routerList.append(newOne)
                        foundList.append(router)
        return (None, None)
    def addRoutingEntry(self, subnet):
        """
        Add an entry to the route table.
        """
        if not self.hasSubnet(subnet):
            device, interface = self.searchSubnet(subnet)
            if interface:
                target = interface[QtCore.QString("target")]
                # NOTE(review): "A and B or C" parses as "(A and B) or C", so any
                # REALM device takes this branch regardless of subnet, which makes
                # the elif below unreachable — confirm the intended grouping.
                if interface[QtCore.QString("subnet")] == subnet \
                   and self.device_type == "UML" or self.device_type == "REALM":
                    self.addEntry(interface[QtCore.QString("mask")],
                                  "",
                                  " ",
                                  target)
                elif interface[QtCore.QString("subnet")] == subnet \
                     and self.device_type == "REALM":
                    self.addEntry(interface[QtCore.QString("mask")],
                                  "",
                                  " ",
                                  target)
                else:
                    if target.device_type == "Switch":
                        # interfaceable = target.getTarget(self)
                        # gateway = interfaceable.getInterface(target)[QtCore.QString("ipv4")]
                        gateway = target.getGateway()
                    else:
                        gateway = target.getInterface(self)[QtCore.QString("ipv4")]
                    self.addEntry(interface[QtCore.QString("mask")],
                                  gateway,
                                  subnet,
                                  target)
        else:
            if self.device_type == "Router":
                # Directly attached subnet: route with a null gateway.
                interface = self.getInterface(None, subnet)
                self.addEntry(interface[QtCore.QString("mask")],
                              "0.0.0.0",
                              subnet,
                              interface[QtCore.QString("target")])
    def toString(self):
        """
        Reimplemented to provide route information.
        """
        # Uses Python 2 dict methods (has_key/iteritems).
        devInfo = Device.toString(self)
        interfaceInfo = ""
        for interface in self.interfaces:
            if interface.has_key(QtCore.QString("target")):
                interfaceInfo += "\t\tinterface:" + interface[QtCore.QString("target")].getName() + "\n"
            else:
                interfaceInfo += "\t\twireless interface:\n"
            for prop, value in interface.iteritems():
                if prop == "target":
                    pass
                elif prop == "routing":
                    for route in value:
                        interfaceInfo += "\t\t\t\troute:" + route[QtCore.QString("subnet")] + "\n"
                        for pr, val in route.iteritems():
                            if pr != "subnet":
                                interfaceInfo += "\t\t\t\t\t" + pr + ":" + val + "\n"
                else:
                    interfaceInfo += "\t\t\t" + prop + ":" + value + "\n"
        return devInfo + interfaceInfo
| {
"repo_name": "anrl/gini",
"path": "frontend/src/gbuilder/Core/Interfaceable.py",
"copies": "3",
"size": "12118",
"license": "mit",
"hash": -6877220150569378000,
"line_mean": 35.1717791411,
"line_max": 111,
"alpha_frac": 0.501898003,
"autogenerated": false,
"ratio": 4.849139655862345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006446202727704456,
"num_lines": 326
} |
"""A D* experiment with 28x28 grid.
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
from __future__ import print_function
import os
import cPickle as pickle
import numpy as np
import rlvision
from rlvision import utils
from rlvision.grid import GridSampler
from rlvision.utils import process_map_data
from rlvision.dstar import Dstar
# general parameters
n_samples = 100  # use limited data
n_steps = 56  # step budget: twice the 28-cell grid side
save_model = True  # if true, all data will be saved for future use
enable_vis = False  # if true, real time visualization will be enabled
# setup result folder
file_name = os.path.join(rlvision.RLVISION_DATA,
                         "chain_data", "grid28_with_idx.pkl")
im_data, state_data, label_data, sample_idx = process_map_data(
    file_name, return_full=True)
sampler = GridSampler(im_data, state_data, label_data, sample_idx, (28, 28))
gt_collector = []    # ground-truth state sequences
po_collector = []    # planned paths produced by D*
diff_collector = []  # |planned path length - ground-truth length|
print ("[MESSAGE] EXPERIMENT STARTED!")
# NOTE(review): 20742 looks like the sample count of grid28_with_idx.pkl — confirm
# against len(sample_idx); every 7th grid is evaluated.
for grid_idx in xrange(0, 20742, 7):
    # get a grid
    grid, state, label, goal = sampler.get_grid(grid_idx)
    gt_collector.append(state)
    # define step map: start fully unknown (all ones = free)
    grid = 1-grid[0]
    step_map = np.ones((28, 28), dtype=np.uint8)
    # positions are handled as (row, col) while states/goals are (x, y)
    pos = [state[0, 1], state[0, 0]]
    path = [(pos[0], pos[1])]
    planner = Dstar(path[0], (goal[1], goal[0]),
                    step_map.flatten(), (28, 28))
    for setp in xrange(n_steps):
        # reveal a 3-cell window of the true grid around the current position
        masked_img, coord = utils.mask_grid(pos,
                                            grid, 3, one_is_free=True)
        # # step image
        step_map[coord[0], coord[1]] = grid[coord[0], coord[1]]
        # step_map = utils.accumulate_map(step_map, masked_img)
        # cells whose known state changed since the planner's last view
        change = np.where(np.logical_xor(
            planner.grid, step_map.flatten()))[0]
        block_list = np.unravel_index(change, planner.imsize)
        print (block_list)
        for idx in xrange(block_list[0].shape[0]):
            planner.add_obstacle(block_list[0][idx], block_list[1][idx])
        errors, next_move = planner.replan()
        planner.reset_start_pos(next_move)
        if not errors and enable_vis:
            utils.plot_grid(step_map, (28, 28),
                            start=(path[0][1], path[0][0]),
                            pos=path[1:],
                            goal=(goal[0], goal[1]))
        # collect new action
        pos[0] = next_move[0]
        pos[1] = next_move[1]
        path.append((pos[1], pos[0]))
        if pos[0] == goal[1] and pos[1] == goal[0]:
            print ("[MESSAGE] FOUND THE PATH %i" % (grid_idx+1))
            break
    po_collector.append(path)
    diff_collector.append(abs(len(path)-1-state.shape[0]))
    print ("[MESSAGE] Diff %i" % (diff_collector[-1]))
    planner.kill_subprocess()
# persist ground truth, planned paths and length differences
data = {}
data['gt'] = gt_collector
data['po'] = po_collector
data['diff'] = diff_collector
with open("grid_28_dstar_result", "wb") as f:
    pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
    f.close()  # NOTE(review): redundant inside "with"; the context manager closes f
| {
"repo_name": "ToniRV/Learning-to-navigate-without-a-map",
"path": "rlvision/exps/dstar_28_exp.py",
"copies": "1",
"size": "3008",
"license": "mit",
"hash": -2841952516788036600,
"line_mean": 30.6631578947,
"line_max": 76,
"alpha_frac": 0.5950797872,
"autogenerated": false,
"ratio": 3.203407880724175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4298487667924175,
"avg_score": null,
"num_lines": null
} |
"""A D* experiment with 8x8 grid.
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
from __future__ import print_function
import os
import cPickle as pickle
import numpy as np
import rlvision
from rlvision import utils
from rlvision.grid import GridSampler
from rlvision.utils import process_map_data
from rlvision.dstar import Dstar
# general parameters
n_samples = 100  # use limited data
n_steps = 16  # step budget: twice the 8-cell grid side
save_model = True  # if true, all data will be saved for future use
enable_vis = False  # if true, real time visualization will be enabled
# setup result folder
file_name = os.path.join(rlvision.RLVISION_DATA,
                         "chain_data", "grid8_with_idx.pkl")
im_data, state_data, label_data, sample_idx = process_map_data(
    file_name, return_full=True)
sampler = GridSampler(im_data, state_data, label_data, sample_idx, (8, 8))
gt_collector = []    # ground-truth state sequences
po_collector = []    # planned paths produced by D*
diff_collector = []  # |planned path length - ground-truth length|
print ("[MESSAGE] EXPERIMENT STARTED!")
# evaluate every 7th sample
for grid_idx in xrange(0, len(sample_idx), 7):
    # get a grid
    grid, state, label, goal = sampler.get_grid(grid_idx)
    gt_collector.append(state)
    # define step map: start fully unknown (all ones = free)
    grid = 1-grid[0]
    step_map = np.ones((8, 8), dtype=np.uint8)
    # positions are handled as (row, col) while states/goals are (x, y)
    pos = [state[0, 1], state[0, 0]]
    path = [(pos[0], pos[1])]
    planner = Dstar(path[0], (goal[1], goal[0]),
                    step_map.flatten(), (8, 8))
    for setp in xrange(n_steps):
        # reveal a 3-cell window of the true grid around the current position
        masked_img, coord = utils.mask_grid(pos,
                                            grid, 3, one_is_free=True)
        # # step image
        step_map[coord[0], coord[1]] = grid[coord[0], coord[1]]
        # step_map = utils.accumulate_map(step_map, masked_img)
        # cells whose known state changed since the planner's last view
        change = np.where(np.logical_xor(
            planner.grid, step_map.flatten()))[0]
        block_list = np.unravel_index(change, planner.imsize)
        print (block_list)
        for idx in xrange(block_list[0].shape[0]):
            planner.add_obstacle(block_list[0][idx], block_list[1][idx])
        errors, next_move = planner.replan()
        planner.reset_start_pos(next_move)
        if not errors and enable_vis:
            utils.plot_grid(step_map, (8, 8),
                            start=(path[0][1], path[0][0]),
                            pos=path[1:],
                            goal=(goal[0], goal[1]))
        # collect new action
        pos[0] = next_move[0]
        pos[1] = next_move[1]
        path.append((pos[1], pos[0]))
        if pos[0] == goal[1] and pos[1] == goal[0]:
            print ("[MESSAGE] FOUND THE PATH %i" % (grid_idx+1))
            break
    po_collector.append(path)
    diff_collector.append(abs(len(path)-1-state.shape[0]))
    print ("[MESSAGE] Diff %i" % (diff_collector[-1]))
    planner.kill_subprocess()
# persist ground truth, planned paths and length differences
data = {}
data['gt'] = gt_collector
data['po'] = po_collector
data['diff'] = diff_collector
with open("grid_8_dstar_result", "wb") as f:
    pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
    f.close()  # NOTE(review): redundant inside "with"; the context manager closes f
| {
"repo_name": "ToniRV/Learning-to-navigate-without-a-map",
"path": "rlvision/exps/dstar_8_exp.py",
"copies": "1",
"size": "3006",
"license": "mit",
"hash": -6544320378367725000,
"line_mean": 30.6421052632,
"line_max": 74,
"alpha_frac": 0.5938123752,
"autogenerated": false,
"ratio": 3.1910828025477707,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42848951777477706,
"avg_score": null,
"num_lines": null
} |
# A dfs-based algorithm that finds the largest connected components of a graph, and prints out the largest 5 of them
import time
start = time.time()
time.clock()  # NOTE(review): time.clock() is Python-2 era; result is discarded here
graph = '2.txt'
# graph = 'scc.txt'
# assuming that the edge list is sorted, this gets its number of nodes
num_lines = sum(1 for line in open(graph))
with open(graph) as file:
    # last line's first field = largest (1-based) node id = node count
    num_nodes = int((file.readlines()[num_lines-1]).split()[0])
# forward and reversed adjacency lists, 0-based
edge_list = [[] for x in range(0,num_nodes)]
reversed_edge_list = [[] for x in range(0,num_nodes)]
with open(graph) as file:
    for line in file:
        # each line is "u v": a directed edge u -> v with 1-based ids
        u, v = line.split()
        u = int(u)
        v = int(v)
        # adj_matrix[u-1, v-1] = 1
        edge_list[u-1].append(v-1)
        reversed_edge_list[v-1].append(u-1)
print("edge list done!")
# each node is assigned a number, its leader, its finishing time and its neighboring edges
class Node():
    """A graph vertex tracked across both passes of Kosaraju's algorithm."""

    def __init__(self):
        """Initialize every field unset; the DFS passes fill them in."""
        self.number = ""                 # vertex index within the graph
        self.leader = ""                 # leader vertex of this node's SCC
        self.finishing_time = ""         # DFS finishing order (first pass)
        self.not_explored_edges = []     # outgoing edges not yet followed
        self.explored_indicator = False  # visited flag for the current pass
# vertex indices 0..num_nodes-1; graph_nodes is replaced by Node objects later
graph_nodes_numbers = [x for x in range(0,num_nodes)]
graph_nodes = [x for x in range(0,num_nodes)]
# these vars are global since used both in dfs loop and in dfs
graph_explored = []      # vertices visited in the current pass
finishing_time = 0       # DFS finishing-time counter
leader = 0               # leader vertex for the SCC currently being explored
edges_to_explore = []    # adjacency list used by the current pass
# not recursive since it had stack overflow problems
def dfs(g, node):
    """Iterative DFS from node over adjacency list g.

    Marks reached vertices explored, assigns them the current global leader,
    and stamps finishing times as vertices are popped off the explicit stack.
    Mutates g: traversed edges are removed from the adjacency lists.
    """
    global finishing_time, graph_explored, edges_to_explore, graph_nodes
    graph_to_explore = [node]
    graph_explored.append(node)
    graph_nodes[node].explored_indicator = True
    graph_nodes[node].leader = leader
    edges_to_explore = g
    while graph_to_explore:
        last_node = graph_to_explore[-1]
        # counts how many unexplored out-edges were pushed this iteration (0 or 1)
        not_explored_edges_of_a_node = 0
        if edges_to_explore[last_node]:
            for out_node in edges_to_explore[last_node]:
                # print(edges_to_explore[last_node])
                if not graph_nodes[out_node].explored_indicator and not_explored_edges_of_a_node == 0:
                    # push the first unexplored neighbor and consume that edge
                    graph_explored.append(out_node)
                    graph_nodes[out_node].explored_indicator = True
                    graph_to_explore.append(out_node)
                    not_explored_edges_of_a_node += 1
                    edges_to_explore[last_node].remove(out_node)
            if not_explored_edges_of_a_node == 0:
                # no unexplored neighbor left: finish this vertex
                # and graph_nodes[graph_to_explore[-1]].leader == ""
                graph_nodes[graph_to_explore[-1]].leader = node
                graph_nodes[graph_to_explore.pop()].finishing_time = finishing_time
                finishing_time += 1
            # elif not_explored_edges_of_a_node == 0 and graph_nodes[graph_to_explore[-1]].leader != "":
            #     graph_to_explore.pop()
            elif graph_nodes[graph_to_explore[-1]].leader != graph_to_explore[-1]:
                graph_nodes[graph_to_explore[-1]].leader = node
                graph_nodes[graph_to_explore.pop()].finishing_time = finishing_time
                finishing_time += 1
        else:
            # vertex has no remaining out-edges at all: finish it immediately
            graph_explored.append(node)
            graph_nodes[node].explored_indicator = True
            graph_nodes[graph_to_explore[-1]].leader = node
            graph_nodes[graph_to_explore.pop()].finishing_time = finishing_time
            finishing_time += 1
def dfsl(g, ordered_nodes):
    """DFS loop: run dfs from every unexplored vertex of ordered_nodes (in
    reverse order), resetting the visited flags and finishing-time counter.

    Prints progress (and per-1000-roots timing) every 1000 DFS roots.
    """
    global finishing_time, leader, graph_explored, graph_nodes
    graph_explored = []
    # graph_nodes holds Node objects by the time this runs (see driver below)
    for x in graph_nodes:
        x.explored_indicator = False
    edges_to_explore = []
    finishing_time = 0
    count = 0
    new_time = time.clock()
    for node in reversed(ordered_nodes):
        if not graph_nodes[node].explored_indicator:
            count += 1
            if count % 1000 == 0:
                print(count)
                time_diff = time.clock() - new_time
                new_time = time.clock()
                print(time_diff)
            leader = node
            dfs(g, node)
# replace the placeholder ints with Node objects (x is still the old int value)
for x in graph_nodes:
    graph_nodes[x] = Node()
    graph_nodes[x].number = x
dfsl_time_start = time.time()
# pass 1: DFS on the reversed graph to compute finishing times
dfsl(reversed_edge_list, graph_nodes_numbers)
dfsl_time = time.time() - dfsl_time_start
# order vertices by ascending finishing time for the second pass
di = dict(zip(graph_nodes_numbers,[node.finishing_time for node in graph_nodes]))
nodes_ordered_by_finishing_time = [edge[0] for edge in sorted(di.items(), key=lambda x:x[1])]
# pass 2: DFS on the forward graph; each DFS root becomes an SCC leader
dfsl(edge_list, nodes_ordered_by_finishing_time)
# count SCC sizes by leader and report the five largest
leaders = {}
for node in graph_nodes_numbers:
    if graph_nodes[node].leader in leaders:
        leaders[graph_nodes[node].leader] += 1
    else:
        leaders[graph_nodes[node].leader] = 1
print(sorted(leaders.values(), reverse=True)[:5])
end = time.time() - start
print(end)
print(dfsl_time) | {
"repo_name": "guzey/coursera-algorithms",
"path": "week2-1-kosaraju/kosaraju_ugly.py",
"copies": "1",
"size": "4630",
"license": "mit",
"hash": -4776396129258308000,
"line_mean": 34.6230769231,
"line_max": 116,
"alpha_frac": 0.6084233261,
"autogenerated": false,
"ratio": 3.290689410092395,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4399112736192395,
"avg_score": null,
"num_lines": null
} |
# Ad-hoc algorithm for copy-move forgery detection in images.
# Implemented by - vasiliauskas.agnius@gmail.com
# Robust match algorithm steps:
# 1. Blur image for eliminating image details
# 2. Convert image to degraded palette
# 3. Decompose image into small NxN pixel blocks
# 4. Alphabetically order these blocks by their pixel values
# 5. Extract only these adjacent blocks which have small absolute color difference
# 6. Cluster these blocks into clusters by intersection area among blocks
# 7. Extract only these clusters which are bigger than block size
# 8. Extract only these clusters which have similar cluster, by using some sort of similarity function (in this case Hausdorff distance between clusters)
# 9. Draw discovered similar clusters on image
import sys
from PIL import Image, ImageFilter, ImageDraw
import operator as op
from optparse import OptionParser
def Dist(p1, p2):
    """
    Euclidean distance between two 2-D points.
    """
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    return (dx * dx + dy * dy) ** 0.5
def intersectarea(p1, p2, size):
    """
    Area of overlap between two size x size axis-aligned boxes whose
    top-left corners are p1 and p2; 0 when the boxes are disjoint.
    """
    ax, ay = p1
    bx, by = p2
    left, top = max(ax, bx), max(ay, by)
    right, bottom = min(ax + size, bx + size), min(ay + size, by + size)
    # a negative extent on either axis means the boxes do not intersect
    if right < left or bottom < top:
        return 0
    return abs(right - left) * abs(bottom - top)
def Hausdorff_distance(clust1, clust2, forward, dir):
    """
    Modified Hausdorff distance between two point sets: average (instead of
    max) of nearest-neighbor distances, after shifting by dir, which makes
    the measure more tolerant of cluster-coordinate noise.  With forward
    None, returns the symmetric distance (max of both directions).
    """
    if forward is None:
        fwd = Hausdorff_distance(clust1, clust2, True, dir)
        bwd = Hausdorff_distance(clust1, clust2, False, dir)
        return max(fwd, bwd)
    if forward:
        src, dst = clust1, clust2
        dx, dy = dir
    else:
        # reverse direction: swap sets and negate the shift
        src, dst = clust2, clust1
        dx, dy = -dir[0], -dir[1]
    total = 0.0
    for px, py in src:
        total += min(Dist((px + dx, py + dy), q) for q in dst)
    return total / len(src)
def hassimilarcluster(ind, clusters):
    """
    Tell whether clusters[ind] has a near-identical twin cluster elsewhere
    in the image: another cluster whose shifted Hausdorff distance is within
    the --rgsim threshold.
    """
    item = op.itemgetter
    global opt
    found = False
    # top-left corner of the candidate cluster's bounding box
    tx = min(clusters[ind],key=item(0))[0]
    ty = min(clusters[ind],key=item(1))[1]
    for i, cl in enumerate(clusters):
        if i != ind:
            cx = min(cl,key=item(0))[0]
            cy = min(cl,key=item(1))[1]
            # translation that aligns the two bounding-box corners
            dx, dy = cx - tx, cy - ty
            specdist = Hausdorff_distance(clusters[ind],cl,None,(dx,dy))
            if specdist <= int(opt.rgsim):
                found = True
                break
    return found
def blockpoints(pix, coords, size):
    """
    Yield the color of every pixel in the size x size block whose top-left
    corner is at coords, column by column.
    """
    x0, y0 = coords
    for col in range(x0, x0 + size):
        for row in range(y0, y0 + size):
            yield pix[col, row]
def colortopalette(color, palette):
    """
    Map color onto the palette: return the upper bound of the first
    half-open interval [lower, upper) containing it, or None if no
    interval matches.
    """
    for lower, upper in palette:
        if lower <= color < upper:
            return upper
def imagetopalette(image, palcolors):
    """
    Quantize a grayscale image in place onto the custom palette defined by
    the sorted color levels in palcolors (consecutive levels form intervals).
    """
    assert image.mode == 'L', "Only grayscale images supported !"
    # consecutive levels -> [lower, upper) intervals; materialize the list so
    # colortopalette can iterate it once per pixel
    intervals = list(zip(palcolors[:-1], palcolors[1:]))
    image.putdata([colortopalette(c, intervals) for c in image.getdata()])
def getparts(image, block_len):
    """
    Decompose the image into overlapping block_len x block_len pixel blocks.

    The image is converted to grayscale, smoothed --imblev times, and
    quantized to a degraded palette (--impalred step) before decomposition.
    Returns the blocks sorted lexicographically by pixel values; each block
    is a list of pixel values with its (x, y) origin appended as last item.
    """
    img = image.convert('L') if image.mode != 'L' else image
    w, h = img.size
    parts = []
    # Blurring the image to suppress fine detail and noise.
    global opt
    for n in range(int(opt.imblev)):
        img = img.filter(ImageFilter.SMOOTH_MORE)
    # Convert the image to the custom degraded palette.
    imagetopalette(img, [x for x in range(256) if x%int(opt.impalred) == 0])
    pix = img.load()
    for x in range(w-block_len):
        for y in range(h-block_len):
            # pixel values first, block origin last (used by clusterparts)
            data = list(blockpoints(pix, (x,y), block_len)) + [(x,y)]
            parts.append(data)
    # lexicographic sort puts blocks with near-identical content next to each other
    parts = sorted(parts)
    return parts
def similarparts(imagparts):
    """Return the blocks whose pixel content closely matches a neighbour.

    Blocks arrive sorted, so visually identical blocks are adjacent; a pair
    is kept when the summed per-pixel difference is within opt.blsim and the
    block shows enough relative colour deviation (flat regions are ignored).

    BUG FIX: an all-zero (black) block has mean == 0 and the original
    dev/mean test raised ZeroDivisionError; such flat blocks carry no
    colour deviation, so they are now skipped explicitly.
    """
    global opt
    dupl = []
    l = len(imagparts[0]) - 1
    coldev_threshold = float(opt.blcoldev)
    sim_threshold = int(opt.blsim)
    for i in range(len(imagparts) - 1):
        cur = imagparts[i][:l]
        nxt = imagparts[i + 1][:l]
        difs = sum(abs(x - y) for x, y in zip(cur, nxt))
        mean = float(sum(cur)) / l
        if mean == 0:
            continue  # uniform black block: no deviation, cannot pass the test
        dev = sum(abs(mean - val) for val in cur) / l
        if dev / mean >= coldev_threshold and difs <= sim_threshold:
            if imagparts[i] not in dupl:
                dupl.append(imagparts[i])
            if imagparts[i + 1] not in dupl:
                dupl.append(imagparts[i + 1])
    return dupl
def clusterparts(parts, block_len):
    """
    Further filtering out non essential blocks.
    This is done by clustering blocks at first and after that
    filtering out small clusters and clusters which doesn`t have
    twin cluster in image.
    """
    # sort by block position, the last element of each part
    parts = sorted(parts, key=op.itemgetter(-1))
    global opt
    clusters = [[parts[0][-1]]]
    # assign all parts to clusters
    for i in range(1,len(parts)):
        x, y = parts[i][-1]
        # detect box already in cluster
        # fc collects the indices of all clusters this box overlaps
        fc = []
        for k,cl in enumerate(clusters):
            for xc,yc in cl:
                # intersectarea is defined elsewhere in this file
                ar = intersectarea((xc,yc),(x,y),block_len)
                intrat = float(ar)/(block_len*block_len)
                if intrat > float(opt.blint):
                    # append the box only to the first overlapping cluster
                    if not fc: clusters[k].append((x,y))
                    fc.append(k)
                    break
        # if this is new cluster
        if not fc:
            clusters.append([(x,y)])
        else:
            # re-clustering boxes if in several clusters at once
            # NOTE(review): relies on fc being in ascending index order so
            # deleting clusters[fc[-1]] never shifts clusters[fc[0]]
            while len(fc) > 1:
                clusters[fc[0]] += clusters[fc[-1]]
                del clusters[fc[-1]]
                del fc[-1]
    item = op.itemgetter
    # filter out small clusters (bounding-box diagonal relative to block size)
    clusters = [clust for clust in clusters if Dist((min(clust,key=item(0))[0],min(clust,key=item(1))[1]), (max(clust,key=item(0))[0],max(clust,key=item(1))[1]))/(block_len*1.4) >= float(opt.rgsize)]
    # filter out clusters, which doesn`t have identical twin cluster
    clusters = [clust for x,clust in enumerate(clusters) if hassimilarcluster(x,clusters)]
    return clusters
def marksimilar(image, clust, size):
    """Draw discovered similar image regions.

    Every clustered block is highlighted with a translucent cyan overlay;
    in automatic mode each cluster's bounding box is outlined in magenta.
    Returns the (modified) image.

    BUG FIX: the bounding-box corners previously added the global
    `block_len` instead of the `size` parameter, which only worked because
    the caller happened to pass the same value.
    """
    global opt
    blocks = []
    if clust:
        draw = ImageDraw.Draw(image)
        mask = Image.new('RGB', (size, size), 'cyan')
        for cl in clust:
            for x, y in cl:
                im = image.crop((x, y, x + size, y + size))
                im = Image.blend(im, mask, 0.5)
                blocks.append((x, y, im))
        for x, y, im in blocks:
            image.paste(im, (x, y, x + size, y + size))
        if int(opt.imauto):
            for cl in clust:
                cx1 = min([cx for cx, cy in cl])
                cy1 = min([cy for cx, cy in cl])
                cx2 = max([cx for cx, cy in cl]) + size
                cy2 = max([cy for cx, cy in cl]) + size
                draw.rectangle([cx1, cy1, cx2, cy2], outline="magenta")
    return image
if __name__ == '__main__':
    # Command-line interface: tune the detection thresholds, then run the
    # copy-move forgery detection pipeline on the given image.
    cmd = OptionParser("usage: %prog image_file [options]")
    cmd.add_option('', '--imauto', help='Automatically search identical regions. (default: %default)', default=1)
    cmd.add_option('', '--imblev',help='Blur level for degrading image details. (default: %default)', default=8)
    cmd.add_option('', '--impalred',help='Image palette reduction factor. (default: %default)', default=15)
    cmd.add_option('', '--rgsim', help='Region similarity threshold. (default: %default)', default=5)
    cmd.add_option('', '--rgsize',help='Region size threshold. (default: %default)', default=1.5)
    cmd.add_option('', '--blsim', help='Block similarity threshold. (default: %default)',default=200)
    cmd.add_option('', '--blcoldev', help='Block color deviation threshold. (default: %default)', default=0.2)
    cmd.add_option('', '--blint', help='Block intersection threshold. (default: %default)', default=0.2)
    # opt is read as a module-level global by the helper functions above
    opt, args = cmd.parse_args()
    if not args:
        cmd.print_help()
        sys.exit()
    print 'Analyzing image, please wait... (can take some minutes)'
    block_len = 15
    im = Image.open(args[0])
    # pipeline: decompose -> find similar blocks -> cluster (optional) -> mark
    lparts = getparts(im, block_len)
    dparts = similarparts(lparts)
    cparts = clusterparts(dparts, block_len) if int(opt.imauto) else [[elem[-1] for elem in dparts]]
    im = marksimilar(im, cparts, block_len)
    out = args[0].split('.')[0] + '_analyzed.jpg'
    im.save(out)
    print 'Done. Found', len(cparts) if int(opt.imauto) else 0, 'identical regions'
    print 'Output is saved in file -', out
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/576689_Copymove_forgery/recipe-576689.py",
"copies": "1",
"size": "7950",
"license": "mit",
"hash": -7362409823070134000,
"line_mean": 32.829787234,
"line_max": 196,
"alpha_frac": 0.6779874214,
"autogenerated": false,
"ratio": 3.0136467020470055,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41916341234470056,
"avg_score": null,
"num_lines": null
} |
"""adhoc filters
Revision ID: bddc498dd179
Revises: 80a67c5192fa
Create Date: 2018-06-13 14:54:47.086507
"""
# revision identifiers, used by Alembic.
revision = 'bddc498dd179'
down_revision = '80a67c5192fa'
from collections import defaultdict
import json
import uuid
from alembic import op
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, Text
from superset import db
from superset import utils
Base = declarative_base()
class Slice(Base):
    """Minimal ORM mapping of the `slices` table — only the columns this
    migration reads and writes."""
    __tablename__ = 'slices'
    id = Column(Integer, primary_key=True)
    # JSON-encoded visualization parameters
    params = Column(Text)
def upgrade():
    """Fold each slice's legacy `where`/`having` free-form clauses and
    `filters`/`having_filters` lists into the unified `adhoc_filters`
    parameter, then drop the legacy keys.
    """
    bind = op.get_bind()
    session = db.Session(bind=bind)
    # legacy free-form clause key -> legacy structured filter-list key
    mapping = {'having': 'having_filters', 'where': 'filters'}
    for slc in session.query(Slice).all():
        try:
            params = json.loads(slc.params)
            # idiom fix: `'x' not in d` instead of `not 'x' in d`
            if 'adhoc_filters' not in params:
                params['adhoc_filters'] = []
                for clause, filters in mapping.items():
                    # free-form SQL clause -> single SQL-expression filter
                    if clause in params and params[clause] != '':
                        params['adhoc_filters'].append({
                            'clause': clause.upper(),
                            'expressionType': 'SQL',
                            'filterOptionName': str(uuid.uuid4()),
                            'sqlExpression': params[clause],
                        })
                    # structured filter list -> simple col/op/val filters
                    if filters in params:
                        for filt in params[filters]:
                            params['adhoc_filters'].append({
                                'clause': clause.upper(),
                                'comparator': filt['val'],
                                'expressionType': 'SIMPLE',
                                'filterOptionName': str(uuid.uuid4()),
                                'operator': filt['op'],
                                'subject': filt['col'],
                            })
            # drop the legacy keys whether or not they were migrated
            for key in ('filters', 'having', 'having_filters', 'where'):
                if key in params:
                    del params[key]
            slc.params = json.dumps(params, sort_keys=True)
        except Exception:
            # best effort: leave slices with unparseable params untouched
            pass
    session.commit()
    session.close()
def downgrade():
    """Reverse the migration: expand each slice's `adhoc_filters` back into
    the legacy base filter fields and remove the unified key.
    """
    bind = op.get_bind()
    session = db.Session(bind=bind)
    for slc in session.query(Slice).all():
        try:
            params = json.loads(slc.params)
            utils.split_adhoc_filters_into_base_filters(params)
            # drop the unified key if present
            params.pop('adhoc_filters', None)
            slc.params = json.dumps(params, sort_keys=True)
        except Exception:
            # best effort: leave slices with unparseable params untouched
            pass
    session.commit()
    session.close()
| {
"repo_name": "timifasubaa/incubator-superset",
"path": "superset/migrations/versions/bddc498dd179_adhoc_filters.py",
"copies": "2",
"size": "2689",
"license": "apache-2.0",
"hash": 8186284493724957000,
"line_mean": 26.7216494845,
"line_max": 72,
"alpha_frac": 0.5184083302,
"autogenerated": false,
"ratio": 4.386623164763458,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0039014938611532063,
"num_lines": 97
} |
"""Ad-hoc implementation of quaternion operators using numpy.
Copyright (c) 2014, Garth Zeglin. All rights reserved. Licensed under the terms
of the BSD 3-clause license as included in LICENSE.
All quaternions are represented as a 4 element numpy ndarray, following a (w,x,y,z) convention.
All operators are pure Python for portability.
N.B. this is not well tested, efficient, or optimized.
Quaternion addition is just the usual vector sum: q1 + q2
"""
import numpy as np
def axis_angle( axis, angle ):
    """Return the quaternion for a rotation of `angle` radians about `axis`.

    Arguments: (axis, angle)
    axis  -- three element vector (need not be normalized)
    angle -- rotation angle in radians
    """
    half = angle / 2
    # one factor both normalizes the axis and applies sin(angle/2)
    k = np.sin(half) / np.sqrt(np.dot(axis, axis))
    return np.array((np.cos(half), k * axis[0], k * axis[1], k * axis[2]))
def vector( v ):
    """Return the pure quaternion (zero scalar part) embedding the 3-vector v."""
    return np.array([0, v[0], v[1], v[2]])
def identity():
    """Return the identity quaternion (no rotation)."""
    return np.array([1.0, 0.0, 0.0, 0.0])
def normalize( q ):
    """Return q rescaled to unit magnitude.

    Rotation quaternions should already have unit magnitude, but numerical
    error accumulates; the zero quaternion maps to the identity rotation.
    """
    mag2 = np.dot(q, q)
    if mag2 == 0:
        return identity()
    return q / np.sqrt(mag2)
def conjugate( q ):
    """Return the quaternion conjugate: the vector part negated."""
    w, x, y, z = q
    return np.array((w, -x, -y, -z))
def multiply( p, q ):
    """Compute the Hamilton product p*q.

    The product p * q = p0q0 - p.q + p0 q + q0 p + p X q
    """
    p0, p1, p2, p3 = p
    q0, q1, q2, q3 = q
    return np.array((p0 * q0 - p1 * q1 - p2 * q2 - p3 * q3,
                     p0 * q1 + p1 * q0 + (p2 * q3) - (p3 * q2),
                     p0 * q2 + p2 * q0 + (p3 * q1) - (p1 * q3),
                     p0 * q3 + p3 * q0 + (p1 * q2) - (p2 * q1)))
def rotate_vector( q, v ):
    """Rotate the three-element vector v by the rotation of quaternion q.

    Computes q * v * conj(q) and returns the vector part as a new array.
    """
    rotated = multiply(multiply(q, vector(v)), conjugate(q))
    return rotated[1:4]
def to_threexform( q ):
    """Return a 4x4 homogeneous matrix representing the quaternion rotation."""
    # FIXME: generalize to operate on an array of quaternions at once
    w, x, y, z = q[0], q[1], q[2], q[3]
    ww, xx, yy, zz = w * w, x * x, y * y, z * z
    return np.array(((ww + xx - yy - zz, 2 * (x * y - w * z), 2 * (x * z + w * y), 0.0),
                     (2 * (x * y + w * z), ww - xx + yy - zz, 2 * (y * z - w * x), 0.0),
                     (2 * (x * z - w * y), 2 * (y * z + w * x), ww - xx - yy + zz, 0.0),
                     (0, 0, 0, 1)))
| {
"repo_name": "CMU-dFabLab/dfab",
"path": "python/dfab/geometry/quaternion.py",
"copies": "1",
"size": "3137",
"license": "bsd-3-clause",
"hash": 4134244651876270600,
"line_mean": 30.0594059406,
"line_max": 95,
"alpha_frac": 0.5164169589,
"autogenerated": false,
"ratio": 3.025072324011572,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4041489282911572,
"avg_score": null,
"num_lines": null
} |
# Ad hoc script to fix problem of legacy illumina files
# read 1 and read 2 are in the same file,
# read 1 first, but in random order and with orphans
# the script pairs up the reads into two separate outputfiles
# restoring the order
# orphans go into a separate file
# run as python split_in_pairs.py file.fastq
import sys, os
import gzip
from Bio.SeqIO.QualityIO import FastqGeneralIterator
def write_pair(readID, read1, read2, fh_out_R1, fh_out_R2):
    """Write one read pair in FASTQ format, read 1 and read 2 each to its own file.

    read1/read2 are [sequence, quality] pairs; the /1 and /2 mate suffixes
    are re-appended to the shared read ID.  (Removed the dead trailing
    `pass` statement of the original.)
    """
    fh_out_R1.write("@%s\n%s\n+\n%s\n" % (readID + "/1", read1[0], read1[1]))
    fh_out_R2.write("@%s\n%s\n+\n%s\n" % (readID + "/2", read2[0], read2[1]))
def write_orph(readID, read, fh_out_orphan):
    """Write one orphan read (a [sequence, quality] pair) in FASTQ format.

    `readID` already carries its /1 or /2 suffix.  (Removed the dead
    trailing `pass` statement of the original.)
    """
    fh_out_orphan.write("@%s\n%s\n+\n%s\n" % (readID, read[0], read[1]))
fastqIn = sys.argv[1]
# create output filenames
# Input: sometext.fq.gz or sometext.fq
# Out read 1: sometext.R1.fq
# Out read 2: sometext.R2.fq
# Out orphan reads: sometext.orphan.fq
# NOTE(review): str.rstrip strips *characters* from the set {'.', 'g', 'z'},
# not the literal ".gz" suffix -- a name like "sample.g.gz" would be
# over-truncated.  Works for typical names ending in ".fastq.gz"; confirm
# before reusing with other naming schemes.
fileName = os.path.basename(fastqIn.rstrip(".gz"))
fileOutname, ext = os.path.splitext(fileName)
fastqOutR1 = "%s.R1%s" %(fileOutname, ext)
fastqOutR2 = "%s.R2%s" %(fileOutname, ext)
fastqOutOrph = "%s.orphan%s" %(fileOutname, ext)
# determine whether gzipped
# open file appropriately
if fastqIn.endswith(".gz"):
    fh_in = gzip.open(fastqIn, 'r')
else:
    fh_in = open(fastqIn, 'r')
# reads seen so far whose mate has not appeared yet,
# keyed by read ID without the /1 or /2 suffix
reads1 = {}
reads2 = {}
# open output files
fh_out_R1 = open(fastqOutR1, 'wb')
fh_out_R2 = open(fastqOutR2, 'wb')
# parse input fastq file
for title, seq, qual in FastqGeneralIterator(fh_in):
    pairID = title[-2:]
    readID = title[:-2]
    if pairID == "/1":
        # read 1
        # seen? then write as paired
        if readID in reads2:
            read1 = [seq, qual]
            read2 = reads2[readID]
            write_pair(readID, read1, read2, fh_out_R1, fh_out_R2)
            # remove from dictionary
            del reads2[readID]
        else:
            # add to dictionary
            reads1[readID] = [seq, qual]
    elif pairID == "/2":
        # read 2
        # seen? then write as paired
        if readID in reads1:
            read1 = reads1[readID]
            read2 = [seq, qual]
            write_pair(readID, read1, read2, fh_out_R1, fh_out_R2)
            # remove from dictionary
            del reads1[readID]
        else:
            # add to dictionary
            reads2[readID] = [seq, qual]
    else:
        raise Exception("Read identifier does not end in /1 or /2: %s" %title)
fh_in.close()
fh_out_R1.close()
fh_out_R2.close()
# print out orphans, i.e. the remaining reads in the dictionaries
fh_out_orphan = open(fastqOutOrph, 'wb')
for readID in reads1:
    write_orph(readID + "/1", reads1[readID], fh_out_orphan)
for readID in reads2:
    write_orph(readID + "/2", reads2[readID], fh_out_orphan)
fh_out_orphan.close()
| {
"repo_name": "lexnederbragt/sequencetools",
"path": "split_in_pairs.py",
"copies": "1",
"size": "2939",
"license": "unlicense",
"hash": 7844279014013377000,
"line_mean": 30.2659574468,
"line_max": 78,
"alpha_frac": 0.6230010208,
"autogenerated": false,
"ratio": 2.924378109452736,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.902423854032214,
"avg_score": 0.004628117986119306,
"num_lines": 94
} |
""" adiabatic tapers from CSV files
"""
import pathlib
from pathlib import Path
from typing import Tuple
import pp
from pp.component import Component
from pp.load_csv import load_csv
data_path = pathlib.Path(__file__).parent / "csv_data"
def taper_from_csv(
    csv_path: Path,
    wg_layer: int = 1,
    clad_offset: int = 3,
    clad_layer: Tuple[int, int] = pp.LAYER.WGCLAD,
) -> Component:
    """Build a taper Component from a CSV profile of x position vs. width.

    The CSV columns are in meters; ports W0 (west) and E0 (east) are placed
    at the first and last profile points.
    """
    profile = load_csv(csv_path)
    xs = profile["x"] * 1e6              # meters -> microns
    half_widths = profile["width"] * 1e6 / 2.0
    half_widths_clad = half_widths + clad_offset

    component = pp.Component()
    # outline: top edge left-to-right, then mirrored bottom edge right-to-left
    component.add_polygon(
        list(zip(xs, half_widths)) + list(zip(xs, -half_widths))[::-1], layer=wg_layer
    )
    component.add_polygon(
        list(zip(xs, half_widths_clad)) + list(zip(xs, -half_widths_clad))[::-1],
        layer=clad_layer,
    )
    for port_name, i, angle in (("W0", 0, 180), ("E0", -1, 0)):
        component.add_port(
            name=port_name,
            midpoint=(xs[i], 0),
            width=2 * half_widths[i],
            orientation=angle,
            port_type="optical",
        )
    return component
@pp.cell_with_validator
def taper_0p5_to_3_l36(**kwargs) -> Component:
    """Taper from 0.5 to 3 um wide over 36 um, from a CSV profile."""
    return taper_from_csv(data_path / "taper_strip_0p5_3_36.csv", **kwargs)
@pp.cell_with_validator
def taper_w10_l100(**kwargs):
    """Taper from 0.5 to 10 um wide over 100 um, from a CSV profile."""
    return taper_from_csv(data_path / "taper_strip_0p5_10_100.csv", **kwargs)
@pp.cell_with_validator
def taper_w10_l150(**kwargs):
    """Taper from 0.5 to 10 um wide over 150 um, from a CSV profile."""
    return taper_from_csv(data_path / "taper_strip_0p5_10_150.csv", **kwargs)
@pp.cell_with_validator
def taper_w10_l200(**kwargs):
    """Taper from 0.5 to 10 um wide over 200 um, from a CSV profile."""
    return taper_from_csv(data_path / "taper_strip_0p5_10_200.csv", **kwargs)
@pp.cell_with_validator
def taper_w11_l200(**kwargs):
    """Taper from 0.5 to 11 um wide over 200 um, from a CSV profile."""
    return taper_from_csv(data_path / "taper_strip_0p5_11_200.csv", **kwargs)
@pp.cell_with_validator
def taper_w12_l200(**kwargs):
    """Taper from 0.5 to 12 um wide over 200 um, from a CSV profile."""
    return taper_from_csv(data_path / "taper_strip_0p5_12_200.csv", **kwargs)
if __name__ == "__main__":
c = taper_0p5_to_3_l36()
# c = taper_w10_l100()
# c = taper_w11_l200()
c.show()
| {
"repo_name": "gdsfactory/gdsfactory",
"path": "pp/components/taper_from_csv.py",
"copies": "1",
"size": "2218",
"license": "mit",
"hash": -4403924633935120400,
"line_mean": 23.6444444444,
"line_max": 84,
"alpha_frac": 0.6091073039,
"autogenerated": false,
"ratio": 2.6217494089834514,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37308567128834513,
"avg_score": null,
"num_lines": null
} |
""" adiabatic tapers from CSV files
"""
import pathlib
import pp
data_path = pathlib.Path(__file__).parent / "csv_data"
def taper_from_csv(csv_path, wg_layer=1, clad_offset=3, clad_layer=pp.LAYER.WGCLAD):
    """Build a taper component from a CSV profile of x position vs. width.

    The CSV columns are in meters; ports W0 (west) and E0 (east) are placed
    at the first and last profile points.
    """
    profile = pp.load_csv(csv_path)
    xs = profile["x"] * 1e6              # meters -> microns
    half_widths = profile["width"] * 1e6 / 2.0
    half_widths_clad = half_widths + clad_offset

    c = pp.Component()
    # outline: top edge left-to-right, then mirrored bottom edge right-to-left
    c.add_polygon(
        list(zip(xs, half_widths)) + list(zip(xs, -half_widths))[::-1], layer=wg_layer
    )
    c.add_polygon(
        list(zip(xs, half_widths_clad)) + list(zip(xs, -half_widths_clad))[::-1],
        layer=clad_layer,
    )
    for port_name, i, angle in (("W0", 0, 180), ("E0", -1, 0)):
        c.add_port(
            name=port_name,
            midpoint=(xs[i], 0),
            width=2 * half_widths[i],
            orientation=angle,
            port_type="optical",
        )
    return c
@pp.autoname
def taper_w3_l36(**kwargs):
    """Taper from 0.5 to 3 um wide over 36 um, from a CSV profile."""
    return taper_from_csv(data_path / "taper_strip_0p5_3_36.csv", **kwargs)
@pp.autoname
def taper_w10_l100(**kwargs):
    """Taper from 0.5 to 10 um wide over 100 um, from a CSV profile."""
    return taper_from_csv(data_path / "taper_strip_0p5_10_100.csv", **kwargs)
@pp.autoname
def taper_w10_l150(**kwargs):
    """Taper from 0.5 to 10 um wide over 150 um, from a CSV profile."""
    return taper_from_csv(data_path / "taper_strip_0p5_10_150.csv", **kwargs)
@pp.autoname
def taper_w10_l200(**kwargs):
    """Taper from 0.5 to 10 um wide over 200 um, from a CSV profile."""
    return taper_from_csv(data_path / "taper_strip_0p5_10_200.csv", **kwargs)
@pp.autoname
def taper_w11_l200(**kwargs):
    """Taper from 0.5 to 11 um wide over 200 um, from a CSV profile."""
    return taper_from_csv(data_path / "taper_strip_0p5_11_200.csv", **kwargs)
@pp.autoname
def taper_w12_l200(**kwargs):
    """Taper from 0.5 to 12 um wide over 200 um, from a CSV profile."""
    return taper_from_csv(data_path / "taper_strip_0p5_12_200.csv", **kwargs)
if __name__ == "__main__":
c = taper_w3_l36()
# c = taper_w10_l100()
# c = taper_w11_l200()
pp.show(c)
| {
"repo_name": "psiq/gdsfactory",
"path": "pp/components/taper_from_csv.py",
"copies": "1",
"size": "1943",
"license": "mit",
"hash": -3334166929318280000,
"line_mean": 22.987654321,
"line_max": 84,
"alpha_frac": 0.5949562532,
"autogenerated": false,
"ratio": 2.52665799739922,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.362161425059922,
"avg_score": null,
"num_lines": null
} |
"""A dialog window to change the filter options.
"""
import datetime
from PySide import QtCore, QtGui
import photo.idxfilter
from photo.geo import GeoPosition
class GeoPosEdit(QtGui.QLineEdit):
    """A QLineEdit whose preferred width fits the longest GeoPosition string."""

    def sizeHint(self):
        """Return the base hint widened to fit an extreme coordinate string."""
        hint = super().sizeHint()
        metrics = self.fontMetrics()
        widest = "\u2014%s\u2014" % GeoPosition("90.0 S, 180.0 E").floatstr()
        hint.setWidth(metrics.boundingRect(widest).width())
        return hint
class FilterOption(object):
    """Base class for one filter section of the dialog: a checkable group box.

    Subclasses add their entry widgets and implement getOption()/setOption().
    """
    def __init__(self, criterion, parent):
        self.groupbox = QtGui.QGroupBox("Filter by %s" % criterion)
        self.groupbox.setCheckable(True)
        parent.addWidget(self.groupbox)
    def getOption(self):
        # Subclasses return a dict of filter arguments (empty when disabled).
        raise NotImplementedError
    def setOption(self, optionValue):
        # Enable this section iff the current filter sets the criterion.
        self.groupbox.setChecked(optionValue is not None)
class TagFilterOption(FilterOption):
    """Filter section: comma separated tag names; '!' prefix negates a tag."""

    def __init__(self, parent):
        super().__init__("tags", parent)
        self.entry = QtGui.QLineEdit()
        label = QtGui.QLabel("Tags:")
        label.setBuddy(self.entry)
        row = QtGui.QHBoxLayout()
        row.addWidget(label)
        row.addWidget(self.entry)
        self.groupbox.setLayout(row)

    def getOption(self):
        """Return {'tags': text} when enabled, else an empty dict."""
        if not self.groupbox.isChecked():
            return {}
        return { 'tags': self.entry.text() }

    def setOption(self, taglist, negtaglist):
        """Populate the entry from the filter's positive and negated tags."""
        super().setOption(taglist)
        if taglist is not None:
            entries = sorted(taglist) + ["!%s" % t for t in sorted(negtaglist)]
            self.entry.setText(",".join(entries))
class SelectFilterOption(FilterOption):
    """Filter section: restrict to selected or not-selected images."""

    def __init__(self, parent):
        super().__init__("selection", parent)
        self.buttonYes = QtGui.QRadioButton("selected")
        self.buttonNo = QtGui.QRadioButton("not selected")
        layout = QtGui.QHBoxLayout()
        layout.addWidget(self.buttonYes)
        layout.addWidget(self.buttonNo)
        self.groupbox.setLayout(layout)

    def getOption(self):
        """Return {'select': bool} when enabled, else an empty dict."""
        if self.groupbox.isChecked():
            return { 'select': bool(self.buttonYes.isChecked()) }
        else:
            return {}

    def setOption(self, select):
        """Check the radio button matching the filter's select flag."""
        super().setOption(select)
        if select is not None:
            # FIX: QAbstractButton.setChecked() takes a bool; the original
            # passed QtCore.Qt.Checked / Qt.Unchecked CheckState constants,
            # which only worked because they are truthy/falsy integers.
            self.buttonYes.setChecked(bool(select))
            self.buttonNo.setChecked(not select)
class DateFilterOption(FilterOption):
    """Filter section: a single date or a start--end date range."""

    def __init__(self, parent):
        super().__init__("date", parent)
        self.startEntry = QtGui.QLineEdit()
        startLabel = QtGui.QLabel("Start:")
        startLabel.setBuddy(self.startEntry)
        self.endEntry = QtGui.QLineEdit()
        endLabel = QtGui.QLabel("End:")
        endLabel.setBuddy(self.endEntry)
        grid = QtGui.QGridLayout()
        grid.addWidget(startLabel, 0, 0)
        grid.addWidget(self.startEntry, 0, 1)
        grid.addWidget(endLabel, 1, 0)
        grid.addWidget(self.endEntry, 1, 1)
        self.groupbox.setLayout(grid)

    def getOption(self):
        """Return {'date': parsed date or range} when enabled, else {}."""
        if not self.groupbox.isChecked():
            return {}
        start = self.startEntry.text()
        end = self.endEntry.text()
        datestr = "%s--%s" % (start, end) if end else start
        return { 'date': photo.idxfilter.strpdate(datestr) }

    def setOption(self, date):
        """Fill the start/end entries from a (start, end) date pair."""
        super().setOption(date)
        if date is not None:
            self.startEntry.setText(date[0].isoformat())
            self.endEntry.setText(date[1].isoformat())
class GPSFilterOption(FilterOption):
    """Filter section: images within a radius around a GPS position."""

    def __init__(self, parent):
        super().__init__("GPS position", parent)
        self.posEntry = GeoPosEdit()
        posLabel = QtGui.QLabel("Position:")
        posLabel.setBuddy(self.posEntry)
        self.radiusEntry = QtGui.QLineEdit()
        radiusLabel = QtGui.QLabel("Radius:")
        radiusLabel.setBuddy(self.radiusEntry)
        grid = QtGui.QGridLayout()
        grid.addWidget(posLabel, 0, 0)
        grid.addWidget(self.posEntry, 0, 1)
        grid.addWidget(radiusLabel, 1, 0)
        grid.addWidget(self.radiusEntry, 1, 1)
        self.groupbox.setLayout(grid)

    def getOption(self):
        """Return position and radius filter arguments when enabled, else {}."""
        if not self.groupbox.isChecked():
            return {}
        return { 'gpspos': GeoPosition(self.posEntry.text()),
                 'gpsradius': float(self.radiusEntry.text()) }

    def setOption(self, gpspos, gpsradius):
        """Fill the entries from the filter's position and radius."""
        super().setOption(gpspos)
        if gpspos is not None:
            self.posEntry.setText(gpspos.floatstr())
            self.radiusEntry.setText(str(gpsradius))
class ListFilterOption(FilterOption):
    """Filter section: an explicit whitespace-separated list of file names."""

    def __init__(self, parent):
        super().__init__("explicit file names", parent)
        self.entry = QtGui.QLineEdit()
        label = QtGui.QLabel("Files:")
        label.setBuddy(self.entry)
        row = QtGui.QHBoxLayout()
        row.addWidget(label)
        row.addWidget(self.entry)
        self.groupbox.setLayout(row)

    def getOption(self):
        """Return {'files': [name, ...]} when enabled, else an empty dict."""
        if not self.groupbox.isChecked():
            return {}
        return { 'files': self.entry.text().split() }

    def setOption(self, filelist):
        """Fill the entry with the filter's sorted file names."""
        super().setOption(filelist)
        if filelist is not None:
            self.entry.setText(" ".join(sorted(str(p) for p in filelist)))
class FilterDialog(QtGui.QDialog):
    """Dialog assembling all filter sections.

    The resulting IdxFilter is available as self.imgFilter after accept().
    """
    def __init__(self):
        super().__init__()
        mainLayout = QtGui.QVBoxLayout()
        # one checkable section per filter criterion
        self.tagFilterOption = TagFilterOption(mainLayout)
        self.selectFilterOption = SelectFilterOption(mainLayout)
        self.dateFilterOption = DateFilterOption(mainLayout)
        self.gpsFilterOption = GPSFilterOption(mainLayout)
        self.filelistFilterOption = ListFilterOption(mainLayout)
        buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok |
                                           QtGui.QDialogButtonBox.Cancel)
        buttonBox.accepted.connect(self.accept)
        buttonBox.rejected.connect(self.reject)
        mainLayout.addWidget(buttonBox, alignment=QtCore.Qt.AlignHCenter)
        self.setLayout(mainLayout)
        self.setWindowTitle("Filter options")
    def setfilter(self, imgFilter):
        # Initialize all sections from an existing IdxFilter.
        self.imgFilter = imgFilter
        self.tagFilterOption.setOption(imgFilter.taglist, imgFilter.negtaglist)
        self.selectFilterOption.setOption(imgFilter.select)
        self.dateFilterOption.setOption(imgFilter.date)
        self.gpsFilterOption.setOption(imgFilter.gpspos, imgFilter.gpsradius)
        self.filelistFilterOption.setOption(imgFilter.filelist)
    def accept(self):
        # Collect the options of all enabled sections into a new IdxFilter.
        filterArgs = {}
        filterArgs.update(self.tagFilterOption.getOption())
        filterArgs.update(self.selectFilterOption.getOption())
        filterArgs.update(self.dateFilterOption.getOption())
        filterArgs.update(self.gpsFilterOption.getOption())
        filterArgs.update(self.filelistFilterOption.getOption())
        self.imgFilter = photo.idxfilter.IdxFilter(**filterArgs)
        super().accept()
| {
"repo_name": "RKrahl/photo-tools",
"path": "photo/qt/filterDialog.py",
"copies": "1",
"size": "7344",
"license": "apache-2.0",
"hash": -504135239210858700,
"line_mean": 33.4788732394,
"line_max": 79,
"alpha_frac": 0.6252723312,
"autogenerated": false,
"ratio": 3.9891363389462247,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5114408670146224,
"avg_score": null,
"num_lines": null
} |
"""A Dialog Window to display the result of a :class:`jukeboxcore.action.ActionCollection`."""
import abc
from PySide import QtGui, QtCore
from jukeboxcore.gui.main import JB_Dialog, JB_MainWindow
from jukeboxcore.gui.actionreport import create_action_model
from jukeboxcore.gui.widgetdelegate import WidgetDelegate, WD_TableView
from actionreportdialog_ui import Ui_ActionReportDialog
class TextPopupButton(QtGui.QPushButton):
    """An abstract push button that will show a textedit as popup when you click on it

    Intended to be used in the :class:`jukeboxcore.gui.widgetdelegate.WidgetDelegate`.
    Subclass it and reimplement :meth:`TextPopupButton.get_popup_text`
    """
    def __init__(self, popuptitle, text, parent=None):
        """
        :param popuptitle: Title for the popup. shown in the titlebar of the popup
        :type popuptitle: str
        :param text: Text on the button. Not in the popup.
        :type text: str
        :param parent: widget parent
        :type parent: QtGui.QWidget
        :raises: None
        """
        super(TextPopupButton, self).__init__(text, parent)
        self.popuptitle = popuptitle
        self.setAutoFillBackground(True)
        self.setText(text)
        self.clicked.connect(self.show_popup)
    def show_popup(self, *args, **kwargs):
        """Show a popup with a textedit

        :returns: None
        :rtype: None
        :raises: None
        """
        # stored on self so the window is not garbage collected on return
        self.mw = JB_MainWindow(parent=self, flags=QtCore.Qt.Dialog)
        self.mw.setWindowTitle(self.popuptitle)
        self.mw.setWindowModality(QtCore.Qt.ApplicationModal)
        w = QtGui.QWidget()
        self.mw.setCentralWidget(w)
        vbox = QtGui.QVBoxLayout(w)
        pte = QtGui.QPlainTextEdit()
        pte.setPlainText(self.get_popup_text())
        vbox.addWidget(pte)
        # move window to cursor position
        # NOTE(review): this mixes global (cursor) and widget-local (pos)
        # coordinates; presumably intended to place the popup at the
        # cursor -- confirm the mapping is correct on multi-screen setups.
        d = self.cursor().pos() - self.mw.mapToGlobal(self.mw.pos())
        self.mw.move(d)
        self.mw.show()
    @abc.abstractmethod
    def get_popup_text(self):
        """Return a text for the popup

        :returns: some text
        :rtype: str
        :raises: None
        """
        pass
class TracebackButton(TextPopupButton):
    """A push button that will show the traceback of an :class:`jukeboxcore.action.ActionUnit`.

    Intended to be used in the :class:`jukeboxcore.gui.widgetdelegate.ActionUnitDelegate`.
    """
    def __init__(self, parent=None):
        """Initialize a new TracebackButton

        :param parent: widget parent
        :type parent: QtGui.QWidget
        :raises: None
        """
        super(TracebackButton, self).__init__("Traceback", "Show Traceback", parent)
        self.actionunit = None  # the current action unit
    def set_index(self, index):
        """Display the data of the given index

        Stores the index's action unit and enables the button only when
        that unit actually carries a traceback.

        :param index: the index to paint
        :type index: QtCore.QModelIndex
        :returns: None
        :rtype: None
        :raises: None
        """
        item = index.internalPointer()
        self.actionunit = item.internal_data()
        self.setEnabled(bool(self.actionunit.status.traceback))
    def get_popup_text(self):
        """Return the action unit's traceback (empty string if no unit is set)

        FIX: this concrete override was wrongly decorated with
        @abc.abstractmethod in the original; the decorator was misleading
        (and inert, since no ABCMeta is involved) and has been removed.

        :returns: the traceback text
        :rtype: str
        :raises: None
        """
        if self.actionunit:
            return self.actionunit.status.traceback
        else:
            return ""
class MessageButton(TextPopupButton):
    """A push button that will show the message of an :class:`jukeboxcore.action.ActionUnit`.

    Intended to be used in the :class:`jukeboxcore.gui.widgetdelegate.ActionUnitDelegate`.
    """
    def __init__(self, parent=None):
        """Initialize a new MessageButton

        :param parent: widget parent
        :type parent: QtGui.QWidget
        :raises: None
        """
        super(MessageButton, self).__init__("Message", "Show Message", parent)
        self.actionunit = None  # the current action unit
    def set_index(self, index):
        """Display the data of the given index

        Stores the index's action unit and enables the button only when
        that unit actually carries a message.

        :param index: the index to paint
        :type index: QtCore.QModelIndex
        :returns: None
        :rtype: None
        :raises: None
        """
        item = index.internalPointer()
        self.actionunit = item.internal_data()
        self.setEnabled(bool(self.actionunit.status.message))
    def get_popup_text(self):
        """Return the action unit's message (empty string if no unit is set)

        FIX: this concrete override was wrongly decorated with
        @abc.abstractmethod in the original; the decorator was misleading
        (and inert, since no ABCMeta is involved) and has been removed.

        :returns: the message text
        :rtype: str
        :raises: None
        """
        if self.actionunit:
            return self.actionunit.status.message
        else:
            return ""
class ActionUnitTracebackDelegate(WidgetDelegate):
    """Widget delegate that renders a :class:`TracebackButton` for the
    traceback column of an action report model.
    """
    def __init__(self, parent=None):
        """Initialize the delegate

        :param parent: the parent object
        :type parent: QObject
        :raises: None
        """
        super(ActionUnitTracebackDelegate, self).__init__(parent)
    def create_widget(self, parent=None):
        """Create the widget this delegate paints

        :param parent: the parent widget
        :type parent: QtGui.QWidget
        :returns: a new traceback button
        :rtype: QtGui.QWidget
        :raises: None
        """
        return TracebackButton(parent)
    def set_widget_index(self, index):
        """Let the delegate widget display the data of the given index

        :param index: the index to display
        :type index: QtCore.QModelIndex
        :returns: None
        :rtype: None
        :raises: None
        """
        self.widget.set_index(index)
    def create_editor_widget(self, parent, option, index):
        """Return the editor used for editing the item at the given index

        :param parent: the parent widget
        :type parent: QtGui.QWidget
        :param option: the options for painting
        :type option: QtGui.QStyleOptionViewItem
        :param index: the index to edit
        :type index: QtCore.QModelIndex
        :returns: the editor widget
        :rtype: :class:`QtGui.QWidget`
        :raises: None
        """
        # the editor is just another instance of the painted widget
        return self.create_widget(parent)
    def setEditorData(self, editor, index):
        """Point the editor at the data of the given index

        :param editor: the editor widget
        :type editor: QtGui.QWidget
        :param index: the index to edit
        :type index: QtCore.QModelIndex
        :returns: None
        :rtype: None
        :raises: None
        """
        editor.set_index(index)
class ActionUnitMessageDelegate(WidgetDelegate):
    """Widget delegate that renders a :class:`MessageButton` for the
    message column of an action report model.
    """
    def __init__(self, parent=None):
        """Initialize the delegate

        :param parent: the parent object
        :type parent: QObject
        :raises: None
        """
        super(ActionUnitMessageDelegate, self).__init__(parent)
    def create_widget(self, parent=None):
        """Create the widget this delegate paints

        :param parent: the parent widget
        :type parent: QtGui.QWidget
        :returns: a new message button
        :rtype: QtGui.QWidget
        :raises: None
        """
        return MessageButton(parent)
    def set_widget_index(self, index):
        """Let the delegate widget display the data of the given index

        :param index: the index to display
        :type index: QtCore.QModelIndex
        :returns: None
        :rtype: None
        :raises: None
        """
        self.widget.set_index(index)
    def create_editor_widget(self, parent, option, index):
        """Return the editor used for editing the item at the given index

        :param parent: the parent widget
        :type parent: QtGui.QWidget
        :param option: the options for painting
        :type option: QtGui.QStyleOptionViewItem
        :param index: the index to edit
        :type index: QtCore.QModelIndex
        :returns: the editor widget
        :rtype: :class:`QtGui.QWidget`
        :raises: None
        """
        # the editor is just another instance of the painted widget
        return self.create_widget(parent)
    def setEditorData(self, editor, index):
        """Point the editor at the data of the given index

        :param editor: the editor widget
        :type editor: QtGui.QWidget
        :param index: the index to edit
        :type index: QtCore.QModelIndex
        :returns: None
        :rtype: None
        :raises: None
        """
        editor.set_index(index)
class ActionReportDialog(JB_Dialog, Ui_ActionReportDialog):
    """A dialog that can show the result of a :class:`jukeboxcore.action.ActionCollection`

    The dialog will ask the user to confirm the report or cancel.
    The dialog uses the actionreportdialog.ui for its layout.
    """
    def __init__(self, actioncollection, parent=None, flags=0):
        """Construct a new dialog for the given action collection

        :param actioncollection: the action collection to report
        :type actioncollection: :class:`jukeboxcore.action.ActionCollection`
        :param parent: Optional - the parent of the window - default is None
        :type parent: QWidget
        :param flags: the window flags
        :type flags: QtCore.Qt.WindowFlags
        :raises: None
        """
        super(ActionReportDialog, self).__init__(parent, flags)
        self.setupUi(self)
        self._actioncollection = actioncollection
        self._parent = parent
        self._flags = flags
        # summary labels for the overall collection status
        status = self._actioncollection.status()
        self.status_lb.setText(status.value)
        self.message_lb.setText(status.message)
        self.traceback_pte.setPlainText(status.traceback)
        self.traceback_pte.setVisible(False)
        # table of individual action units; button delegates provide the
        # message (column 3) and traceback (column 4) popups
        model = create_action_model(self._actioncollection)
        self.actions_tablev = WD_TableView(self)
        self.actions_tablev.setModel(model)
        self.verticalLayout.insertWidget(1, self.actions_tablev)
        self.msgdelegate = ActionUnitMessageDelegate(self)
        self.tbdelegate = ActionUnitTracebackDelegate(self)
        self.actions_tablev.setItemDelegateForColumn(3, self.msgdelegate)
        self.actions_tablev.setItemDelegateForColumn(4, self.tbdelegate)
        self.actions_tablev.horizontalHeader().setStretchLastSection(True)
| {
"repo_name": "JukeboxPipeline/jukebox-core",
"path": "src/jukeboxcore/gui/widgets/actionreportdialog.py",
"copies": "1",
"size": "11181",
"license": "bsd-3-clause",
"hash": 3114866749236286500,
"line_mean": 32.0798816568,
"line_max": 108,
"alpha_frac": 0.6398354351,
"autogenerated": false,
"ratio": 4.251330798479088,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007019730105190428,
"num_lines": 338
} |
"""A dialog window to set and remove tags.
"""
import math
from PySide import QtCore, QtGui
class TagSelectDialog(QtGui.QDialog):
    """Dialog showing one checkbox per known tag plus an entry to add new tags."""

    def __init__(self, taglist):
        """Create the dialog with a checkbox grid for *taglist*.

        :param taglist: iterable of initial tag names
        """
        super().__init__()
        self.checkLayout = QtGui.QGridLayout()
        self.settags(taglist)
        self.entry = QtGui.QLineEdit()
        # Pressing return in the entry adds the typed tag immediately.
        self.entry.returnPressed.connect(self.newtag)
        entryLabel = QtGui.QLabel("New tag:")
        entryLabel.setBuddy(self.entry)
        entryLayout = QtGui.QHBoxLayout()
        entryLayout.addWidget(entryLabel)
        entryLayout.addWidget(self.entry)
        buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok |
                                           QtGui.QDialogButtonBox.Cancel)
        buttonBox.accepted.connect(self.accept)
        buttonBox.rejected.connect(self.reject)
        mainLayout = QtGui.QVBoxLayout()
        mainLayout.addLayout(self.checkLayout)
        mainLayout.addLayout(entryLayout)
        mainLayout.addWidget(buttonBox, alignment=QtCore.Qt.AlignHCenter)
        self.setLayout(mainLayout)
        self.setWindowTitle("Select tags")

    def settags(self, tags):
        """Replace the set of known tags and rebuild the checkbox grid.

        :param tags: iterable of tag names to display
        """
        self.taglist = set(tags)
        self.tagCheck = {}
        # We need to rearrange the checkbox widgets in
        # self.checkLayout. To this end, remove all old checkboxes
        # first, store those to be retained to self.tagCheck and then
        # add the new set of checkboxes to self.checkLayout, reusing
        # those in self.tagCheck.
        while True:
            child = self.checkLayout.takeAt(0)
            if not child:
                break
            cb = child.widget()
            t = cb.text()
            if t in self.taglist:
                self.tagCheck[t] = cb
            # NOTE(review): checkboxes for tags that were dropped are taken
            # out of the layout but never deleted or hidden here -- confirm
            # they do not linger as stray child widgets.
        if len(self.taglist) > 0:
            # Column count grows as sqrt(len/10); boxes are filled
            # column-major in sorted tag order.
            ncol = int(math.ceil(math.sqrt(len(self.taglist)/10)))
            nrow = int(math.ceil(len(self.taglist)/ncol))
            c = 0
            for t in sorted(self.taglist):
                if t not in self.tagCheck:
                    self.tagCheck[t] = QtGui.QCheckBox(t)
                cb = self.tagCheck[t]
                self.checkLayout.addWidget(cb, c % nrow, c // nrow)
                c += 1

    def newtag(self):
        """Add the tag typed into the entry field, check it and clear the entry."""
        t = self.entry.text()
        if t not in self.taglist:
            self.taglist.add(t)
            self.settags(self.taglist)
            self.tagCheck[t].setCheckState(QtCore.Qt.Checked)
            self.adjustSize()
        self.entry.setText("")

    def setCheckedTags(self, tags):
        """Check exactly the checkboxes whose tag appears in *tags*; uncheck the rest."""
        for t in self.taglist:
            state = QtCore.Qt.Checked if t in tags else QtCore.Qt.Unchecked
            self.tagCheck[t].setCheckState(state)

    def checkedTags(self):
        """Return the set of tags whose checkbox is currently checked."""
        return { t for t in self.taglist if self.tagCheck[t].isChecked() }
| {
"repo_name": "RKrahl/photo-tools",
"path": "photo/qt/tagSelectDialog.py",
"copies": "1",
"size": "2755",
"license": "apache-2.0",
"hash": 3099390495574285000,
"line_mean": 34.3205128205,
"line_max": 75,
"alpha_frac": 0.5916515426,
"autogenerated": false,
"ratio": 3.8912429378531073,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9980294765353392,
"avg_score": 0.0005199430199430199,
"num_lines": 78
} |
"""A dialog window to show some informations on the current image.
"""
from PySide import QtCore, QtGui
from photo.exif import Exif
class ImageInfoDialog(QtGui.QDialog):
    """Dialog showing EXIF and file information for the current image.

    Displays camera model, file name, creation date, orientation, a
    clickable GPS position link, exposure time, f-number, ISO rating and
    focal length for a single image item.
    """

    def __init__(self, basedir):
        """Build the static layout of the dialog.

        :param basedir: base directory that item file names are relative to
        """
        super().__init__()
        self.basedir = basedir
        infoLayout = QtGui.QGridLayout()
        # The nine info rows differ only in caption, grid row and text
        # format, so a single helper replaces the original copy-pasted
        # label-creation blocks.
        self.cameraModel = self._addInfoRow(infoLayout, 0, "Camera model:")
        self.filename = self._addInfoRow(infoLayout, 1, "File name:")
        self.createDate = self._addInfoRow(infoLayout, 2, "Create date:")
        self.orientation = self._addInfoRow(infoLayout, 3, "Orientation:")
        # GPS position is rendered as rich text so it can hold a link.
        self.gpsPosition = self._addInfoRow(infoLayout, 4, "GPS position:",
                                            textFormat=QtCore.Qt.RichText)
        self.gpsPosition.setOpenExternalLinks(True)
        self.exposureTime = self._addInfoRow(infoLayout, 5, "Exposure time:")
        self.aperture = self._addInfoRow(infoLayout, 6, "F-number:")
        self.iso = self._addInfoRow(infoLayout, 7, "ISO speed rating:")
        self.focalLength = self._addInfoRow(infoLayout, 8, "Focal length:")
        buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok)
        buttonBox.accepted.connect(self.accept)
        mainLayout = QtGui.QVBoxLayout()
        mainLayout.addLayout(infoLayout)
        mainLayout.addWidget(buttonBox, alignment=QtCore.Qt.AlignHCenter)
        self.setLayout(mainLayout)
        self.setWindowTitle("Image Info")

    @staticmethod
    def _addInfoRow(layout, row, caption, textFormat=QtCore.Qt.PlainText):
        """Add a caption label and an empty value label to *layout* at *row*.

        :param layout: the QGridLayout to populate
        :param row: grid row index
        :param caption: text of the left-hand caption label
        :param textFormat: text format for the value label
        :returns: the value label, so the caller can update it later
        """
        captionLabel = QtGui.QLabel(caption)
        valueLabel = QtGui.QLabel()
        valueLabel.setTextFormat(textFormat)
        layout.addWidget(captionLabel, row, 0)
        layout.addWidget(valueLabel, row, 1)
        return valueLabel

    def setinfo(self, item):
        """Fill all labels with data for *item*.

        :param item: image item providing filename, createDate, orientation
            and gpsPosition; EXIF fields are read from the file itself
        """
        exifdata = Exif(self.basedir / item.filename)
        self.cameraModel.setText(str(exifdata.cameraModel))
        self.filename.setText(str(item.filename))
        if item.createDate:
            self.createDate.setText(item.createDate.strftime("%a, %x %X"))
        else:
            self.createDate.setText(None)
        if item.orientation:
            self.orientation.setText(item.orientation)
        else:
            self.orientation.setText(None)
        pos = item.gpsPosition
        if pos:
            # Link the coordinates to OpenStreetMap.
            link = "<a href='%s'>%s</a>" % (pos.as_osmurl(), str(pos))
            self.gpsPosition.setText(link)
        else:
            self.gpsPosition.setText(None)
        self.exposureTime.setText(str(exifdata.exposureTime))
        self.aperture.setText(str(exifdata.aperture))
        self.iso.setText(str(exifdata.iso))
        self.focalLength.setText(str(exifdata.focalLength))
| {
"repo_name": "RKrahl/photo-tools",
"path": "photo/qt/imageInfoDialog.py",
"copies": "1",
"size": "3954",
"license": "apache-2.0",
"hash": -5526188542468301000,
"line_mean": 41.9782608696,
"line_max": 74,
"alpha_frac": 0.6641375822,
"autogenerated": false,
"ratio": 3.857560975609756,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 92
} |
# A Dialog with a ScrolledText widget.
import Pmw
class TextDialog(Pmw.Dialog):
    """A Pmw dialog wrapping a ScrolledText widget."""

    def __init__(self, parent = None, **kw):
        # Define the megawidget options.
        INITOPT = Pmw.INITOPT
        optiondefs = (
            ('borderx', 10, INITOPT),
            ('bordery', 10, INITOPT),
        )
        self.defineoptions(kw, optiondefs)
        # Initialise the base class (after defining the options).
        Pmw.Dialog.__init__(self, parent)
        # Create the components.
        interior = self.interior()
        # Expose the inner ScrolledText's 'text' and 'label' components
        # under shorter alias names on this megawidget.
        aliases = (
            ('text', 'scrolledtext_text'),
            ('label', 'scrolledtext_label'),
        )
        self._text = self.createcomponent('scrolledtext',
                aliases, None,
                Pmw.ScrolledText, (interior,))
        self._text.pack(side='top', expand=1, fill='both',
                padx = self['borderx'], pady = self['bordery'])
        # Check keywords and initialise options.
        self.initialiseoptions()

    # Need to explicitly forward this to override the stupid
    # (grid_)bbox method inherited from Tkinter.Toplevel.Grid.
    def bbox(self, index):
        return self._text.bbox(index)
# Delegate the remaining ScrolledText methods on TextDialog to its
# internal '_text' component.
Pmw.forwardmethods(TextDialog, Pmw.ScrolledText, '_text')
| {
"repo_name": "leuschel/logen",
"path": "old_logen/pylogen/Pmw/Pmw_1_2/lib/PmwTextDialog.py",
"copies": "6",
"size": "1069",
"license": "apache-2.0",
"hash": 394956710383247800,
"line_mean": 27.1315789474,
"line_max": 62,
"alpha_frac": 0.6772684752,
"autogenerated": false,
"ratio": 3.191044776119403,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.048542303477582774,
"num_lines": 38
} |
# A dice rolling bot for use on Discord servers
# LICENSE: This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
# @category Tools
# @copyright Copyright (c) 2016 Robert Thayer (http://www.gamergadgets.net)
# @version 1.1
# @link http://www.gamergadgets.net
# @author Robert Thayer
from random import randint
import discord # Imported from https://github.com/Rapptz/discord.py
import asyncio
from discord.ext import commands
# A dice bot for use with Discord.
# commands.Bot is itself a discord.Client subclass, so a single
# construction suffices; the original also built a bare discord.Client()
# and immediately discarded it by rebinding the same name.
bot = commands.Bot(command_prefix='!', description="A bot to handle all your RPG rolling needs")
# Determines if a message is owned by the bot
def is_me(m):
    """Return True when message *m* was authored by this bot."""
    return bot.user == m.author
# Determines if the value can be converted to an integer
# Parameters: s - input string
# Returns: boolean. True if can be converted, False if it throws an error.
def is_num(s):
    """Tell whether *s* can be parsed as an integer.

    :param s: input string
    :returns: True if ``int(s)`` succeeds, False on ValueError
    """
    try:
        int(s)
    except ValueError:
        return False
    else:
        return True
# Roll die and get a random number between a and b (inclusive) adding/subtracting the modifier
# Parameters: a [low number], b [high number], modifier [amount to add/subtract to total]
# threshold [number that result needs to match or exceed to count as a success]
# Returns: str
def roll_basic(a, b, modifier, threshold):
    """Roll one random number between a and b (inclusive) and report the
    outcome against a threshold.

    :param a: low end of the roll (inclusive)
    :param b: high end of the roll (inclusive)
    :param modifier: amount added to (negative: subtracted from) the roll
    :param threshold: minimum modified total that counts as a success
    :returns: human-readable result string
    :rtype: str
    """
    base = randint(int(a), int(b))
    total = base + modifier
    if total >= threshold:
        if modifier > 0:
            return "***Success***: {}+{} [{}] meets or beats the {} threshold.".format(
                base, modifier, total, threshold)
        elif modifier < 0:
            # Bug fix: this branch used to report "does not meet the
            # threshold" even though the modified roll succeeded.
            return "***Success***: {}{} [{}] meets or beats the {} threshold.".format(
                base, modifier, total, threshold)
        return "***Success***: {}".format(base)
    if modifier > 0:
        return "***Failure***: {}+{} [{}]".format(base, modifier, total)
    elif modifier < 0:
        return "***Failure***: {}{} [{}]".format(base, modifier, total)
    return "***Failure***: {}".format(base)
# Rolls a set of die and returns either number of hits or the total amount
# Parameters: num_of_dice [Number of dice to roll], dice_type[die type (e.g. d8, d6),
# hit [number that must be exceeded to count as a success], modifier [amount to add to/subtract from total],
# threshold [number of successes needed to be a win]
# Returns: String with results
def roll_hit(num_of_dice, dice_type, hit, modifier, threshold):
    """Roll a pool of dice and report either the total or the hit count.

    When *hit* is positive, each die at or above it counts as one success
    (shown in bold) and the total is the success count; otherwise the face
    values themselves are summed. *modifier* is added to the total, and a
    non-zero *threshold* appends an explicit success/failure verdict.

    :returns: human-readable result string
    :rtype: str
    """
    counting_hits = int(hit) > 0
    report = ""
    total = 0
    for _ in range(0, int(num_of_dice)):
        face = randint(1, int(dice_type))
        if counting_hits:
            if face >= int(hit):
                report += "**{}** ".format(face)
                total += 1
            else:
                report += "{} ".format(face)
        else:
            report += "{} ".format(face)
            total += face
    total += int(modifier)
    if modifier > 0:
        report += "+{} = {}".format(modifier, total)
    elif modifier != 0:
        report += "{} = {}".format(modifier, total)
    else:
        report += "= {}".format(total)
    if threshold != 0:
        verdict = (" meets or beats the {} threshold. ***Success***"
                   if total >= threshold
                   else " does not meet the {} threshold. ***Failure***")
        report += verdict.format(threshold)
    return report
@bot.event
@asyncio.coroutine
def on_ready():
    """Print the bot's identity to stdout once the connection is ready."""
    for line in ('Logged in as', bot.user.name, bot.user.id, '------'):
        print(line)
# Parse !roll verbiage
# Parse !roll verbiage
@bot.command(pass_context=True,description='Rolls dice.\nExamples:\n100 Rolls 1-100.\n50-100 Rolls 50-100.\n3d6 Rolls 3 d6 dice and returns total.\nModifiers:\n! Hit success. 3d6!5 Counts number of rolls that are greater than 5.\nmod: Modifier. 3d6mod3 or 3d6mod-3. Adds 3 to the result.\n> Threshold. 100>30 returns success if roll is greater than or equal to 30.\n\nFormatting:\nMust be done in order.\nSingle die roll: 1-100mod2>30\nMultiple: 5d6!4mod-2>2')
@asyncio.coroutine
def roll(ctx, roll : str):
    """Parse a '!roll' expression, validate it and post the result.

    The expression is peeled apart right to left: '>threshold', 'modN'
    and '!hit' suffixes first, then either 'NdM' dice notation or an
    'a-b' range / plain maximum.
    """
    a, b, modifier, hit, num_of_dice, threshold, dice_type = 0, 0, 0, 0, 0, 0, 0
    # author: Writer of discord message
    author = ctx.message.author
    if (roll.find('>') != -1):
        roll, threshold = roll.split('>')
    if (roll.find('mod') != -1):
        roll, modifier = roll.split('mod')
    if (roll.find('!') != -1):
        roll, hit = roll.split('!')
    if (roll.find('d') != -1):
        num_of_dice, dice_type = roll.split('d')
    elif (roll.find('-') != -1):
        a, b = roll.split('-')
    else:
        a = 1
        b = roll
    #Validate data
    try:
        if (modifier != 0):
            if (is_num(modifier) is False):
                raise ValueError("Modifier value format error. Proper usage 1d4+1")
                return  # NOTE(review): unreachable -- the raise above already exits
            else:
                modifier = int(modifier)
        if (hit != 0):
            if (is_num(hit) is False):
                raise ValueError("Hit value format error. Proper usage 3d6!5")
                return  # NOTE(review): unreachable
            else:
                hit = int(hit)
        if (num_of_dice != 0):
            if (is_num(num_of_dice) is False):
                raise ValueError("Number of dice format error. Proper usage 3d6")
                return  # NOTE(review): unreachable
            else:
                num_of_dice = int(num_of_dice)
                if (num_of_dice > 200):
                    raise ValueError("Too many dice. Please limit to 200 or less.")
                    return  # NOTE(review): unreachable
        if (dice_type != 0):
            if (is_num(dice_type) is False):
                raise ValueError("Dice type format error. Proper usage 3d6")
                return  # NOTE(review): unreachable
            else:
                dice_type = int(dice_type)
        if (a != 0):
            if (is_num(a) is False):
                raise ValueError("Error: Minimum must be a number. Proper usage 1-50.")
                return  # NOTE(review): unreachable
            else:
                a = int(a)
        if (b != 0):
            if (is_num(b) is False):
                raise ValueError("Error: Maximum must be a number. Proper usage 1-50 or 50.")
                return  # NOTE(review): unreachable
            else:
                b = int(b)
        if (threshold != 0):
            if (is_num(threshold) is False):
                raise ValueError("Error: Threshold must be a number. Proper usage 1-100>30")
                return  # NOTE(review): unreachable
            else:
                threshold = int(threshold)
        if (dice_type != 0 and hit != 0):
            if (hit > dice_type):
                raise ValueError("Error: Hit value cannot be greater than dice type")
                return  # NOTE(review): unreachable
        elif (dice_type < 0):
            raise ValueError("Dice type cannot be a negative number.")
            return  # NOTE(review): unreachable
        elif (num_of_dice < 0):
            raise ValueError("Number of dice cannot be a negative number.")
            return  # NOTE(review): unreachable
        # NOTE(review): the two negative-value checks above are skipped
        # whenever both dice_type and hit are non-zero -- confirm intended.
        if a != 0 and b != 0:
            yield from bot.say("{} rolls {}-{}. Result: {}".format(author, a, b, roll_basic(a, b, modifier, threshold)))
        else:
            yield from bot.say("{} rolls {}d{}. Results: {}".format(author, num_of_dice, dice_type, roll_hit(num_of_dice, dice_type, hit, modifier, threshold)))
    except ValueError as err:
        # Display error message to channel
        # NOTE(review): passes the exception object itself; presumably
        # stringified downstream -- confirm.
        yield from bot.say(err)
#Bot command to delete all messages the bot has made.
@bot.command(pass_context=True,description='Deletes all messages the bot has made')
@asyncio.coroutine
def purge(ctx):
    """Remove the bot's own messages (up to 100) from the invoking channel."""
    target_channel = ctx.message.channel
    removed = yield from bot.purge_from(target_channel, limit=100, check=is_me)
    yield from bot.send_message(target_channel, 'Deleted {} message(s)'.format(len(removed)))
# Follow this helpful guide on creating a bot and adding it to your server.
# https://github.com/reactiflux/discord-irc/wiki/Creating-a-discord-bot-&-getting-a-token
# Replace 'token' below with your bot's real authentication token before
# running; keep real tokens out of version control.
bot.run('token')
| {
"repo_name": "Chaithi/Discord-Dice-Roller-Bot",
"path": "diceBot.py",
"copies": "1",
"size": "7993",
"license": "mit",
"hash": 6726080524876891000,
"line_mean": 40.2010309278,
"line_max": 463,
"alpha_frac": 0.5715000626,
"autogenerated": false,
"ratio": 3.6885094600830644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4760009522683064,
"avg_score": null,
"num_lines": null
} |
# A dictionary allowing the conversion of each unit to its base unit.
# An entry consists of the unit's name, a constant number and a constant
# factor that need to be added and multiplied to convert the unit into
# the base unit in the last parameter: base = value * factor + offset.
UNITS = {'bit': [0, 1, 'bit'],
         'byte': [0, 8, 'bit'],
         # volume (base: cubic-meter)
         'cubic-centimeter': [0, 0.000001, 'cubic-meter'],
         'cubic-decimeter': [0, 0.001, 'cubic-meter'],
         'liter': [0, 0.001, 'cubic-meter'],
         'cubic-meter': [0, 1, 'cubic-meter'],
         'cubic-inch': [0, 0.000016387064, 'cubic-meter'],
         'fluid-ounce': [0, 0.000029574, 'cubic-meter'],
         'cubic-foot': [0, 0.028316846592, 'cubic-meter'],
         'cubic-yard': [0, 0.764554857984, 'cubic-meter'],
         'teaspoon': [0, 0.0000049289216, 'cubic-meter'],
         'tablespoon': [0, 0.000014787, 'cubic-meter'],
         'cup': [0, 0.00023658823648491, 'cubic-meter'],
         # mass (base: gram)
         'gram': [0, 1, 'gram'],
         'kilogram': [0, 1000, 'gram'],
         'ton': [0, 1000000, 'gram'],
         'ounce': [0, 28.349523125, 'gram'],
         'pound': [0, 453.59237, 'gram'],
         # temperature (base: kelvin) -- the only group using the offset
         'kelvin': [0, 1, 'kelvin'],
         'celsius': [273.15, 1, 'kelvin'],
         'fahrenheit': [255.372222, 0.555555, 'kelvin'],
         # length (base: meter)
         'centimeter': [0, 0.01, 'meter'],
         'decimeter': [0, 0.1, 'meter'],
         'meter': [0, 1, 'meter'],
         'kilometer': [0, 1000, 'meter'],
         'inch': [0, 0.0254, 'meter'],
         'foot': [0, 0.3048, 'meter'],
         'yard': [0, 0.9144, 'meter'],
         'mile': [0, 1609.344, 'meter'],
         'nautical-mile': [0, 1852, 'meter'],
         # area (base: square-meter)
         'square-centimeter': [0, 0.0001, 'square-meter'],
         'square-decimeter': [0, 0.01, 'square-meter'],
         'square-meter': [0, 1, 'square-meter'],
         'square-kilometer': [0, 1000000, 'square-meter'],
         'square-inch': [0, 0.00064516, 'square-meter'],
         'square-foot': [0, 0.09290304, 'square-meter'],
         'square-yard': [0, 0.83612736, 'square-meter'],
         'square-mile': [0, 2589988.110336, 'square-meter'],
         'are': [0, 100, 'square-meter'],
         'hectare': [0, 10000, 'square-meter'],
         'acre': [0, 4046.8564224, 'square-meter']}
# Metric prefix name -> decimal exponent; a prefixed unit multiplies the
# base unit by 10**exponent.
PREFIXES = {'atto': -18,
            'femto': -15,
            'pico': -12,
            'nano': -9,
            'micro': -6,
            'milli': -3,
            'centi': -2,
            'deci': -1,
            'deca': 1,
            'hecto': 2,
            'kilo': 3,
            'mega': 6,
            'giga': 9,
            'tera': 12,
            'peta': 15,
            'exa': 18}
# Common abbreviations -> canonical unit names used as keys in UNITS.
ALIASES = {'a': 'are',
           'ac': 'acre',
           'c': 'celsius',
           'cm': 'centimeter',
           'cm2': 'square-centimeter',
           'cm3': 'cubic-centimeter',
           'cm^2': 'square-centimeter',
           'cm^3': 'cubic-centimeter',
           'dm': 'decimeter',
           'dm2': 'square-decimeter',
           'dm3': 'cubic-decimeter',
           'dm^2': 'square-decimeter',
           'dm^3': 'cubic-decimeter',
           'f': 'fahrenheit',
           'fl-oz': 'fluid-ounce',
           'ft': 'foot',
           'ft2': 'square-foot',
           'ft3': 'cubic-foot',
           'ft^2': 'square-foot',
           'ft^3': 'cubic-foot',
           'g': 'gram',
           'ha': 'hectare',
           'in': 'inch',
           'in2': 'square-inch',
           'in3': 'cubic-inch',
           'in^2': 'square-inch',
           'in^3': 'cubic-inch',
           'k': 'kelvin',
           'kg': 'kilogram',
           'km': 'kilometer',
           'km2': 'square-kilometer',
           'km^2': 'square-kilometer',
           'l': 'liter',
           'lb': 'pound',
           'm': 'meter',
           'm2': 'square-meter',
           'm3': 'cubic-meter',
           'm^2': 'square-meter',
           'm^3': 'cubic-meter',
           'mi': 'mile',
           'mi2': 'square-mile',
           'mi^2': 'square-mile',
           'nmi': 'nautical-mile',
           'oz': 'ounce',
           't': 'ton',
           'tbsp': 'tablespoon',
           'tsp': 'teaspoon',
           'y': 'yard',
           'y2': 'square-yard',
           'y3': 'cubic-yard',
           'y^2': 'square-yard',
           'y^3': 'cubic-yard'}
# User-facing help text for '@convert help'; keep the unit and prefix
# lists in sync with UNITS, ALIASES and PREFIXES above.
HELP_MESSAGE = ('Converter usage:\n'
                '`@convert <number> <unit_from> <unit_to>`\n'
                'Converts `number` in the unit <unit_from> to '
                'the <unit_to> and prints the result\n'
                '`number`: integer or floating point number, e.g. 12, 13.05, 0.002\n'
                '<unit_from> and <unit_to> are two of the following units:\n'
                '* square-centimeter (cm^2, cm2), square-decimeter (dm^2, dm2), '
                'square-meter (m^2, m2), square-kilometer (km^2, km2),'
                ' square-inch (in^2, in2), square-foot (ft^2, ft2), square-yard (y^2, y2), '
                ' square-mile(mi^2, mi2), are (a), hectare (ha), acre (ac)\n'
                '* bit, byte\n'
                '* centimeter (cm), decimeter(dm), meter (m),'
                ' kilometer (km), inch (in), foot (ft), yard (y),'
                ' mile (mi), nautical-mile (nmi)\n'
                '* Kelvin (K), Celsius(C), Fahrenheit (F)\n'
                '* cubic-centimeter (cm^3, cm3), cubic-decimeter (dm^3, dm3), liter (l), '
                'cubic-meter (m^3, m3), cubic-inch (in^3, in3), fluid-ounce (fl-oz), '
                'cubic-foot (ft^3, ft3), cubic-yard (y^3, y3)\n'
                '* gram (g), kilogram (kg), ton (t), ounce (oz), pound(lb)\n'
                '* (metric only, U.S. and imperial units differ slightly:) teaspoon (tsp), tablespoon (tbsp), cup\n\n\n'
                'Allowed prefixes are:\n'
                '* atto, pico, femto, nano, micro, milli, centi, deci\n'
                '* deca, hecto, kilo, mega, giga, tera, peta, exa\n\n\n'
                'Usage examples:\n'
                '* `@convert 12 celsius fahrenheit`\n'
                '* `@convert 0.002 kilomile millimeter`\n'
                '* `@convert 31.5 square-mile ha`\n'
                '* `@convert 56 g lb`\n')
# Short pointer to the full help text above.
QUICK_HELP = 'Enter `@convert help` for help on using the converter.'
| {
"repo_name": "susansls/zulip",
"path": "contrib_bots/bots/converter/utils.py",
"copies": "18",
"size": "6179",
"license": "apache-2.0",
"hash": -4990694794507015000,
"line_mean": 41.3219178082,
"line_max": 120,
"alpha_frac": 0.4573555592,
"autogenerated": false,
"ratio": 2.924278277330809,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A dictionary-equipped extractive QA model."""
import logging
logger = logging.getLogger(__name__)
import theano
from theano import tensor
from theano.gradient import disconnected_grad
from theano.sandbox.rng_mrg import MRG_RandomStreams
from collections import OrderedDict
import blocks.config
from blocks.bricks import Initializable, Linear, NDimensionalSoftmax, MLP, Tanh, Rectifier
from blocks.bricks.base import application, Brick
from blocks.bricks.simple import Rectifier
from blocks.bricks.recurrent import LSTM
from blocks.bricks.recurrent.misc import Bidirectional
from blocks.bricks.lookup import LookupTable
from blocks.roles import VariableRole, add_role
from blocks.select import Selector
from dictlearn.ops import WordToIdOp, RetrievalOp
from dictlearn.lookup import (
LSTMReadDefinitions, MeanPoolReadDefinitions,
MeanPoolCombiner)
from dictlearn.theano_util import unk_ratio
class EmbeddingRole(VariableRole):
    """Variable role used to tag the word-embedding variables of the model."""
    pass

# Singleton role instance, attached to embeddings via blocks' add_role.
EMBEDDINGS = EmbeddingRole()
def flip01(x):
    """Return *x* with axes 0 and 1 exchanged (last axis untouched)."""
    swapped = x.transpose((1, 0, 2))
    return swapped
def flip12(x):
    """Return *x* with axes 1 and 2 exchanged (first axis untouched)."""
    swapped = x.transpose((0, 2, 1))
    return swapped
class ExtractiveQAModel(Initializable):
    """The dictionary-equipped extractive QA model.

    Parameters
    ----------
    dim : int
        The default dimensionality for the components.
    emb_dim : int
        The dimensionality for the embeddings. If 0, `dim` is used.
    coattention : bool
        Use the coattention mechanism.
    num_input_words : int
        The number of input words. If 0, `vocab.size()` is used.
    vocab
        The vocabulary object.
    use_definitions : bool
        Triggers the use of definitions.
    reuse_word_embeddings : bool
        If True, the definition reader shares the main lookup table.
    compose_type : str
        How word embeddings are combined with definition embeddings.
    """
    def __init__(self, dim, emb_dim, readout_dims,
                 num_input_words, def_num_input_words, vocab,
                 use_definitions, def_word_gating, compose_type, coattention,
                 def_reader, reuse_word_embeddings, bidir_encoder,
                 random_unk, recurrent_weights_init,
                 **kwargs):
        self._vocab = vocab
        # Zero means "use the default" for these sizes.
        if emb_dim == 0:
            emb_dim = dim
        if num_input_words == 0:
            num_input_words = vocab.size()
        if def_num_input_words == 0:
            def_num_input_words = num_input_words
        self._coattention = coattention
        self._num_input_words = num_input_words
        self._use_definitions = use_definitions
        self._random_unk = random_unk
        self._reuse_word_embeddings = reuse_word_embeddings
        self.recurrent_weights_init = recurrent_weights_init
        # The lookup table must be big enough for every id it may see.
        lookup_num_words = num_input_words
        if reuse_word_embeddings:
            lookup_num_words = max(num_input_words, def_num_input_words)
        if random_unk:
            lookup_num_words = vocab.size()
        # Dima: we can have slightly less copy-paste here if we
        # copy the RecurrentFromFork class from my other projects.
        children = []
        self._lookup = LookupTable(lookup_num_words, emb_dim)
        self._encoder_fork = Linear(emb_dim, 4 * dim, name='encoder_fork')
        if bidir_encoder:
            self._encoder_rnn = Bidirectional(LSTM(dim), name='encoder_rnn')
            encoded_dim = 2 * dim
        else:
            self._encoder_rnn = LSTM(dim, name='bidir_encoder_rnn')
            encoded_dim = dim
        self._question_transform = Linear(encoded_dim, encoded_dim, name='question_transform')
        # With coattention the bidir input also carries the informed
        # document encoding, hence the extra encoded_dim.
        self._bidir_fork = Linear(
            3 * encoded_dim if coattention else 2 * encoded_dim, 4 * dim, name='bidir_fork')
        self._bidir = Bidirectional(LSTM(dim), name='bidir')
        children.extend([self._lookup,
                         self._encoder_fork, self._encoder_rnn,
                         self._question_transform,
                         self._bidir, self._bidir_fork])
        # Separate MLP readouts score each position as span begin / end.
        activations = [Rectifier()] * len(readout_dims) + [None]
        readout_dims = [2 * dim] + readout_dims + [1]
        self._begin_readout = MLP(activations, readout_dims, name='begin_readout')
        self._end_readout = MLP(activations, readout_dims, name='end_readout')
        self._softmax = NDimensionalSoftmax()
        children.extend([self._begin_readout, self._end_readout, self._softmax])
        if self._use_definitions:
            # A potential bug here: we pass the same vocab to the def reader.
            # If a different token is reserved for UNK in text and in the definitions,
            # we can be screwed.
            # NOTE(review): eval() of the def_reader config string is fine
            # only for trusted configuration values.
            def_reader_class = eval(def_reader)
            def_reader_kwargs = dict(
                num_input_words=def_num_input_words,
                dim=dim, emb_dim=emb_dim,
                vocab=vocab,
                lookup=self._lookup if reuse_word_embeddings else None)
            if def_reader_class == MeanPoolReadDefinitions:
                def_reader_kwargs.update(dict(normalize=True, translate=False))
            self._def_reader = def_reader_class(**def_reader_kwargs)
            self._combiner = MeanPoolCombiner(
                dim=dim, emb_dim=emb_dim,
                def_word_gating=def_word_gating, compose_type=compose_type)
            children.extend([self._def_reader, self._combiner])
        super(ExtractiveQAModel, self).__init__(children=children, **kwargs)
        # create default input variables
        self.contexts = tensor.lmatrix('contexts')
        self.context_mask = tensor.matrix('contexts_mask')
        self.questions = tensor.lmatrix('questions')
        self.question_mask = tensor.matrix('questions_mask')
        self.answer_begins = tensor.lvector('answer_begins')
        self.answer_ends = tensor.lvector('answer_ends')
        input_vars = [
            self.contexts, self.context_mask,
            self.questions, self.question_mask,
            self.answer_begins, self.answer_ends]
        if self._use_definitions:
            self.defs = tensor.lmatrix('defs')
            self.def_mask = tensor.matrix('def_mask')
            self.contexts_def_map = tensor.lmatrix('contexts_def_map')
            self.questions_def_map = tensor.lmatrix('questions_def_map')
            input_vars.extend([self.defs, self.def_mask,
                               self.contexts_def_map, self.questions_def_map])
        self.input_vars = OrderedDict([(var.name, var) for var in input_vars])

    def _push_initialization_config(self):
        """Propagate the configured recurrent weight init to the RNN bricks."""
        super(ExtractiveQAModel, self)._push_initialization_config()
        self._encoder_rnn.weights_init = self.recurrent_weights_init
        self._bidir.weights_init = self.recurrent_weights_init

    def set_embeddings(self, embeddings):
        """Overwrite the lookup-table weights with the given embedding matrix."""
        self._lookup.parameters[0].set_value(embeddings.astype(theano.config.floatX))

    def embeddings_var(self):
        """Return the shared variable holding the word embeddings."""
        return self._lookup.parameters[0]

    def def_reading_parameters(self):
        """Return the parameters that belong to definition reading only.

        When word embeddings are shared with the main lookup table, those
        shared parameters are excluded.
        """
        # NOTE(review): dict.values() has no .extend on Python 3 -- this
        # method can only work on Python 2; confirm the target version.
        parameters = Selector(self._def_reader).get_parameters().values()
        parameters.extend(Selector(self._combiner).get_parameters().values())
        if self._reuse_word_embeddings:
            lookup_parameters = Selector(self._lookup).get_parameters().values()
            parameters = [p for p in parameters if p not in lookup_parameters]
        return parameters

    @application
    def _encode(self, application_call, text, mask, def_embs=None, def_map=None, text_name=None):
        """Embed and RNN-encode a batch of token sequences.

        The RNN is applied time-major (hence the flip01 calls) and the
        states are returned flipped back; presumably (batch, time,
        features) -- confirm against callers.
        """
        if not self._random_unk:
            # Clamp out-of-vocabulary ids to the UNK id.
            text = (
                tensor.lt(text, self._num_input_words) * text
                + tensor.ge(text, self._num_input_words) * self._vocab.unk)
        if text_name:
            application_call.add_auxiliary_variable(
                unk_ratio(text, mask, self._vocab.unk),
                name='{}_unk_ratio'.format(text_name))
        embs = self._lookup.apply(text)
        if self._random_unk:
            # Rare words keep their embedding value but receive no
            # gradient through it.
            embs = (
                tensor.lt(text, self._num_input_words)[:, :, None] * embs
                + tensor.ge(text, self._num_input_words)[:, :, None] * disconnected_grad(embs))
        if def_embs:
            embs, _, _ = self._combiner.apply(embs, mask, def_embs, def_map)
        add_role(embs, EMBEDDINGS)
        encoded = flip01(
            self._encoder_rnn.apply(
                self._encoder_fork.apply(
                    flip01(embs)),
                mask=mask.T)[0])
        return encoded

    @application
    def apply(self, application_call,
              contexts, contexts_mask, questions, questions_mask,
              answer_begins, answer_ends,
              defs=None, def_mask=None, contexts_def_map=None, questions_def_map=None):
        """Compute the per-example span cost for a QA batch.

        :returns: sum of begin- and end-position cross-entropy costs
        """
        def_embs = None
        if self._use_definitions:
            def_embs = self._def_reader.apply(defs, def_mask)
        context_enc = self._encode(contexts, contexts_mask,
                                   def_embs, contexts_def_map, 'context')
        question_enc_pre = self._encode(questions, questions_mask,
                                        def_embs, questions_def_map, 'question')
        question_enc = tensor.tanh(self._question_transform.apply(question_enc_pre))
        # should be (batch size, context length, question_length)
        affinity = tensor.batched_dot(context_enc, flip12(question_enc))
        affinity_mask = contexts_mask[:, :, None] * questions_mask[:, None, :]
        # Large negative fill keeps masked positions out of the softmax.
        affinity = affinity * affinity_mask - 1000.0 * (1 - affinity_mask)
        # soft-aligns every position in the context to positions in the question
        d2q_att_weights = self._softmax.apply(affinity, extra_ndim=1)
        application_call.add_auxiliary_variable(
            d2q_att_weights.copy(), name='d2q_att_weights')
        # soft-aligns every position in the question to positions in the document
        q2d_att_weights = self._softmax.apply(flip12(affinity), extra_ndim=1)
        application_call.add_auxiliary_variable(
            q2d_att_weights.copy(), name='q2d_att_weights')
        # question encoding "in the view of the document"
        question_enc_informed = tensor.batched_dot(
            q2d_att_weights, context_enc)
        question_enc_concatenated = tensor.concatenate(
            [question_enc, question_enc_informed], 2)
        # document encoding "in the view of the question"
        context_enc_informed = tensor.batched_dot(
            d2q_att_weights, question_enc_concatenated)
        if self._coattention:
            context_enc_concatenated = tensor.concatenate(
                [context_enc, context_enc_informed], 2)
        else:
            # Without coattention, append the last question state to
            # every context position instead.
            question_repr_repeated = tensor.repeat(
                question_enc[:, [-1], :], context_enc.shape[1], axis=1)
            context_enc_concatenated = tensor.concatenate(
                [context_enc, question_repr_repeated], 2)
        # note: forward and backward LSTMs share the
        # input weights in the current impl
        bidir_states = flip01(
            self._bidir.apply(
                self._bidir_fork.apply(
                    flip01(context_enc_concatenated)),
                mask=contexts_mask.T)[0])
        # Score every context position as a potential span begin/end;
        # masked positions get a large negative score.
        begin_readouts = self._begin_readout.apply(bidir_states)[:, :, 0]
        begin_readouts = begin_readouts * contexts_mask - 1000.0 * (1 - contexts_mask)
        begin_costs = self._softmax.categorical_cross_entropy(
            answer_begins, begin_readouts)
        end_readouts = self._end_readout.apply(bidir_states)[:, :, 0]
        end_readouts = end_readouts * contexts_mask - 1000.0 * (1 - contexts_mask)
        end_costs = self._softmax.categorical_cross_entropy(
            answer_ends, end_readouts)
        # Auxiliary monitoring variables: greedy predictions and EM flag.
        predicted_begins = begin_readouts.argmax(axis=-1)
        predicted_ends = end_readouts.argmax(axis=-1)
        exact_match = (tensor.eq(predicted_begins, answer_begins) *
                       tensor.eq(predicted_ends, answer_ends))
        application_call.add_auxiliary_variable(
            predicted_begins, name='predicted_begins')
        application_call.add_auxiliary_variable(
            predicted_ends, name='predicted_ends')
        application_call.add_auxiliary_variable(
            exact_match, name='exact_match')
        return begin_costs + end_costs

    def apply_with_default_vars(self):
        """Apply the model to the default input variables created in __init__."""
        return self.apply(*self.input_vars.values())
| {
"repo_name": "tombosc/dict_based_learning",
"path": "dictlearn/extractive_qa_model.py",
"copies": "1",
"size": "12133",
"license": "mit",
"hash": -8711173015976748000,
"line_mean": 42.024822695,
"line_max": 97,
"alpha_frac": 0.6197972472,
"autogenerated": false,
"ratio": 3.850523643287845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9955164767744158,
"avg_score": 0.0030312245487374213,
"num_lines": 282
} |
"""A dictionary-equipped language model."""
import theano
from theano import tensor
from blocks.bricks import (Initializable, Linear, NDimensionalSoftmax, MLP,
Tanh, Rectifier)
from blocks.bricks.base import application
from blocks.bricks.recurrent import LSTM
from blocks.bricks.lookup import LookupTable
from blocks.initialization import Constant
from dictlearn.theano_util import unk_ratio
from dictlearn.ops import WordToIdOp, RetrievalOp, WordToCountOp
from dictlearn.aggregation_schemes import Perplexity
from dictlearn.stuff import DebugLSTM
from dictlearn.util import masked_root_mean_square
from dictlearn.lookup import (LSTMReadDefinitions, MeanPoolReadDefinitions,
MeanPoolCombiner)
class LanguageModel(Initializable):
    """The dictionary-equipped language model.
    Parameters
    ----------
    emb_dim: int
        The dimension of word embeddings (including for def model if standalone)
        If 0, defaults to `dim`.
    emb_def_dim : int
        The dimension of the definition-reader embeddings. If 0, defaults
        to `emb_dim`.
    dim : int
        The dimension of the RNNs states (including for def model if standalone)
    num_input_words : int
        The size of the LM's input vocabulary. If 0, defaults to the full
        vocabulary size.
    def_num_input_words : int
        The input vocabulary size of the definition reader. If 0, defaults
        to `num_input_words`. Must equal `num_input_words` unless
        `standalone_def_lookup` is True.
    num_output_words : int
        The size of the LM's output vocabulary.
    vocab
        The vocabulary object.
    retrieval
        The dictionary retrieval algorithm. If `None`, the language model
        does not use any dictionary.
    def_reader: either 'LSTM' or 'mean'
    standalone_def_lookup : bool
        If `True`, the definition reader gets its own lookup table;
        otherwise the main LM lookup is shared (requires matching
        embedding dims and vocab sizes).
    standalone_def_rnn : bool
        If `True`, a standalone RNN with separate word embeddings is used
        to embed definition. If `False` the language model is reused.
    disregard_word_embeddings : bool
        If `True`, the word embeddings are not used, only the information
        from the definitions is used.
    compose_type : str
        If 'sum', the definition and word embeddings are averaged
        If 'fully_connected_linear', a learned perceptron compose the 2
        embeddings linearly
        If 'fully_connected_relu', ...
        If 'fully_connected_tanh', ...
    very_rare_threshold : list of int
        Word-count thresholds; one extra perplexity metric is reported per
        threshold for words rarer than it.
        NOTE(review): mutable default argument `[10]` — shared across calls.
    cache_size : int
        If > 0, a LookupTable cache of definition embeddings is created.
    """
    def __init__(self, emb_dim, emb_def_dim, dim, num_input_words, def_num_input_words,
                 num_output_words,
                 vocab, retrieval=None,
                 def_reader='LSTM',
                 standalone_def_lookup=True,
                 standalone_def_rnn=True,
                 disregard_word_embeddings=False,
                 compose_type='sum',
                 very_rare_threshold=[10],
                 cache_size=0,
                 **kwargs):
        # TODO(tombosc): document
        # Resolve 0-valued dims/vocab sizes to their documented defaults.
        if emb_dim == 0:
            emb_dim = dim
        if emb_def_dim == 0:
            emb_def_dim = emb_dim
        if num_input_words == 0:
            num_input_words = vocab.size()
        if def_num_input_words == 0:
            def_num_input_words = num_input_words
        # Sharing the lookup requires identical input vocabularies.
        if (num_input_words != def_num_input_words) and (not standalone_def_lookup):
            raise NotImplementedError()
        self._very_rare_threshold = very_rare_threshold
        self._num_input_words = num_input_words
        self._num_output_words = num_output_words
        self._vocab = vocab
        self._retrieval = retrieval
        self._disregard_word_embeddings = disregard_word_embeddings
        self._compose_type = compose_type
        # Theano ops mapping word strings to ids / corpus counts.
        self._word_to_id = WordToIdOp(self._vocab)
        self._word_to_count = WordToCountOp(self._vocab)
        children = []
        self._cache = None
        if cache_size > 0:
            #TODO(tombosc) do we implement cache as LookupTable or theano matrix?
            #self._cache = theano.shared(np.zeros((def_num_input_words, emb_dim)))
            self._cache = LookupTable(cache_size, emb_dim,
                                      name='cache_def_embeddings')
            children.append(self._cache)
        if self._retrieval:
            self._retrieve = RetrievalOp(retrieval)
        # Main LM: lookup -> fork (4*dim gates) -> LSTM.
        self._main_lookup = LookupTable(self._num_input_words, emb_dim, name='main_lookup')
        self._main_fork = Linear(emb_dim, 4 * dim, name='main_fork')
        self._main_rnn = DebugLSTM(dim, name='main_rnn') # TODO(tombosc): use regular LSTM?
        children.extend([self._main_lookup, self._main_fork, self._main_rnn])
        if self._retrieval:
            if standalone_def_lookup:
                lookup = None
            else:
                if emb_dim != emb_def_dim:
                    raise ValueError("emb_dim != emb_def_dim: cannot share lookup")
                lookup = self._main_lookup
            if def_reader == 'LSTM':
                if standalone_def_rnn:
                    fork_and_rnn = None
                else:
                    # Reuse the LM's fork+LSTM to read definitions.
                    fork_and_rnn = (self._main_fork, self._main_rnn)
                self._def_reader = LSTMReadDefinitions(def_num_input_words, emb_def_dim,
                                                       dim, vocab, lookup,
                                                       fork_and_rnn, cache=self._cache)
            elif def_reader == 'mean':
                self._def_reader = MeanPoolReadDefinitions(def_num_input_words, emb_def_dim,
                                                           dim, vocab, lookup,
                                                           translate=(emb_def_dim!=dim),
                                                           normalize=False)
            else:
                raise Exception("def reader not understood")
            self._combiner = MeanPoolCombiner(
                dim=dim, emb_dim=emb_dim, compose_type=compose_type)
            children.extend([self._def_reader, self._combiner])
        self._pre_softmax = Linear(dim, self._num_output_words)
        self._softmax = NDimensionalSoftmax()
        children.extend([self._pre_softmax, self._softmax])
        super(LanguageModel, self).__init__(children=children, **kwargs)
    def _push_initialization_config(self):
        # Cache embeddings start at zero; everything else follows the
        # bricks' regular initialization configuration.
        super(LanguageModel, self)._push_initialization_config()
        if self._cache:
            self._cache.weights_init = Constant(0.)
    def set_def_embeddings(self, embeddings):
        """Overwrite the definition-reader lookup weights with `embeddings`."""
        self._def_reader._def_lookup.parameters[0].set_value(embeddings.astype(theano.config.floatX))
    def get_def_embeddings_params(self):
        """Return the definition-reader lookup weight parameter."""
        return self._def_reader._def_lookup.parameters[0]
    def get_cache_params(self):
        """Return the cache LookupTable weights (requires cache_size > 0)."""
        return self._cache.W
    def add_perplexity_measure(self, application_call, minus_logs, mask, name):
        # Sum the masked negative log-likelihoods over time (axis 0) and
        # report exp(total cost / total mask) as "perplexity_<name>".
        costs = (minus_logs * mask).sum(axis=0)
        perplexity = tensor.exp(costs.sum() / mask.sum())
        perplexity.tag.aggregation_scheme = Perplexity(
            costs.sum(), mask.sum())
        full_name = "perplexity_" + name
        application_call.add_auxiliary_variable(perplexity, name=full_name)
        return costs
    @application
    def apply(self, application_call, words, mask):
        """Compute the log-likelihood for a batch of sequences.
        words
            An integer matrix of shape (B, T), where T is the number of time
            step, B is the batch size. Note that this order of the axis is
            different from what all RNN bricks consume, hence and the axis
            should be transposed at some point.
        mask
            A float32 matrix of shape (B, T). Zeros indicate the padding.

        Returns (costs, updates): per-sequence costs and the theano updates
        for the definition-embedding cache (empty list when no cache).
        """
        if self._retrieval:
            defs, def_mask, def_map = self._retrieve(words)
            def_embeddings = self._def_reader.apply(defs, def_mask)
            # Auxililary variable for debugging
            application_call.add_auxiliary_variable(
                def_embeddings.shape[0], name="num_definitions")
        word_ids = self._word_to_id(words)
        # shortlisting: ids beyond the input/output vocabularies map to UNK
        input_word_ids = (tensor.lt(word_ids, self._num_input_words) * word_ids
                          + tensor.ge(word_ids, self._num_input_words) * self._vocab.unk)
        output_word_ids = (tensor.lt(word_ids, self._num_output_words) * word_ids
                           + tensor.ge(word_ids, self._num_output_words) * self._vocab.unk)
        application_call.add_auxiliary_variable(
            unk_ratio(input_word_ids, mask, self._vocab.unk),
            name='unk_ratio')
        # Run the main rnn with combined inputs
        word_embs = self._main_lookup.apply(input_word_ids)
        application_call.add_auxiliary_variable(
            masked_root_mean_square(word_embs, mask), name='word_emb_RMS')
        if self._retrieval:
            rnn_inputs, updated, positions = self._combiner.apply(word_embs, mask, def_embeddings, def_map)
        else:
            rnn_inputs = word_embs
        updates = []
        if self._cache:
            # NOTE(review): `positions`/`updated` only exist when retrieval is
            # enabled; a cache without retrieval would raise NameError here.
            flat_word_ids = word_ids.flatten()
            flat_word_ids_to_update = flat_word_ids[positions]
            # computing updates for cache
            updates = [(self._cache.W, tensor.set_subtensor(self._cache.W[flat_word_ids_to_update], updated))]
        # NOTE(review): computed from word_embs, not rnn_inputs — confirm
        # whether the combined inputs were meant to be measured here.
        application_call.add_auxiliary_variable(
            masked_root_mean_square(word_embs, mask), name='main_rnn_in_RMS')
        # Transpose (B, T, ...) -> (T, B, ...) as the RNN bricks expect.
        main_rnn_states = self._main_rnn.apply(
            tensor.transpose(self._main_fork.apply(rnn_inputs), (1, 0, 2)),
            mask=mask.T)[0]
        # The first token is not predicted
        logits = self._pre_softmax.apply(main_rnn_states[:-1])
        targets = output_word_ids.T[1:]
        out_softmax = self._softmax.apply(logits, extra_ndim=1)
        application_call.add_auxiliary_variable(
            out_softmax.copy(), name="proba_out")
        minus_logs = self._softmax.categorical_cross_entropy(
            targets, logits, extra_ndim=1)
        targets_mask = mask.T[1:]
        # Overall perplexity plus breakdowns over word subsets below.
        costs = self.add_perplexity_measure(application_call, minus_logs,
                                            targets_mask,
                                            "")
        missing_embs = tensor.eq(input_word_ids, self._vocab.unk).astype('int32') # (bs, L)
        self.add_perplexity_measure(application_call, minus_logs,
                                    targets_mask * missing_embs.T[:-1],
                                    "after_mis_word_embs")
        self.add_perplexity_measure(application_call, minus_logs,
                                    targets_mask * (1-missing_embs.T[:-1]),
                                    "after_word_embs")
        word_counts = self._word_to_count(words)
        very_rare_masks = []
        for threshold in self._very_rare_threshold:
            # Perplexity restricted to words rarer than `threshold`.
            very_rare_mask = tensor.lt(word_counts, threshold).astype('int32')
            very_rare_mask = targets_mask * (very_rare_mask.T[:-1])
            very_rare_masks.append(very_rare_mask)
            self.add_perplexity_measure(application_call, minus_logs,
                                        very_rare_mask,
                                        "after_very_rare_" + str(threshold))
        if self._retrieval:
            # Perplexity restricted to words for which a definition was found.
            has_def = tensor.zeros_like(output_word_ids)
            has_def = tensor.inc_subtensor(has_def[def_map[:,0], def_map[:,1]], 1)
            mask_targets_has_def = has_def.T[:-1] * targets_mask # (L-1, bs)
            self.add_perplexity_measure(application_call, minus_logs,
                                        mask_targets_has_def,
                                        "after_def_embs")
            for thresh, very_rare_mask in zip(self._very_rare_threshold, very_rare_masks):
                self.add_perplexity_measure(application_call, minus_logs,
                                            very_rare_mask * mask_targets_has_def,
                                            "after_def_very_rare_" + str(thresh))
            application_call.add_auxiliary_variable(
                mask_targets_has_def.T, name='mask_def_emb')
        return costs, updates
| {
"repo_name": "tombosc/dict_based_learning",
"path": "dictlearn/language_model.py",
"copies": "1",
"size": "11639",
"license": "mit",
"hash": -6561552889433868000,
"line_mean": 41.9483394834,
"line_max": 110,
"alpha_frac": 0.5759085832,
"autogenerated": false,
"ratio": 3.9955372468245796,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002651737844265266,
"num_lines": 271
} |
# a dictionary of critics and their ratings of a small
# set of movies
# copied from Segaran: Collective Intelligence (2006) Ch.2
# critic name -> {movie title: rating}; ratings observed in the 1.0-5.0 range
critics={
    'Lisa Rose': {
        'Lady in the Water': 2.5,
        'Snakes on a Plane': 3.5,
        'Just My Luck': 3.0,
        'Superman Returns': 3.5,
        'You, Me and Dupree': 2.5,
        'The Night Listener': 3.0
    },
    'Gene Seymour': {
        'Lady in the Water': 3.0,
        'Snakes on a Plane': 3.5,
        'Just My Luck': 1.5,
        'Superman Returns': 5.0,
        'You, Me and Dupree': 3.5,
        'The Night Listener': 3.0
    },
    'Michael Phillips': {
        'Lady in the Water': 2.5,
        'Snakes on a Plane': 3.5,
        'Superman Returns': 3.5,
        'The Night Listener': 4.0
    },
    'Claudia Puig': {
        'Snakes on a Plane': 3.5,
        'Just My Luck': 3.0,
        'Superman Returns': 4.0,
        'You, Me and Dupree': 2.5,
        'The Night Listener': 4.5
    },
    'Mike LaSalle': {
        'Lady in the Water': 3.0,
        'Snakes on a Plane': 4.0,
        'Just My Luck': 2.0,
        'Superman Returns': 3.0,
        'You, Me and Dupree': 2.0,
        'The Night Listener': 3.0
    },
    'Jack Matthews': {
        'Lady in the Water': 3.0,
        'Snakes on a Plane': 4.0,
        'Superman Returns': 5.0,
        'You, Me and Dupree': 3.5,
        'The Night Listener': 3.0
    },
    'Toby': {
        'Snakes on a Plane': 4.5,
        'Superman Returns': 4.0,
        'You, Me and Dupree': 1.0
    }
}
# Method copied from Segaran: Collective Intelligence (2006) Ch.2
from math import sqrt
def sim_distance(prefs,person1,person2):
    """Euclidean-distance-based similarity (Segaran Ch.2).

    Returns 1/(1 + sum of squared rating differences) over the movies both
    people rated, or 0 when they share no ratings.
    """
    shared = [item for item in prefs[person1] if item in prefs[person2]]
    if not shared:
        return 0
    sum_of_squares = sum(
        (prefs[person1][item] - prefs[person2][item]) ** 2 for item in shared)
    return 1 / (1 + sum_of_squares)
# This method is equivalent to sim_distance() above, uses scipy's sqeuclidean method
import scipy.spatial
def euclidean_distance(prefs,person1,person2):
    """Equivalent to sim_distance(), delegating to scipy's sqeuclidean.

    Returns 1/(1 + squared euclidean distance) over commonly rated items,
    or 0 when there are none.
    """
    shared = [item for item in prefs[person1] if item in prefs[person2]]
    if not shared:
        return 0
    ratings1 = [prefs[person1][item] for item in shared]
    ratings2 = [prefs[person2][item] for item in shared]
    sq_dist = scipy.spatial.distance.sqeuclidean(ratings1, ratings2)
    return 1 / (1 + sq_dist)
# Method copied from Segaran: Collective Intelligence (2006) Ch.2
def sim_pearson(prefs,p1,p2):
    """Pearson correlation coefficient between two critics (Segaran Ch.2).

    Computed over the items both critics rated; returns 0 when there is no
    overlap or when either sample has zero variance.
    """
    common = [it for it in prefs[p1] if it in prefs[p2]]
    n = len(common)
    if n == 0:
        return 0
    sum1 = sum(prefs[p1][it] for it in common)
    sum2 = sum(prefs[p2][it] for it in common)
    sum1Sq = sum(prefs[p1][it] ** 2 for it in common)
    sum2Sq = sum(prefs[p2][it] ** 2 for it in common)
    pSum = sum(prefs[p1][it] * prefs[p2][it] for it in common)
    # Pearson score from the running sums.
    num = pSum - (sum1 * sum2 / n)
    den = sqrt((sum1Sq - sum1 ** 2 / n) * (sum2Sq - sum2 ** 2 / n))
    if den == 0:
        return 0
    return num / den
# Method copied from Segaran: Collective Intelligence (2006) Ch.2
def topMatches(prefs,person,n=5,similarity=sim_pearson):
    """Return the n critics most similar to `person`, best match first."""
    ranked = sorted(
        ((similarity(prefs, person, other), other)
         for other in prefs if other != person),
        reverse=True)
    return ranked[:n]
# Method copied from Segaran: Collective Intelligence (2006) Ch.2
# Gets recommendations for a person by using weighted average
# of every other user's rankings
def getRecommendations(prefs,person,similarity=sim_pearson):
    """Recommend unseen items for `person` via a similarity-weighted average
    of every other user's ratings. Returns (score, item) tuples, best first."""
    totals = {}
    simSums = {}
    for other in prefs:
        if other == person:
            continue  # never compare a person with themselves
        sim = similarity(prefs, person, other)
        if sim <= 0:
            continue  # ignore non-positive similarities
        for item in prefs[other]:
            # only score items `person` hasn't rated yet
            if item not in prefs[person] or prefs[person][item] == 0:
                totals[item] = totals.get(item, 0) + prefs[other][item] * sim
                simSums[item] = simSums.get(item, 0) + sim
    # Normalize each weighted sum and sort best-first.
    rankings = sorted(
        ((total / simSums[item], item) for item, total in totals.items()),
        reverse=True)
    return rankings
# Method copied from Segaran: Collective Intelligence (2006) Ch.2
def transformPrefs(prefs):
    """Invert the matrix: result[item][person] = rating."""
    flipped = {}
    for person, ratings in prefs.items():
        for item, rating in ratings.items():
            flipped.setdefault(item, {})[person] = rating
    return flipped
# Method copied from Segaran: Collective Intelligence (2006) Ch.2
def calculateSimilarItems(prefs,n=10,similarity=sim_distance):
    """Build a dict mapping each item to a list of its n most similar items.

    Bug fix: the `similarity` argument is now forwarded to topMatches —
    previously sim_distance was hard-coded regardless of the parameter.
    """
    result={}
    # Invert the preference matrix to be item-centric.
    itemPrefs=transformPrefs(prefs)
    c=0
    for item in itemPrefs:
        # Status updates for large datasets (parenthesized print works on
        # both Python 2 and 3).
        c+=1
        if c%100==0: print("%d / %d" % (c,len(itemPrefs)))
        # Find the items most similar to this one.
        scores=topMatches(itemPrefs,item,n=n,similarity=similarity)
        result[item]=scores
    return result
# Method copied from Segaran: Collective Intelligence (2006) Ch.2
def getRecommendedItems(prefs,itemMatch,user):
    """Item-based recommendations for `user` given a precomputed item
    similarity table. Returns (score, item) tuples, best first."""
    userRatings = prefs[user]
    scores = {}
    totalSim = {}
    # For every item the user rated, walk its similarity neighbours.
    for item, rating in userRatings.items():
        for similarity, other in itemMatch[item]:
            if other in userRatings:
                continue  # user already rated this one
            # Weighted sum of rating * similarity, plus similarity totals.
            scores[other] = scores.get(other, 0) + similarity * rating
            totalSim[other] = totalSim.get(other, 0) + similarity
    # Divide each total by its weight sum to get an average, best first.
    rankings = sorted(
        ((score / totalSim[other], other) for other, score in scores.items()),
        reverse=True)
    return rankings
# Method copied from Segaran: Collective Intelligence (2006) Ch.2
def loadMovieLens(path='../data/ml-100k'):
    """Load MovieLens 100k ratings into a prefs dict keyed by user id,
    with movie titles as inner keys and float ratings as values."""
    # u.item: pipe-delimited, first two fields are movie id and title.
    movies = {}
    for line in open(path + '/u.item'):
        fields = line.split('|')
        movies[fields[0]] = fields[1]
    # u.data: tab-delimited user, movie id, rating, timestamp.
    prefs = {}
    for line in open(path + '/u.data'):
        user, movieid, rating, ts = line.split('\t')
        prefs.setdefault(user, {})[movies[movieid]] = float(rating)
    return prefs
def loadSommelierWines(comparator='rating'):
    """Load tasting data from the sommelier MySQL database.

    Returns author -> {wine name + vintage: value} where value is the
    tasting notes text when comparator == 'notes', else the numeric rating.
    """
    import MySQLdb
    from MySQLdb.constants import FIELD_TYPE
    from MySQLdb.cursors import DictCursor
    # Force LONG columns to plain ints.
    converter = { FIELD_TYPE.LONG: int }
    # NOTE(review): hard-coded database credentials — move to config/env.
    connection = MySQLdb.connect(user="sommelier",db="sommelier",passwd="vinorosso",conv=converter)
    connection.set_character_set('utf8')
    cursor = connection.cursor(DictCursor)
    cursor.execute('SET NAMES utf8;')
    cursor.execute('SET CHARACTER SET utf8;')
    cursor.execute('SET character_set_connection=utf8;')
    cursor.execute("""
    select w.name as wine, w.vintage, a.name as author, t.rating, t.notes
    from wine w join tasting t on t.wine_id = w.id join author a on a.id = t.author_id
    """)
    results = cursor.fetchall()
    prefs={}
    for row in results:
        user = row['author']
        wine = row['wine']
        vintage = row['vintage']
        # NOTE(review): `rating` and `notes` locals are unused below; row
        # fields are re-read directly when building `comp`.
        rating = row['rating']
        notes = row['notes']
        prefs.setdefault(user,{})
        if comparator == 'notes':
            comp = row['notes']
        else:
            # + 0.0 coerces the integer rating to float.
            comp = row['rating'] + 0.0
        # NOTE(review): key has no separator between name and vintage, unlike
        # loadSommelierAuthors which joins with a space — confirm intended.
        prefs[user][''.join([wine,str(vintage)])] = comp
    cursor.close()
    connection.close()
    return prefs
def loadSommelierAuthors():
    """Load tasting ratings from the sommelier MySQL database.

    Returns author -> {"wine name vintage": rating}.
    """
    import MySQLdb
    from MySQLdb.constants import FIELD_TYPE
    from MySQLdb.cursors import DictCursor
    # Force LONG columns to plain ints.
    converter = { FIELD_TYPE.LONG: int }
    # NOTE(review): hard-coded database credentials — move to config/env.
    connection = MySQLdb.connect(user="sommelier",db="sommelier",passwd="vinorosso",conv=converter)
    connection.set_character_set('utf8')
    cursor = connection.cursor(DictCursor)
    cursor.execute('SET NAMES utf8;')
    cursor.execute('SET CHARACTER SET utf8;')
    cursor.execute('SET character_set_connection=utf8;')
    cursor.execute("""
    select w.name as wine, w.vintage as vintage, a.name as author, t.rating as rating from wine w join tasting t on t.wine_id = w.id join author a on a.id = t.author_id
    """)
    results = cursor.fetchall()
    authors = {}
    for row in results:
        author = row['author']
        # Key wines by "name vintage" (space-separated).
        wine = ' '.join([row['wine'], str(row['vintage'])])
        rating = row['rating']
        authors.setdefault(author,{})
        authors[author][wine] = rating;
    cursor.close()
    connection.close()
    return authors
def getAuthorSimilarities(similarity=sim_pearson):
    """Compute pairwise similarity scores between all sommelier authors.

    Returns author -> {other author: score}, omitting zero scores.
    """
    authors = loadSommelierAuthors()
    sims = {}
    for first in authors:
        sims[first] = {}
        for second in authors:
            if first == second:
                continue  # skip self-comparison
            score = similarity(authors, first, second)
            if score != 0:
                sims[first][second] = score
    return sims
| {
"repo_name": "pgchamberlin/sommelier",
"path": "recommendations.py",
"copies": "1",
"size": "8877",
"license": "mit",
"hash": 6912271908352344000,
"line_mean": 29.6103448276,
"line_max": 164,
"alpha_frac": 0.6092148248,
"autogenerated": false,
"ratio": 3.3612268080272623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4470441632827262,
"avg_score": null,
"num_lines": null
} |
# A dictionary of filter classes, keyed by their name attributes.
# Populated automatically by register_filter() via the FilterRegistry metaclass.
FILTER_REGISTRY = {}
class FilterRegistry(type):
    """
    Metaclass used to automatically register new filter classes in our filter
    registry. Enables shorthand filter notation.
    >>> from es_fluent.builder import QueryBuilder
    >>> query_builder = QueryBuilder()
    >>> query_builder.add_filter('missing', 'field_name').to_query()['filter']
    {'missing': {'field': 'field_name'}}
    """
    def __new__(cls, clsname, bases, attrs):
        # Create the class normally, then record it in FILTER_REGISTRY so it
        # can later be looked up by its shorthand `name` attribute.
        newclass = super(FilterRegistry, cls).__new__(
            cls, clsname, bases, attrs)
        register_filter(newclass)
        return newclass
def register_filter(filter_cls):
    """
    Adds the ``filter_cls`` to our registry, keyed by its ``name`` attribute.
    Raises ``RuntimeError`` on a duplicate name.
    """
    name = filter_cls.name
    if name is None:
        # Base/abstract filters carry no shorthand name; nothing to register.
        return
    if name in FILTER_REGISTRY:
        raise RuntimeError(
            "Filter class already registered: {}".format(name))
    FILTER_REGISTRY[name] = filter_cls
def build_filter(filter_or_string, *args, **kwargs):
    """
    Overloaded filter construction. If ``filter_or_string`` is a string we
    look up its corresponding class in the filter registry and instantiate
    it; a leading ``~`` produces the negated filter. Otherwise we assume
    ``filter_or_string`` is already a filter instance and return it as-is.
    :return: :class:`~es_fluent.filters.Filter`
    """
    if not isinstance(filter_or_string, basestring):
        return filter_or_string
    negated = filter_or_string.startswith('~')
    filter_name = filter_or_string[1:] if negated else filter_or_string
    instance = FILTER_REGISTRY[filter_name](*args, **kwargs)
    return ~instance if negated else instance
class Filter(object):
    """
    The base filter. Subclasses of this Filter will automatically register
    themselves on import.
    """
    #: The shorthand name of the filter. ``None`` means "do not register".
    name = None
    #: Auto-register any Filter subclass with our registry.
    __metaclass__ = FilterRegistry
    def __invert__(self):
        """
        Returns this filter wrapped in a :class:`es_fluent.filters.Not` filter.
        """
        # `Not` is imported from .core at the bottom of this module.
        not_filter = Not()
        not_filter.add_filter(self)
        return not_filter
    def to_query(self):
        """
        Serializes this ``Filter`` and any descendants into a json-serializable
        dictionary suitable for use with the elasticsearch api.
        """
        raise NotImplementedError()
from .core import (
Age,
And,
Custom,
Dict,
Exists,
Generic,
Missing,
Not,
Or,
Range,
RegExp,
Script,
ScriptID,
Term,
Terminal,
Terms,
)
from .geometry import (
GeoJSON,
IndexedShape,
)
| {
"repo_name": "planetlabs/es_fluent",
"path": "es_fluent/filters/__init__.py",
"copies": "1",
"size": "2824",
"license": "apache-2.0",
"hash": 5872935133497850000,
"line_mean": 25.641509434,
"line_max": 79,
"alpha_frac": 0.6140226629,
"autogenerated": false,
"ratio": 4.086830680173661,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5200853343073661,
"avg_score": null,
"num_lines": null
} |
# A dictionary of movie critics and their ratings of a small
from math import sqrt
# function for calculating (Euclidean Distance Score) distance-based similarity score for person1 and person2
def similarity_distance(input_recs, person1, person2):
    """Euclidean Distance Score between two critics.

    Returns 1/(1 + sqrt(sum of squared rating differences)) over commonly
    rated items, or 0 when the critics share no ratings.
    """
    shared = [key for key in input_recs[person1] if key in input_recs[person2]]
    if not shared:
        return 0
    squared_diffs = sum(
        (input_recs[person1][key] - input_recs[person2][key]) ** 2
        for key in shared)
    return 1 / (1 + sqrt(squared_diffs))
# function for calculating (Pearson correlation coefficient) for p1 and p2
# This function will have a value between -1 and 1
# 1 means that two critics have the exact same ratings
def similarity_pearson(input_recs, p1, p2):
    """Pearson correlation coefficient between two critics.

    Value lies between -1 and 1; 1 means the critics rated the common
    movies identically (up to a linear transform). Returns 0 when there is
    no overlap or zero variance.
    """
    shared = [key for key in input_recs[p1] if key in input_recs[p2]]
    n = len(shared)
    if n == 0:
        return 0
    ratings1 = [input_recs[p1][key] for key in shared]
    ratings2 = [input_recs[p2][key] for key in shared]
    # Running sums needed by the closed-form Pearson formula.
    sum_p1 = sum(ratings1)
    sum_p2 = sum(ratings2)
    sqr_sum_p1 = sum(r ** 2 for r in ratings1)
    sqr_sum_p2 = sum(r ** 2 for r in ratings2)
    product_sum = sum(r1 * r2 for r1, r2 in zip(ratings1, ratings2))
    numerator = product_sum - (sum_p1 * sum_p2 / n)
    denominator = sqrt((sqr_sum_p1 - sum_p1 ** 2 / n) *
                       (sqr_sum_p2 - sum_p2 ** 2 / n))
    if denominator == 0:
        return 0
    return numerator / denominator
# set of movies
# critic name -> {movie title: rating}; ratings observed in the 1.0-5.0 range
critics={'Lisa Rose': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5,
'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5,
'The Night Listener': 3.0},
'Gene Seymour': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5,
'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0,
'You, Me and Dupree': 3.5},
'Michael Phillips': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0,
'Superman Returns': 3.5, 'The Night Listener': 4.0},
'Claudia Puig': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0,
'The Night Listener': 4.5, 'Superman Returns': 4.0,
'You, Me and Dupree': 2.5},
'Mick LaSalle': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0,
'You, Me and Dupree': 2.0},
'Jack Matthews': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
'Toby': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0,'Superman Returns':4.0}} | {
"repo_name": "jpbinary/Programming-Collective-Intelligence",
"path": "recommendations.py",
"copies": "1",
"size": "3700",
"license": "mit",
"hash": 5979811604911848000,
"line_mean": 44.1341463415,
"line_max": 109,
"alpha_frac": 0.6410810811,
"autogenerated": false,
"ratio": 2.9411764705882355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9048734579075096,
"avg_score": 0.006704594522627843,
"num_lines": 82
} |
# A dictionary of movie critics and their ratings of a small
# set of movies
# (critic name -> {movie title: rating}; ratings observed in the 1.0-5.0 range)
critics = {'Lisa Rose': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5, 'The Night Listener': 3.0},
           'Gene Seymour': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5, 'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0, 'You, Me and Dupree': 3.5},
           'Michael Phillips': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0, 'Superman Returns': 3.5, 'The Night Listener': 4.0},
           'Claudia Puig': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, 'The Night Listener': 4.5, 'Superman Returns': 4.0, 'You, Me and Dupree': 2.5},
           'Mick LaSalle': {'Lady in the Water':3.0, 'Snakes on a Plane': 4.0, 'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0, 'You, Me and Dupree': 2.0},
           'Jack Matthews': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, 'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
           'Toby': {'Snakes on a Plane':4.5, 'You, Me and Dupree':1.0, 'Superman Returns':4.0}}
from math import sqrt
# Returns a distance-based similarity score for person1 and person2
def sim_distance(prefs,person1,person2):
    """Distance-based similarity score for person1 and person2:
    1/(1 + sum of squared rating differences) over shared items, 0 if none."""
    overlap = [item for item in prefs[person1] if item in prefs[person2]]
    if not overlap:
        return 0
    total = sum((prefs[person1][item] - prefs[person2][item]) ** 2
                for item in overlap)
    return 1 / (1 + total)
# Returns the Pearson correlation coefficient for p1 and p2
def sim_pearson(prefs,p1,p2):
    """Pearson correlation coefficient for p1 and p2 over mutually rated
    items; 0 when there is no overlap or zero variance."""
    mutual = [item for item in prefs[p1] if item in prefs[p2]]
    n = len(mutual)
    if n == 0:
        return 0
    # Sums of preferences, their squares, and pairwise products.
    sum1 = sum(prefs[p1][it] for it in mutual)
    sum2 = sum(prefs[p2][it] for it in mutual)
    sum1Sq = sum(prefs[p1][it] ** 2 for it in mutual)
    sum2Sq = sum(prefs[p2][it] ** 2 for it in mutual)
    pSum = sum(prefs[p1][it] * prefs[p2][it] for it in mutual)
    num = pSum - (sum1 * sum2 / n)
    den = sqrt((sum1Sq - sum1 ** 2 / n) * (sum2Sq - sum2 ** 2 / n))
    if den == 0:
        return 0
    return num / den
# Returns the best matches for person from the prefs dictionary.
# Number of results and similarity function are optional params.
def topMatches(prefs,person,n=5,similarity=sim_pearson):
    """Best matches for `person` from prefs: top-n (score, other) tuples,
    highest score first. Result count and similarity function are optional."""
    candidates = sorted(
        ((similarity(prefs, person, other), other)
         for other in prefs if other != person),
        reverse=True)
    return candidates[:n]
# Gets recommendations for a person by using a weighted average
# of every other user's rankings
def getRecommendations(prefs,person,similarity=sim_pearson):
    """Recommendations for `person` using a weighted average of every other
    user's rankings. Returns (score, item) tuples, best first."""
    totals = {}
    simSums = {}
    for other in prefs:
        if other == person:
            continue  # don't compare me to myself
        sim = similarity(prefs, person, other)
        if sim <= 0:
            continue  # ignore scores of zero or lower
        for item in prefs[other]:
            # only score items I haven't rated yet
            if item not in prefs[person] or prefs[person][item] == 0:
                totals[item] = totals.get(item, 0) + prefs[other][item] * sim
                simSums[item] = simSums.get(item, 0) + sim
    # Create the normalized list, sorted best-first.
    rankings = sorted(
        ((total / simSums[item], item) for item, total in totals.items()),
        reverse=True)
    return rankings
def transformPrefs(prefs):
    """Flip person and item axes: result[item][person] = rating."""
    inverted = {}
    for person, ratings in prefs.items():
        for item, value in ratings.items():
            inverted.setdefault(item, {})[person] = value
    return inverted
| {
"repo_name": "jefflyn/buddha",
"path": "src/recommendations/recommend.py",
"copies": "1",
"size": "3938",
"license": "artistic-2.0",
"hash": -935624347580134400,
"line_mean": 34.4774774775,
"line_max": 177,
"alpha_frac": 0.6584560691,
"autogenerated": false,
"ratio": 2.963130173062453,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4121586242162453,
"avg_score": null,
"num_lines": null
} |
# A dictionary of movie critics and their ratings of a small
# set of movies
# (critic name -> {movie title: rating}; ratings observed in the 1.0-5.0 range)
critics={'Lisa Rose': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5,
 'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5,
 'The Night Listener': 3.0},
'Gene Seymour': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5,
 'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0,
 'You, Me and Dupree': 3.5},
'Michael Phillips': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0,
 'Superman Returns': 3.5, 'The Night Listener': 4.0},
'Claudia Puig': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0,
 'The Night Listener': 4.5, 'Superman Returns': 4.0,
 'You, Me and Dupree': 2.5},
'Mick LaSalle': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
 'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0,
 'You, Me and Dupree': 2.0},
'Jack Matthews': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
 'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
'Toby Segaran': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0,'Superman Returns':4.0}
}
from math import sqrt
import numpy as np
import operator
def sim_euclid_normed(prefs, person1, person2):
    """Length-normalized variant of sim_euclid (see its docstring)."""
    return sim_euclid(prefs, person1, person2, normed=True)
def sim_euclid(prefs,person1,person2,normed=False):
    ''' Returns a euclidean-distance-based similarity score for
    person1 and person2. The distance is computed only over items ranked
    by both persons; if there are none, 0 is returned.
    If `normed` is True, the euclidean distance is divided by the number of
    commonly ranked items before being turned into a similarity, so that
    distances do not grow merely because more items are shared.
    '''
    overlap = [item for item in prefs[person1] if item in prefs[person2]]
    if not overlap:
        return 0
    distance = sqrt(sum((prefs[person1][item] - prefs[person2][item]) ** 2
                        for item in overlap))
    if normed:
        # Average the distance over the commonly ranked items.
        distance = 1.0 / len(overlap) * distance
    return 1 / (1 + distance)
def sim_pearson(prefs,p1,p2):
    '''
    Returns the Pearson correlation coefficient for p1 and p2, computed
    over their commonly rated items. Returns 0 when there is no overlap or
    when either sample is constant (no linear correlation exists then).
    '''
    common = [item for item in prefs[p1] if item in prefs[p2]]
    n = len(common)
    if n == 0:
        return 0
    x = np.array([prefs[p1][it] for it in common])
    y = np.array([prefs[p2][it] for it in common])
    # Population standard deviations (np.std's default, ddof=0).
    sx = np.std(x)
    sy = np.std(y)
    if sx == 0 or sy == 0:
        return 0
    # r = E[(x - mx)(y - my)] / (sx * sy), with the 1/n folded in.
    return ((x - np.mean(x)) * (y - np.mean(y))).sum() / (n * sx * sy)
def sim_RusselRao(prefs,person1,person2,normed=True):
    ''' Returns RusselRao similarity between 2 users: the number of
    components that are 1 for both persons, divided by N (the number of
    items of person1) unless normed=False, in which case the raw count is
    returned.
    NOTE(review): assumes person2 has every item person1 has (raises
    KeyError otherwise) — TODO confirm callers guarantee this.
    '''
    # Cleanup vs. original: removed an unused `si` dict and a commented-out
    # debug print; behavior is unchanged.
    commons = 0
    for item in prefs[person1]:
        if prefs[person1][item] == 1 and prefs[person2][item] == 1:
            commons += 1
    if not normed:
        return commons
    # * 1.0 forces float division on Python 2 as well.
    return commons * 1.0 / len(prefs[person1])
def topMatches(prefs, person, similarity):
    """Return every other person ranked by similarity to `person`.

    Returns a list of (name, score) tuples, most similar first.
    Compatibility fix: dict.iteritems() (Python-2-only) replaced with
    items(); the sorted result is identical.
    """
    sim = {}
    for candidate in prefs:
        if candidate == person:
            continue  # skip self-comparison
        sim[candidate] = similarity(prefs, person, candidate)
    return sorted(sim.items(), key=operator.itemgetter(1), reverse=True)
def getRecommendations(prefs, person, similarity):
    """Recommend unseen media for *person*.

    Scores every media that other users know but *person* does not, as a
    similarity-weighted average of the other users' ratings.

    :param prefs: mapping user -> {item: rating}
    :param person: user to build recommendations for
    :param similarity: callable ``similarity(prefs, a, b) -> score``
    :return: list of (media, predicted rating) tuples, best first
    """
    # Pre-compute the similarity of every other user to `person`.
    sim = {}
    for candidate in prefs:
        if candidate == person:
            continue
        sim[candidate] = similarity(prefs, person, candidate)
    kSums = {}
    unknownMedia = {}
    for candidate in prefs:
        # don't compare a person with itself
        if candidate == person:
            continue
        # if the correlation is negative the persons are too different.
        if sim[candidate] < 0:
            continue
        # for every media the candidate already knows ...
        for media in prefs[candidate]:
            # ... check that the person doesn't know it, too.
            if media not in prefs[person] or prefs[person][media] == 0:
                if media not in unknownMedia:
                    unknownMedia[media] = 0
                    kSums[media] = 0
                # Accumulate the similarity weight of this candidate ...
                kSums[media] += sim[candidate]
                # ... and the similarity-weighted rating.
                unknownMedia[media] += sim[candidate] * prefs[candidate][media]
    # Normalize each weighted sum by its total similarity weight.
    for media in unknownMedia:
        unknownMedia[media] = unknownMedia[media] / kSums[media]
    # items() replaces the Python-2-only iteritems(); `rankings` no longer
    # shadows the `list` builtin.
    rankings = sorted(unknownMedia.items(), key=lambda t: t[1], reverse=True)
    return rankings
def createLastfmUserDict(userNames):
    """Build a user -> {band: 0/1} preference dict from last.fm top artists.

    Every band appearing in any user's top 20 becomes a key; a user's own
    top bands are flagged 1, all others 0.
    """
    NUMBER_OF_BANDS = 20
    allBands = {}
    userDict = {}
    # Union of every user's top bands, all initialised to 0.
    for user in userNames:
        for band in user.get_top_artists()[:NUMBER_OF_BANDS]:
            allBands[str(band.item)] = 0  # .item holds the band name
    # Flag each user's own top bands with 1.
    for user in userNames:
        ownBands = set(str(band.item) for band in user.get_top_artists()[:NUMBER_OF_BANDS])
        profile = allBands.copy()
        for band in profile:
            if str(band) in ownBands:
                profile[band] = 1
        userDict[str(user)] = profile
    return userDict
def topMatches(prefs, person, similarity):
    """Rank all users by similarity to *person* (highest first).

    NOTE(review): this re-definition shadows the identical topMatches
    defined earlier in the module.

    :param prefs: mapping user -> {item: rating}
    :param person: user to compare against
    :param similarity: callable ``similarity(prefs, a, b) -> score``
    :return: list of (user, score) tuples sorted descending by score
    """
    sim = {}
    for candidate in prefs:
        # Skip self-comparison.
        if candidate == person:
            continue
        sim[candidate] = similarity(prefs, person, candidate)
    # Dropped an unused `list = []` local (it also shadowed the builtin);
    # items() replaces the Python-2-only iteritems().
    return sorted(sim.items(), key=operator.itemgetter(1), reverse=True)
def getAllProducts(prefs):
    """Return every distinct product rated by any person, in first-seen order."""
    seen = []
    for ratings in prefs.values():
        for product in ratings:
            if product not in seen:
                seen.append(product)
    return seen
def transposeMatrix(prefs):
    """Invert a person->product ratings matrix into product->person form."""
    transCritics = {}
    for product in getAllProducts(prefs):
        for person in prefs:
            # Copy only the ratings that actually exist for this person.
            if product in prefs[person]:
                transCritics.setdefault(product, {})
                transCritics[product][person] = prefs[person][product]
    return transCritics
def calculateSimilarItems(prefs, similarity):
    '''
    processes similarity between all movies and returns a dictionary.
    '''
    matrix = {}
    for item in prefs:
        # topMatches yields (name, score) pairs; dict() keeps them all.
        matrix[item] = dict(topMatches(prefs, item, similarity))
    return matrix
def getRecommendedItems(prefs, name, similar_items):
    """Item-based recommendations for user *name*.

    :param prefs: mapping user -> {item: rating}
    :param name: user to recommend for
    :param similar_items: precomputed {item: {other item: similarity}}
    :return: list of (item, predicted rating) tuples, best first
    """
    rated_items = prefs[name]
    unrated_items = []
    recommendations = {}
    transCritics = transposeMatrix(prefs)
    # Everything in the transposed matrix the user has not rated yet.
    for item in transCritics:
        if item not in rated_items:
            unrated_items.append(item)
    for unrated_item in unrated_items:
        sum_similarity = 0.0
        item_rated_similarity = 0.0
        for rated_item in rated_items:
            similarity = similar_items[rated_item][unrated_item]
            # Only positively correlated items contribute.
            if similarity > 0.0:
                sum_similarity += similarity
                item_rated_similarity += similarity * prefs[name][rated_item]
        if sum_similarity > 0.0:
            recommendations[unrated_item] = item_rated_similarity / sum_similarity
    # items() replaces the Python-2-only iteritems().
    return sorted(recommendations.items(), key=operator.itemgetter(1), reverse=True)
"repo_name": "stefanseibert/DataMining",
"path": "experiment02/movies/recommendations.py",
"copies": "2",
"size": "8805",
"license": "mit",
"hash": 1139695102216838400,
"line_mean": 34.9428571429,
"line_max": 128,
"alpha_frac": 0.6501987507,
"autogenerated": false,
"ratio": 3.53472501003613,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.518492376073613,
"avg_score": null,
"num_lines": null
} |
# A dictionary of movie critics and their ratings of a small
# set of movies
# Keys are critic names; values map movie title -> rating (1.0-5.0 scale).
critics={
    'Lisa Rose': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5,
    'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5,
    'The Night Listener': 3.0},
    'Gene Seymour': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5,
    'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0,
    'You, Me and Dupree': 3.5},
    'Michael Phillips': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0,
    'Superman Returns': 3.5, 'The Night Listener': 4.0},
    'Claudia Puig': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0,
    'The Night Listener': 4.5, 'Superman Returns': 4.0,
    'You, Me and Dupree': 2.5},
    'Mick LaSalle': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
    'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0,
    'You, Me and Dupree': 2.0},
    'Jack Matthews': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
    'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
    'Toby': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0,'Superman Returns':4.0}
}
from math import sqrt
# Returns a distance-based similarity score for person1 and person2
def sim_distance(prefs, person1, person2):
    """Euclidean-distance-based similarity in (0, 1]; 0 means no shared items."""
    # Get the list of shared_items
    si = {}
    for item in prefs[person1]:
        if item in prefs[person2]:
            si[item] = 1
    # if they have no ratings in common, return 0
    if len(si) == 0: return 0
    # Add up the squares of all the differences
    sum_of_squares = sum([pow(prefs[person1][item] - prefs[person2][item], 2)
                          for item in prefs[person1] if item in prefs[person2]])
    # Fixed syntax errors: the def line ended with ';' instead of ':', the
    # 'for'/'if' lines lacked colons, and the return was missing a ')'.
    return 1/(1+sqrt(sum_of_squares))
# Returns the Pearson correlation coefficient for p1 and p2
def sim_pearson(prefs, p1, p2):
    """Pearson correlation of two critics' ratings over their shared items."""
    # Get the list of mutually rated items
    si = {}
    for item in prefs[p1]:
        if item in prefs[p2]: si[item] = 1
    # Find the number of elements
    n = len(si)
    # if there are no ratings in common, return 0
    if n == 0: return 0
    # Add up all the preferences
    sum1 = sum([prefs[p1][it] for it in si])
    sum2 = sum([prefs[p2][it] for it in si])
    # Sum up the squares
    sum1Sq = sum([pow(prefs[p1][it], 2) for it in si])
    sum2Sq = sum([pow(prefs[p2][it], 2) for it in si])
    # Sum up the products
    pSum = sum([prefs[p1][it] * prefs[p2][it] for it in si])
    # Calculate Pearson score.  Fixed: the def line lacked ':' and several
    # statements carried stray C-style trailing semicolons.
    num = pSum - (sum1*sum2/n)
    den = sqrt((sum1Sq-pow(sum1,2)/n)*(sum2Sq-pow(sum2, 2)/n))
    if den == 0: return 0
    return num/den
# Returns the best matches person from the prefs dictionary
# Number of results and similarity function are optional params
def topMatches(prefs, person, n=5, similarity=sim_pearson):
    """Return the n people most similar to *person* as (score, name) pairs."""
    # Fixed: the list was assigned to `score` but sorted and returned as
    # `scores`, which raised NameError at runtime.
    scores = [(similarity(prefs, person, other), other)
              for other in prefs if other != person]
    # Sort the list so the highest scores appear at the top
    scores.sort()
    scores.reverse()
    return scores[0:n]
| {
"repo_name": "escray/PCI",
"path": "recommendations.py",
"copies": "1",
"size": "2895",
"license": "mit",
"hash": 8325683860180090000,
"line_mean": 31.8977272727,
"line_max": 81,
"alpha_frac": 0.6531951641,
"autogenerated": false,
"ratio": 2.7234242709313263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8876619435031325,
"avg_score": 0,
"num_lines": 88
} |
# A dictionary of movie critics and their ratings of a small
# set of movies
from math import sqrt
# Keys are critic names; values map movie title -> rating (1.0-5.0 scale).
critics = {'Lisa Rose': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5,
                         'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5,
                         'The Night Listener': 3.0},
           'Gene Seymour': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5,
                            'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0,
                            'You, Me and Dupree': 3.5},
           'Michael Phillips': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0,
                                'Superman Returns': 3.5, 'The Night Listener': 4.0},
           'Claudia Puig': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0,
                            'The Night Listener': 4.5, 'Superman Returns': 4.0,
                            'You, Me and Dupree': 2.5},
           'Mick LaSalle': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
                            'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0,
                            'You, Me and Dupree': 2.0},
           'Jack Matthews': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
                             'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
           'Toby': {'Snakes on a Plane': 4.5, 'You, Me and Dupree': 1.0, 'Superman Returns': 4.0}}
def sim_distance(prefs, person1, person2):
    """Similarity score from squared Euclidean distance: 1/(1 + d^2).

    Note this variant deliberately omits the square root, so identical
    tastes score 1 and the score decays with the squared distance.
    """
    # Items rated by both people.
    common = [item for item in prefs[person1] if item in prefs[person2]]
    if not common:
        return 0
    squared_distance = sum(pow(prefs[person1][item] - prefs[person2][item], 2)
                           for item in common)
    return 1 / (1 + squared_distance)
def sim_pearson(prefs, p1, p2):
    """Pearson correlation of p1 and p2 over their mutually rated items."""
    common = [item for item in prefs[p1] if item in prefs[p2]]
    n = len(common)
    # No overlap means no measurable correlation.
    if n == 0:
        return 0
    xs = [prefs[p1][item] for item in common]
    ys = [prefs[p2][item] for item in common]
    sum1, sum2 = sum(xs), sum(ys)
    sum1Sq = sum(pow(x, 2) for x in xs)
    sum2Sq = sum(pow(y, 2) for y in ys)
    pSum = sum(x * y for x, y in zip(xs, ys))
    # Covariance over the product of the individual deviations.
    num = pSum - (sum1 * sum2 / n)
    den = sqrt((sum1Sq - pow(sum1, 2) / n) * (sum2Sq - pow(sum2, 2) / n))
    if den == 0:
        return 0
    return num / den
def topMatches(prefs, person, n=5, similarity=sim_pearson):
    """Return the n critics most similar to *person* as (score, name) pairs."""
    scores = [(similarity(prefs, person, other), other)
              for other in prefs if other != person]
    # Highest score first.
    scores.sort(reverse=True)
    return scores[0:n]
def getRecommendations(prefs, person, similarity=sim_pearson):
    """Weighted-average recommendations of unseen items for *person*."""
    totals = {}
    simSums = {}
    for other in prefs:
        # Never compare a person with themselves.
        if other == person:
            continue
        sim = similarity(prefs, person, other)
        # Zero or negative correlations carry no useful signal.
        if sim <= 0:
            continue
        for item, rating in prefs[other].items():
            # Only score items the person hasn't rated yet.
            if item not in prefs[person] or prefs[person][item] == 0:
                totals[item] = totals.get(item, 0) + rating * sim
                simSums[item] = simSums.get(item, 0) + sim
    # Normalize each weighted total by the similarity mass behind it.
    rankings = [(total / simSums[item], item) for item, total in totals.items()]
    rankings.sort(reverse=True)
    return rankings
# Demo: print sample similarity scores for the critics data set.
print(sim_distance(critics, 'Lisa Rose', 'Gene Seymour'))
print(sim_pearson(critics, 'Lisa Rose', 'Gene Seymour'))
print(topMatches(critics, 'Lisa Rose', n=3)) | {
"repo_name": "przemek1990/machine-learning",
"path": "src/machine_learning/collaborative_filtering/recommendations.py",
"copies": "1",
"size": "3791",
"license": "apache-2.0",
"hash": -7556080987227400000,
"line_mean": 39.7741935484,
"line_max": 108,
"alpha_frac": 0.541018201,
"autogenerated": false,
"ratio": 3.1565362198168194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4197554420816819,
"avg_score": null,
"num_lines": null
} |
# A dictionary of movie critics and their ratings of a small
# set of movies
# Keys are critic names; values map movie title -> rating (1.0-5.0 scale).
critics={'Lisa Rose': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5,
 'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5,
 'The Night Listener': 3.0},
'Gene Seymour': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5,
 'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0,
 'You, Me and Dupree': 3.5},
'Michael Phillips': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0,
 'Superman Returns': 3.5, 'The Night Listener': 4.0},
'Claudia Puig': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0,
 'The Night Listener': 4.5, 'Superman Returns': 4.0,
 'You, Me and Dupree': 2.5},
'Mick LaSalle': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
 'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0,
 'You, Me and Dupree': 2.0},
'Jack Matthews': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
 'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
'Toby': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0,'Superman Returns':4.0}}
from math import sqrt
# Returns a distance-based similarity score for person1 and person2
def sim_distance(prefs,person1,person2):
    """Similarity in (0, 1]: the inverse of one plus the squared distance.

    Note: this variant does not take the square root of the sum.
    """
    # Items both people have rated.
    shared = [item for item in prefs[person1] if item in prefs[person2]]
    if not shared:
        return 0
    squared_distance = sum(pow(prefs[person1][item] - prefs[person2][item], 2)
                           for item in shared)
    return 1 / (1 + squared_distance)
# Returns the Pearson correlation coefficient for p1 and p2
def sim_pearson(prefs,p1,p2):
    """Pearson correlation of p1's and p2's ratings over their shared items."""
    shared = [item for item in prefs[p1] if item in prefs[p2]]
    n = len(shared)
    # No overlap means no measurable correlation.
    if n == 0:
        return 0
    xs = [prefs[p1][item] for item in shared]
    ys = [prefs[p2][item] for item in shared]
    sum1, sum2 = sum(xs), sum(ys)
    sum1Sq = sum(pow(x, 2) for x in xs)
    sum2Sq = sum(pow(y, 2) for y in ys)
    pSum = sum(x * y for x, y in zip(xs, ys))
    # r = covariance / product of standard deviations.
    num = pSum - (sum1 * sum2 / n)
    den = sqrt((sum1Sq - pow(sum1, 2) / n) * (sum2Sq - pow(sum2, 2) / n))
    if den == 0:
        return 0
    return num / den
# Returns the best matches for person from the prefs dictionary.
# Number of results and similarity function are optional params.
def topMatches(prefs,person,n=5,similarity=sim_pearson):
    """Return the n people most similar to *person* as (score, name) pairs."""
    scores = [(similarity(prefs, person, other), other)
              for other in prefs if other != person]
    # Highest score first.
    scores.sort(reverse=True)
    return scores[:n]
# Gets recommendations for a person by using a weighted average
# of every other user's rankings
def getRecommendations(prefs,person,similarity=sim_pearson):
    """Predict ratings for items *person* hasn't rated, best first."""
    totals = {}
    simSums = {}
    for other in prefs:
        # Never compare a person with themselves.
        if other == person:
            continue
        sim = similarity(prefs, person, other)
        # Zero or negative correlations carry no signal.
        if sim <= 0:
            continue
        for item in prefs[other]:
            # Only score items the person hasn't rated yet.
            if item not in prefs[person] or prefs[person][item] == 0:
                totals[item] = totals.get(item, 0) + prefs[other][item] * sim
                simSums[item] = simSums.get(item, 0) + sim
    # Normalize each weighted total by the similarity mass behind it.
    rankings = [(total / simSums[item], item) for item, total in totals.items()]
    rankings.sort(reverse=True)
    return rankings
def transformPrefs(prefs):
    """Swap the nesting: {person: {item: rating}} -> {item: {person: rating}}."""
    result = {}
    for person, ratings in prefs.items():
        for item, rating in ratings.items():
            # Flip item and person.
            result.setdefault(item, {})[person] = rating
    return result
def calculateSimilarItems(prefs,n=10):
    """Create a dictionary of items showing which other items each one
    is most similar to.

    :param prefs: mapping person -> {item: rating}
    :param n: number of similar items to keep per item
    :return: {item: [(similarity, other item), ...]}
    """
    result = {}
    # Invert the preference matrix to be item-centric.
    itemPrefs = transformPrefs(prefs)
    c = 0
    for item in itemPrefs:
        # Status updates for large datasets.  Parenthesized so the line is
        # valid on both Python 2 and 3 (it was a bare py2 print statement).
        c += 1
        if c % 100 == 0: print("%d / %d" % (c, len(itemPrefs)))
        # Find the most similar items to this one.
        result[item] = topMatches(itemPrefs, item, n=n, similarity=sim_distance)
    return result
def getRecommendedItems(prefs,itemMatch,user):
    """Item-based recommendations for *user*, best first.

    :param prefs: mapping person -> {item: rating}
    :param itemMatch: {item: [(similarity, other item), ...]} as produced
        by calculateSimilarItems
    :param user: person to recommend for
    :return: [(predicted rating, item), ...] sorted descending
    """
    userRatings = prefs[user]
    scores = {}
    totalSim = {}
    # Weight every similar-but-unrated item by the user's own ratings.
    for item, rating in userRatings.items():
        for similarity, item2 in itemMatch[item]:
            # Skip items the user has already rated.
            if item2 in userRatings:
                continue
            # Weighted sum of rating times similarity ...
            scores[item2] = scores.get(item2, 0) + similarity * rating
            # ... and the total similarity weight behind it.
            totalSim[item2] = totalSim.get(item2, 0) + similarity
    # Average = weighted sum divided by total weight.
    rankings = [(score / totalSim[item], item) for item, score in scores.items()]
    rankings.sort(reverse=True)
    return rankings
def loadMovieLens(path='/data/movielens'):
    """Load the MovieLens data set into a {user: {title: rating}} dict.

    :param path: directory containing the u.item and u.data files
    :return: mapping user id -> {movie title: rating}
    """
    # Movie id -> title lookup.  `with` closes the files deterministically
    # (previously they were left open for the garbage collector).
    movies = {}
    with open(path + '/u.item') as item_file:
        for line in item_file:
            (id, title) = line.split('|')[0:2]
            movies[id] = title
    # Per-user ratings keyed by movie title.
    prefs = {}
    with open(path + '/u.data') as data_file:
        for line in data_file:
            (user, movieid, rating, ts) = line.split('\t')
            prefs.setdefault(user, {})
            prefs[user][movies[movieid]] = float(rating)
    return prefs
| {
"repo_name": "YeEmrick/learning",
"path": "PCI/PCI_Code/chapter2/recommendations.py",
"copies": "5",
"size": "5856",
"license": "apache-2.0",
"hash": -4412271357255637500,
"line_mean": 30.5333333333,
"line_max": 82,
"alpha_frac": 0.6461748634,
"autogenerated": false,
"ratio": 3.1450053705692804,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.05357342624238911,
"num_lines": 180
} |
#A dictionary of movie critics and their ratings of a small set of movies.
#Keys are critic names; values map movie title -> rating (1.0-5.0 scale).
critics = {
    'Lisa Rose': {
        'Lady in the water': 2.5,
        'Snakes on a plane': 3.5,
        'Just my luck': 3.0,
        'Superman returns': 3.5,
        'You, me and dupree': 2.5,
        'The night listener': 3.0
    },
    'Gene Seymour': {
        'Lady in the water': 3.0,
        'Snakes on a plane': 3.5,
        'Just my luck': 1.5,
        'Superman returns': 5.0,
        'The night listener': 3.0,
        'You, me and dupree': 3.5
    },
    'Michael Phillips': {
        'Lady in the water': 2.5,
        'Snakes on a plane': 3.0,
        'Just my luck': 1.5,
        'Superman returns': 3.5,
        'The night listener': 4.0
    },
    'Claudia Puig': {
        'Snakes on a plane': 3.5,
        'Just my luck': 3.0,
        'Superman returns': 4.0,
        'You, me and dupree': 2.5,
        'The night listener': 4.5
    },
    'Mick LaSalle': {
        'Lady in the water': 3.0,
        'Snakes on a plane': 4.0,
        'Just my luck': 2.0,
        'Superman returns': 3.0,
        'You, me and dupree': 2.0,
        'The night listener': 3.0
    },
    'Jack Matthews': {
        'Lady in the water': 3.0,
        'Snakes on a plane': 4.0,
        'Superman returns': 5.0,
        'You, me and dupree': 3.5,
        'The night listener': 3.0
    },
    'Toby': {
        'Snakes on a plane': 4.5,
        'Superman returns': 4.0,
        'You, me and dupree': 1.0,
    }}
# Binary topic-following matrix: person name -> {topic: 0/1 flag}.
# NOTE(review): people have different topic sets; tanimotoScore normalizes
# missing topics to 0 in place before comparing.
topics = {
    'Argenti': {
        'Sci-fi': 1,
        'Action': 1,
        'Grotesque': 0,
        'Horror': 0,
        'Erotic': 1,
        'War': 0,
        'Gay': 0,
        'Drama': 0,
        'Crime': 0,
        'Classics': 0,
        'Thriller': 1
    },
    'Trulli': {
        'Sci-fi': 1,
        'Action': 1,
        'Grotesque': 0,
        'Horror': 0,
        'Erotic': 0,
        'War': 1,
        'Gay': 0,
        'Drama': 0,
        'Crime': 1,
        'Classics': 0,
        'Thriller': 0,
        'Historic': 0
    },
    'Cazzaniga': {
        'Sci-fi': 0,
        'Grotesque': 1,
        'Horror': 1,
        'Erotic': 0,
        'War': 0,
        'Gay': 1,
        'Crime': 0,
        'Classics': 0,
        'Thriller': 0,
        'Porn':1,
        'Historic': 1
    },
    'Albini': {
        'Sci-fi': 1,
        'Action': 1,
        'Grotesque': 0,
        'Horror': 1,
        'War': 1,
        'Gay': 1,
        'Drama': 0,
        'Crime': 1,
        'Classics': 0,
        'Thriller': 0,
        'Porn': 1,
        'Historic': 1
    }
}
from math import sqrt
def euclideanDistance(prefs, person1, person2):
    """Return a Euclidean-distance-based similarity score for two people.

    Only movies rated by both people contribute.  The raw distance d over
    the shared ratings is mapped to 1/(1+d), so identical tastes give 1.

    :param prefs:
        Holds in key person name and in value subkey movie name and in subvalue rating information, dict.
    :param person1:
        Name of the first person, str.
    :param person2:
        Name of the second person, str.
    Return similarity score; 0 when there are no shared ratings.
    """
    shared = [item for item in prefs[person1] if item in prefs[person2]]
    if not shared:
        return 0
    squared_diffs = sum(pow(prefs[person1][item] - prefs[person2][item], 2)
                        for item in shared)
    return 1 / (1 + sqrt(squared_diffs))
def pearsonCorrelation(prefs, p1, p2):
    """Return the Pearson correlation between two people's shared ratings.

    Corrects for non-normalized data by centring on each person's mean.

    :param prefs:
        Holds in key person name and in value subkey movie name and in subvalue rating information, dict.
    :param p1:
        Name of the first person, str.
    :param p2:
        Name of the second person, str.
    Return correlation in [-1, 1]; 0 when no items are shared or the
    denominator would be zero.
    """
    shared = [item for item in prefs[p1] if item in prefs[p2]]
    n = len(shared)
    if n == 0:
        return 0
    xs = [prefs[p1][item] for item in shared]
    ys = [prefs[p2][item] for item in shared]
    sum_x, sum_y = sum(xs), sum(ys)
    sum_x_sq = sum(pow(x, 2) for x in xs)
    sum_y_sq = sum(pow(y, 2) for y in ys)
    product_sum = sum(x * y for x, y in zip(xs, ys))
    # How much the variables change together, divided by the product of
    # how much they vary individually.
    num = product_sum - (sum_x * sum_y / n)
    den = sqrt((sum_x_sq - pow(sum_x, 2) / n) * (sum_y_sq - pow(sum_y, 2) / n))
    if den == 0:
        return 0
    return num / den
def topMatches(prefs, person, n=5, similarity = pearsonCorrelation):
    """Return the n critics most similar to *person*, best first.

    :param prefs:
        Holds in key person name and in value subkey movie name and in subvalue rating information, dict.
    :param person:
        Name to compare other critics to, string.
    :param n:
        Number of items in list to return, int.
    :param similarity:
        Which similarity function to use when comparing items in prefs, function.
    Return list of (score, name) tuples, highest score at the top.
    """
    scores = [(similarity(prefs, person, other), other)
              for other in prefs if other != person]
    # Highest score first.
    scores.sort(reverse=True)
    return scores[:n]
def getRecommendations(prefs, person, similarity = pearsonCorrelation):
    """Compute the videos *person* is most likely to enjoy.

    Each unseen item is scored by the similarity-weighted average of other
    critics' ratings, normalized by the sum of the similarities.

    :param prefs:
        Holds in key person name and in value subkey movie name and in subvalue rating information, dict.
    :param person:
        Name to compare other critics to, string.
    :param similarity:
        Which similarity function to use when comparing items in prefs, function.
    Return list of (predicted rating, item) tuples, best first.
    """
    totals = {}
    simSums = {}
    for other in prefs:
        # Never compare a person with themselves.
        if other == person:
            continue
        sim = similarity(prefs, person, other)
        # Zero or negative similarity carries no signal.
        if sim <= 0:
            continue
        for item in prefs[other]:
            # Only consider movies the person hasn't seen.
            if item not in prefs[person] or prefs[person][item] == 0:
                # similarity-to-me * the other person's rating
                totals[item] = totals.get(item, 0) + prefs[other][item] * sim
                simSums[item] = simSums.get(item, 0) + sim
    # Normalized list of (score, video name) tuples, best first.
    rankings = [(total / simSums[item], item) for item, total in totals.items()]
    rankings.sort(reverse=True)
    return rankings
def transformPrefs(prefs):
    """Transpose prefs so items become the outer keys.

    Lets the same similarity machinery find items similar to an item,
    exactly as it finds critics similar to a critic.

    :param prefs:
        Holds in key person name and in value subkey movie name and in subvalue rating information, dict.
    Return dict with transposed items and names.
    """
    result = {}
    for person, ratings in prefs.items():
        for item, rating in ratings.items():
            result.setdefault(item, {})[person] = rating
    return result
def calculateSimilarItems(prefs, n=10):
    """Build {item: [(similarity, other item), ...]} for every item.

    Item-based collaborative filtering: the preference matrix is inverted
    so items become the outer keys, then every item is matched against the
    rest.

    :param prefs:
        Holds in key person name and in value subkey movie name and in subvalue rating information, dict.
    :param n:
        Number of items in list to return, int.
    Return dict with item in key and most similar items in value.
    """
    result = {}
    # Invert preference matrix to put item in key.
    itemPrefs = transformPrefs(prefs)
    c = 0
    for item in itemPrefs:
        c += 1
        # Progress output for large datasets.  Parenthesized so the line
        # parses on both Python 2 and 3 (it was a bare py2 print statement).
        if c % 100 == 0: print("%d / %d" % (c, len(itemPrefs)))
        # Find the most similar items to the one being considered.
        result[item] = topMatches(itemPrefs, item, n=n, similarity=pearsonCorrelation)
    return result
def getRecommendedItems(prefs, itemMatch, user):
    """Generate item-based recommendations for *user*.

    For each item the user rated, every similar-but-unrated item receives
    the user's rating weighted by the similarity; each total is then
    divided by the accumulated similarity weight to get an average.

    :param prefs:
        Holds in key person name and in value subkey movie name and in subvalue rating information, dict.
    :param itemMatch:
        Item name with related (similarity, item) pairs in descending order, dict.
    :param user:
        Name of user to generate recommendations for, string.
    Return list of (likelihood of enjoying, item name) tuples, best first.
    """
    userRatings = prefs[user]
    scores = {}
    totalSim = {}
    # Loop over items rated by this user.
    for item, rating in userRatings.items():
        # Loop over items similar to this one.
        for similarity, item2 in itemMatch[item]:
            # Ignore items the user has already rated.
            if item2 in userRatings:
                continue
            # Weighted sum of rating times similarity ...
            scores[item2] = scores.get(item2, 0) + similarity * rating
            # ... and the sum of all the similarities.
            totalSim[item2] = totalSim.get(item2, 0) + similarity
    # Divide each total by its total weighting to get an average.
    rankings = [(score / totalSim[item], item) for item, score in scores.items()]
    rankings.sort(reverse=True)
    return rankings
def loadMovieLens(path='/home/legionovainvicta/app/collective/ch2/data/movielens'):
    """Load the MovieLens.org data set for recommendation generation.

    :param path:
        Location of dataset, string.
    Return dict with user info and rated movies ({user: {title: rating}}).
    """
    # Movie id -> title lookup.  `with` closes the files deterministically
    # (previously they were left open for the garbage collector).
    movies = {}
    with open(path + '/u.item') as item_file:
        for line in item_file:
            (id, title) = line.split('|')[0:2]
            movies[id] = title
    # Per-user ratings keyed by movie title.
    prefs = {}
    with open(path + '/u.data') as data_file:
        for line in data_file:
            (user, movieid, rating, ts) = line.split('\t')
            prefs.setdefault(user, {})
            prefs[user][movies[movieid]] = float(rating)
    return prefs
def tanimotoScore(prefs, refTopic, otherTopic):
    """Return the Tanimoto (Jaccard) score between two topics.

    Topics are compared by which people follow them (binary 0/1 flags).

    NOTE: normalizes *prefs* in place — every person gains a 0 entry for
    any topic they were missing, so all vectors have the same components.

    Removed an unused `from pprint import pprint` and the dead
    commented-out debug call that used it.

    :param prefs:
        Input dict mapping person -> {topic: 0/1 flag}; mutated in place.
    :param refTopic:
        Original topic to compare similar topics to based on whether people follow that topic or not, string.
    :param otherTopic:
        Other topic to compare similarity with reference topic, string.
    Return float with Tanimoto score; 0 when nobody follows both topics.
    """
    a = b = c = 0
    # Collect the union of all topics seen across every person.
    allTopics = []
    for person in prefs.values():
        for topic in person:
            if topic not in allTopics:
                allTopics.append(topic)
    # Make sure all per-person matrices have the same components.
    for t in allTopics:
        for person in prefs:
            if t not in prefs[person]:
                prefs[person][t] = 0
    # a, b: per-topic follower counts; c: followers of both topics
    # (bitwise AND works because the flags are 0/1 integers).
    for person in prefs:
        c += prefs[person][refTopic] & prefs[person][otherTopic]
        a += prefs[person][refTopic]
        b += prefs[person][otherTopic]
    if not c:
        return 0
    return float(c)/(a+b-c)
"repo_name": "andrea-f/Python-Collective-Intelligence-Examples",
"path": "ch2/recommendations.py",
"copies": "1",
"size": "12124",
"license": "bsd-3-clause",
"hash": 5957068341949410000,
"line_mean": 31.5067024129,
"line_max": 136,
"alpha_frac": 0.6567964368,
"autogenerated": false,
"ratio": 3.5111497248769186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9564564238818534,
"avg_score": 0.020676384571677118,
"num_lines": 373
} |
"""A dictionary that flattens a nested key-value-mapping-and-list
structure into a store that does not support nesting.
"""
import collections
import logging
from . import python_copy
from .flatpath import FlatPath
from . import flatpath
from .data import JSON_VALUE_TYPES
from . import treeutils
LOGGER = logging.getLogger('jsdb.flatdict')
ASCII_TOP = '\xff'
class JsonFlatteningDict(collections.MutableMapping):
"Flatten nested list and dictionaries down to a string to value mapping"
# The format of the keys is
# path := dot (dict_key (path | equals | pound))? | bracket (list_index (path | equals | pound)) |
# list_index := integer right_bracket
# dict_key := " dict_key_string "
# `equals` is the string "="
# `bracket` is the string "["
# `dict_key_string` python escape string
# Example
# ."hello"[0]."world"=
# stores the value of d["hello"][0]["world"]
# ."hello"#
# stores the the length of the dictionary or list d["hello"]
# ."hello".
# indicates that d["hello"] is a dictionary (possibly empty)
# ."hello"[
# indicates that d["hello"] is a list (possibly empty)
# We must enforce things like:
# not having more than precisely value, list, or dictionary path for the same prefix
def __repr__(self):
return '<JsonFlatteningDict path={!r}>'.format(self._prefix)
def __init__(self, underlying, prefix=''):
self._prefix = prefix
self._path = FlatPath(prefix)
self._underlying = underlying
self._flat_store = FlatteningStore(self._underlying)
def __getitem__(self, key):
if not isinstance(key, str):
raise ValueError(key)
item_prefix = self._path.dict().lookup(key).key()
return self._flat_store.lookup(item_prefix)
def __len__(self):
return self._underlying.get(self._path.length().key(), 0)
def _set_length(self, value):
self._underlying[self._path.length().key()] = value
def __iter__(self):
key_after = treeutils.key_after_func(self._underlying)
if key_after:
return self._key_after_iter(key_after)
else:
return self._bad_depth_scaling_iter()
def _bad_depth_scaling_iter(self):
# If we can't do ordering based queries on keys
# then iteration is O(total nodes)
for k in self._underlying:
if self._is_child_key(k):
child_path = FlatPath(k)
yield child_path.prefix().key_string()
def _is_child_key(self, key):
child_path = FlatPath(key)
try:
return child_path.prefix().parent().key() == self._prefix
except flatpath.RootNode:
return False
def _key_after_iter(self, key_after):
# If we can do ordering-based lookups (and hence
# prefix-based) queries efficiently then
# iter becomes a lot more efficient
# Commence literate programming! (Too complicated to
# be understood with code alone)
# We start with something like "a"
# We want to find something like "a"."b"
# but not "a". or "a"#
# So we search for things after "a".
# the result found is guaranteed to be a child
# because "a"."b". and "a"."b"[ precede
# their descendants
try:
found_key = key_after(self._path.dict().key())
child_path = FlatPath(found_key)
except KeyError:
return
last_yielded = None # keys have to be strings
while True:
if not child_path.key().startswith(self._path.dict().key()):
break
yielded = child_path.prefix().key_string()
if yielded != last_yielded:
yield yielded
last_yielded = yielded
# We have something like "a"."b". or "a"."b"[ or "a"."b"=
# We want to skip over all the children
# so we want to look for "a"."b".TOP "a"."b"[TOP or "a"."b"=
try:
# this is a child because the type string always precedes it's children
child_key = key_after(child_path.key() + ASCII_TOP)
child_path = FlatPath(child_key)
except KeyError:
break
def __delitem__(self, key):
if key not in self:
raise KeyError(key)
else:
self._flat_store.purge_prefix(self._path.dict().lookup(key).key())
self._set_length(len(self) - 1)
def __setitem__(self, key, value):
    """Store *value* under *key*, flattening nested dicts and lists.

    Keys must be byte strings; unicode keys are ASCII-encoded first
    (this is Python 2 era code -- note the `unicode` check below).
    Raises ValueError for non-string keys or unstorable value types.
    """
    #LOGGER.debug('%r: Setting %r -> %r', self, key, value)
    if isinstance(key, unicode):
        key = key.encode('ascii')
    if not isinstance(key, str):
        raise ValueError(key)
    # Special case: no-op self assignment. e.g. d["a"] = d["a"]
    if isinstance(value, (JsonFlatteningDict, JsonFlatteningList)):
        if self._path.dict().lookup(key).key() == value._path.key():
            return
    # Deepcopy first to allow assignment from within
    # ourselves. e.g. d["a"] = d["a"]["child"]
    if isinstance(value, (collections.Sequence, collections.Mapping)):
        value = python_copy.copy(value)
    if isinstance(value, JSON_VALUE_TYPES):
        # Plain JSON scalar: store it directly under the value ("=") key.
        self.pop(key, None)
        flat_key = self._path.dict().lookup(key).value().key()
        self._underlying[flat_key] = value
        self._set_length(len(self) + 1)
    elif isinstance(value, (dict, collections.MutableMapping)):
        # Mark the child as a dict node, then copy entries in through
        # the freshly created wrapper so they are flattened recursively.
        self.pop(key, None)
        base_path = self._path.dict().lookup(key)
        self._underlying[base_path.dict().key()] = True
        self._set_length(len(self) + 1)
        dict_store = self[key]
        for dict_key in list(value):
            dict_store[dict_key] = value[dict_key]
    elif isinstance(value, (list, collections.MutableSequence)):
        # Mark the child as a list node, then append items through the wrapper.
        self.pop(key, None)
        base_path = self._path.dict().lookup(key)
        self._underlying[base_path.list().key()] = True
        self._set_length(len(self) + 1)
        list_store = self[key]
        for item in list(value):
            list_store.append(item)
    else:
        raise ValueError(value)
def copy(self):
    """Return a shallow plain-dict snapshot of this mapping."""
    snapshot = {}
    for key in self.keys():
        snapshot[key] = self[key]
    return snapshot
class JsonFlatteningList(collections.MutableSequence):
    """List facade over a flat string-keyed store.

    Each element lives under a flat key derived from *prefix*; nested
    dicts and lists are themselves stored flattened and surface as
    JsonFlatteningDict / JsonFlatteningList wrappers.
    """
    def __init__(self, underlying, prefix):
        # underlying: the flat key -> value store shared by the whole tree.
        # prefix: flat-path key string identifying this list node.
        self._prefix = prefix
        self._underlying = underlying
        self._flat_store = FlatteningStore(self._underlying)
        self._path = FlatPath(prefix)
    def __repr__(self):
        return '<JsonFlatteningList path={!r}>'.format(self._prefix)
    def __getitem__(self, index):
        # Normalise negative indexes, then defer to the flat-store lookup.
        index = self._simplify_index(index)
        return self._getitem(index)
    def __len__(self):
        # The length is persisted in the store; a missing key means empty.
        return self._underlying.get(self._path.length().key(), 0)
    def _set_length(self, value):
        # Persist the list length alongside the data.
        self._underlying[self._path.length().key()] = value
    def _simplify_index(self, index):
        """Map a possibly-negative index onto the list, or raise IndexError."""
        # NOTE(review): an index below -length fails the first test but
        # still satisfies `index < len(self)`, so it is returned
        # unchanged (negative); _getitem then only raises IndexError if
        # the store lookup misses -- confirm this is intended.
        length = len(self)
        if -length <= index < 0:
            return len(self) + index
        elif index < len(self):
            return index
        else:
            raise IndexError(index)
    def _getitem(self, index):
        item_prefix = self._path.list().index(index)
        try:
            return self._flat_store.lookup(item_prefix.key())
        except KeyError:
            # A missing flat key means the slot does not exist.
            raise IndexError(index)
    def __setitem__(self, index, value):
        # special case no-op self: assignment
        # a[1] = a[1]
        if isinstance(value, (JsonFlatteningDict, JsonFlatteningList)):
            if self._path.list().index(index).key() == value._path.key():
                return
        # Copy first so assignment from within ourselves is safe.
        if isinstance(value, (collections.Sequence, collections.Mapping)):
            value = python_copy.copy(value)
        if isinstance(index, slice):
            if index.start == index.stop == index.step == None:
                # Support complete reassignment (l[:] = values):
                # wipe the whole subtree, then refill from scratch.
                self._flat_store.purge_prefix(self._path.list().key())
                self._underlying[self._path.list().key()] = True
                self._set_length(0)
                for item in value:
                    self.append(item)
            else:
                # Partial / stepped slice assignment is not supported.
                raise NotImplementedError()
        else:
            self._set_item(index, value)
    def _set_item(self, index, value, check_index=True):
        """Write *value* at *index*.

        check_index=False allows writing one slot past the end, which
        insert() relies on while shifting elements upwards.
        """
        if check_index:
            if not 0 <= index < len(self):
                raise IndexError('assignment out of range')
        # Deepcopy first to allow assignment from within
        # ourselves. e.g. d["a"] = d["a"]["child"]
        if isinstance(value, (collections.Sequence, collections.Mapping)):
            value = python_copy.copy(value)
        # Drop whatever was stored at this slot before rewriting it.
        self._flat_store.purge_prefix(self._path.list().index(index).key())
        if isinstance(value, JSON_VALUE_TYPES):
            self._underlying[self._path.list().index(index).value().key()] = value
        elif isinstance(value, dict):
            # Mark the slot as a dict node, then fill it via the wrapper.
            dict_key = self._path.list().index(index)
            self._underlying[dict_key.dict().key()] = True
            nested_dict = self[index]
            for key, nested_value in list(value.items()):
                nested_dict[key] = nested_value
        elif isinstance(value, list):
            # Mark the slot as a list node, then fill it via the wrapper.
            list_key = self._path.list().index(index)
            self._underlying[list_key.list().key()] = True
            nested_list = self[index]
            for nested_value in value:
                nested_list.append(nested_value)
        else:
            raise ValueError(value)
    def __delitem__(self, index):
        index = self._simplify_index(index)
        length = len(self)
        if not 0 <= index < length:
            raise IndexError(index)
        # Shift every later element down one slot, then drop the last.
        for i in range(length - 1):
            if i < index:
                continue
            else:
                self[i] = self[i + 1]
        self._flat_store.purge_prefix(self._path.list().index(length - 1).key())
        self._set_length(length - 1)
    def insert(self, pos, value):
        """Insert *value* at *pos*, shifting later elements upwards."""
        # We need to do our own value shifting
        inserted_value = value
        length = len(self)
        # Copy upwards to avoid temporary values
        self._set_length(length + 1)
        for i in range(length, pos, -1):
            self._set_item(i, self[i - 1], check_index=False)
        self._set_item(pos, inserted_value, check_index=False)
class FlatteningStore(object):
    """Resolve flat-path prefixes in the underlying store to plain values
    or JsonFlattening* wrappers, and bulk-delete whole prefixes."""
    def __init__(self, underlying):
        self._underlying = underlying
    def lookup(self, item_prefix):
        """Look up *item_prefix* in the flat store.

        Returns the raw value for terminal entries, or a
        JsonFlatteningDict / JsonFlatteningList wrapper for container
        entries. Raises KeyError or IndexError (depending on the parent
        container type) when nothing is stored at the prefix.
        """
        item_path = FlatPath(item_prefix)
        # Exactly one of these type markers should exist for a stored node.
        has_terminal_key = self._has_terminal_key(item_prefix)
        has_dict_key = self._has_dict_key(item_prefix)
        has_list_key = self._has_list_key(item_prefix)
        if len([x for x in (has_terminal_key, has_dict_key, has_list_key) if x]) > 1:
            # Corrupt store: the same prefix claims several node types.
            key_types = (
                (['terminal'] if has_terminal_key else []) +
                (['dict'] if has_dict_key else []) +
                (['list'] if has_list_key else []))
            raise Exception("{!r} has duplicate key types {!r}".format(item_prefix, key_types))
        if has_terminal_key:
            return self._underlying[item_path.value().key()]
        elif has_dict_key:
            return JsonFlatteningDict(self._underlying, prefix=item_path.key())
        elif has_list_key:
            return JsonFlatteningList(self._underlying, prefix=item_path.key())
        else:
            # Nothing stored here: raise the error appropriate to the
            # enclosing container (dict -> KeyError, list -> IndexError).
            item_type = item_path.prefix().path_type()
            if isinstance(item_type, flatpath.DictPrefixPath):
                raise KeyError(item_path.prefix().key_string())
            elif isinstance(item_type, flatpath.ListPrefixPath):
                raise IndexError(item_path.prefix().index_number())
            else:
                raise ValueError(item_type)
    def _has_dict_key(self, item_prefix):
        # "prefix." marks a dict node.
        return item_prefix + "." in self._underlying
    def _has_list_key(self, item_prefix):
        # "prefix[" marks a list node.
        return item_prefix + "[" in self._underlying
    def _has_terminal_key(self, item_prefix):
        # "prefix=" marks a terminal (plain JSON value) node.
        return item_prefix + "=" in self._underlying
    def purge_prefix(self, prefix):
        "Remove everything in the store that starts with this prefix"
        try:
            key_after = treeutils.key_after_func(self._underlying)
        except KeyError:
            return
        if key_after:
            self._key_after_purge_prefix(key_after, prefix)
        else:
            self._inefficient_purge_prefix(prefix)
    def _key_after_purge_prefix(self, key_after, prefix):
        # Ordered deletion: O(deleted keys) rather than O(total keys).
        if prefix in self._underlying:
            del self._underlying[prefix]
        while True:
            try:
                key = key_after(prefix)
            except KeyError:
                break
            if not key.startswith(prefix):
                break
            else:
                del self._underlying[key]
    def _inefficient_purge_prefix(self, prefix):
        # Fallback full scan for stores without ordered lookups.
        for key in list(self._underlying):
            if key.startswith(prefix):
                del self._underlying[key]
| {
"repo_name": "talwrii/jsdb",
"path": "jsdb/flatdict.py",
"copies": "1",
"size": "13294",
"license": "bsd-2-clause",
"hash": -3928607149466715000,
"line_mean": 33.7101827676,
"line_max": 104,
"alpha_frac": 0.5657439446,
"autogenerated": false,
"ratio": 3.971915147893636,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5037659092493636,
"avg_score": null,
"num_lines": null
} |
"""A dict like object.
Implementation from http://benlast.livejournal.com/12301.html with unnecessary
zope security flag removed.
"""
class Structobject:
    """A 'bag' with keyword initialization, dict-semantics emulation and key
    iteration.

    Only keys set explicitly (via the constructor or item assignment) are
    reported by :meth:`keys` and iteration, in insertion order; plain
    ``setattr`` calls are not tracked.
    """
    def __init__(self, **kw):
        """Initialize, and set attributes from all keyword arguments."""
        self.__members = []
        for k in list(kw.keys()):
            setattr(self, k, kw[k])
            self.__remember(k)
    def __remember(self, k):
        """Add k to the list of explicitly set values (at most once)."""
        if k not in self.__members:
            self.__members.append(k)
    def __getitem__(self, key):
        """Equivalent of dict access by key.

        :raises KeyError: if no value was set for *key*.
        """
        try:
            return getattr(self, key)
        except AttributeError as attrerr:
            raise KeyError(key) from attrerr
    def __setitem__(self, key, value):
        """Equivalent of dict item assignment; records *key* for iteration."""
        setattr(self, key, value)
        self.__remember(key)
    def has_key(self, key):
        """Whether this Structobject contains a value for the given key.

        :rtype: bool
        """
        return hasattr(self, key)
    def keys(self):
        """All keys this Structobject has values for.

        :rtype: list
        """
        return self.__members
    def iterkeys(self):
        """All keys this Structobject has values for.

        :rtype: list
        """
        return self.__members
    def __iter__(self):
        return iter(self.__members)
    def __str__(self):
        """Describe those attributes explicitly set, e.g. ``a: 1, b: 'x'``."""
        string = ""
        for member in self.__members:
            value = getattr(self, member)
            if string:
                string += ", "
            # BUG FIX: the original used the malformed format string
            # "%string: %string", which rendered e.g. "atring: 1tring"
            # instead of "a: 1".
            string += "%s: %s" % (member, repr(value))
        return string
| {
"repo_name": "tyrylu/pyfmodex",
"path": "pyfmodex/structobject.py",
"copies": "1",
"size": "1806",
"license": "mit",
"hash": -2678806814291471000,
"line_mean": 25.5588235294,
"line_max": 78,
"alpha_frac": 0.5542635659,
"autogenerated": false,
"ratio": 4.330935251798561,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5385198817698561,
"avg_score": null,
"num_lines": null
} |
# A dict of Rosetta score types generated from ScoreType.hh.
# Each value is a 3-tuple: (English description, comment taken from ScoreType.hh - not necessarily meaningful, list of weights files the term appears in).
score_types = {
"fa_atr" : ("Lennard-Jones attractive between atoms in different residues", "enumeration starts at 1 for indexing utility::vector1", ['standard_weights']),
"fa_rep" : ("Lennard-Jones repulsive between atoms in different residues", None, ['standard_weights']),
"fa_sol" : ("Lazaridis-Jarplus solvation energy", None, ['standard_weights']),
"fa_intra_atr" : (None, None, []),
"fa_intra_rep" : ("Lennard-Jones repulsive between atoms in the same residue", None, ['standard_weights']),
"fa_intra_sol" : (None, None, []),
"lk_hack" : (None, None, []),
"lk_ball" : (None, None, []),
"lk_ball_iso" : (None, None, []),
"coarse_fa_atr" : (None, None, []),
"coarse_fa_rep" : (None, None, []),
"coarse_fa_sol" : (None, None, []),
"coarse_beadlj" : (None, None, []),
"mm_lj_intra_rep" : (None, None, []),
"mm_lj_intra_atr" : (None, None, []),
"mm_lj_inter_rep" : (None, None, []),
"mm_lj_inter_atr" : (None, None, []),
"mm_twist" : (None, "could be lr 2benergy and not in energy graph", []),
"mm_bend" : ("Deviation of bond angles from the mean", "could be lr 2benergy and not in energy graph", []),
"mm_stretch" : (None, "could be lr 2benergy and not in energy graph", []),
"lk_costheta" : (None, None, []),
"lk_polar" : (None, None, []),
"lk_nonpolar" : (None, "Lazaridis-Karplus solvation energy, over nonpolar atoms", []),
"hack_elec" : (None, None, []),
"fa_elec" : ("Coulombic electrostatic potential with a distance-dependant dielectric", None, []),
"dslf_fa13" : ("Disulfide geometry potential", None, []),
"hack_elec_bb_bb" : (None, None, []),
"hack_elec_bb_sc" : (None, None, []),
"hack_elec_sc_sc" : (None, None, []),
"h2o_hbond" : (None, None, []),
"dna_dr" : (None, None, []),
"dna_bp" : (None, None, []),
"dna_bs" : (None, None, []),
"peptide_bond" : (None, None, []),
"pcs" : (None, "Pseudocontact Shift Energy", []),
"pcs2" : (None, "Pseudocontact Shift Energy version 2. Will replace pcs end of 2010", []),
"fastsaxs" : (None, "fastsaxs agreement using formulation of Stovgaard et al (BMC Bioinf. 2010)", []),
"saxs_score" : (None, "centroid saxs asessment", []),
"saxs_cen_score" : (None, None, []),
"saxs_fa_score" : (None, "full-atom SAXS score", []),
"pddf_score" : (None, "score based on pairwise distance distribution function", []),
"fa_mbenv" : (None, "depth dependent reference term", []),
"fa_mbsolv" : (None, "burial+depth dependent term", []),
"hack_elec_rna_phos_phos" : (None, "Simple electrostatic repulsion term between phosphates", []),
"hack_elec_rna_phos_sugr" : (None, None, []),
"hack_elec_rna_phos_base" : (None, None, []),
"hack_elec_rna_sugr_sugr" : (None, None, []),
"hack_elec_rna_sugr_base" : (None, None, []),
"hack_elec_rna_base_base" : (None, None, []),
"hack_elec_aro_aro" : (None, None, []),
"hack_elec_aro_all" : (None, None, []),
"hack_aro" : (None, None, []),
"rna_fa_atr_base" : (None, None, []),
"rna_fa_rep_base" : (None, None, []),
"rna_data_backbone" : (None, "Using chemical accessibility data for RNA.", []),
"ch_bond" : (None, "Carbon hydrogen bonds", []),
"ch_bond_bb_bb" : (None, None, []),
"ch_bond_sc_sc" : (None, None, []),
"ch_bond_bb_sc" : (None, None, []),
"pro_close" : ("Proline ring closure energy", None, ['standard_weights']),
"rama2b" : (None, None, []),
"vdw" : (None, "centroid", []),
"cenpack" : (None, "centroid", []),
"cenpack_smooth" : (None, "fpd smooth cenpack", []),
"cen_hb" : (None, "fpd centroid bb hbonding", []),
"hybrid_vdw" : (None, "hybrid centroid+fa", []),
"rna_vdw" : (None, "low res clash check for RNA", []),
"rna_base_backbone" : (None, "Bases to 2'-OH, phosphates, etc.", []),
"rna_backbone_backbone" : (None, "2'-OH to 2'-OH, phosphates, etc.", []),
"rna_repulsive" : (None, "mainly phosphate-phosphate repulsion", []),
"rna_base_pair_pairwise" : (None, "Base-base interactions (Watson-Crick and non-Watson-Crick)", []),
"rna_base_axis_pairwise" : (None, "Force base normals to be parallel", []),
"rna_base_stagger_pairwise" : (None, "Force base pairs to be in same plane.", []),
"rna_base_stack_pairwise" : (None, "Stacking interactions", []),
"rna_base_stack_axis_pairwise" : (None, "Stacking interactions should involve parallel bases.", []),
"rna_data_base" : (None, "Using chemical accessibility data for RNA.", []),
"rna_base_pair" : (None, "Base-base interactions (Watson-Crick and non-Watson-Crick)", []),
"rna_base_axis" : (None, "Force base normals to be parallel", []),
"rna_base_stagger" : (None, "Force base pairs to be in same plane.", []),
"rna_base_stack" : (None, "Stacking interactions", []),
"rna_base_stack_axis" : (None, "Stacking interactions should involve parallel bases.", []),
"rna_torsion" : (None, "RNA torsional potential.", []),
"rna_sugar_close" : (None, "constraints to keep RNA sugar closed, and with reasonably ideal geometry", []),
"fa_stack" : (None, "stacking interaction modeled as pairwise atom-atom interactions", []),
"fa_stack_aro" : (None, None, []),
"fa_intra_RNA_base_phos_atr" : (None, "RNA specific score term", []),
"fa_intra_RNA_base_phos_rep" : (None, "RNA specific score term", []),
"fa_intra_RNA_base_phos_sol" : (None, "RNA specific score term", []),
"lk_polar_intra_RNA" : (None, "RNA specific score term", []),
"lk_nonpolar_intra_RNA" : (None, "RNA specific score term", []),
"hbond_intra" : (None, "Currently effects only RNA", []),
"geom_sol_intra_RNA" : (None, "RNA specific score term", []),
"CI_geom_sol" : (None, "Context independent version. Currently tested only for RNA case.", []),
"CI_geom_sol_intra_RNA" : (None, "RNA specific score term", []),
"fa_cust_pair_dist" : (None, "custom short range 2b", []),
"custom_atom_pair" : (None, None, []),
"orbitals_hpol" : (None, None, []),
"orbitals_haro" : (None, None, []),
"orbitals_orbitals" : (None, None, []),
"orbitals_hpol_bb" : (None, None, []),
"PyRosettaTwoBodyContextIndepenedentEnergy_first" : (None, None, []),
"PyRosettaTwoBodyContextIndepenedentEnergy_last" : (None, None, []),
"python" : (None, "<-- Deprecated use PyRosettaEnergie* instead", []),
"n_ci_2b_score_types" : (None, "/ keep this guy at the end of the ci2b scores", []),
"fa_pair" : ("Statistics-based pair term, favors salt bridges (replaced by fa_elec in Talaris2013)", "/ == fa_pair_pol_pol", ['standard_weights']),
"fa_pair_aro_aro" : (None, None, []),
"fa_pair_aro_pol" : (None, None, []),
"fa_pair_pol_pol" : (None, None, []),
"fa_plane" : ("pi-pi interaction between aromatic groups, by default = 0", None, ['standard_weights']),
"hbond_sr_bb" : ("Backbone-backbone hbonds close in primary sequence", None, ['standard_weights']),
"hbond_lr_bb" : ("Backbone-backbone hbonds distant in primary sequence", None, ['standard_weights']),
"hbond_bb_sc" : ("Sidechain-backbone hydrogen bond energy", None, ['standard_weights']),
"hbond_sr_bb_sc" : (None, None, []),
"hbond_lr_bb_sc" : (None, None, []),
"hbond_sc" : ("Sidechain-sidechain hydrogen bond energy", None, ['standard_weights']),
"PyRosettaTwoBodyContextDependentEnergy_first" : (None, None, []),
"PyRosettaTwoBodyContextDependentEnergy_last" : (None, None, []),
"interface_dd_pair" : (None, None, []),
"geom_sol" : (None, "Geometric Solvation energy for polar atoms", []),
"occ_sol_fitted" : (None, None, []),
"occ_sol_fitted_onebody" : (None, None, []),
"occ_sol_exact" : (None, None, []),
"pair" : (None, "centroid", []),
"cen_pair_smooth" : (None, "fpd smooth centroid pair", []),
"Mpair" : (None, None, []),
"suck" : (None, None, []),
"rna_rg" : (None, "Radius of gyration for RNA", []),
"interchain_pair" : (None, None, []),
"interchain_vdw" : (None, None, []),
"n_shortranged_2b_score_types" : (None, "keep this guy at the end of the sr ci/cd 2b scores", []),
"gb_elec" : (None, None, []),
"dslf_ss_dst" : ("Distance score in current disulfide (replaced by dslf_fa13 in Talaris2013)", None, ['standard_weights']),
"dslf_cs_ang" : ("CSangles score in current disulfide (replaced by dslf_fa13 in Talaris2013)", None, ['standard_weights']),
"dslf_ss_dih" : ("Dihedral score in current disulfide (replaced by dslf_fa13 in Talaris2013)", None, ['standard_weights']),
"dslf_ca_dih" : ("Ca dihedral score in current disulfide (replaced by dslf_fa13 in Talaris2013)", None, ['standard_weights']),
"dslf_cbs_ds" : (None, None, []),
"dslfc_cen_dst" : (None, None, []),
"dslfc_cb_dst" : (None, None, []),
"dslfc_ang" : (None, None, []),
"dslfc_cb_dih" : (None, None, []),
"dslfc_bb_dih" : (None, None, []),
"dslfc_rot" : (None, None, []),
"dslfc_trans" : (None, None, []),
"dslfc_RT" : (None, None, []),
"atom_pair_constraint" : (None, "Harmonic constraints between atoms involved in Watson-Crick base pairs specified by the user in the params file", []),
"constant_constraint" : (None, None, []),
"coordinate_constraint" : (None, None, []),
"angle_constraint" : (None, None, []),
"dihedral_constraint" : (None, None, []),
"big_bin_constraint" : (None, None, []),
"dunbrack_constraint" : (None, None, []),
"site_constraint" : (None, None, []),
"rna_bond_geometry" : (None, "deviations from ideal geometry", []),
"rama" : ("Ramachandran preferences", None, ['score12_wts_patch']),
"omega" : ("Omega dihedral in the backbone", None, ['score12_wts_patch']),
"fa_dun" : ("Internal energy of sidechain rotamers as derived from Dunbrack's statistics", None, ['standard_weights']),
"p_aa_pp" : ("Probability of amino acid at phi/psi", None, ['standard_weights']),
"yhh_planarity" : (None, None, []),
"h2o_intra" : (None, None, []),
"ref" : ("Reference energy for each amino acid", None, ['standard_weights']),
"seqdep_ref" : (None, None, []),
"envsmooth" : (None, None, []),
"e_pH" : (None, None, []),
"rna_bulge" : (None, None, []),
"special_rot" : (None, None, []),
"PB_elec" : (None, None, []),
"cen_env_smooth" : (None, "fpd smooth centroid env", []),
"cbeta_smooth" : (None, "fpd smooth cbeta", []),
"env" : (None, None, []),
"cbeta" : (None, None, []),
"DFIRE" : (None, None, []),
"Menv" : (None, None, []),
"Mcbeta" : (None, None, []),
"Menv_non_helix" : (None, None, []),
"Menv_termini" : (None, None, []),
"Menv_tm_proj" : (None, None, []),
"Mlipo" : (None, None, []),
"rg" : (None, "radius of gyration", []),
"co" : (None, "contact order", []),
"hs_pair" : (None, None, []),
"ss_pair" : (None, None, []),
"rsigma" : (None, None, []),
"sheet" : (None, None, []),
"burial" : (None, "informatic burial prediction", []),
"abego" : (None, "informatic torsion-bin prediction", []),
"natbias_ss" : (None, None, []),
"natbias_hs" : (None, None, []),
"natbias_hh" : (None, None, []),
"natbias_stwist" : (None, None, []),
"aa_cmp" : (None, None, []),
"dock_ens_conf" : (None, "conformer reference energies for docking", []),
"rdc" : (None, "NMR residual dipolar coupling energy", []),
"rdc_segments" : (None, "fit alignment on multiple segments independently", []),
"rdc_rohl" : (None, None, []),
"holes" : (None, None, []),
"holes_decoy" : (None, None, []),
"holes_resl" : (None, None, []),
"holes_min" : (None, None, []),
"holes_min_mean" : (None, None, []),
"dab_sasa" : (None, "classic 1.4A probe solvant accessible surface area", []),
"dab_sev" : (None, "solvent excluded volume -- volume of atoms inflated by 1.4A", []),
"sa" : (None, "nonpolar contribution in GBSA", []),
"interchain_env" : (None, None, []),
"interchain_contact" : (None, None, []),
"chainbreak" : (None, None, []),
"linear_chainbreak" : (None, None, []),
"overlap_chainbreak" : (None, None, []),
"distance_chainbreak" : (None, None, []),
"dof_constraint" : (None, None, []),
"cart_bonded" : (None, "cartesian bonded potential", []),
"neigh_vect" : (None, None, []),
"neigh_count" : (None, None, []),
"neigh_vect_raw" : (None, None, []),
"symE_bonus" : (None, None, []),
"sym_lig" : (None, None, []),
"pack_stat" : (None, None, []),
"rms" : (None, "All-heavy-atom RMSD to the native structure", []),
"rms_stem" : (None, "All-heavy-atom RMSD to helical segments in the native structure, defined by 'STEM' entries in the parameters file", []),
"res_type_constraint" : (None, None, []),
"res_type_linking_constraint" : (None, None, []),
"pocket_constraint" : (None, None, []),
"backbone_stub_constraint" : (None, None, []),
"surface" : (None, None, []),
"p_aa" : (None, None, []),
"unfolded" : (None, None, []),
"elec_dens_fast" : (None, None, []),
"elec_dens_window" : (None, None, []),
"elec_dens_whole_structure_ca" : (None, None, []),
"elec_dens_whole_structure_allatom" : (None, None, []),
"elec_dens_atomwise" : (None, None, []),
"patterson_cc" : (None, None, []),
"hpatch" : (None, None, []),
"Menv_smooth" : (None, None, []),
"PyRosettaEnergy_first" : (None, None, []),
"PyRosettaEnergy_last" : (None, None, []),
"total_score" : (None, None, []),
"n_score_types" : (None, None, []),
"end_of_score_type_enumeration" : (None, None, []),
"N_WC" : (None, "Number of Watson-Crick base pairs", []),
"N_NWC" : (None, "Number of non-Watson-Crick base pairs", []),
"N_BS" : (None, "Number of base stacks", []),
"f_natWC" : (None, "fraction of native Watson-Crick base pairs recovered", []),
"f_natNWC" : (None, "fraction of native non-Watson-Crick base pairs recovered", []),
"f_natBP" : (None, "fraction of base pairs recovered", []),
}
class ScoreGroup(object):
    """A named group of Rosetta score terms parsed from ScoreType.hh.

    :ivar comment: the group's header comment from the enum.
    :ivar score_terms: list of dicts with 'name' and 'comment' keys.
    """
    def __init__(self, comment):
        self.comment = comment
        self.score_terms = []
    def add(self, score_term, comment = None):
        """Append a score term (with an optional trailing comment) to the group."""
        self.score_terms.append({'name': score_term, 'comment': comment})
    def __len__(self):
        """Number of score terms collected in this group."""
        return len(self.score_terms)
from .fs.fsio import read_file
from . import colortext
def parseScoreType(score_type_header_file):
    """Parse Rosetta's ScoreType.hh enum and print a Markdown summary.

    Reads the header file, extracts the body of ``enum ScoreType {...}``,
    groups the enum entries under their preceding ``//`` comment blocks,
    merges each term's inline comment with the descriptions in the
    module-level ``score_types`` dict, and prints one fenced code block
    per group to stdout.
    """
    contents = read_file(score_type_header_file)
    # Slice out the enum body: everything between "enum ScoreType {" and
    # the first closing brace.
    left_idx = contents.find('enum ScoreType {')
    contents = contents[left_idx+16:]
    right_idx = contents.find('}')
    contents = contents[:right_idx].strip()
    # Sanity checks: no nested braces or block comments inside the enum.
    assert(contents.find('{') == -1)
    assert(contents.find('/*') == -1)
    groups = []
    group_comment = None
    current_group = None
    lines = [l.strip() for l in contents.split('\n') if l.strip()]
    x = 0
    while x < len(lines):
        l = lines[x]
        if l.startswith('//'):
            # A comment line starts a new group; flush the previous one.
            if current_group != None:
                groups.append(current_group)
            group_comment = l[2:]
            # Absorb consecutive comment lines into the group header.
            for y in range(x + 1, len(lines)):
                l2 = lines[y]
                if l2.startswith('//'):
                    group_comment += ' %s' % l2
                else:
                    # Back up one so the outer loop revisits this line.
                    x = y - 1
                    break
            group_comment = group_comment.replace('/', '').replace(' ', ' ').strip()
            current_group = ScoreGroup(group_comment)
        else:
            # An enum entry; it must follow at least one group comment.
            assert(current_group != None)
            comment = None
            # NOTE(review): if the line has no trailing comma, find(',')
            # returns -1 and l[:-1] silently drops the last character.
            score_term = l[:l.find(',')].strip()
            if l.find('//') != - 1:
                comment = l[l.find('//') + 2:].replace('/', '').replace(' ', ' ').strip()
            current_group.add(score_term, comment = comment)
        x += 1
    if current_group != None:
        groups.append(current_group)
    print((len(groups)))
    for g in groups:
        #colortext.warning(g.comment)
        #colortext.warning('-' * len(g.comment))
        print((g.comment))
        print(('-' * len(g.comment)))
        print('\n```')
        for st in g.score_terms:
            # Merge the inline comment with any extra descriptions known
            # in score_types, avoiding duplicates.
            comments = [(st['comment'] or '').strip()]
            term = st['name'].strip().replace(' = 1', '')
            if score_types.get(term):
                if score_types[term][0] and score_types[term][0].replace(' ', ' ').strip() not in comments:
                    comments.append(score_types[term][0].replace(' ', ' ').strip())
                if score_types[term][1] and score_types[term][1].replace(' ', ' ').strip() not in comments:
                    comments.append(score_types[term][1].replace(' ', ' ').strip())
            # Capitalize and strip trailing periods for uniform output.
            comments = [c[0].capitalize()+c[1:] for c in comments if c.strip()]
            for x in range(len(comments)):
                if comments[x].endswith('.'):
                    comments[x] = comments[x][:-1]
            if comments:
                if len(comments) > 1:
                    print((st['name'].ljust(43)))
                    print((' %s' % ('\n ').join(comments)))
                else:
                    print(('%s%s' % (st['name'].ljust(43), comments[0])))
            else:
                print((st['name']))
        print('```\n')
#enum ScoreType {
if __name__ == '__main__':
parseScoreType('/home/rosetta/trunk/master/source/src/core/scoring/ScoreType.hh') | {
"repo_name": "Kortemme-Lab/klab",
"path": "klab/scoretype.py",
"copies": "1",
"size": "25969",
"license": "mit",
"hash": -3352821850553479700,
"line_mean": 75.1583577713,
"line_max": 200,
"alpha_frac": 0.3828795872,
"autogenerated": false,
"ratio": 3.7789580908032594,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9557239741392225,
"avg_score": 0.020919587322206747,
"num_lines": 341
} |
"""A dict subclass that can lookup several layers deep.
>>> my_dict = recursive_dict(**{
... 'alpha': {
... 'first': 1,
... 'second': 2,
... },
... 'many': [
... {'foo': {'test': True}},
... {'foo': {'test': False}},
... ]
... })
>>> my_dict['alpha', 'second']
2
>>> [item['foo', 'test'] for item in my_dict['many']]
[True, False]
"""
__author__ = 'Ryan Anguiano'
__email__ = 'ryan.anguiano@gmail.com'
__version__ = '0.2.0'
class recursive_lookup(object):
    """Mixin adding tuple-based deep lookup/assignment to a container.

    ``obj['a', 'b']`` descends through nested containers; plain keys
    behave like the normal container. When ``_raise_errors`` is False,
    missing keys/indexes return None instead of raising.
    """
    _original = None       # backing container mirrored on writes (set by from_dict/from_list)
    _raise_errors = True   # whether KeyError/IndexError propagate from lookups
    def make_recursive(self, item):
        """Wrap nested lists/dicts so they also support deep lookup."""
        if isinstance(item, list):
            item = recursive_list.from_list(item, self._raise_errors)
        elif isinstance(item, dict):
            item = recursive_dict.from_dict(item, self._raise_errors)
        return item
    def __getitem__(self, key):
        """Dict/list access; a tuple key descends one level per element."""
        item = super(recursive_lookup, self)
        try:
            if isinstance(key, tuple):
                for arg in key:
                    item = item.__getitem__(arg)
            else:
                item = item.__getitem__(key)
            return self.make_recursive(item)
        except (KeyError, IndexError):
            if self._raise_errors:
                raise
    def __setitem__(self, key, value):
        """Assign by plain or tuple key, mirroring into the backing container."""
        item = super(recursive_lookup, self)
        try:
            if isinstance(key, tuple):
                args, key = key[:-1], key[-1]
                for arg in args:
                    item = item.__getitem__(arg)
                item.__setitem__(key, value)
                original = getattr(item, '_original', None)
                if original:
                    original.__setitem__(key, value)
            else:
                # BUG FIX: plain (non-tuple) keys previously fell through
                # this method without ever storing the value, making
                # d['x'] = 1 a silent no-op.
                item.__setitem__(key, value)
                if self._original is not None:
                    self._original[key] = value
        except (KeyError, IndexError):
            if self._raise_errors:
                raise
    def __getattribute__(self, item):
        # Delegate most attribute access to the wrapped original container
        # (when one exists) so the wrapper behaves like it.
        excluded = ('_original', '_raise_errors', '__getitem__', '__setitem__')
        if item not in excluded and self._original and hasattr(self._original, item):
            return getattr(self._original, item)
        else:
            return super(recursive_lookup, self).__getattribute__(item)
class recursive_dict(recursive_lookup, dict):
    """Dict with multi-level tuple lookup: ``d['a', 'b'] == d['a']['b']``."""
    @classmethod
    def from_dict(cls, original, raise_errors=True):
        """Wrap *original*, keeping it as the write-through backing dict."""
        new_dict = cls(**original)
        new_dict._original = original
        new_dict._raise_errors = raise_errors
        return new_dict
class safe_recursive_dict(recursive_dict):
    """recursive_dict variant that returns None instead of raising on missing keys."""
    _raise_errors = False
class recursive_list(recursive_lookup, list):
    """List with multi-level tuple lookup over nested containers."""
    @classmethod
    def from_list(cls, original, raise_errors=True):
        """Wrap *original*, keeping it as the write-through backing list."""
        new_list = cls(original)
        new_list._original = original
        new_list._raise_errors = raise_errors
        return new_list
    def __init__(self, seq=()):
        # Eagerly wrap nested containers so elements support deep lookup.
        super(recursive_list, self).__init__(map(self.make_recursive, seq))
# Short public aliases for the two main classes.
rdict = recursive_dict
safe_rdict = safe_recursive_dict
| {
"repo_name": "ryananguiano/recursive_dict",
"path": "recursive_dict.py",
"copies": "1",
"size": "2871",
"license": "isc",
"hash": 5954959218519440000,
"line_mean": 27.71,
"line_max": 85,
"alpha_frac": 0.5468477882,
"autogenerated": false,
"ratio": 3.9491059147180194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9995372307569182,
"avg_score": 0.00011627906976744185,
"num_lines": 100
} |
"""A dict to translate from IANA location name to Windows timezone name. Translations taken from
http://unicode.org/repos/cldr/trunk/common/supplemental/windowsZones.xml
"""
import re
import requests
from .util import to_xml
# Source of the CLDR IANA <-> Windows timezone mapping data.
CLDR_WINZONE_URL = 'https://raw.githubusercontent.com/unicode-org/cldr/master/common/supplemental/windowsZones.xml'
# Territory code marking the preferred (world-wide, '001') mapping in the CLDR data.
DEFAULT_TERRITORY = '001'
# Versions of windowsZones.xml that CLDR_TO_MS_TIMEZONE_MAP was generated from.
CLDR_WINZONE_TYPE_VERSION = '2021a'
CLDR_WINZONE_OTHER_VERSION = '7e11800'
def generate_map(timeout=10):
    """Build a fresh CLDR_TO_MS_TIMEZONE_MAP from the published CLDR data.

    Used when the CLDR database is updated.

    :param timeout: (Default value = 10)
    :return: (type_version, other_version, mapping) where mapping is
        {IANA location: (MS timezone ID, territory)}.
    """
    response = requests.get(CLDR_WINZONE_URL, timeout=timeout)
    if response.status_code != 200:
        raise ValueError('Unexpected response: %s' % response)
    mapping = {}
    map_timezones = to_xml(response.content).find('windowsZones').find('mapTimezones')
    type_version = map_timezones.get('typeVersion')
    other_version = map_timezones.get('otherVersion')
    for zone_elem in map_timezones.findall('mapZone'):
        territory = zone_elem.get('territory')
        # A mapZone 'type' attribute may hold several whitespace-separated
        # IANA location names.
        for location in re.split(r'\s+', zone_elem.get('type')):
            if territory == DEFAULT_TERRITORY or location not in mapping:
                # Prefer the default territory. This is so MS_TIMEZONE_TO_IANA_MAP
                # maps from MS timezone ID back to the "preferred" region/location
                # timezone name.
                if not location:
                    raise ValueError('Expected location')
                mapping[location] = zone_elem.get('other'), territory
    return type_version, other_version, mapping
# This map is generated irregularly from generate_map(). Do not edit manually - make corrections to
# IANA_TO_MS_TIMEZONE_MAP instead. We provide this map to avoid hammering the CLDR_WINZONE_URL.
#
# This list was generated from CLDR_WINZONE_URL at the versions recorded in
# CLDR_WINZONE_TYPE_VERSION and CLDR_WINZONE_OTHER_VERSION above.
CLDR_TO_MS_TIMEZONE_MAP = {
'Africa/Abidjan': ('Greenwich Standard Time', 'CI'),
'Africa/Accra': ('Greenwich Standard Time', 'GH'),
'Africa/Addis_Ababa': ('E. Africa Standard Time', 'ET'),
'Africa/Algiers': ('W. Central Africa Standard Time', 'DZ'),
'Africa/Asmera': ('E. Africa Standard Time', 'ER'),
'Africa/Bamako': ('Greenwich Standard Time', 'ML'),
'Africa/Bangui': ('W. Central Africa Standard Time', 'CF'),
'Africa/Banjul': ('Greenwich Standard Time', 'GM'),
'Africa/Bissau': ('Greenwich Standard Time', 'GW'),
'Africa/Blantyre': ('South Africa Standard Time', 'MW'),
'Africa/Brazzaville': ('W. Central Africa Standard Time', 'CG'),
'Africa/Bujumbura': ('South Africa Standard Time', 'BI'),
'Africa/Cairo': ('Egypt Standard Time', '001'),
'Africa/Casablanca': ('Morocco Standard Time', '001'),
'Africa/Ceuta': ('Romance Standard Time', 'ES'),
'Africa/Conakry': ('Greenwich Standard Time', 'GN'),
'Africa/Dakar': ('Greenwich Standard Time', 'SN'),
'Africa/Dar_es_Salaam': ('E. Africa Standard Time', 'TZ'),
'Africa/Djibouti': ('E. Africa Standard Time', 'DJ'),
'Africa/Douala': ('W. Central Africa Standard Time', 'CM'),
'Africa/El_Aaiun': ('Morocco Standard Time', 'EH'),
'Africa/Freetown': ('Greenwich Standard Time', 'SL'),
'Africa/Gaborone': ('South Africa Standard Time', 'BW'),
'Africa/Harare': ('South Africa Standard Time', 'ZW'),
'Africa/Johannesburg': ('South Africa Standard Time', '001'),
'Africa/Juba': ('South Sudan Standard Time', '001'),
'Africa/Kampala': ('E. Africa Standard Time', 'UG'),
'Africa/Khartoum': ('Sudan Standard Time', '001'),
'Africa/Kigali': ('South Africa Standard Time', 'RW'),
'Africa/Kinshasa': ('W. Central Africa Standard Time', 'CD'),
'Africa/Lagos': ('W. Central Africa Standard Time', '001'),
'Africa/Libreville': ('W. Central Africa Standard Time', 'GA'),
'Africa/Lome': ('Greenwich Standard Time', 'TG'),
'Africa/Luanda': ('W. Central Africa Standard Time', 'AO'),
'Africa/Lubumbashi': ('South Africa Standard Time', 'CD'),
'Africa/Lusaka': ('South Africa Standard Time', 'ZM'),
'Africa/Malabo': ('W. Central Africa Standard Time', 'GQ'),
'Africa/Maputo': ('South Africa Standard Time', 'MZ'),
'Africa/Maseru': ('South Africa Standard Time', 'LS'),
'Africa/Mbabane': ('South Africa Standard Time', 'SZ'),
'Africa/Mogadishu': ('E. Africa Standard Time', 'SO'),
'Africa/Monrovia': ('Greenwich Standard Time', 'LR'),
'Africa/Nairobi': ('E. Africa Standard Time', '001'),
'Africa/Ndjamena': ('W. Central Africa Standard Time', 'TD'),
'Africa/Niamey': ('W. Central Africa Standard Time', 'NE'),
'Africa/Nouakchott': ('Greenwich Standard Time', 'MR'),
'Africa/Ouagadougou': ('Greenwich Standard Time', 'BF'),
'Africa/Porto-Novo': ('W. Central Africa Standard Time', 'BJ'),
'Africa/Sao_Tome': ('Sao Tome Standard Time', '001'),
'Africa/Tripoli': ('Libya Standard Time', '001'),
'Africa/Tunis': ('W. Central Africa Standard Time', 'TN'),
'Africa/Windhoek': ('Namibia Standard Time', '001'),
'America/Adak': ('Aleutian Standard Time', '001'),
'America/Anchorage': ('Alaskan Standard Time', '001'),
'America/Anguilla': ('SA Western Standard Time', 'AI'),
'America/Antigua': ('SA Western Standard Time', 'AG'),
'America/Araguaina': ('Tocantins Standard Time', '001'),
'America/Argentina/La_Rioja': ('Argentina Standard Time', 'AR'),
'America/Argentina/Rio_Gallegos': ('Argentina Standard Time', 'AR'),
'America/Argentina/Salta': ('Argentina Standard Time', 'AR'),
'America/Argentina/San_Juan': ('Argentina Standard Time', 'AR'),
'America/Argentina/San_Luis': ('Argentina Standard Time', 'AR'),
'America/Argentina/Tucuman': ('Argentina Standard Time', 'AR'),
'America/Argentina/Ushuaia': ('Argentina Standard Time', 'AR'),
'America/Aruba': ('SA Western Standard Time', 'AW'),
'America/Asuncion': ('Paraguay Standard Time', '001'),
'America/Bahia': ('Bahia Standard Time', '001'),
'America/Bahia_Banderas': ('Central Standard Time (Mexico)', 'MX'),
'America/Barbados': ('SA Western Standard Time', 'BB'),
'America/Belem': ('SA Eastern Standard Time', 'BR'),
'America/Belize': ('Central America Standard Time', 'BZ'),
'America/Blanc-Sablon': ('SA Western Standard Time', 'CA'),
'America/Boa_Vista': ('SA Western Standard Time', 'BR'),
'America/Bogota': ('SA Pacific Standard Time', '001'),
'America/Boise': ('Mountain Standard Time', 'US'),
'America/Buenos_Aires': ('Argentina Standard Time', '001'),
'America/Cambridge_Bay': ('Mountain Standard Time', 'CA'),
'America/Campo_Grande': ('Central Brazilian Standard Time', 'BR'),
'America/Cancun': ('Eastern Standard Time (Mexico)', '001'),
'America/Caracas': ('Venezuela Standard Time', '001'),
'America/Catamarca': ('Argentina Standard Time', 'AR'),
'America/Cayenne': ('SA Eastern Standard Time', '001'),
'America/Cayman': ('SA Pacific Standard Time', 'KY'),
'America/Chicago': ('Central Standard Time', '001'),
'America/Chihuahua': ('Mountain Standard Time (Mexico)', '001'),
'America/Coral_Harbour': ('SA Pacific Standard Time', 'CA'),
'America/Cordoba': ('Argentina Standard Time', 'AR'),
'America/Costa_Rica': ('Central America Standard Time', 'CR'),
'America/Creston': ('US Mountain Standard Time', 'CA'),
'America/Cuiaba': ('Central Brazilian Standard Time', '001'),
'America/Curacao': ('SA Western Standard Time', 'CW'),
'America/Danmarkshavn': ('Greenwich Standard Time', 'GL'),
'America/Dawson': ('Yukon Standard Time', 'CA'),
'America/Dawson_Creek': ('US Mountain Standard Time', 'CA'),
'America/Denver': ('Mountain Standard Time', '001'),
'America/Detroit': ('Eastern Standard Time', 'US'),
'America/Dominica': ('SA Western Standard Time', 'DM'),
'America/Edmonton': ('Mountain Standard Time', 'CA'),
'America/Eirunepe': ('SA Pacific Standard Time', 'BR'),
'America/El_Salvador': ('Central America Standard Time', 'SV'),
'America/Fort_Nelson': ('US Mountain Standard Time', 'CA'),
'America/Fortaleza': ('SA Eastern Standard Time', 'BR'),
'America/Glace_Bay': ('Atlantic Standard Time', 'CA'),
'America/Godthab': ('Greenland Standard Time', '001'),
'America/Goose_Bay': ('Atlantic Standard Time', 'CA'),
'America/Grand_Turk': ('Turks And Caicos Standard Time', '001'),
'America/Grenada': ('SA Western Standard Time', 'GD'),
'America/Guadeloupe': ('SA Western Standard Time', 'GP'),
'America/Guatemala': ('Central America Standard Time', '001'),
'America/Guayaquil': ('SA Pacific Standard Time', 'EC'),
'America/Guyana': ('SA Western Standard Time', 'GY'),
'America/Halifax': ('Atlantic Standard Time', '001'),
'America/Havana': ('Cuba Standard Time', '001'),
'America/Hermosillo': ('US Mountain Standard Time', 'MX'),
'America/Indiana/Knox': ('Central Standard Time', 'US'),
'America/Indiana/Marengo': ('US Eastern Standard Time', 'US'),
'America/Indiana/Petersburg': ('Eastern Standard Time', 'US'),
'America/Indiana/Tell_City': ('Central Standard Time', 'US'),
'America/Indiana/Vevay': ('US Eastern Standard Time', 'US'),
'America/Indiana/Vincennes': ('Eastern Standard Time', 'US'),
'America/Indiana/Winamac': ('Eastern Standard Time', 'US'),
'America/Indianapolis': ('US Eastern Standard Time', '001'),
'America/Inuvik': ('Mountain Standard Time', 'CA'),
'America/Iqaluit': ('Eastern Standard Time', 'CA'),
'America/Jamaica': ('SA Pacific Standard Time', 'JM'),
'America/Jujuy': ('Argentina Standard Time', 'AR'),
'America/Juneau': ('Alaskan Standard Time', 'US'),
'America/Kentucky/Monticello': ('Eastern Standard Time', 'US'),
'America/Kralendijk': ('SA Western Standard Time', 'BQ'),
'America/La_Paz': ('SA Western Standard Time', '001'),
'America/Lima': ('SA Pacific Standard Time', 'PE'),
'America/Los_Angeles': ('Pacific Standard Time', '001'),
'America/Louisville': ('Eastern Standard Time', 'US'),
'America/Lower_Princes': ('SA Western Standard Time', 'SX'),
'America/Maceio': ('SA Eastern Standard Time', 'BR'),
'America/Managua': ('Central America Standard Time', 'NI'),
'America/Manaus': ('SA Western Standard Time', 'BR'),
'America/Marigot': ('SA Western Standard Time', 'MF'),
'America/Martinique': ('SA Western Standard Time', 'MQ'),
'America/Matamoros': ('Central Standard Time', 'MX'),
'America/Mazatlan': ('Mountain Standard Time (Mexico)', 'MX'),
'America/Mendoza': ('Argentina Standard Time', 'AR'),
'America/Menominee': ('Central Standard Time', 'US'),
'America/Merida': ('Central Standard Time (Mexico)', 'MX'),
'America/Metlakatla': ('Alaskan Standard Time', 'US'),
'America/Mexico_City': ('Central Standard Time (Mexico)', '001'),
'America/Miquelon': ('Saint Pierre Standard Time', '001'),
'America/Moncton': ('Atlantic Standard Time', 'CA'),
'America/Monterrey': ('Central Standard Time (Mexico)', 'MX'),
'America/Montevideo': ('Montevideo Standard Time', '001'),
'America/Montreal': ('Eastern Standard Time', 'CA'),
'America/Montserrat': ('SA Western Standard Time', 'MS'),
'America/Nassau': ('Eastern Standard Time', 'BS'),
'America/New_York': ('Eastern Standard Time', '001'),
'America/Nipigon': ('Eastern Standard Time', 'CA'),
'America/Nome': ('Alaskan Standard Time', 'US'),
'America/Noronha': ('UTC-02', 'BR'),
'America/North_Dakota/Beulah': ('Central Standard Time', 'US'),
'America/North_Dakota/Center': ('Central Standard Time', 'US'),
'America/North_Dakota/New_Salem': ('Central Standard Time', 'US'),
'America/Ojinaga': ('Mountain Standard Time', 'MX'),
'America/Panama': ('SA Pacific Standard Time', 'PA'),
'America/Pangnirtung': ('Eastern Standard Time', 'CA'),
'America/Paramaribo': ('SA Eastern Standard Time', 'SR'),
'America/Phoenix': ('US Mountain Standard Time', '001'),
'America/Port-au-Prince': ('Haiti Standard Time', '001'),
'America/Port_of_Spain': ('SA Western Standard Time', 'TT'),
'America/Porto_Velho': ('SA Western Standard Time', 'BR'),
'America/Puerto_Rico': ('SA Western Standard Time', 'PR'),
'America/Punta_Arenas': ('Magallanes Standard Time', '001'),
'America/Rainy_River': ('Central Standard Time', 'CA'),
'America/Rankin_Inlet': ('Central Standard Time', 'CA'),
'America/Recife': ('SA Eastern Standard Time', 'BR'),
'America/Regina': ('Canada Central Standard Time', '001'),
'America/Resolute': ('Central Standard Time', 'CA'),
'America/Rio_Branco': ('SA Pacific Standard Time', 'BR'),
'America/Santa_Isabel': ('Pacific Standard Time (Mexico)', 'MX'),
'America/Santarem': ('SA Eastern Standard Time', 'BR'),
'America/Santiago': ('Pacific SA Standard Time', '001'),
'America/Santo_Domingo': ('SA Western Standard Time', 'DO'),
'America/Sao_Paulo': ('E. South America Standard Time', '001'),
'America/Scoresbysund': ('Azores Standard Time', 'GL'),
'America/Sitka': ('Alaskan Standard Time', 'US'),
'America/St_Barthelemy': ('SA Western Standard Time', 'BL'),
'America/St_Johns': ('Newfoundland Standard Time', '001'),
'America/St_Kitts': ('SA Western Standard Time', 'KN'),
'America/St_Lucia': ('SA Western Standard Time', 'LC'),
'America/St_Thomas': ('SA Western Standard Time', 'VI'),
'America/St_Vincent': ('SA Western Standard Time', 'VC'),
'America/Swift_Current': ('Canada Central Standard Time', 'CA'),
'America/Tegucigalpa': ('Central America Standard Time', 'HN'),
'America/Thule': ('Atlantic Standard Time', 'GL'),
'America/Thunder_Bay': ('Eastern Standard Time', 'CA'),
'America/Tijuana': ('Pacific Standard Time (Mexico)', '001'),
'America/Toronto': ('Eastern Standard Time', 'CA'),
'America/Tortola': ('SA Western Standard Time', 'VG'),
'America/Vancouver': ('Pacific Standard Time', 'CA'),
'America/Whitehorse': ('Yukon Standard Time', '001'),
'America/Winnipeg': ('Central Standard Time', 'CA'),
'America/Yakutat': ('Alaskan Standard Time', 'US'),
'America/Yellowknife': ('Mountain Standard Time', 'CA'),
'Antarctica/Casey': ('Central Pacific Standard Time', 'AQ'),
'Antarctica/Davis': ('SE Asia Standard Time', 'AQ'),
'Antarctica/DumontDUrville': ('West Pacific Standard Time', 'AQ'),
'Antarctica/Macquarie': ('Tasmania Standard Time', 'AU'),
'Antarctica/Mawson': ('West Asia Standard Time', 'AQ'),
'Antarctica/McMurdo': ('New Zealand Standard Time', 'AQ'),
'Antarctica/Palmer': ('SA Eastern Standard Time', 'AQ'),
'Antarctica/Rothera': ('SA Eastern Standard Time', 'AQ'),
'Antarctica/Syowa': ('E. Africa Standard Time', 'AQ'),
'Antarctica/Vostok': ('Central Asia Standard Time', 'AQ'),
'Arctic/Longyearbyen': ('W. Europe Standard Time', 'SJ'),
'Asia/Aden': ('Arab Standard Time', 'YE'),
'Asia/Almaty': ('Central Asia Standard Time', '001'),
'Asia/Amman': ('Jordan Standard Time', '001'),
'Asia/Anadyr': ('Russia Time Zone 11', 'RU'),
'Asia/Aqtau': ('West Asia Standard Time', 'KZ'),
'Asia/Aqtobe': ('West Asia Standard Time', 'KZ'),
'Asia/Ashgabat': ('West Asia Standard Time', 'TM'),
'Asia/Atyrau': ('West Asia Standard Time', 'KZ'),
'Asia/Baghdad': ('Arabic Standard Time', '001'),
'Asia/Bahrain': ('Arab Standard Time', 'BH'),
'Asia/Baku': ('Azerbaijan Standard Time', '001'),
'Asia/Bangkok': ('SE Asia Standard Time', '001'),
'Asia/Barnaul': ('Altai Standard Time', '001'),
'Asia/Beirut': ('Middle East Standard Time', '001'),
'Asia/Bishkek': ('Central Asia Standard Time', 'KG'),
'Asia/Brunei': ('Singapore Standard Time', 'BN'),
'Asia/Calcutta': ('India Standard Time', '001'),
'Asia/Chita': ('Transbaikal Standard Time', '001'),
'Asia/Choibalsan': ('Ulaanbaatar Standard Time', 'MN'),
'Asia/Colombo': ('Sri Lanka Standard Time', '001'),
'Asia/Damascus': ('Syria Standard Time', '001'),
'Asia/Dhaka': ('Bangladesh Standard Time', '001'),
'Asia/Dili': ('Tokyo Standard Time', 'TL'),
'Asia/Dubai': ('Arabian Standard Time', '001'),
'Asia/Dushanbe': ('West Asia Standard Time', 'TJ'),
'Asia/Famagusta': ('GTB Standard Time', 'CY'),
'Asia/Gaza': ('West Bank Standard Time', 'PS'),
'Asia/Hebron': ('West Bank Standard Time', '001'),
'Asia/Hong_Kong': ('China Standard Time', 'HK'),
'Asia/Hovd': ('W. Mongolia Standard Time', '001'),
'Asia/Irkutsk': ('North Asia East Standard Time', '001'),
'Asia/Jakarta': ('SE Asia Standard Time', 'ID'),
'Asia/Jayapura': ('Tokyo Standard Time', 'ID'),
'Asia/Jerusalem': ('Israel Standard Time', '001'),
'Asia/Kabul': ('Afghanistan Standard Time', '001'),
'Asia/Kamchatka': ('Russia Time Zone 11', '001'),
'Asia/Karachi': ('Pakistan Standard Time', '001'),
'Asia/Katmandu': ('Nepal Standard Time', '001'),
'Asia/Khandyga': ('Yakutsk Standard Time', 'RU'),
'Asia/Krasnoyarsk': ('North Asia Standard Time', '001'),
'Asia/Kuala_Lumpur': ('Singapore Standard Time', 'MY'),
'Asia/Kuching': ('Singapore Standard Time', 'MY'),
'Asia/Kuwait': ('Arab Standard Time', 'KW'),
'Asia/Macau': ('China Standard Time', 'MO'),
'Asia/Magadan': ('Magadan Standard Time', '001'),
'Asia/Makassar': ('Singapore Standard Time', 'ID'),
'Asia/Manila': ('Singapore Standard Time', 'PH'),
'Asia/Muscat': ('Arabian Standard Time', 'OM'),
'Asia/Nicosia': ('GTB Standard Time', 'CY'),
'Asia/Novokuznetsk': ('North Asia Standard Time', 'RU'),
'Asia/Novosibirsk': ('N. Central Asia Standard Time', '001'),
'Asia/Omsk': ('Omsk Standard Time', '001'),
'Asia/Oral': ('West Asia Standard Time', 'KZ'),
'Asia/Phnom_Penh': ('SE Asia Standard Time', 'KH'),
'Asia/Pontianak': ('SE Asia Standard Time', 'ID'),
'Asia/Pyongyang': ('North Korea Standard Time', '001'),
'Asia/Qatar': ('Arab Standard Time', 'QA'),
'Asia/Qostanay': ('Central Asia Standard Time', 'KZ'),
'Asia/Qyzylorda': ('Qyzylorda Standard Time', '001'),
'Asia/Rangoon': ('Myanmar Standard Time', '001'),
'Asia/Riyadh': ('Arab Standard Time', '001'),
'Asia/Saigon': ('SE Asia Standard Time', 'VN'),
'Asia/Sakhalin': ('Sakhalin Standard Time', '001'),
'Asia/Samarkand': ('West Asia Standard Time', 'UZ'),
'Asia/Seoul': ('Korea Standard Time', '001'),
'Asia/Shanghai': ('China Standard Time', '001'),
'Asia/Singapore': ('Singapore Standard Time', '001'),
'Asia/Srednekolymsk': ('Russia Time Zone 10', '001'),
'Asia/Taipei': ('Taipei Standard Time', '001'),
'Asia/Tashkent': ('West Asia Standard Time', '001'),
'Asia/Tbilisi': ('Georgian Standard Time', '001'),
'Asia/Tehran': ('Iran Standard Time', '001'),
'Asia/Thimphu': ('Bangladesh Standard Time', 'BT'),
'Asia/Tokyo': ('Tokyo Standard Time', '001'),
'Asia/Tomsk': ('Tomsk Standard Time', '001'),
'Asia/Ulaanbaatar': ('Ulaanbaatar Standard Time', '001'),
'Asia/Urumqi': ('Central Asia Standard Time', 'CN'),
'Asia/Ust-Nera': ('Vladivostok Standard Time', 'RU'),
'Asia/Vientiane': ('SE Asia Standard Time', 'LA'),
'Asia/Vladivostok': ('Vladivostok Standard Time', '001'),
'Asia/Yakutsk': ('Yakutsk Standard Time', '001'),
'Asia/Yekaterinburg': ('Ekaterinburg Standard Time', '001'),
'Asia/Yerevan': ('Caucasus Standard Time', '001'),
'Atlantic/Azores': ('Azores Standard Time', '001'),
'Atlantic/Bermuda': ('Atlantic Standard Time', 'BM'),
'Atlantic/Canary': ('GMT Standard Time', 'ES'),
'Atlantic/Cape_Verde': ('Cape Verde Standard Time', '001'),
'Atlantic/Faeroe': ('GMT Standard Time', 'FO'),
'Atlantic/Madeira': ('GMT Standard Time', 'PT'),
'Atlantic/Reykjavik': ('Greenwich Standard Time', '001'),
'Atlantic/South_Georgia': ('UTC-02', 'GS'),
'Atlantic/St_Helena': ('Greenwich Standard Time', 'SH'),
'Atlantic/Stanley': ('SA Eastern Standard Time', 'FK'),
'Australia/Adelaide': ('Cen. Australia Standard Time', '001'),
'Australia/Brisbane': ('E. Australia Standard Time', '001'),
'Australia/Broken_Hill': ('Cen. Australia Standard Time', 'AU'),
'Australia/Currie': ('Tasmania Standard Time', 'AU'),
'Australia/Darwin': ('AUS Central Standard Time', '001'),
'Australia/Eucla': ('Aus Central W. Standard Time', '001'),
'Australia/Hobart': ('Tasmania Standard Time', '001'),
'Australia/Lindeman': ('E. Australia Standard Time', 'AU'),
'Australia/Lord_Howe': ('Lord Howe Standard Time', '001'),
'Australia/Melbourne': ('AUS Eastern Standard Time', 'AU'),
'Australia/Perth': ('W. Australia Standard Time', '001'),
'Australia/Sydney': ('AUS Eastern Standard Time', '001'),
'CST6CDT': ('Central Standard Time', 'ZZ'),
'EST5EDT': ('Eastern Standard Time', 'ZZ'),
'Etc/GMT': ('UTC', 'ZZ'),
'Etc/GMT+1': ('Cape Verde Standard Time', 'ZZ'),
'Etc/GMT+10': ('Hawaiian Standard Time', 'ZZ'),
'Etc/GMT+11': ('UTC-11', '001'),
'Etc/GMT+12': ('Dateline Standard Time', '001'),
'Etc/GMT+2': ('UTC-02', '001'),
'Etc/GMT+3': ('SA Eastern Standard Time', 'ZZ'),
'Etc/GMT+4': ('SA Western Standard Time', 'ZZ'),
'Etc/GMT+5': ('SA Pacific Standard Time', 'ZZ'),
'Etc/GMT+6': ('Central America Standard Time', 'ZZ'),
'Etc/GMT+7': ('US Mountain Standard Time', 'ZZ'),
'Etc/GMT+8': ('UTC-08', '001'),
'Etc/GMT+9': ('UTC-09', '001'),
'Etc/GMT-1': ('W. Central Africa Standard Time', 'ZZ'),
'Etc/GMT-10': ('West Pacific Standard Time', 'ZZ'),
'Etc/GMT-11': ('Central Pacific Standard Time', 'ZZ'),
'Etc/GMT-12': ('UTC+12', '001'),
'Etc/GMT-13': ('UTC+13', '001'),
'Etc/GMT-14': ('Line Islands Standard Time', 'ZZ'),
'Etc/GMT-2': ('South Africa Standard Time', 'ZZ'),
'Etc/GMT-3': ('E. Africa Standard Time', 'ZZ'),
'Etc/GMT-4': ('Arabian Standard Time', 'ZZ'),
'Etc/GMT-5': ('West Asia Standard Time', 'ZZ'),
'Etc/GMT-6': ('Central Asia Standard Time', 'ZZ'),
'Etc/GMT-7': ('SE Asia Standard Time', 'ZZ'),
'Etc/GMT-8': ('Singapore Standard Time', 'ZZ'),
'Etc/GMT-9': ('Tokyo Standard Time', 'ZZ'),
'Etc/UTC': ('UTC', '001'),
'Europe/Amsterdam': ('W. Europe Standard Time', 'NL'),
'Europe/Andorra': ('W. Europe Standard Time', 'AD'),
'Europe/Astrakhan': ('Astrakhan Standard Time', '001'),
'Europe/Athens': ('GTB Standard Time', 'GR'),
'Europe/Belgrade': ('Central Europe Standard Time', 'RS'),
'Europe/Berlin': ('W. Europe Standard Time', '001'),
'Europe/Bratislava': ('Central Europe Standard Time', 'SK'),
'Europe/Brussels': ('Romance Standard Time', 'BE'),
'Europe/Bucharest': ('GTB Standard Time', '001'),
'Europe/Budapest': ('Central Europe Standard Time', '001'),
'Europe/Busingen': ('W. Europe Standard Time', 'DE'),
'Europe/Chisinau': ('E. Europe Standard Time', '001'),
'Europe/Copenhagen': ('Romance Standard Time', 'DK'),
'Europe/Dublin': ('GMT Standard Time', 'IE'),
'Europe/Gibraltar': ('W. Europe Standard Time', 'GI'),
'Europe/Guernsey': ('GMT Standard Time', 'GG'),
'Europe/Helsinki': ('FLE Standard Time', 'FI'),
'Europe/Isle_of_Man': ('GMT Standard Time', 'IM'),
'Europe/Istanbul': ('Turkey Standard Time', '001'),
'Europe/Jersey': ('GMT Standard Time', 'JE'),
'Europe/Kaliningrad': ('Kaliningrad Standard Time', '001'),
'Europe/Kiev': ('FLE Standard Time', '001'),
'Europe/Kirov': ('Russian Standard Time', 'RU'),
'Europe/Lisbon': ('GMT Standard Time', 'PT'),
'Europe/Ljubljana': ('Central Europe Standard Time', 'SI'),
'Europe/London': ('GMT Standard Time', '001'),
'Europe/Luxembourg': ('W. Europe Standard Time', 'LU'),
'Europe/Madrid': ('Romance Standard Time', 'ES'),
'Europe/Malta': ('W. Europe Standard Time', 'MT'),
'Europe/Mariehamn': ('FLE Standard Time', 'AX'),
'Europe/Minsk': ('Belarus Standard Time', '001'),
'Europe/Monaco': ('W. Europe Standard Time', 'MC'),
'Europe/Moscow': ('Russian Standard Time', '001'),
'Europe/Oslo': ('W. Europe Standard Time', 'NO'),
'Europe/Paris': ('Romance Standard Time', '001'),
'Europe/Podgorica': ('Central Europe Standard Time', 'ME'),
'Europe/Prague': ('Central Europe Standard Time', 'CZ'),
'Europe/Riga': ('FLE Standard Time', 'LV'),
'Europe/Rome': ('W. Europe Standard Time', 'IT'),
'Europe/Samara': ('Russia Time Zone 3', '001'),
'Europe/San_Marino': ('W. Europe Standard Time', 'SM'),
'Europe/Sarajevo': ('Central European Standard Time', 'BA'),
'Europe/Saratov': ('Saratov Standard Time', '001'),
'Europe/Simferopol': ('Russian Standard Time', 'UA'),
'Europe/Skopje': ('Central European Standard Time', 'MK'),
'Europe/Sofia': ('FLE Standard Time', 'BG'),
'Europe/Stockholm': ('W. Europe Standard Time', 'SE'),
'Europe/Tallinn': ('FLE Standard Time', 'EE'),
'Europe/Tirane': ('Central Europe Standard Time', 'AL'),
'Europe/Ulyanovsk': ('Astrakhan Standard Time', 'RU'),
'Europe/Uzhgorod': ('FLE Standard Time', 'UA'),
'Europe/Vaduz': ('W. Europe Standard Time', 'LI'),
'Europe/Vatican': ('W. Europe Standard Time', 'VA'),
'Europe/Vienna': ('W. Europe Standard Time', 'AT'),
'Europe/Vilnius': ('FLE Standard Time', 'LT'),
'Europe/Volgograd': ('Volgograd Standard Time', '001'),
'Europe/Warsaw': ('Central European Standard Time', '001'),
'Europe/Zagreb': ('Central European Standard Time', 'HR'),
'Europe/Zaporozhye': ('FLE Standard Time', 'UA'),
'Europe/Zurich': ('W. Europe Standard Time', 'CH'),
'Indian/Antananarivo': ('E. Africa Standard Time', 'MG'),
'Indian/Chagos': ('Central Asia Standard Time', 'IO'),
'Indian/Christmas': ('SE Asia Standard Time', 'CX'),
'Indian/Cocos': ('Myanmar Standard Time', 'CC'),
'Indian/Comoro': ('E. Africa Standard Time', 'KM'),
'Indian/Kerguelen': ('West Asia Standard Time', 'TF'),
'Indian/Mahe': ('Mauritius Standard Time', 'SC'),
'Indian/Maldives': ('West Asia Standard Time', 'MV'),
'Indian/Mauritius': ('Mauritius Standard Time', '001'),
'Indian/Mayotte': ('E. Africa Standard Time', 'YT'),
'Indian/Reunion': ('Mauritius Standard Time', 'RE'),
'MST7MDT': ('Mountain Standard Time', 'ZZ'),
'PST8PDT': ('Pacific Standard Time', 'ZZ'),
'Pacific/Apia': ('Samoa Standard Time', '001'),
'Pacific/Auckland': ('New Zealand Standard Time', '001'),
'Pacific/Bougainville': ('Bougainville Standard Time', '001'),
'Pacific/Chatham': ('Chatham Islands Standard Time', '001'),
'Pacific/Easter': ('Easter Island Standard Time', '001'),
'Pacific/Efate': ('Central Pacific Standard Time', 'VU'),
'Pacific/Enderbury': ('UTC+13', 'KI'),
'Pacific/Fakaofo': ('UTC+13', 'TK'),
'Pacific/Fiji': ('Fiji Standard Time', '001'),
'Pacific/Funafuti': ('UTC+12', 'TV'),
'Pacific/Galapagos': ('Central America Standard Time', 'EC'),
'Pacific/Gambier': ('UTC-09', 'PF'),
'Pacific/Guadalcanal': ('Central Pacific Standard Time', '001'),
'Pacific/Guam': ('West Pacific Standard Time', 'GU'),
'Pacific/Honolulu': ('Hawaiian Standard Time', '001'),
'Pacific/Johnston': ('Hawaiian Standard Time', 'UM'),
'Pacific/Kiritimati': ('Line Islands Standard Time', '001'),
'Pacific/Kosrae': ('Central Pacific Standard Time', 'FM'),
'Pacific/Kwajalein': ('UTC+12', 'MH'),
'Pacific/Majuro': ('UTC+12', 'MH'),
'Pacific/Marquesas': ('Marquesas Standard Time', '001'),
'Pacific/Midway': ('UTC-11', 'UM'),
'Pacific/Nauru': ('UTC+12', 'NR'),
'Pacific/Niue': ('UTC-11', 'NU'),
'Pacific/Norfolk': ('Norfolk Standard Time', '001'),
'Pacific/Noumea': ('Central Pacific Standard Time', 'NC'),
'Pacific/Pago_Pago': ('UTC-11', 'AS'),
'Pacific/Palau': ('Tokyo Standard Time', 'PW'),
'Pacific/Pitcairn': ('UTC-08', 'PN'),
'Pacific/Ponape': ('Central Pacific Standard Time', 'FM'),
'Pacific/Port_Moresby': ('West Pacific Standard Time', '001'),
'Pacific/Rarotonga': ('Hawaiian Standard Time', 'CK'),
'Pacific/Saipan': ('West Pacific Standard Time', 'MP'),
'Pacific/Tahiti': ('Hawaiian Standard Time', 'PF'),
'Pacific/Tarawa': ('UTC+12', 'KI'),
'Pacific/Tongatapu': ('Tonga Standard Time', '001'),
'Pacific/Truk': ('West Pacific Standard Time', 'FM'),
'Pacific/Wake': ('UTC+12', 'UM'),
'Pacific/Wallis': ('UTC+12', 'WF'),
}
# Add timezone names used by IANA that are not found in the CLDR.
# Use 'noterritory' unless you want to override the standard mapping
# (in which case, '001').
# TODO: A full list of the IANA names missing in CLDR can be found with:
# sorted(set(zoneinfo.available_timezones()) - set(CLDR_TO_MS_TIMEZONE_MAP))
# Layer the extra IANA-only names on top of the generated CLDR mapping.
IANA_TO_MS_TIMEZONE_MAP = {
    **CLDR_TO_MS_TIMEZONE_MAP,
    'Asia/Kolkata': ('India Standard Time', 'noterritory'),
    'GMT': ('UTC', 'noterritory'),
    'UTC': ('UTC', 'noterritory'),
}
# Reverse map from Microsoft timezone ID to IANA timezone name. Non-CLDR timezone ID's can be added here.
# Reverse map from Microsoft timezone ID to IANA timezone name, built from the
# default-territory entries. Non-CLDR timezone ID's can be added below.
MS_TIMEZONE_TO_IANA_MAP = {
    ms_id: iana_name
    for iana_name, (ms_id, territory) in IANA_TO_MS_TIMEZONE_MAP.items()
    if territory == DEFAULT_TERRITORY
}
MS_TIMEZONE_TO_IANA_MAP['tzone://Microsoft/Utc'] = 'UTC'
| {
"repo_name": "ecederstrand/exchangelib",
"path": "exchangelib/winzone.py",
"copies": "1",
"size": "29139",
"license": "bsd-2-clause",
"hash": -8291358970427594000,
"line_mean": 54.397338403,
"line_max": 115,
"alpha_frac": 0.6255533821,
"autogenerated": false,
"ratio": 3.0400625978090767,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41656159799090764,
"avg_score": null,
"num_lines": null
} |
# Adidas Account Creator v2
# Dev: Simmy (bopped) Twitter: @Backdoorcook
import requests, time, os, json, sys
from classes.AdidasGen import AccountGEN
from colorama import *
init()
s = requests.Session()
def log(msg):
    """Write *msg* to stdout with an HH:MM:SS prefix and flush immediately."""
    stamp = time.strftime("%H:%M:%S")
    stream = sys.stdout
    stream.write("[%s] %s\n" % (stamp, str(msg)))
    stream.flush()
#Init: parse optional CLI arguments (region, account count) and check config.
Region = ""
NumberofAccounts = 0
if len(sys.argv) == 1:
    log("%s[ Auto Mode is now off! ]%s" % (Fore.RED, Style.RESET_ALL))
else:
    try:
        # BUG FIX: the original tested `sys.argv > 1`, comparing the argv
        # *list* to an int (TypeError on Python 3, vacuously true on Python 2).
        # Test the argument count explicitly instead; a missing second
        # argument still raises IndexError and is reported below.
        if len(sys.argv) > 1:
            Region = sys.argv[1]
            NumberofAccounts = int(sys.argv[2])
        # Replaced the throwaway list-comprehension hack with plain ifs.
        if Region != "":
            log("Argument Loaded! Region [ %s%s%s ]" % (Fore.GREEN, Region, Style.RESET_ALL))
            log("Argument Loaded! Number of Accounts [ %s%s%s ]" % (Fore.GREEN, NumberofAccounts, Style.RESET_ALL))
    except (IndexError, ValueError):
        # IndexError: missing argv[2]; ValueError: non-numeric account count.
        log("%sYou Forgot to add Region or Number of accounts in your argument!!%s" % (Fore.RED, Style.RESET_ALL))
if not os.path.exists("config.json"):
    log("%sConfig.json not Found!!!" % (Fore.RED))
    sys.exit()
log("-------------------------------")
log("%sConfiguration loaded.%s" % (Fore.GREEN,Style.RESET_ALL))
# Load generator settings from config.json (existence checked above).
with open('config.json') as json_data_file:
    config = json.load(json_data_file)
Start = AccountGEN(s,config)
log("%s%sRegions US | CA | GB | AU%s" % (Style.BRIGHT,Fore.BLUE,Style.RESET_ALL))
# Prompt until a supported region is entered (skipped when a valid region
# came from the command line). NOTE(review): raw_input makes this Python 2-only.
while True:
    if Region == "":
        Region = raw_input("Please Select a Region\t").upper()
    # "UK" is accepted as an alias for "GB".
    Checked = True if Region == "US" or Region == "UK" or Region == "GB" or Region == "CA" or Region == "AU" else False
    if not Checked:
        log("%sSorry the following domain %s is not supported, or you mis-typed!%s" % (Fore.RED,Region,Style.RESET_ALL))
        Region = ""
    if Checked:
        break
# Ask for a count only when none was supplied on the command line.
if NumberofAccounts == 0:
    NumberofAccounts = int(raw_input("Enter Amount Of Accounts To Generate\t"))
log("We are Generating %d Accounts for Region | %s |" % (NumberofAccounts,Region))
# Dispatch to the region-specific generator.
if Region == "US":
    Start.US(s,config,NumberofAccounts)
if Region == "UK" or Region == "GB":
    Start.UK(s,config,NumberofAccounts)
if Region == "CA":
    Start.CA(s, config, NumberofAccounts)
if Region == "AU":
    Start.AU(s, config, NumberofAccounts)
| {
"repo_name": "bopped/Adidas-Account-Creator",
"path": "A.A.C.py",
"copies": "1",
"size": "2292",
"license": "mit",
"hash": 5614609385439579000,
"line_mean": 28.3846153846,
"line_max": 140,
"alpha_frac": 0.612565445,
"autogenerated": false,
"ratio": 3.105691056910569,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4218256501910569,
"avg_score": null,
"num_lines": null
} |
# A DID.
import binascii
from ..utils import encode_data
from ..utils import decode_data
from ..utils import create_encode_decode_formats
class Did(object):
    """A DID (diagnostic data identifier) with identifier, name, byte length
    and the data entries it contains.
    """
    def __init__(self,
                 identifier,
                 name,
                 length,
                 datas):
        self._identifier = identifier
        self._name = name
        self._length = length
        self._datas = datas
        # Encode/decode format cache; (re)built by refresh().
        self._codec = None
        self.refresh()
    @property
    def identifier(self):
        """The did identifier as an integer.
        """
        return self._identifier
    @identifier.setter
    def identifier(self, value):
        self._identifier = value
    @property
    def name(self):
        """The did name as a string.
        """
        return self._name
    @name.setter
    def name(self, value):
        self._name = value
    @property
    def length(self):
        """The did length in number of bytes.
        """
        return self._length
    @length.setter
    def length(self, value):
        self._length = value
    @property
    def datas(self):
        """The data entries contained in this did.
        """
        return self._datas
    @datas.setter
    def datas(self, value):
        self._datas = value
    def get_data_by_name(self, name):
        # Linear search over the data entries; raises KeyError if absent.
        for data in self._datas:
            if data.name == name:
                return data
        raise KeyError(name)
    def encode(self, data, scaling=True):
        """Encode given data as a DID of this type.
        If `scaling` is ``False`` no scaling of datas is performed.
        >>> foo = db.get_did_by_name('Foo')
        >>> foo.encode({'Bar': 1, 'Fum': 5.0})
        b'\\x01\\x45\\x23\\x00\\x11'
        """
        encoded = encode_data(data,
                              self._codec['datas'],
                              self._codec['formats'],
                              scaling)
        # Set a sentinel bit just above the payload so hex() keeps the
        # payload's leading zero bytes.
        encoded |= (0x80 << (8 * self._length))
        # Strip '0x' plus the two sentinel hex digits; rstrip('L') drops the
        # long-integer suffix on Python 2.
        encoded = hex(encoded)[4:].rstrip('L')
        return binascii.unhexlify(encoded)[:self._length]
    def decode(self, data, decode_choices=True, scaling=True):
        """Decode given data as a DID of this type.
        If `decode_choices` is ``False`` scaled values are not
        converted to choice strings (if available).
        If `scaling` is ``False`` no scaling of datas is performed.
        >>> foo = db.get_did_by_name('Foo')
        >>> foo.decode(b'\\x01\\x45\\x23\\x00\\x11')
        {'Bar': 1, 'Fum': 5.0}
        """
        return decode_data(data[:self._length],
                           self._codec['datas'],
                           self._codec['formats'],
                           decode_choices,
                           scaling)
    def refresh(self):
        """Refresh the internal DID state.

        Must be called after modifying ``datas`` or ``length`` so the cached
        encode/decode formats match.
        """
        self._codec = {
            'datas': self._datas,
            'formats': create_encode_decode_formats(self._datas,
                                                    self._length)
        }
    def __repr__(self):
        return "did('{}', 0x{:04x})".format(
            self._name,
            self._identifier)
| {
"repo_name": "eerimoq/cantools",
"path": "cantools/database/diagnostics/did.py",
"copies": "1",
"size": "3191",
"license": "mit",
"hash": 6972725602514066000,
"line_mean": 22.4632352941,
"line_max": 67,
"alpha_frac": 0.4957693513,
"autogenerated": false,
"ratio": 4.341496598639456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 136
} |
"""A DirectCheckButton is a type of button that toggles between two states
when clicked. It also has a separate indicator that can be modified
separately."""
__all__ = ['DirectCheckButton']
from panda3d.core import *
from .DirectButton import *
from .DirectLabel import *
class DirectCheckButton(DirectButton):
    """
    DirectCheckButton(parent) - Create a DirectGuiWidget which responds
    to mouse clicks by setting a state of on or off and execute a callback
    function (passing that state through) if defined
    """

    def __init__(self, parent = None, **kw):
        # Inherits from DirectButton
        # A Direct Frame can have:
        # - A background texture (pass in path to image, or Texture Card)
        # - A midground geometry item (pass in geometry)
        # - A foreground text Node (pass in text string or Onscreen Text)
        # For a direct button:
        # Each button has 4 states (ready, press, rollover, disabled)
        # The same image/geom/text can be used for all four states or each
        # state can have a different text/geom/image
        # State transitions happen automatically based upon mouse interaction
        # Responds to click event and calls command if defined
        self.colors = None
        optiondefs = (
            ('indicatorValue', 0, self.setIndicatorValue),
            # boxBorder defines the space created around the check box
            ('boxBorder', 0, None),
            # boxPlacement maps left, above, right, below
            ('boxPlacement', 'left', None),
            ('boxImage', None, None),
            ('boxImageScale', 1, None),
            ('boxImageColor', None, None),
            ('boxRelief', 'sunken', None),
        )
        # Merge keyword options with default options
        self.defineoptions(kw, optiondefs)
        # Initialize superclasses
        DirectButton.__init__(self, parent)
        self.indicator = self.createcomponent("indicator", (), None,
                                              DirectLabel, (self,),
                                              numStates = 2,
                                              image = self['boxImage'],
                                              image_scale = self['boxImageScale'],
                                              image_color = self['boxImageColor'],
                                              state = 'disabled',
                                              text = ('X', 'X'),
                                              relief = self['boxRelief'],
                                              )
        # Call option initialization functions
        self.initialiseoptions(DirectCheckButton)
        # After initialization with X giving it the correct size, put back space
        # (fixed: identity comparison with None instead of '=='/'!=')
        if self['boxImage'] is None:
            self.indicator['text'] = (' ', '*')
            self.indicator['text_pos'] = (0, -.2)
        else:
            self.indicator['text'] = (' ', ' ')
        if self['boxImageColor'] is not None and self['boxImage'] is not None:
            self.colors = [VBase4(0, 0, 0, 0), self['boxImageColor']]
            self.component('indicator')['image_color'] = VBase4(0, 0, 0, 0)

    # Override the resetFrameSize of DirectGuiWidget inorder to provide space for label
    def resetFrameSize(self):
        self.setFrameSize(fClearFrame = 1)

    def setFrameSize(self, fClearFrame = 0):
        """Compute and apply the widget frame, reserving room for the
        indicator box on the side selected by ``boxPlacement``."""
        if self['frameSize']:
            # Use user specified bounds
            self.bounds = self['frameSize']
            frameType = self.frameStyle[0].getType()
            ibw = self.indicator['borderWidth']
        else:
            # Use ready state to compute bounds
            frameType = self.frameStyle[0].getType()
            if fClearFrame and (frameType != PGFrameStyle.TNone):
                self.frameStyle[0].setType(PGFrameStyle.TNone)
                self.guiItem.setFrameStyle(0, self.frameStyle[0])
                # To force an update of the button
                self.guiItem.getStateDef(0)
            # Clear out frame before computing bounds
            self.getBounds()
            # Restore frame style if necessary
            if (frameType != PGFrameStyle.TNone):
                self.frameStyle[0].setType(frameType)
                self.guiItem.setFrameStyle(0, self.frameStyle[0])
            # Ok, they didn't set specific bounds,
            # let's add room for the label indicator
            # get the difference in height
            ibw = self.indicator['borderWidth']
            indicatorWidth = (self.indicator.getWidth() + (2*ibw[0]))
            indicatorHeight = (self.indicator.getHeight() + (2*ibw[1]))
            diff = (indicatorHeight + (2*self['boxBorder']) -
                    (self.bounds[3] - self.bounds[2]))
            # If background is smaller then indicator, enlarge background
            if diff > 0:
                if self['boxPlacement'] == 'left':            #left
                    self.bounds[0] += -(indicatorWidth + (2*self['boxBorder']))
                    self.bounds[3] += diff/2
                    self.bounds[2] -= diff/2
                elif self['boxPlacement'] == 'below':          #below
                    self.bounds[2] += -(indicatorHeight+(2*self['boxBorder']))
                elif self['boxPlacement'] == 'right':          #right
                    self.bounds[1] += indicatorWidth + (2*self['boxBorder'])
                    self.bounds[3] += diff/2
                    self.bounds[2] -= diff/2
                else:                                          #above
                    self.bounds[3] += indicatorHeight + (2*self['boxBorder'])
            # Else make space on correct side for indicator
            else:
                if self['boxPlacement'] == 'left':             #left
                    self.bounds[0] += -(indicatorWidth + (2*self['boxBorder']))
                elif self['boxPlacement'] == 'below':          #below
                    self.bounds[2] += -(indicatorHeight + (2*self['boxBorder']))
                elif self['boxPlacement'] == 'right':          #right
                    self.bounds[1] += indicatorWidth + (2*self['boxBorder'])
                else:                                          #above
                    self.bounds[3] += indicatorHeight + (2*self['boxBorder'])
        # Set frame to new dimensions
        if ((frameType != PGFrameStyle.TNone) and
            (frameType != PGFrameStyle.TFlat)):
            bw = self['borderWidth']
        else:
            bw = (0, 0)
        # Set frame to new dimensions
        self.guiItem.setFrame(
            self.bounds[0] - bw[0],
            self.bounds[1] + bw[0],
            self.bounds[2] - bw[1],
            self.bounds[3] + bw[1])
        # If they didn't specify a position, put it in the center of new area
        if not self.indicator['pos']:
            bbounds = self.bounds
            lbounds = self.indicator.bounds
            newpos = [0, 0, 0]
            if self['boxPlacement'] == 'left':            #left
                newpos[0] += bbounds[0]-lbounds[0] + self['boxBorder'] + ibw[0]
                dropValue = (bbounds[3]-bbounds[2]-lbounds[3]+lbounds[2])/2 + self['boxBorder']
                newpos[2] += (bbounds[3]-lbounds[3] + self['boxBorder'] -
                              dropValue)
            elif self['boxPlacement'] == 'right':         #right
                newpos[0] += bbounds[1]-lbounds[1] - self['boxBorder'] - ibw[0]
                dropValue = (bbounds[3]-bbounds[2]-lbounds[3]+lbounds[2])/2 + self['boxBorder']
                newpos[2] += (bbounds[3]-lbounds[3] + self['boxBorder']
                              - dropValue)
            elif self['boxPlacement'] == 'above':         #above
                newpos[2] += bbounds[3]-lbounds[3] - self['boxBorder'] - ibw[1]
            else:                                         #below
                newpos[2] += bbounds[2]-lbounds[2] + self['boxBorder'] + ibw[1]
            self.indicator.setPos(newpos[0], newpos[1], newpos[2])

    def commandFunc(self, event):
        """Toggle the check state and invoke the user command with it."""
        self['indicatorValue'] = 1 - self['indicatorValue']
        if self.colors is not None:
            self.component('indicator')['image_color'] = self.colors[self['indicatorValue']]
        if self['command']:
            # Pass any extra args to command
            self['command'](*[self['indicatorValue']] + self['extraArgs'])

    def setIndicatorValue(self):
        """Reflect the current indicatorValue in the indicator component."""
        self.component('indicator').guiItem.setState(self['indicatorValue'])
        if self.colors is not None:
            self.component('indicator')['image_color'] = self.colors[self['indicatorValue']]
| {
"repo_name": "chandler14362/panda3d",
"path": "direct/src/gui/DirectCheckButton.py",
"copies": "9",
"size": "8631",
"license": "bsd-3-clause",
"hash": 5151856511611014000,
"line_mean": 45.4032258065,
"line_max": 95,
"alpha_frac": 0.5218398795,
"autogenerated": false,
"ratio": 4.332831325301205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9354671204801205,
"avg_score": null,
"num_lines": null
} |
# a direct copy of Lib/test/mapping_tests.py from Python 2.7
# tests common to dict and UserDict
import unittest
import UserDict
import _test_support as test_support
class BasicTestMappingProtocol(unittest.TestCase):
    # This base class can be used to check that an object conforms to the
    # mapping protocol
    # NOTE: Python 2 code (uses cmp, has_key, iteritems, iterator .next()).

    # Functions that can be useful to override to adapt to dictionary
    # semantics
    type2test = None # which class is being tested (overwrite in subclasses)

    def _reference(self):
        """Return a dictionary of values which are invariant by storage
        in the object under test."""
        return {1:2, "key1":"value1", "key2":(1,2,3)}
    def _empty_mapping(self):
        """Return an empty mapping object"""
        return self.type2test()
    def _full_mapping(self, data):
        """Return a mapping object with the value contained in data
        dictionary"""
        x = self._empty_mapping()
        for key, value in data.items():
            x[key] = value
        return x

    def __init__(self, *args, **kw):
        unittest.TestCase.__init__(self, *args, **kw)
        self.reference = self._reference().copy()
        # A (key, value) pair not in the mapping
        key, value = self.reference.popitem()
        self.other = {key:value}
        # A (key, value) pair in the mapping
        key, value = self.reference.popitem()
        self.inmapping = {key:value}
        self.reference[key] = value

    def test_read(self):
        # Test for read only operations on mapping
        p = self._empty_mapping()
        p1 = dict(p) #workaround for singleton objects
        d = self._full_mapping(self.reference)
        if d is p:
            p = p1
        #Indexing
        for key, value in self.reference.items():
            self.assertEqual(d[key], value)
        knownkey = self.other.keys()[0]
        self.assertRaises(KeyError, lambda:d[knownkey])
        #len
        self.assertEqual(len(p), 0)
        self.assertEqual(len(d), len(self.reference))
        #in
        for k in self.reference:
            self.assertIn(k, d)
        for k in self.other:
            self.assertNotIn(k, d)
        #has_key (Python 2 API; deprecated under -3 warnings, hence the guard)
        with test_support.check_py3k_warnings(quiet=True):
            for k in self.reference:
                self.assertTrue(d.has_key(k))
            for k in self.other:
                self.assertFalse(d.has_key(k))
        #cmp
        self.assertEqual(cmp(p,p), 0)
        self.assertEqual(cmp(d,d), 0)
        self.assertEqual(cmp(p,d), -1)
        self.assertEqual(cmp(d,p), 1)
        #__non__zero__
        if p: self.fail("Empty mapping must compare to False")
        if not d: self.fail("Full mapping must compare to True")
        # keys(), items(), iterkeys() ...
        def check_iterandlist(iter, lst, ref):
            # Each iterator must implement the py2 iterator protocol and
            # yield the same elements as the corresponding list method.
            self.assertTrue(hasattr(iter, 'next'))
            self.assertTrue(hasattr(iter, '__iter__'))
            x = list(iter)
            self.assertTrue(set(x)==set(lst)==set(ref))
        check_iterandlist(d.iterkeys(), d.keys(), self.reference.keys())
        check_iterandlist(iter(d), d.keys(), self.reference.keys())
        check_iterandlist(d.itervalues(), d.values(), self.reference.values())
        check_iterandlist(d.iteritems(), d.items(), self.reference.items())
        #get
        key, value = d.iteritems().next()
        knownkey, knownvalue = self.other.iteritems().next()
        self.assertEqual(d.get(key, knownvalue), value)
        self.assertEqual(d.get(knownkey, knownvalue), knownvalue)
        self.assertNotIn(knownkey, d)

    def test_write(self):
        # Test for write operations on mapping
        p = self._empty_mapping()
        #Indexing
        for key, value in self.reference.items():
            p[key] = value
            self.assertEqual(p[key], value)
        for key in self.reference.keys():
            del p[key]
            self.assertRaises(KeyError, lambda:p[key])
        p = self._empty_mapping()
        #update
        p.update(self.reference)
        self.assertEqual(dict(p), self.reference)
        items = p.items()
        p = self._empty_mapping()
        p.update(items)
        self.assertEqual(dict(p), self.reference)
        d = self._full_mapping(self.reference)
        #setdefault
        key, value = d.iteritems().next()
        knownkey, knownvalue = self.other.iteritems().next()
        self.assertEqual(d.setdefault(key, knownvalue), value)
        self.assertEqual(d[key], value)
        self.assertEqual(d.setdefault(knownkey, knownvalue), knownvalue)
        self.assertEqual(d[knownkey], knownvalue)
        #pop
        self.assertEqual(d.pop(knownkey), knownvalue)
        self.assertNotIn(knownkey, d)
        self.assertRaises(KeyError, d.pop, knownkey)
        default = 909
        d[knownkey] = knownvalue
        self.assertEqual(d.pop(knownkey, default), knownvalue)
        self.assertNotIn(knownkey, d)
        self.assertEqual(d.pop(knownkey, default), default)
        #popitem
        key, value = d.popitem()
        self.assertNotIn(key, d)
        self.assertEqual(value, self.reference[key])
        p=self._empty_mapping()
        self.assertRaises(KeyError, p.popitem)

    def test_constructor(self):
        self.assertEqual(self._empty_mapping(), self._empty_mapping())

    def test_bool(self):
        self.assertTrue(not self._empty_mapping())
        self.assertTrue(self.reference)
        self.assertTrue(bool(self._empty_mapping()) is False)
        self.assertTrue(bool(self.reference) is True)

    def test_keys(self):
        d = self._empty_mapping()
        self.assertEqual(d.keys(), [])
        d = self.reference
        self.assertIn(self.inmapping.keys()[0], d.keys())
        self.assertNotIn(self.other.keys()[0], d.keys())
        self.assertRaises(TypeError, d.keys, None)

    def test_values(self):
        d = self._empty_mapping()
        self.assertEqual(d.values(), [])
        self.assertRaises(TypeError, d.values, None)

    def test_items(self):
        d = self._empty_mapping()
        self.assertEqual(d.items(), [])
        self.assertRaises(TypeError, d.items, None)

    def test_len(self):
        d = self._empty_mapping()
        self.assertEqual(len(d), 0)

    def test_getitem(self):
        d = self.reference
        self.assertEqual(d[self.inmapping.keys()[0]], self.inmapping.values()[0])
        self.assertRaises(TypeError, d.__getitem__)

    def test_update(self):
        # mapping argument
        d = self._empty_mapping()
        d.update(self.other)
        self.assertEqual(d.items(), self.other.items())
        # No argument
        d = self._empty_mapping()
        d.update()
        self.assertEqual(d, self._empty_mapping())
        # item sequence
        d = self._empty_mapping()
        d.update(self.other.items())
        self.assertEqual(d.items(), self.other.items())
        # Iterator
        d = self._empty_mapping()
        d.update(self.other.iteritems())
        self.assertEqual(d.items(), self.other.items())
        # FIXME: Doesn't work with UserDict
        # self.assertRaises((TypeError, AttributeError), d.update, None)
        self.assertRaises((TypeError, AttributeError), d.update, 42)
        outerself = self
        # Minimal object implementing only keys()/__getitem__, which
        # update() must accept as a mapping.
        class SimpleUserDict:
            def __init__(self):
                self.d = outerself.reference
            def keys(self):
                return self.d.keys()
            def __getitem__(self, i):
                return self.d[i]
        d.clear()
        d.update(SimpleUserDict())
        i1 = d.items()
        i2 = self.reference.items()
        # Sort by type identity first so mixed-type keys compare safely.
        def safe_sort_key(kv):
            k, v = kv
            return id(type(k)), id(type(v)), k, v
        i1.sort(key=safe_sort_key)
        i2.sort(key=safe_sort_key)
        self.assertEqual(i1, i2)
        class Exc(Exception): pass
        d = self._empty_mapping()
        # keys() itself raises.
        class FailingUserDict:
            def keys(self):
                raise Exc
        self.assertRaises(Exc, d.update, FailingUserDict())
        d.clear()
        # The key iterator raises after yielding one key.
        class FailingUserDict:
            def keys(self):
                class BogonIter:
                    def __init__(self):
                        self.i = 1
                    def __iter__(self):
                        return self
                    def next(self):
                        if self.i:
                            self.i = 0
                            return 'a'
                        raise Exc
                return BogonIter()
            def __getitem__(self, key):
                return key
        self.assertRaises(Exc, d.update, FailingUserDict())
        # __getitem__ raises while keys iterate normally.
        class FailingUserDict:
            def keys(self):
                class BogonIter:
                    def __init__(self):
                        self.i = ord('a')
                    def __iter__(self):
                        return self
                    def next(self):
                        if self.i <= ord('z'):
                            rtn = chr(self.i)
                            self.i += 1
                            return rtn
                        raise StopIteration
                return BogonIter()
            def __getitem__(self, key):
                raise Exc
        self.assertRaises(Exc, d.update, FailingUserDict())
        d = self._empty_mapping()
        class badseq(object):
            def __iter__(self):
                return self
            def next(self):
                raise Exc()
        self.assertRaises(Exc, d.update, badseq())
        self.assertRaises(ValueError, d.update, [(1, 2, 3)])

    # no test_fromkeys or test_copy as both os.environ and selves don't support it

    def test_get(self):
        d = self._empty_mapping()
        self.assertTrue(d.get(self.other.keys()[0]) is None)
        self.assertEqual(d.get(self.other.keys()[0], 3), 3)
        d = self.reference
        self.assertTrue(d.get(self.other.keys()[0]) is None)
        self.assertEqual(d.get(self.other.keys()[0], 3), 3)
        self.assertEqual(d.get(self.inmapping.keys()[0]), self.inmapping.values()[0])
        self.assertEqual(d.get(self.inmapping.keys()[0], 3), self.inmapping.values()[0])
        self.assertRaises(TypeError, d.get)
        self.assertRaises(TypeError, d.get, None, None, None)

    def test_setdefault(self):
        d = self._empty_mapping()
        self.assertRaises(TypeError, d.setdefault)

    def test_popitem(self):
        d = self._empty_mapping()
        self.assertRaises(KeyError, d.popitem)
        self.assertRaises(TypeError, d.popitem, 42)

    def test_pop(self):
        d = self._empty_mapping()
        k, v = self.inmapping.items()[0]
        d[k] = v
        self.assertRaises(KeyError, d.pop, self.other.keys()[0])
        self.assertEqual(d.pop(k), v)
        self.assertEqual(len(d), 0)
        self.assertRaises(KeyError, d.pop, k)
| {
"repo_name": "shoyer/cyordereddict",
"path": "python2/cyordereddict/test/_mapping_tests.py",
"copies": "1",
"size": "10849",
"license": "mit",
"hash": -3349153126957979000,
"line_mean": 33.9967741935,
"line_max": 88,
"alpha_frac": 0.5581159554,
"autogenerated": false,
"ratio": 3.96672760511883,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.502484356051883,
"avg_score": null,
"num_lines": null
} |
'''A directed graph class.'''
class Digraph:
    '''A directed graph class.'''

    # Fundamental methods

    def __init__(self, vertices=None, edges=None):
        '''Initialize the graph with a container of vertices and/or a container
        of edges, where edges are tuples containing a start node and end node.
        '''
        # Edges are stored as a dictionary mapping vertices to sets of vertices
        # they connect to.
        self._edges = {}
        # Add vertices, then edges
        if vertices is not None: self.add_vertices_from(vertices)
        if edges is not None: self.add_edges_from(edges)

    def clear(self):
        '''Remove all vertices and edges from the graph.'''
        self._edges = {}

    # Addition methods

    def add_edge(self, s, t):
        '''Add an edge to the graph. Implicitly add vertices not already in the
        graph.'''
        self.add_vertex(s)
        self.add_vertex(t)
        self._edges[s].add(t)

    def add_edges_from(self, container):
        '''Add edges to the graph from a container of 2-tuples, where edges are
        specified with a start vertex and end vertex. Implicitly add new
        vertices.'''
        # Assert that all edges are 2-tuples before mutating anything
        for e in container:
            assert len(e) == 2
        # Add edges
        for e in container:
            self.add_edge(*e)

    def add_vertex(self, vertex):
        '''Add a new vertex to the graph or do nothing if it is already
        present.'''
        if vertex not in self._edges:
            self._edges[vertex] = set()

    def add_vertices_from(self, container):
        '''Add vertices from a container to the graph.'''
        for v in container:
            self.add_vertex(v)

    # Removal methods

    def remove_edge(self, s, t):
        '''Remove the pre-existing edge from vertex s to t from the graph.'''
        assert self.has_edge(s, t)
        self._edges[s].remove(t)

    def remove_edges_from(self, container):
        '''Remove all edges specified in a container from the graph, where edges
        are 2-tuples of vertices.'''
        for e in container:
            assert len(e) == 2
        for s, t in container:
            self.remove_edge(s, t)

    def remove_vertex(self, vertex):
        '''Remove a pre-existing vertex from the graph and all of its edges.'''
        assert self.has_vertex(vertex)
        del self._edges[vertex]
        # BUGFIX: this previously iterated iteritems(), binding (key, set)
        # tuples to dest_set, so the membership test was wrong and
        # tuple.remove() would have raised AttributeError.
        for dest_set in self._edges.values():
            if vertex in dest_set:
                dest_set.remove(vertex)

    def remove_vertices_from(self, vertices):
        '''Remove all vertices from the graph specified in a container and all
        of their edges.'''
        for v in vertices:
            assert self.has_vertex(v)
        for v in vertices:
            self.remove_vertex(v)

    def successors(self, vertex):
        '''Return a list of the vertices a vertex has outgoing edges to.'''
        assert self.has_vertex(vertex)
        return list(self._edges[vertex])

    def predecessors(self, vertex):
        '''Return a list of the vertices a vertex has incoming edges from.'''
        assert self.has_vertex(vertex)
        return [s for (s, dest_set) in self._edges.items() if vertex in dest_set]

    def edges(self):
        '''Return the edges in the graph as a set of 2-tuples, where each
        tuple consists of the start vertex and the end vertex.'''
        return set((s, t) for (s, dest) in self._edges.items() for t in dest)

    def has_edge(self, s, t):
        '''Return a boolean value indicating whether the graph connects vertex
        s to t.'''
        return s in self._edges and t in self._edges[s]

    def has_vertex(self, vertex):
        '''Return a boolean value indicating whether the graph contains a
        certain vertex.'''
        return vertex in self._edges

    def vertices(self):
        '''Return a list of the vertices in the graph.'''
        # list() so the documented return type holds on Python 3 as well.
        return list(self._edges.keys())

    def cyclic(self):
        '''Return whether the graph contains a cycle.'''
        return is_cyclic_multi(self._edges, lambda x: self._edges[x])

    def __str__(self):
        # BUGFIX: vertices/edges are methods and were previously formatted
        # without being called, printing bound-method reprs.
        return 'vertices = %s\nedges = %s' % (self.vertices(), self.edges())
# DFS node colors: _VISITING = on the current stack, _VISITED = fully explored.
_VISITING, _VISITED = 0, 1

def is_cyclic(root, successor_func):
    '''Determine whether a graph is cyclic. The graph is defined by a starting
    node and a successor function which generates the child nodes of a node in
    the graph. The nodes must be hashable.'''
    state = {root: _VISITING}
    def dfs(node):
        for child in successor_func(node):
            if child in state:
                # Back edge to a node on the stack means a cycle;
                # a fully-explored node can be skipped.
                if state[child] == _VISITING:
                    return True
                continue
            state[child] = _VISITING
            if dfs(child):
                return True
            state[child] = _VISITED
        return False
    return dfs(root)
def is_cyclic_multi(roots, successor_func):
    '''Determine whether a graph is cyclic, given some subset of its nodes
    which determine the starting points of the graph traversal.'''
    state = {}
    def dfs(frontier):
        for node in frontier:
            if node in state:
                # On-stack node => cycle; finished node => skip.
                if state[node] == _VISITING:
                    return True
                continue
            state[node] = _VISITING
            if dfs(successor_func(node)):
                return True
            state[node] = _VISITED
        return False
    return dfs(roots)
| {
"repo_name": "bdusell/pycfg",
"path": "src/util/digraph.py",
"copies": "1",
"size": "5530",
"license": "mit",
"hash": -9150266484362252000,
"line_mean": 34,
"line_max": 85,
"alpha_frac": 0.587522604,
"autogenerated": false,
"ratio": 4.26040061633282,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.534792322033282,
"avg_score": null,
"num_lines": null
} |
"""a directed graph example."""
from sqlalchemy import Column, Integer, ForeignKey, \
create_engine
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Node(Base):
    """A graph vertex backed by the ``node`` table."""

    __tablename__ = 'node'
    node_id = Column(Integer, primary_key=True)

    def higher_neighbors(self):
        """Nodes this node has an outgoing edge to."""
        return [edge.higher_node for edge in self.lower_edges]

    def lower_neighbors(self):
        """Nodes that have an edge pointing at this node."""
        return [edge.lower_node for edge in self.higher_edges]
class Edge(Base):
    """Association row linking a lower node to a higher node."""

    __tablename__ = 'edge'

    lower_id = Column(Integer, ForeignKey('node.node_id'), primary_key=True)
    higher_id = Column(Integer, ForeignKey('node.node_id'), primary_key=True)

    lower_node = relationship(
        Node, primaryjoin=lower_id == Node.node_id, backref='lower_edges')
    higher_node = relationship(
        Node, primaryjoin=higher_id == Node.node_id, backref='higher_edges')

    def __init__(self, n1, n2):
        """Create a directed edge from ``n1`` to ``n2``."""
        self.lower_node = n1
        self.higher_node = n2
# Demo: build the schema in an in-memory SQLite DB and exercise the graph.
engine = create_engine('sqlite://', echo=True)
Base.metadata.create_all(engine)
session = sessionmaker(engine)()

# create a directed graph like this:
#   n1 -> n2 -> n1
#            -> n5
#            -> n7
#   n1 -> n3 -> n6
n1 = Node()
n2 = Node()
n3 = Node()
n4 = Node()
n5 = Node()
n6 = Node()
n7 = Node()
Edge(n1, n2)
Edge(n1, n3)
Edge(n2, n1)
Edge(n2, n5)
Edge(n2, n7)
Edge(n3, n6)
session.add_all([n1, n2, n3, n4, n5, n6, n7])
session.commit()

# Sanity-check traversal via the relationship backrefs.
# NOTE(review): the list comparisons assume the backref collections keep
# creation order — confirm before relying on exact ordering.
assert [x for x in n3.higher_neighbors()] == [n6]
assert [x for x in n3.lower_neighbors()] == [n1]
assert [x for x in n2.lower_neighbors()] == [n1]
assert [x for x in n2.higher_neighbors()] == [n1, n5, n7]
| {
"repo_name": "bdupharm/sqlalchemy",
"path": "examples/graphs/directed_graph.py",
"copies": "4",
"size": "1793",
"license": "mit",
"hash": -7305804733257350000,
"line_mean": 20.6024096386,
"line_max": 57,
"alpha_frac": 0.6006692694,
"autogenerated": false,
"ratio": 3.064957264957265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5665626534357266,
"avg_score": null,
"num_lines": null
} |
"""a directed graph example."""
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class Node(Base):
    """A graph vertex backed by the ``node`` table."""

    __tablename__ = "node"
    node_id = Column(Integer, primary_key=True)

    def higher_neighbors(self):
        """Nodes this node has an outgoing edge to."""
        return [edge.higher_node for edge in self.lower_edges]

    def lower_neighbors(self):
        """Nodes that have an edge pointing at this node."""
        return [edge.lower_node for edge in self.higher_edges]
class Edge(Base):
    """Association row linking a lower node to a higher node."""

    __tablename__ = "edge"

    lower_id = Column(Integer, ForeignKey("node.node_id"), primary_key=True)
    higher_id = Column(Integer, ForeignKey("node.node_id"), primary_key=True)

    lower_node = relationship(
        Node, primaryjoin=lower_id == Node.node_id, backref="lower_edges"
    )
    higher_node = relationship(
        Node, primaryjoin=higher_id == Node.node_id, backref="higher_edges"
    )

    def __init__(self, n1, n2):
        """Create a directed edge from ``n1`` to ``n2``."""
        self.lower_node = n1
        self.higher_node = n2
# Demo: build the schema in an in-memory SQLite DB and exercise the graph.
engine = create_engine("sqlite://", echo=True)
Base.metadata.create_all(engine)
session = sessionmaker(engine)()

# create a directed graph like this:
#   n1 -> n2 -> n1
#            -> n5
#            -> n7
#   n1 -> n3 -> n6
n1 = Node()
n2 = Node()
n3 = Node()
n4 = Node()
n5 = Node()
n6 = Node()
n7 = Node()
Edge(n1, n2)
Edge(n1, n3)
Edge(n2, n1)
Edge(n2, n5)
Edge(n2, n7)
Edge(n3, n6)
session.add_all([n1, n2, n3, n4, n5, n6, n7])
session.commit()

# Sanity-check traversal via the relationship backrefs.
# NOTE(review): the list comparisons assume the backref collections keep
# creation order — confirm before relying on exact ordering.
assert [x for x in n3.higher_neighbors()] == [n6]
assert [x for x in n3.lower_neighbors()] == [n1]
assert [x for x in n2.lower_neighbors()] == [n1]
assert [x for x in n2.higher_neighbors()] == [n1, n5, n7]
| {
"repo_name": "sqlalchemy/sqlalchemy",
"path": "examples/graphs/directed_graph.py",
"copies": "7",
"size": "1808",
"license": "mit",
"hash": -6846146348185082000,
"line_mean": 21.8860759494,
"line_max": 77,
"alpha_frac": 0.6415929204,
"autogenerated": false,
"ratio": 3.0592216582064298,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 79
} |
"""a directed graph example."""
from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey, \
create_engine
from sqlalchemy.orm import mapper, relationship, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Node(Base):
    """A graph vertex; node ids are assigned explicitly by the caller."""

    __tablename__ = 'node'
    node_id = Column(Integer, primary_key=True)

    def __init__(self, id):
        self.node_id = id

    def add_neighbors(self, *nodes):
        """Link this node with each given node; returns self for chaining."""
        for other in nodes:
            Edge(self, other)
        return self

    def higher_neighbors(self):
        """Neighbors stored on the higher side of connecting edges."""
        return [edge.higher_node for edge in self.lower_edges]

    def lower_neighbors(self):
        """Neighbors stored on the lower side of connecting edges."""
        return [edge.lower_node for edge in self.higher_edges]
class Edge(Base):
    """Association row; stored so that lower.node_id <= higher.node_id."""

    __tablename__ = 'edge'

    lower_id = Column(Integer, ForeignKey('node.node_id'), primary_key=True)
    higher_id = Column(Integer, ForeignKey('node.node_id'), primary_key=True)

    lower_node = relationship(
        Node, primaryjoin=lower_id == Node.node_id, backref='lower_edges')
    higher_node = relationship(
        Node, primaryjoin=higher_id == Node.node_id, backref='higher_edges')

    def __init__(self, n1, n2):
        # Normalize orientation: the node with the smaller id is lower_node.
        if n1.node_id < n2.node_id:
            self.lower_node = n1
            self.higher_node = n2
        else:
            self.lower_node = n2
            self.higher_node = n1
# Demo: build the schema in an in-memory SQLite DB and exercise the graph.
engine = create_engine('sqlite://', echo=True)
Base.metadata.create_all(engine)
session = sessionmaker(engine)()

# create a directed graph like this:
#   n1 -> n2 -> n5
#            -> n7
#   n1 -> n3 -> n6
n1 = Node(1)
n2 = Node(2)
n3 = Node(3)
n4 = Node(4)
n5 = Node(5)
n6 = Node(6)
n7 = Node(7)
n2.add_neighbors(n5, n1)
n3.add_neighbors(n6)
n7.add_neighbors(n2)
n1.add_neighbors(n3)
session.add_all([n1, n2, n3, n4, n5, n6, n7])
session.commit()

# Sanity-check traversal; edges are normalized to lower-id -> higher-id.
assert [x.node_id for x in n3.higher_neighbors()] == [6]
assert [x.node_id for x in n3.lower_neighbors()] == [1]
assert [x.node_id for x in n2.lower_neighbors()] == [1]
assert [x.node_id for x in n2.higher_neighbors()] == [5,7]
| {
"repo_name": "EvaSDK/sqlalchemy",
"path": "examples/graphs/directed_graph.py",
"copies": "30",
"size": "2297",
"license": "mit",
"hash": 1464621714463279600,
"line_mean": 25.4022988506,
"line_max": 70,
"alpha_frac": 0.5690030475,
"autogenerated": false,
"ratio": 3.3145743145743145,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""a directed graph example."""
from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey
from sqlalchemy.orm import mapper, relationship, create_session
import logging
# Echo generated SQL at INFO level for demonstration purposes.
logging.basicConfig()
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)

# In-memory SQLite database bound to the metadata (classic, pre-1.4 style).
meta = MetaData('sqlite://')

nodes = Table('nodes', meta,
    Column("nodeid", Integer, primary_key=True)
)

# here we have lower.nodeid <= higher.nodeid
edges = Table('edges', meta,
    Column("lower_id", Integer, ForeignKey('nodes.nodeid'), primary_key=True),
    Column("higher_id", Integer, ForeignKey('nodes.nodeid'), primary_key=True)
)

meta.create_all()
class Node(object):
    """A vertex identified by ``nodeid``.

    The ``lower_edges``/``higher_edges`` collections are populated by the
    ORM backrefs configured in the mapper below.
    """

    def __init__(self, id):
        self.nodeid = id

    def add_neighbor(self, othernode):
        """Connect this node to ``othernode`` by creating an Edge."""
        Edge(self, othernode)

    def higher_neighbors(self):
        """Neighbors reached through edges where this node is the lower end."""
        return [edge.higher_node for edge in self.lower_edges]

    def lower_neighbors(self):
        """Neighbors reached through edges where this node is the higher end."""
        return [edge.lower_node for edge in self.higher_edges]
class Edge(object):
    """An undirected link stored with the smaller nodeid on the lower side."""

    def __init__(self, n1, n2):
        # Normalize so that lower_node.nodeid <= higher_node.nodeid.
        if n1.nodeid < n2.nodeid:
            self.lower_node, self.higher_node = n1, n2
        else:
            self.lower_node, self.higher_node = n2, n1
# Classic (imperative) mappings: wire the plain classes to the tables.
mapper(Node, nodes)
mapper(Edge, edges, properties={
    'lower_node':relationship(Node,
        primaryjoin=edges.c.lower_id==nodes.c.nodeid, backref='lower_edges'),
    'higher_node':relationship(Node,
        primaryjoin=edges.c.higher_id==nodes.c.nodeid, backref='higher_edges')
    }
)
session = create_session()

# create a directed graph like this:
#   n1 -> n2 -> n5
#            -> n7
#   n1 -> n3 -> n6
n1 = Node(1)
n2 = Node(2)
n3 = Node(3)
n4 = Node(4)
n5 = Node(5)
n6 = Node(6)
n7 = Node(7)
n2.add_neighbor(n5)
n3.add_neighbor(n6)
n7.add_neighbor(n2)
n1.add_neighbor(n3)
n2.add_neighbor(n1)
[session.add(x) for x in [n1, n2, n3, n4, n5, n6, n7]]
session.flush()

# Drop all in-memory state so the queries below reload from the database.
session.expunge_all()
n2 = session.query(Node).get(2)
n3 = session.query(Node).get(3)
assert [x.nodeid for x in n3.higher_neighbors()] == [6]
assert [x.nodeid for x in n3.lower_neighbors()] == [1]
assert [x.nodeid for x in n2.lower_neighbors()] == [1]
assert [x.nodeid for x in n2.higher_neighbors()] == [5,7]
| {
"repo_name": "simplegeo/sqlalchemy",
"path": "examples/graphs/directed_graph.py",
"copies": "1",
"size": "2192",
"license": "mit",
"hash": -6971476018967888000,
"line_mean": 24.488372093,
"line_max": 78,
"alpha_frac": 0.6423357664,
"autogenerated": false,
"ratio": 2.990450204638472,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4132785971038472,
"avg_score": null,
"num_lines": null
} |
"""A directed graph."""
import collections
import copy
import numpy as np
import pygraphviz as pgz
class Graph(object):
"""A labeled, unweighted directed graph."""
    def __init__(self):
        """Create an empty graph."""
        self.nodes = []                                  # node labels, indexed by node index
        self.edges = []                                  # triples (i, j, label)
        self.label2index = collections.defaultdict(set)  # label -> set of node indices
        self.out_edges = collections.defaultdict(set)    # i -> set of j with an i->j edge
        self.in_edges = collections.defaultdict(set)     # j -> set of i with an i->j edge
        self.edge_to_label = {}                          # (i, j) -> edge label
        self.conn_comps = []                             # Connected components (list of index sets)
@classmethod
def make_chain(cls, nodes):
"""Make a chain-structured graph from the list of nodes."""
g = cls()
for n in nodes:
g.add_node(n)
for i in range(len(nodes) - 1):
g.add_edge(i, i+1)
return g
@classmethod
def from_string(cls, s):
"""Load a Graph from a string generated by make_string()"""
g = cls()
toks = s.split(' ')
nodes = toks[:-1]
edges = [x.split(',') for x in toks[-1].split(';')]
for n in nodes:
g.add_node(n)
for e in edges:
e_new = [int(e[0]), int(e[1])] + e[2:]
g.add_edge(*e_new)
return g
def make_string(self):
"""Serialize the graph as a string."""
edge_str = ';'.join('%d,%d,%s' % (i, j, lab) for i, j, lab in self.edges)
return '%s %s' % (' '.join(self.nodes), edge_str)
    def get_num_nodes(self):
        """Return the number of nodes in the graph."""
        return len(self.nodes)
    def get_num_edges(self):
        """Return the number of edges in the graph."""
        return len(self.edges)
def add_node(self, node_label):
new_index = len(self.nodes)
self.nodes.append(node_label)
self.label2index[node_label].add(new_index)
self.conn_comps.append(set([new_index]))
def check_index_in_range(self, ind):
if ind < 0 or ind >= len(self.nodes):
raise ValueError('Index %d not in range (len(nodes) == %d)' % (
ind, len(self.nodes)))
    def add_edge(self, start, end, label='_'):
        """Add a labeled edge start->end.

        Both indices must already exist; duplicate (start, end) edges are
        rejected. Merges the two endpoints' connected components.
        """
        self.check_index_in_range(start)
        self.check_index_in_range(end)
        if (start, end) in self.edge_to_label:
            raise ValueError('Edge between %d and %d already exists' % (start, end))
        self.edges.append((start, end, label))
        self.out_edges[start].add(end)
        self.in_edges[end].add(start)
        self.edge_to_label[(start, end)] = label
        # Union the endpoints' connected components (undirected notion).
        ind_start = self.find_conn_comp(start)
        ind_end = self.find_conn_comp(end)
        if ind_start != ind_end:
            self.conn_comps[ind_start] |= self.conn_comps[ind_end]
            self.conn_comps.pop(ind_end)
def add_graph(self, other):
base_index = len(self.nodes)
for label in other.nodes:
self.add_node(label)
for i, j, label in other.edges:
self.add_edge(base_index + i, base_index + j, label)
def find_conn_comp(self, index):
self.check_index_in_range(index)
for i, cc in enumerate(self.conn_comps):
if index in cc: return i
raise ValueError('Connected components missing node index %d' % index)
def has_edge(self, start, end, label=None):
    """Return whether a start->end edge exists (matching label, if given)."""
    if end in self.out_edges[start]:
        # A falsy label means "any label matches".
        return (not label) or self.edge_to_label[(start, end)] == label
    return False

def has_undirected_edge(self, start, end, label=None):
    """Return whether an edge exists in either direction."""
    return self.has_edge(start, end, label) or self.has_edge(end, start, label)
def get_adjacency_matrix(self):
    """Get a matrix where mat[i,j] == 1 iff there is an i->j edge."""
    num = len(self.nodes)
    mat = np.zeros((num, num), dtype=np.int64)
    for src, dst, _unused_label in self.edges:
        mat[src, dst] = 1
    return mat
def toposort(self, start_at_sink=False):
    """Return a topological sort of the nodes.

    Finds a permutation of range(len(self.nodes)) such that every node's
    parents appear before it, i.e. the order runs from sources to sinks.

    Args:
      start_at_sink: if True, reverse the order (sinks first).
    Returns:
      A topological ordering of node indices, or None if the graph
      contains a cycle (is not a DAG).
    """
    total = len(self.nodes)
    remaining_parents = [len(self.in_edges[i]) for i in range(total)]
    frontier = [i for i in range(total) if remaining_parents[i] == 0]
    order = []
    while len(order) < total:
        if not frontier:
            return None  # a cycle prevents any topological order
        node = frontier.pop()
        order.append(node)
        # Removing `node` may turn some of its children into sources.
        for child in self.out_edges[node]:
            remaining_parents[child] -= 1
            if remaining_parents[child] == 0:
                frontier.append(child)
    return order[::-1] if start_at_sink else order
def is_connected(self):
    """Return True iff the graph has at most one connected component."""
    return len(self.conn_comps) <= 1

def __str__(self):
    nodes_repr = ','.join(self.nodes)
    edges_repr = ';'.join(
        '(%s)' % ','.join(str(part) for part in edge) for edge in self.edges)
    return '(%s, nodes=[%s], edges=[%s])' % (
        self.__class__, nodes_repr, edges_repr)
def to_agraph(self, id_prefix=''):
    """Return a pygraphviz AGraph representation of the graph."""
    def make_id(suffix):
        # Prefixing ids lets several rendered graphs coexist on one page.
        return '%s-%s' % (id_prefix, suffix) if id_prefix else suffix
    agraph = pgz.AGraph(directed=True)
    for node_index, node_label in enumerate(self.nodes):
        agraph.add_node(node_index, label=node_label,
                        id=make_id('node%d' % node_index))
    for edge_index, (src, dst, edge_label) in enumerate(self.edges):
        agraph.add_edge(src, dst, label=edge_label,
                        id=make_id('edge%d' % edge_index))
    return agraph
def draw_svg(self, id_prefix='', filename=None, horizontal=False):
    """Render the graph as SVG, to `filename` if given, else as a string."""
    agraph = self.to_agraph(id_prefix=id_prefix)
    layout_args = '-Grankdir=LR' if horizontal else ''
    agraph.layout('dot', args=layout_args)
    if filename:
        # Write to file; nothing is returned in this mode.
        agraph.draw(filename)
        return None
    # Write to string; strip everything before the <svg> element (header).
    rendered = agraph.draw(format='svg')
    return rendered[rendered.index('<svg'):]
class Subgraph(Graph):
    """A subgraph of a parent graph.
    The subgraph is constructed incrementally, and at each stage,
    it ensures that the current operation (adding a node or adding an edge)
    maintains the property that there is some injection from the
    subgraph's nodes to the parent graph's nodes that makes it a subgraph.
    """
    def __init__(self, parent_graph):
        super(Subgraph, self).__init__()
        self.parent_graph = parent_graph
        self.funcs = [{}] # All consistent maps from self.nodes to self.parent_graph.nodes
        # Multiset of parent labels not yet claimed by this subgraph.
        self.counts_left = collections.Counter(parent_graph.nodes)

    def add_node(self, node_label):
        """Add a node, extending every surviving injection with each
        parent node that has the right label and is still unused."""
        if not self.can_add_node(node_label):
            raise ValueError('Cannot add node "%s" to subgraph' % node_label)
        super(Subgraph, self).add_node(node_label)
        self.counts_left[node_label] -= 1
        i = len(self.nodes) - 1
        new_funcs = []
        for func in self.funcs:
            used_vals = set(func.values())
            free_vals = self.parent_graph.label2index[node_label] - used_vals
            for j in free_vals:
                new_func = func.copy()
                new_func[i] = j
                new_funcs.append(new_func)
        self.funcs = new_funcs

    def add_edge(self, start, end, label=None):
        """Add an edge, discarding injections the parent graph refutes.

        NOTE(review): the default label here is None, while Graph.add_edge
        defaults to '_' -- confirm callers never rely on the '_' default.
        """
        if not self.can_add_edge(start, end, label):
            raise ValueError('Cannot add edge (%d, %d) to subgraph' % (start, end))
        super(Subgraph, self).add_edge(start, end, label)
        self.funcs = [func for func in self.funcs
                      if self.parent_graph.has_edge(func[start], func[end])]

    def add_graph(self, other):
        if not self.can_add_graph(other):
            raise ValueError('Cannot add graph %s to subgraph' % other)
        super(Subgraph, self).add_graph(other)
        # add_graph() calls add_node() and add_edge()
        # which will update funcs as appropriate.

    def can_add_node(self, node_label):
        # True while the parent still has an unclaimed node with this label.
        return self.counts_left[node_label] > 0

    def can_add_edge(self, start, end, label):
        # True if at least one surviving injection maps (start, end) onto a
        # parent edge (matching the given label, if any).
        for func in self.funcs:
            if self.parent_graph.has_edge(func[start], func[end], label):
                return True
        return False

    def can_add_graph(self, other):
        """Return whether all of other's nodes and edges could be added,
        by trial on a deep copy of this subgraph."""
        base_index = len(self.nodes)
        # NOTE(review): `func` is never used inside the loop body, so each
        # iteration repeats the identical trial -- confirm whether a single
        # trial was intended.
        for func in self.funcs:
            success = True
            g = copy.deepcopy(self) # Need to deepcopy since we need to mutate
            for label in other.nodes:
                if g.can_add_node(label):
                    g.add_node(label)
                else:
                    success = False
                    break
            if not success: continue
            for i, j, label in other.edges:
                if g.can_add_edge(base_index + i, base_index + j, label):
                    g.add_edge(base_index + i, base_index + j, label)
                else:
                    success = False
                    break
            if not success: continue
            if success: return True
        return False

    def is_finished(self):
        # Finished when node and edge counts match the parent exactly.
        return (len(self.nodes) == len(self.parent_graph.nodes) and
                len(self.edges) == len(self.parent_graph.edges))

    def get_valid_new_nodes(self):
        """Get a list of all node labels that can be added."""
        return list(x for x in self.counts_left if self.counts_left[x] > 0)
| {
"repo_name": "robinjia/nectar",
"path": "nectar/base/graph.py",
"copies": "1",
"size": "8851",
"license": "mit",
"hash": -519163522042470850,
"line_mean": 33.9841897233,
"line_max": 87,
"alpha_frac": 0.6247881595,
"autogenerated": false,
"ratio": 3.285449146250928,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4410237305750928,
"avg_score": null,
"num_lines": null
} |
"""A DirectFrame is a basic DirectGUI component that acts as the base
class for various other components, and can also serve as a basic
container to hold other DirectGUI components.
A DirectFrame can have:
* A background texture (pass in path to image, or Texture Card)
* A midground geometry item (pass in geometry)
* A foreground text Node (pass in text string or OnscreenText)
Each of these has 1 or more states. The same object can be used for
all states or each state can have a different text/geom/image (for
radio button and check button indicators, for example).
"""
__all__ = ['DirectFrame']
from panda3d.core import *
from . import DirectGuiGlobals as DGG
from .DirectGuiBase import *
from .OnscreenImage import OnscreenImage
from .OnscreenGeom import OnscreenGeom
import sys

# Python 2/3 compatibility: pick the base string type used for
# isinstance checks below (py3 has no basestring).
if sys.version_info >= (3, 0):
    stringType = str
else:
    stringType = basestring
class DirectFrame(DirectGuiWidget):
    """A basic DirectGUI component that serves as the base class for most
    other DirectGUI widgets, and as a plain container.

    A DirectFrame can have:

    * A background texture or image ('image' option)
    * A midground geometry item ('geom' option)
    * A foreground text node ('text' option)

    Each of these may be a single item shared by all of the widget's
    states, or a sequence supplying one item per state (see 'numStates').
    """

    DefDynGroups = ('text', 'geom', 'image')

    def __init__(self, parent = None, **kw):
        # Inherits from DirectGuiWidget
        optiondefs = (
            # Define type of DirectGuiWidget
            ('pgFunc', PGItem, None),
            ('numStates', 1, None),
            ('state', self.inactiveInitState, None),
            # Frame can have:
            # A background texture
            ('image', None, self.setImage),
            # A midground geometry item
            ('geom', None, self.setGeom),
            # A foreground text node
            ('text', None, self.setText),
            # Change default value of text mayChange flag from 0
            # (OnscreenText.py) to 1
            ('textMayChange', 1, None),
            )
        # Merge keyword options with default options
        self.defineoptions(kw, optiondefs,
                           dynamicGroups = DirectFrame.DefDynGroups)
        # Initialize superclasses
        DirectGuiWidget.__init__(self, parent)
        # Call option initialization functions
        self.initialiseoptions(DirectFrame)

    def destroy(self):
        DirectGuiWidget.destroy(self)

    def clearText(self):
        """Remove the text from all of the frame's states."""
        self['text'] = None
        self.setText()

    def setText(self, text=None):
        """Create, update or destroy the per-state text components.

        If `text` is given it replaces the 'text' option first.  A single
        string is replicated across all states; a sequence supplies one
        entry per state (the last entry is repeated if it is too short).
        """
        if text is not None:
            self['text'] = text
        # Determine if user passed in single string or a sequence
        if self['text'] is None:
            textList = (None,) * self['numStates']
        elif isinstance(self['text'], stringType):
            # If just passing in a single string, make a tuple out of it
            textList = (self['text'],) * self['numStates']
        else:
            # Otherwise, hope that the user has passed in a tuple/list
            textList = self['text']
        # Create/destroy components
        for i in range(self['numStates']):
            component = 'text' + repr(i)
            # If fewer items specified than numStates,
            # just repeat last item
            try:
                text = textList[i]
            except IndexError:
                text = textList[-1]
            if self.hascomponent(component):
                if text is None:
                    # Destroy component
                    self.destroycomponent(component)
                else:
                    self[component + '_text'] = text
            else:
                if text is None:
                    return
                else:
                    from .OnscreenText import OnscreenText
                    self.createcomponent(
                        component, (), 'text',
                        OnscreenText,
                        (), parent = self.stateNodePath[i],
                        text = text, scale = 1, mayChange = self['textMayChange'],
                        sort = DGG.TEXT_SORT_INDEX,
                    )

    def clearGeom(self):
        """Remove the geometry from all of the frame's states."""
        self['geom'] = None
        self.setGeom()

    def setGeom(self, geom=None):
        """Create, update or destroy the per-state geometry components.

        A single NodePath or model path string is replicated across all
        states; a sequence supplies one item per state (the last entry is
        repeated if the sequence is shorter than numStates).
        """
        if geom is not None:
            self['geom'] = geom
        # Determine argument type
        geom = self['geom']
        if geom is None:
            # Passed in None
            geomList = (None,) * self['numStates']
        elif isinstance(geom, (NodePath, stringType)):
            # Passed in a single node path, make a tuple out of it
            geomList = (geom,) * self['numStates']
        else:
            # Otherwise, hope that the user has passed in a tuple/list
            geomList = geom
        # Create/destroy components
        for i in range(self['numStates']):
            component = 'geom' + repr(i)
            # If fewer items specified than numStates,
            # just repeat last item
            try:
                geom = geomList[i]
            except IndexError:
                geom = geomList[-1]
            if self.hascomponent(component):
                if geom is None:
                    # Destroy component
                    self.destroycomponent(component)
                else:
                    self[component + '_geom'] = geom
            else:
                if geom is None:
                    return
                else:
                    self.createcomponent(
                        component, (), 'geom',
                        OnscreenGeom,
                        (), parent = self.stateNodePath[i],
                        geom = geom, scale = 1,
                        sort = DGG.GEOM_SORT_INDEX)

    def clearImage(self):
        """Remove the image from all of the frame's states."""
        self['image'] = None
        self.setImage()

    def setImage(self, image=None):
        """Create, update or destroy the per-state image components.

        Accepts a NodePath, a Texture, an image path string, a
        (model, node) pair of strings, or a sequence of any of those with
        one entry per state (the last entry is repeated if too short).
        """
        if image is not None:
            self['image'] = image
        # Determine argument type
        arg = self['image']
        if arg is None:
            # Passed in None
            imageList = (None,) * self['numStates']
        elif isinstance(arg, (NodePath, Texture, stringType)):
            # Passed in a single node path, make a tuple out of it
            imageList = (arg,) * self['numStates']
        else:
            # Otherwise, hope that the user has passed in a tuple/list
            if ((len(arg) == 2) and
                isinstance(arg[0], stringType) and
                isinstance(arg[1], stringType)):
                # Its a model/node pair of strings
                imageList = (arg,) * self['numStates']
            else:
                # Assume its a list of node paths
                imageList = arg
        # Create/destroy components
        for i in range(self['numStates']):
            component = 'image' + repr(i)
            # If fewer items specified than numStates,
            # just repeat last item
            try:
                image = imageList[i]
            except IndexError:
                image = imageList[-1]
            if self.hascomponent(component):
                if image is None:
                    # Destroy component
                    self.destroycomponent(component)
                else:
                    self[component + '_image'] = image
            else:
                if image is None:
                    return
                else:
                    self.createcomponent(
                        component, (), 'image',
                        OnscreenImage,
                        (), parent = self.stateNodePath[i],
                        image = image, scale = 1,
                        sort = DGG.IMAGE_SORT_INDEX)
| {
"repo_name": "chandler14362/panda3d",
"path": "direct/src/gui/DirectFrame.py",
"copies": "6",
"size": "7506",
"license": "bsd-3-clause",
"hash": 1846417425762753800,
"line_mean": 34.4056603774,
"line_max": 82,
"alpha_frac": 0.5070610179,
"autogenerated": false,
"ratio": 4.7626903553299496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0044373417226369685,
"num_lines": 212
} |
"""A direct light on a fast controller."""
import logging
from mpf.core.utility_functions import Util
from mpf.platforms.interfaces.light_platform_interface import LightPlatformSoftwareFade
class FASTMatrixLight(LightPlatformSoftwareFade):

    """A direct light on a fast controller."""

    __slots__ = ["log", "number", "send", "platform"]

    # pylint: disable-msg=too-many-arguments
    def __init__(self, number, sender, machine, fade_interval_ms: int, platform) -> None:
        """Initialise light."""
        super().__init__(number, machine.clock.loop, fade_interval_ms)
        self.log = logging.getLogger('FASTMatrixLight')
        self.send = sender
        self.platform = platform

    def set_brightness(self, brightness: float):
        """Set matrix light brightness by sending an L1 command (0-255 hex)."""
        level = Util.int_to_hex_string(int(brightness * 255))
        self.send('L1:{},{}'.format(self.number, level))

    def get_board_name(self):
        """Return the name of the board this light lives on."""
        if self.platform.machine_type == 'wpc':
            return "FAST WPC"

        light_number = Util.hex_string_to_int(self.number)
        first_on_board = 0
        # Walk the boards in order; each board owns a contiguous index range.
        for board in self.platform.io_boards.values():
            next_board_start = first_on_board + board.driver_count
            if first_on_board <= light_number < next_board_start:
                return "FAST Board {}".format(str(board.node_id))
            first_on_board = next_board_start

        # fall back if not found
        return "FAST Unknown Board"

    def is_successor_of(self, other):
        """Return true if the other light has the previous number."""
        raise AssertionError("Not possible in FASTMatrix.")

    def get_successor_number(self):
        """Return next number."""
        raise AssertionError("Not possible in FASTMatrix.")

    def __lt__(self, other):
        """Order lights by string."""
        return self.number < other.number
| {
"repo_name": "missionpinball/mpf",
"path": "mpf/platforms/fast/fast_light.py",
"copies": "1",
"size": "1856",
"license": "mit",
"hash": 3086329327736481000,
"line_mean": 35.3921568627,
"line_max": 96,
"alpha_frac": 0.6325431034,
"autogenerated": false,
"ratio": 3.9743040685224837,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5106847171922484,
"avg_score": null,
"num_lines": null
} |
"""A directory browser using Ttk Treeview.
Based on an example found at:
http://bitwalk.blogspot.com/2008/01/ttktreeview.html
"""
import os
import glob
import ttk
try:
import Tkinter
except ImportError:
import tkinter as Tkinter
def populate_tree(tree, node):
    # Only directory nodes have children to (re)populate.
    if tree.set(node, "type") != 'directory':
        return

    path = tree.set(node, "fullpath")
    tree.delete(tree.get_children(node))

    # The root node also lists the '.' and '..' pseudo-entries.
    parent = tree.parent(node)
    special_dirs = [] if parent else glob.glob('.') + glob.glob('..')

    for entry in special_dirs + os.listdir(path):
        full = os.path.join(path, entry).replace('\\', '/')
        if os.path.isdir(full):
            kind = "directory"
        elif os.path.isfile(full):
            kind = "file"
        else:
            kind = None
        fname = os.path.split(full)[1]
        item = tree.insert(node, "end", text=fname, values=[full, kind])
        if kind == 'directory':
            if fname not in ('.', '..'):
                # A dummy child makes the expander arrow appear before the
                # directory is actually scanned.
                tree.insert(item, 0, text="dummy")
            tree.item(item, text=fname)
        elif kind == 'file':
            tree.set(item, "size", "%d bytes" % os.stat(full).st_size)
def populate_roots(tree):
    # Insert the current working directory as the single root node.
    cwd = os.path.abspath('.').replace('\\', '/')
    root_node = tree.insert('', 'end', text=cwd, values=[cwd, "directory"])
    populate_tree(tree, root_node)
def update_tree(event):
    # <<TreeviewOpen>> handler: fill in the children of the opened node.
    widget = event.widget
    populate_tree(widget, widget.focus())
def change_dir(event):
    # Double-click handler: chdir into the chosen directory and rebuild.
    widget = event.widget
    node = widget.focus()
    if not widget.parent(node):
        return  # ignore double-clicks on the root itself
    path = os.path.abspath(widget.set(node, "fullpath"))
    if os.path.isdir(path):
        os.chdir(path)
        widget.delete(widget.get_children(''))
        populate_roots(widget)
def autoscroll(sbar, first, last):
    """Hide and show scrollbar as needed.
    Code from Joe English (JE) at http://wiki.tcl.tk/950"""
    lo, hi = float(first), float(last)
    # The whole range is visible: the scrollbar is useless, hide it.
    if lo <= 0 and hi >= 1:
        sbar.grid_remove()
    else:
        sbar.grid()
    sbar.set(lo, hi)
root = Tkinter.Tk()
vsb = ttk.Scrollbar(orient="vertical")
hsb = ttk.Scrollbar(orient="horizontal")
# Hidden data columns carry the full path and node type for each item;
# only the "size" column is displayed next to the tree itself.
tree = ttk.Treeview(root, columns=("fullpath", "type", "size"),
    displaycolumns="size", yscrollcommand=lambda f, l: autoscroll(vsb, f, l),
    xscrollcommand=lambda f, l:autoscroll(hsb, f, l))
vsb['command'] = tree.yview
hsb['command'] = tree.xview
tree.heading("#0", text="Directory Structure", anchor='w')
tree.heading("size", text="File Size", anchor='w')
tree.column("size", stretch=0, width=100)
populate_roots(tree)
# Lazy population: children are filled in when a node is opened.
tree.bind('<<TreeviewOpen>>', update_tree)
tree.bind('<Double-Button-1>', change_dir)
# Arrange the tree and its scrollbars in the toplevel
tree.grid(column=0, row=0, sticky='nswe')
vsb.grid(column=1, row=0, sticky='ns')
hsb.grid(column=0, row=1, sticky='ew')
root.grid_columnconfigure(0, weight=1)
root.grid_rowconfigure(0, weight=1)
root.mainloop()
| {
"repo_name": "karimbahgat/PythonGis",
"path": "pythongis/app/tk2/_othermisc/dirtree.py",
"copies": "2",
"size": "2874",
"license": "mit",
"hash": 618542027305913200,
"line_mean": 27.1764705882,
"line_max": 77,
"alpha_frac": 0.6169102296,
"autogenerated": false,
"ratio": 3.2292134831460673,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4846123712746067,
"avg_score": null,
"num_lines": null
} |
"""A directory browser using Ttk Treeview.
Based on the demo found in Tk 8.5 library/demos/browse
"""
import os
import glob
import Tkinter
import ttk
def populate_tree(tree, node):
    # Only nodes typed as directories can be expanded.
    if tree.set(node, "type") != 'directory':
        return

    path = tree.set(node, "fullpath")
    tree.delete(*tree.get_children(node))

    # Non-root nodes skip the '.' / '..' pseudo-entries.
    special_dirs = [] if tree.parent(node) else glob.glob('.') + glob.glob('..')

    for name in special_dirs + os.listdir(path):
        p = os.path.join(path, name).replace('\\', '/')
        if os.path.isdir(p):
            ptype = "directory"
        elif os.path.isfile(p):
            ptype = "file"
        else:
            ptype = None
        fname = os.path.split(p)[1]
        iid = tree.insert(node, "end", text=fname, values=[p, ptype])
        if ptype == 'directory':
            if fname not in ('.', '..'):
                # Placeholder child so the expander arrow shows up.
                tree.insert(iid, 0, text="dummy")
            tree.item(iid, text=fname)
        elif ptype == 'file':
            tree.set(iid, "size", "%d bytes" % os.stat(p).st_size)
def populate_roots(tree):
    # The tree has exactly one root: the current working directory.
    here = os.path.abspath('.').replace('\\', '/')
    top = tree.insert('', 'end', text=here, values=[here, "directory"])
    populate_tree(tree, top)
def update_tree(event):
    # Fired on <<TreeviewOpen>>: populate the node being expanded.
    populate_tree(event.widget, event.widget.focus())
def change_dir(event):
    # Fired on double-click: descend into the selected directory.
    tree = event.widget
    node = tree.focus()
    if tree.parent(node):
        target = os.path.abspath(tree.set(node, "fullpath"))
        if os.path.isdir(target):
            os.chdir(target)
            tree.delete(tree.get_children(''))
            populate_roots(tree)
def autoscroll(sbar, first, last):
    """Hide and show scrollbar as needed."""
    lo = float(first)
    hi = float(last)
    # Hide the scrollbar when everything is already visible.
    if lo <= 0 and hi >= 1:
        sbar.grid_remove()
    else:
        sbar.grid()
    sbar.set(lo, hi)
root = Tkinter.Tk()
vsb = ttk.Scrollbar(orient="vertical")
hsb = ttk.Scrollbar(orient="horizontal")
# Hidden data columns carry the full path and node type for each item;
# only the "size" column is displayed next to the tree itself.
tree = ttk.Treeview(columns=("fullpath", "type", "size"),
    displaycolumns="size", yscrollcommand=lambda f, l: autoscroll(vsb, f, l),
    xscrollcommand=lambda f, l:autoscroll(hsb, f, l))
vsb['command'] = tree.yview
hsb['command'] = tree.xview
tree.heading("#0", text="Directory Structure", anchor='w')
tree.heading("size", text="File Size", anchor='w')
tree.column("size", stretch=0, width=100)
populate_roots(tree)
# Lazy population: children are filled in when a node is opened.
tree.bind('<<TreeviewOpen>>', update_tree)
tree.bind('<Double-Button-1>', change_dir)
# Arrange the tree and its scrollbars in the toplevel
tree.grid(column=0, row=0, sticky='nswe')
vsb.grid(column=1, row=0, sticky='ns')
hsb.grid(column=0, row=1, sticky='ew')
root.grid_columnconfigure(0, weight=1)
root.grid_rowconfigure(0, weight=1)
root.mainloop()
| {
"repo_name": "nzavagli/UnrealPy",
"path": "UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Demo/tkinter/ttk/dirbrowser.py",
"copies": "10",
"size": "2717",
"license": "mit",
"hash": 6238562078933070000,
"line_mean": 28.2150537634,
"line_max": 77,
"alpha_frac": 0.6131762974,
"autogenerated": false,
"ratio": 3.2230130486358246,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003091577736465874,
"num_lines": 93
} |
"""A DirectRadioButton is a type of button that, similar to a
DirectCheckButton, has a separate indicator and can be toggled between
two states. However, only one DirectRadioButton in a group can be enabled
at a particular time."""
__all__ = ['DirectRadioButton']
from panda3d.core import *
from . import DirectGuiGlobals as DGG
from .DirectButton import *
from .DirectLabel import *
class DirectRadioButton(DirectButton):
    """
    DirectRadioButton(parent) - Create a DirectGuiWidget which responds
    to mouse clicks by setting given value to given variable and
    execute a callback function (passing that state through) if defined

    Only one button of the group referenced via setOthers()/['others'] can
    be checked at a time; checking one unchecks the rest.
    """
    def __init__(self, parent = None, **kw):
        # Inherits from DirectButton
        # A Direct Frame can have:
        # - A background texture (pass in path to image, or Texture Card)
        # - A midground geometry item (pass in geometry)
        # - A foreground text Node (pass in text string or Onscreen Text)
        # For a direct button:
        # Each button has 4 states (ready, press, rollover, disabled)
        # The same image/geom/text can be used for all four states or each
        # state can have a different text/geom/image
        # State transitions happen automatically based upon mouse interaction
        # Responds to click event and calls command if None
        self.colors = None
        optiondefs = (
            ('indicatorValue', 0, self.setIndicatorValue),
            # variable is a list whose value will be set by this radio button
            ('variable', [], None),
            # value is the value to be set when this radio button is selected
            ('value', [], None),
            # others is a list of other radio buttons sharing same variable
            ('others', [], None),
            # boxBorder defines the space created around the check box
            ('boxBorder', 0, None),
            # boxPlacement maps left, above, right, below
            ('boxPlacement', 'left', None),
            # boxGeom defines geom to indicate current radio button is selected or not
            ('boxGeom', None, None),
            ('boxGeomColor', None, None),
            ('boxGeomScale', 1.0, None),
            ('boxImage', None, None),
            ('boxImageScale', 1.0, None),
            ('boxImageColor', VBase4(1, 1, 1, 1), None),
            ('boxRelief', None, None),
            )
        # Merge keyword options with default options
        self.defineoptions(kw, optiondefs)
        # Initialize superclasses
        DirectButton.__init__(self, parent)
        # The indicator is a two-state label: state 0 = unchecked,
        # state 1 = checked.
        self.indicator = self.createcomponent("indicator", (), None,
                                              DirectLabel, (self,),
                                              numStates = 2,
                                              image = self['boxImage'],
                                              image_scale = self['boxImageScale'],
                                              image_color = self['boxImageColor'],
                                              geom = self['boxGeom'],
                                              geom_scale = self['boxGeomScale'],
                                              geom_color = self['boxGeomColor'],
                                              state = 'disabled',
                                              text = ('X', 'X'),
                                              relief = self['boxRelief'],
                                              )
        # Call option initialization functions
        self.initialiseoptions(DirectRadioButton)
        # After initialization with X giving it the correct size, put back space
        if self['boxGeom'] is None:
            if not 'boxRelief' in kw and self['boxImage'] is None:
                self.indicator['relief'] = DGG.SUNKEN
            self.indicator['text'] = (' ', '*')
            self.indicator['text_pos'] = (0, -.25)
        else:
            self.indicator['text'] = (' ', ' ')
        if self['boxGeomColor'] != None and self['boxGeom'] != None:
            # colors[0] is the transparent "unchecked" color.
            self.colors = [VBase4(1, 1, 1, 0), self['boxGeomColor']]
            self.component('indicator')['geom_color'] = VBase4(1, 1, 1, 0)
        # Start checked if the shared variable already equals our value.
        needToCheck = True
        if len(self['value']) == len(self['variable']) != 0:
            for i in range(len(self['value'])):
                if self['variable'][i] != self['value'][i]:
                    needToCheck = False
                    break
        if needToCheck:
            self.check()

    # Override the resetFrameSize of DirectGuiWidget inorder to provide space for label
    def resetFrameSize(self):
        self.setFrameSize(fClearFrame = 1)

    def setFrameSize(self, fClearFrame = 0):
        """Compute the widget frame, enlarging it (or shifting a side) so
        the indicator box fits on the side chosen by 'boxPlacement'."""
        if self['frameSize']:
            # Use user specified bounds
            self.bounds = self['frameSize']
            frameType = self.frameStyle[0].getType()
            ibw = self.indicator['borderWidth']
        else:
            # Use ready state to compute bounds
            frameType = self.frameStyle[0].getType()
            if fClearFrame and (frameType != PGFrameStyle.TNone):
                self.frameStyle[0].setType(PGFrameStyle.TNone)
                self.guiItem.setFrameStyle(0, self.frameStyle[0])
                # To force an update of the button
                self.guiItem.getStateDef(0)
            # Clear out frame before computing bounds
            self.getBounds()
            # Restore frame style if necessary
            if (frameType != PGFrameStyle.TNone):
                self.frameStyle[0].setType(frameType)
                self.guiItem.setFrameStyle(0, self.frameStyle[0])
            # Ok, they didn't set specific bounds,
            # let's add room for the label indicator
            # get the difference in height
            ibw = self.indicator['borderWidth']
            indicatorWidth = (self.indicator.getWidth() + (2*ibw[0]))
            indicatorHeight = (self.indicator.getHeight() + (2*ibw[1]))
            diff = (indicatorHeight + (2*self['boxBorder']) -
                    (self.bounds[3] - self.bounds[2]))
            # If background is smaller then indicator, enlarge background
            if diff > 0:
                if self['boxPlacement'] == 'left': #left
                    self.bounds[0] += -(indicatorWidth + (2*self['boxBorder']))
                    self.bounds[3] += diff/2
                    self.bounds[2] -= diff/2
                elif self['boxPlacement'] == 'below': #below
                    self.bounds[2] += -(indicatorHeight+(2*self['boxBorder']))
                elif self['boxPlacement'] == 'right': #right
                    self.bounds[1] += indicatorWidth + (2*self['boxBorder'])
                    self.bounds[3] += diff/2
                    self.bounds[2] -= diff/2
                else: #above
                    self.bounds[3] += indicatorHeight + (2*self['boxBorder'])
            # Else make space on correct side for indicator
            else:
                if self['boxPlacement'] == 'left': #left
                    self.bounds[0] += -(indicatorWidth + (2*self['boxBorder']))
                elif self['boxPlacement'] == 'below': #below
                    self.bounds[2] += -(indicatorHeight + (2*self['boxBorder']))
                elif self['boxPlacement'] == 'right': #right
                    self.bounds[1] += indicatorWidth + (2*self['boxBorder'])
                else: #above
                    self.bounds[3] += indicatorHeight + (2*self['boxBorder'])
        # Set frame to new dimensions
        if ((frameType != PGFrameStyle.TNone) and
            (frameType != PGFrameStyle.TFlat)):
            bw = self['borderWidth']
        else:
            bw = (0, 0)
        # Set frame to new dimensions
        self.guiItem.setFrame(
            self.bounds[0] - bw[0],
            self.bounds[1] + bw[0],
            self.bounds[2] - bw[1],
            self.bounds[3] + bw[1])
        # If they didn't specify a position, put it in the center of new area
        if not self.indicator['pos']:
            bbounds = self.bounds
            lbounds = self.indicator.bounds
            newpos = [0, 0, 0]
            if self['boxPlacement'] == 'left': #left
                newpos[0] += bbounds[0]-lbounds[0] + self['boxBorder'] + ibw[0]
                dropValue = (bbounds[3]-bbounds[2]-lbounds[3]+lbounds[2])/2 + self['boxBorder']
                newpos[2] += (bbounds[3]-lbounds[3] + self['boxBorder'] -
                              dropValue)
            elif self['boxPlacement'] == 'right': #right
                newpos[0] += bbounds[1]-lbounds[1] - self['boxBorder'] - ibw[0]
                dropValue = (bbounds[3]-bbounds[2]-lbounds[3]+lbounds[2])/2 + self['boxBorder']
                newpos[2] += (bbounds[3]-lbounds[3] + self['boxBorder']
                              - dropValue)
            elif self['boxPlacement'] == 'above': #above
                newpos[2] += bbounds[3]-lbounds[3] - self['boxBorder'] - ibw[1]
            else: #below
                newpos[2] += bbounds[2]-lbounds[2] + self['boxBorder'] + ibw[1]
            self.indicator.setPos(newpos[0], newpos[1], newpos[2])

    def commandFunc(self, event):
        # Click handler: copy our value into the shared variable, then check.
        if len(self['value']) == len(self['variable']) != 0:
            for i in range(len(self['value'])):
                self['variable'][i] = self['value'][i]
        self.check()

    def check(self):
        """Mark this button checked, uncheck the others, run the command."""
        self['indicatorValue'] = 1
        self.setIndicatorValue()
        for other in self['others']:
            if other != self:
                other.uncheck()
        if self['command']:
            # Pass any extra args to command
            self['command'](*self['extraArgs'])

    def setOthers(self, others):
        """Register the list of radio buttons sharing this button's variable."""
        self['others'] = others

    def uncheck(self):
        """Clear this button's indicator (does not touch the shared variable)."""
        self['indicatorValue'] = 0
        if self.colors != None:
            self.component('indicator')['geom_color'] = self.colors[self['indicatorValue']]

    def setIndicatorValue(self):
        # Push the current checked state into the indicator's gui state.
        self.component('indicator').guiItem.setState(self['indicatorValue'])
        if self.colors != None:
            self.component('indicator')['geom_color'] = self.colors[self['indicatorValue']]
| {
"repo_name": "chandler14362/panda3d",
"path": "direct/src/gui/DirectRadioButton.py",
"copies": "9",
"size": "10371",
"license": "bsd-3-clause",
"hash": 4435948626262002700,
"line_mean": 45.2991071429,
"line_max": 95,
"alpha_frac": 0.5175007232,
"autogenerated": false,
"ratio": 4.370417193426043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9387917916626043,
"avg_score": null,
"num_lines": null
} |
#A dirty "sync" example.
#TODO: 1. Re-Write with twisted for async
#2. Improve on the code.
import urllib2
import simplejson
import random
import time
from threading import Thread
import threadpool
import os
# Endpoints of the nginx push module under test (both on channel 232).
PUBLISHER_URL = 'http://localhost:8080/publish/?channel=232'
SUBSCRIBER_URL = 'http://localhost:8080/activity/?channel=232'
import logging
logging.basicConfig(level = logging.DEBUG)
log = logging.getLogger('test.py')
def publish(i):
print i
res = urllib2.urlopen(PUBLISHER_URL, \
data = simplejson.dumps(\
{'message': 'hello world %d' % i}))
return
def subscribe(num):
    # Long-poll the subscriber endpoint forever, logging every message.
    # num: subscriber index, used only in the log output.
    et = None    # ETag of the last response, echoed back to the server
    last = None  # Last-Modified of the last response
    while True:
        # NOTE(review): on the first iteration both header values are None,
        # so the literal header "If-None-Match: None" is sent -- confirm the
        # push module tolerates that.
        req = urllib2.Request(SUBSCRIBER_URL,
                    headers={'If-None-Match':et, \
                    'If-Modified-Since': last})
        resp = urllib2.urlopen(req)
        et = resp.headers['etag']
        last = resp.headers['last-modified']
        log.info('Subscriber:%d \r\nmsg:: %s\n' %
                    (num, resp.read()))
if __name__ == '__main__':
    # 10 worker threads service 20 long-polling subscribers concurrently.
    pool = threadpool.ThreadPool(10)
    requests = threadpool.makeRequests(subscribe, range(20))
    [pool.putRequest(request) for request in requests]
    # Publish 100 messages, pausing 3-4 seconds between each.
    for i in range(100):
        publish(i)
        time.sleep(random.randint(3, 4))
| {
"repo_name": "jedisct1/nginx_http_push_module",
"path": "tests/test.py",
"copies": "2",
"size": "1342",
"license": "mit",
"hash": -6370660774068876000,
"line_mean": 28.1739130435,
"line_max": 70,
"alpha_frac": 0.5953800298,
"autogenerated": false,
"ratio": 3.696969696969697,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016302893161747142,
"num_lines": 46
} |
# A disassembler for Luz.
# Converts binary words into mnemonic assembly instructions
#
# Luz micro-controller assembler
# Eli Bendersky (C) 2008-2010
#
import pprint, os, sys
from collections import defaultdict
from ..commonlib.utils import (
extract_bitfield, signed2int)
from ..commonlib.luz_opcodes import *
from .asm_instructions import register_alias_of
class DisassembleError(Exception):
    """Raised when a word cannot be decoded into a known instruction."""
def disassemble(word, replace_alias=False):
    """ Given a word (32-bit integer) returns the mnemonic
        assembly instruction it represents, as a string.

        replace_alias:
            If True, register numbers are replaced with their
            aliases.

        DisassembleError can be raised in case of errors.
    """
    # The opcode lives in the top 6 bits of the word.
    opcode = extract_bitfield(word, 31, 26)
    if opcode not in _OP:
        raise DisassembleError('unknown opcode %X' % opcode)
    namer = _reg_name_alias if replace_alias else _reg_name_normal
    # Dispatch to the per-format handler with its mnemonic.
    handler, mnemonic = _OP[opcode][0], _OP[opcode][1]
    return handler(word, mnemonic, namer)
##################################################################
def _reg_name_normal(regnum):
    # Plain register syntax: $r<N>.
    return '$r' + str(regnum)

def _reg_name_alias(regnum):
    # Symbolic alias (e.g. $sp) from the assembler's alias table.
    return register_alias_of[regnum]
def _dis_generic_3reg(word, name, regnamer):
    # Format: <name> $rd, $rs, $rt
    rd, rs, rt = (extract_bitfield(word, hi, hi - 4) for hi in (25, 20, 15))
    return '%s %s, %s, %s' % (name, regnamer(rd), regnamer(rs), regnamer(rt))

def _dis_generic_2reg_imm(word, name, regnamer):
    # Format: <name> $rd, $rs, 0x<imm16>
    rd = extract_bitfield(word, 25, 21)
    rs = extract_bitfield(word, 20, 16)
    imm = extract_bitfield(word, 15, 0)
    return '%s %s, %s, 0x%X' % (name, regnamer(rd), regnamer(rs), imm)

def _dis_generic_1reg_imm(word, name, regnamer):
    # Format: <name> $rd, 0x<imm16>
    return '%s %s, 0x%X' % (
        name,
        regnamer(extract_bitfield(word, 25, 21)),
        extract_bitfield(word, 15, 0))

def _dis_generic_1reg(word, name, regnamer):
    # Format: <name> $rd
    return '%s %s' % (name, regnamer(extract_bitfield(word, 25, 21)))

def _dis_call(word, name, regnamer):
    # Annotate with the actual jump address (word offset multiplied by 4).
    target = extract_bitfield(word, 25, 0)
    return '%s 0x%X [0x%X]' % (name, target, target * 4)

def _dis_generic_offset26(word, name, regnamer):
    # NOTE(review): signed2int is called without nbits here, unlike the
    # 16-bit decoders below -- confirm its default suits a 26-bit field.
    return '%s %d' % (name, signed2int(extract_bitfield(word, 25, 0)))

def _dis_load(word, name, regnamer):
    # Format: <name> $rd, offset($rs)
    dest = extract_bitfield(word, 25, 21)
    base = extract_bitfield(word, 20, 16)
    disp = signed2int(extract_bitfield(word, 15, 0), nbits=16)
    return '%s %s, %d(%s)' % (name, regnamer(dest), disp, regnamer(base))

def _dis_store(word, name, regnamer):
    # Format: <name> $rs, offset($rd) -- source register printed first.
    base = extract_bitfield(word, 25, 21)
    src = extract_bitfield(word, 20, 16)
    disp = signed2int(extract_bitfield(word, 15, 0), nbits=16)
    return '%s %s, %d(%s)' % (name, regnamer(src), disp, regnamer(base))

def _dis_noop(word, name, regnamer):
    # Instruction with no operands.
    return '%s' % name

def _dis_branch(word, name, regnamer):
    # Format: <name> $rd, $rs, offset
    rd = extract_bitfield(word, 25, 21)
    rs = extract_bitfield(word, 20, 16)
    disp = signed2int(extract_bitfield(word, 15, 0), nbits=16)
    return '%s %s, %s, %d' % (name, regnamer(rd), regnamer(rs), disp)
# Maps opcodes to the handler that disassembles them, paired with the
# mnemonic string passed to that handler.
#
_OP = {
    # arithmetic / logic, register and immediate forms
    OP_ADD:     (_dis_generic_3reg, 'add'),
    OP_ADDI:    (_dis_generic_2reg_imm, 'addi'),
    OP_SUB:     (_dis_generic_3reg, 'sub'),
    OP_SUBI:    (_dis_generic_2reg_imm, 'subi'),
    OP_MULU:    (_dis_generic_3reg, 'mulu'),
    OP_MUL:     (_dis_generic_3reg, 'mul'),
    OP_DIVU:    (_dis_generic_3reg, 'divu'),
    OP_DIV:     (_dis_generic_3reg, 'div'),
    OP_LUI:     (_dis_generic_1reg_imm, 'lui'),
    OP_SLL:     (_dis_generic_3reg, 'sll'),
    OP_SLLI:    (_dis_generic_2reg_imm, 'slli'),
    OP_SRL:     (_dis_generic_3reg, 'srl'),
    OP_SRLI:    (_dis_generic_2reg_imm, 'srli'),
    OP_AND:     (_dis_generic_3reg, 'and'),
    OP_ANDI:    (_dis_generic_2reg_imm, 'andi'),
    OP_OR:      (_dis_generic_3reg, 'or'),
    OP_ORI:     (_dis_generic_2reg_imm, 'ori'),
    OP_NOR:     (_dis_generic_3reg, 'nor'),
    OP_XOR:     (_dis_generic_3reg, 'xor'),
    # memory access
    OP_LB:      (_dis_load, 'lb'),
    OP_LH:      (_dis_load, 'lh'),
    OP_LW:      (_dis_load, 'lw'),
    OP_LBU:     (_dis_load, 'lbu'),
    OP_LHU:     (_dis_load, 'lhu'),
    OP_SB:      (_dis_store, 'sb'),
    OP_SH:      (_dis_store, 'sh'),
    OP_SW:      (_dis_store, 'sw'),
    # control flow
    OP_JR:      (_dis_generic_1reg, 'jr'),
    OP_CALL:    (_dis_call, 'call'),
    OP_B:       (_dis_generic_offset26, 'b'),
    OP_BEQ:     (_dis_branch, 'beq'),
    OP_BNE:     (_dis_branch, 'bne'),
    OP_BGE:     (_dis_branch, 'bge'),
    OP_BGT:     (_dis_branch, 'bgt'),
    OP_BLE:     (_dis_branch, 'ble'),
    OP_BLT:     (_dis_branch, 'blt'),
    OP_BGEU:    (_dis_branch, 'bgeu'),
    OP_BGTU:    (_dis_branch, 'bgtu'),
    OP_BLEU:    (_dis_branch, 'bleu'),
    OP_BLTU:    (_dis_branch, 'bltu'),
    # no-operand instructions
    OP_ERET:    (_dis_noop, 'eret'),
    OP_HALT:    (_dis_noop, 'halt'),
}
| {
"repo_name": "eliben/luz-cpu",
"path": "luz_asm_sim/lib/asmlib/disassembler.py",
"copies": "1",
"size": "5115",
"license": "unlicense",
"hash": -6198448017414548000,
"line_mean": 30.7701863354,
"line_max": 77,
"alpha_frac": 0.5792766373,
"autogenerated": false,
"ratio": 2.779891304347826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8800466187945164,
"avg_score": 0.011740350740532327,
"num_lines": 161
} |
# A disassembler for Luz.
# Converts binary words into mnemonic assembly instructions
#
# Luz micro-controller assembler
# Eli Bendersky (C) 2008-2010
#
import pprint, os, sys
from collections import defaultdict
from ..commonlib.utils import (
extract_bitfield, signed2int)
from ..commonlib.luz_opcodes import *
from .asm_instructions import register_alias_of
# Raised when a word cannot be decoded into a known instruction.
class DisassembleError(Exception): pass
def disassemble(word, replace_alias=False):
    """ Given a word (32-bit integer) returns the mnemonic
        assembly instruction it represents, as a string.

        replace_alias:
            If True, register numbers are replaced with their
            aliases.

        DisassembleError can be raised in case of errors.
    """
    opcode = extract_bitfield(word, 31, 26)
    namer = _reg_name_alias if replace_alias else _reg_name_normal
    # Dispatch on the opcode table; unknown opcodes are an error.
    try:
        handler, mnemonic = _OP[opcode]
    except KeyError:
        raise DisassembleError('unknown opcode %X' % opcode)
    return handler(word, mnemonic, namer)
##################################################################
def _reg_name_normal(regnum):
return '$r%s' % regnum
def _reg_name_alias(regnum):
    # Map a register number to its conventional alias, using the
    # register_alias_of table imported from asm_instructions.
    return register_alias_of[regnum]
def _dis_generic_3reg(word, name, regnamer):
rd = extract_bitfield(word, 25, 21)
rs = extract_bitfield(word, 20, 16)
rt = extract_bitfield(word, 15, 11)
return '%s %s, %s, %s' % (name, regnamer(rd), regnamer(rs), regnamer(rt))
def _dis_generic_2reg_imm(word, name, regnamer):
rd = extract_bitfield(word, 25, 21)
rs = extract_bitfield(word, 20, 16)
imm16 = extract_bitfield(word, 15, 0)
return '%s %s, %s, 0x%X' % (name, regnamer(rd), regnamer(rs), imm16)
def _dis_generic_1reg_imm(word, name, regnamer):
rd = extract_bitfield(word, 25, 21)
imm16 = extract_bitfield(word, 15, 0)
return '%s %s, 0x%X' % (name, regnamer(rd), imm16)
def _dis_generic_1reg(word, name, regnamer):
rd = extract_bitfield(word, 25, 21)
return '%s %s' % (name, regnamer(rd))
def _dis_call(word, name, regnamer):
imm26 = extract_bitfield(word, 25, 0)
# annotate with the actual jump address (multiplied by 4)
return '%s 0x%X [0x%X]' % (name, imm26, imm26 * 4)
def _dis_generic_offset26(word, name, regnamer):
offset = signed2int(extract_bitfield(word, 25, 0))
return '%s %d' % (name, offset)
def _dis_load(word, name, regnamer):
rd = extract_bitfield(word, 25, 21)
rs = extract_bitfield(word, 20, 16)
offset = signed2int(extract_bitfield(word, 15, 0), nbits=16)
return '%s %s, %d(%s)' % (name, regnamer(rd), offset, regnamer(rs))
def _dis_store(word, name, regnamer):
rd = extract_bitfield(word, 25, 21)
rs = extract_bitfield(word, 20, 16)
offset = signed2int(extract_bitfield(word, 15, 0), nbits=16)
return '%s %s, %d(%s)' % (name, regnamer(rs), offset, regnamer(rd))
def _dis_noop(word, name, regnamer):
return '%s' % name
def _dis_branch(word, name, regnamer):
rd = extract_bitfield(word, 25, 21)
rs = extract_bitfield(word, 20, 16)
offset = signed2int(extract_bitfield(word, 15, 0), nbits=16)
return '%s %s, %s, %d' % (name, regnamer(rd), regnamer(rs), offset)
# Maps opcodes to the handler that disassembles them, paired with the
# mnemonic string passed to that handler.
#
_OP = {
    # arithmetic / logic, register and immediate forms
    OP_ADD:     (_dis_generic_3reg, 'add'),
    OP_ADDI:    (_dis_generic_2reg_imm, 'addi'),
    OP_SUB:     (_dis_generic_3reg, 'sub'),
    OP_SUBI:    (_dis_generic_2reg_imm, 'subi'),
    OP_MULU:    (_dis_generic_3reg, 'mulu'),
    OP_MUL:     (_dis_generic_3reg, 'mul'),
    OP_DIVU:    (_dis_generic_3reg, 'divu'),
    OP_DIV:     (_dis_generic_3reg, 'div'),
    OP_LUI:     (_dis_generic_1reg_imm, 'lui'),
    OP_SLL:     (_dis_generic_3reg, 'sll'),
    OP_SLLI:    (_dis_generic_2reg_imm, 'slli'),
    OP_SRL:     (_dis_generic_3reg, 'srl'),
    OP_SRLI:    (_dis_generic_2reg_imm, 'srli'),
    OP_AND:     (_dis_generic_3reg, 'and'),
    OP_ANDI:    (_dis_generic_2reg_imm, 'andi'),
    OP_OR:      (_dis_generic_3reg, 'or'),
    OP_ORI:     (_dis_generic_2reg_imm, 'ori'),
    OP_NOR:     (_dis_generic_3reg, 'nor'),
    OP_XOR:     (_dis_generic_3reg, 'xor'),
    # memory access
    OP_LB:      (_dis_load, 'lb'),
    OP_LH:      (_dis_load, 'lh'),
    OP_LW:      (_dis_load, 'lw'),
    OP_LBU:     (_dis_load, 'lbu'),
    OP_LHU:     (_dis_load, 'lhu'),
    OP_SB:      (_dis_store, 'sb'),
    OP_SH:      (_dis_store, 'sh'),
    OP_SW:      (_dis_store, 'sw'),
    # control flow
    OP_JR:      (_dis_generic_1reg, 'jr'),
    OP_CALL:    (_dis_call, 'call'),
    OP_B:       (_dis_generic_offset26, 'b'),
    OP_BEQ:     (_dis_branch, 'beq'),
    OP_BNE:     (_dis_branch, 'bne'),
    OP_BGE:     (_dis_branch, 'bge'),
    OP_BGT:     (_dis_branch, 'bgt'),
    OP_BLE:     (_dis_branch, 'ble'),
    OP_BLT:     (_dis_branch, 'blt'),
    OP_BGEU:    (_dis_branch, 'bgeu'),
    OP_BGTU:    (_dis_branch, 'bgtu'),
    OP_BLEU:    (_dis_branch, 'bleu'),
    OP_BLTU:    (_dis_branch, 'bltu'),
    # no-operand instructions
    OP_ERET:    (_dis_noop, 'eret'),
    OP_HALT:    (_dis_noop, 'halt'),
}
| {
"repo_name": "8l/luz-cpu",
"path": "luz_asm_sim/lib/asmlib/disassembler.py",
"copies": "1",
"size": "5276",
"license": "unlicense",
"hash": -8289446628384067000,
"line_mean": 30.7701863354,
"line_max": 77,
"alpha_frac": 0.5615996967,
"autogenerated": false,
"ratio": 2.847274689692391,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.390887438639239,
"avg_score": null,
"num_lines": null
} |
"""A discrete event simulation framework."""
import debug, config
class Event(object):
    """An occurrence initiated from within the simulation."""
    def __init__(self, label, sender, bubble_time = 0):
        # Events may never be scheduled before the current simulation
        # clock (the module-level `time` global).
        assert bubble_time >= time
        self.label = label
        self.sender = sender
        self.time = bubble_time
    def __repr__(self):
        return '{} (t = {:.5f}, s = {})'.format(
            self.label, self.time, self.sender)
class Dispatcher(object):
    """Allows functions to be called when an event occurs."""
    def __init__(self):
        # Maps an event label to the list of registered callables.
        self.listeners = {}
    def register(self, event_label, listener):
        """Register a function to be called when an event occurs."""
        self.listeners.setdefault(event_label, []).append(listener)
    def bubble(self, event):
        """Execute registered listeners. Do not call from outside."""
        log_event(event)
        for listener in self.listeners.get(event.label, []):
            listener(event)
def log_event(event):
    """Print a log message to standard output when events occur.

    Only events whose label appears in config.events_printed are shown.
    """
    if event.label not in config.events_printed:
        return
    debug.print_object(event.sender, headers=[
        ('Time', '%d' % time),
        (event.sender.__class__.__name__, event.label),
    ])
# Simulation clock: the time of the most recently bubbled event.
time = 0
# Pending events; may be altered at will, including while running.
events = []
# The module-wide dispatcher that routes events to listeners.
dispatcher = Dispatcher()
def init():
    """Reset the simulation clock, the event list and the dispatcher."""
    global time, events, dispatcher
    time, events, dispatcher = 0, [], Dispatcher()
def run():
    """Enumerate events and bubble each until no more events exist.

    Events live in the events list (sim.events), which can be altered at
    will, including while the simulation is running.
    """
    global time, events, dispatcher
    # Announce the start of the simulation at the time of the earliest
    # pending event (or t=0 when nothing is scheduled yet).
    if events:
        start_time = min(events, key=lambda e: e.time).time
    else:
        start_time = 0
    dispatcher.bubble(Event('sim-start', None, start_time))
    while events:
        upcoming = min(events, key=lambda e: e.time)
        # The clock must never move backwards.
        assert upcoming.time >= time
        time = upcoming.time
        events.remove(upcoming)
        dispatcher.bubble(upcoming)
    # Fire a 'sim-finish' event for any post processors (statistics etc).
    dispatcher.bubble(Event('sim-finish', None, time))
"repo_name": "mauzeh/formation-flight",
"path": "lib/sim.py",
"copies": "1",
"size": "2430",
"license": "mit",
"hash": -5596093529395623000,
"line_mean": 28.2891566265,
"line_max": 77,
"alpha_frac": 0.6115226337,
"autogenerated": false,
"ratio": 4.0365448504983386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5148067484198339,
"avg_score": null,
"num_lines": null
} |
"""A dispatcher for directory watcher events
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import fnmatch
import os
from . import dirwatch_base
class DirWatcherDispatcher:
    """Dispatches directory watcher events to multiple handlers.
    """

    __slots__ = (
        '_dirwatcher',
        '_configs',
    )

    def __init__(self, dirwatcher):
        self._dirwatcher = dirwatcher
        self._configs = []
        # Route every raw watcher callback through this dispatcher.
        dirwatcher.on_created = self._on_created
        dirwatcher.on_deleted = self._on_deleted
        dirwatcher.on_modified = self._on_modified

    @property
    def dirwatcher(self):
        """Gets the dirwatcher which this dispatcher is tied to.
        """
        return self._dirwatcher

    def register(self, path, events):
        """Registers a handler for a list of events at the given path.

        Invalid registrations (path is None or events is not a dict)
        are silently ignored.
        """
        if path is None or not isinstance(events, dict):
            return
        self._configs.append({'path': path, 'events': events})
        # Keep the most specific (lexicographically largest) patterns first.
        self._configs.sort(key=lambda cfg: cfg['path'], reverse=True)

    def _trigger_handler(self, path, event):
        """Triggers at most one handler for the given path and event.

        The first config whose pattern matches the path's directory and
        that has a handler registered for `event` wins.
        """
        watch_dir = os.path.dirname(path)
        for cfg in self._configs:
            if fnmatch.fnmatch(watch_dir, cfg['path']) and event in cfg['events']:
                handler = cfg['events'][event]
                if callable(handler):
                    handler(path)
                return

    def _on_created(self, path):
        """Handles path created events from the directory watcher.
        """
        self._trigger_handler(path, dirwatch_base.DirWatcherEvent.CREATED)

    def _on_deleted(self, path):
        """Handles path deleted events from the directory watcher.
        """
        self._trigger_handler(path, dirwatch_base.DirWatcherEvent.DELETED)

    def _on_modified(self, path):
        """Handles path modified events from the directory watcher.
        """
        self._trigger_handler(path, dirwatch_base.DirWatcherEvent.MODIFIED)
| {
"repo_name": "ceache/treadmill",
"path": "lib/python/treadmill/dirwatch/dirwatch_dispatcher.py",
"copies": "2",
"size": "2273",
"license": "apache-2.0",
"hash": -4237509873575289300,
"line_mean": 27.7721518987,
"line_max": 75,
"alpha_frac": 0.5974483062,
"autogenerated": false,
"ratio": 4.346080305927342,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5943528612127342,
"avg_score": null,
"num_lines": null
} |
## Aditya Gilra, NCBS, Bangalore, 2012
"""
Inside the .../moose-examples/CA1PyramidalCell/ directory supplied with MOOSE, run
python CA1.py <filename>
(if no filename is specified, the single compartment CA1 cell is used.)
(supporting channels and different morph xml files are already present in this same directory).
The soma name below is hard coded for CA1, else any other file can be used by modifying this script.
"""
import os
# Force single-threaded MOOSE before it is imported.
os.environ['NUMPTHREADS'] = '1'
import moose
from moose.utils import *
from moose.neuroml.NeuroML import NeuroML
from pylab import *
# All times are in seconds.
simdt = 10e-6 # simulation timestep
plotdt = 10e-6 # sampling interval for recorded tables
runtime = 0.2 # total simulated time
def loadGran98NeuroML_L123(filename):
    """Load the CA1 cell model from `filename`, simulate it, and plot
    the somatic membrane potential.
    """
    reader = NeuroML()
    population_dict, projection_dict = reader.readNeuroMLFromFile(filename)
    # Soma segment name is hard-coded for the CA1 morphology files.
    soma_path = population_dict['CA1group'][1][0].path + '/Seg0_soma_0_0'
    vm_table = setupTable('somaVm', moose.Compartment(soma_path), 'Vm')
    soma = moose.Compartment(soma_path)
    print("Reinit MOOSE ... ")
    resetSim(['/elec', '/cells'], simdt, plotdt, simmethod='ee')  # from moose.utils
    print("Running ... ")
    moose.start(runtime)
    timepoints = arange(0.0, runtime, simdt)
    plot(timepoints, vm_table.vector[1:])
    title('Soma Vm')
    xlabel('time (s)')
    ylabel('Voltage (V)')
    print("Showing plots ...")
    show()
if __name__ == "__main__":
    # `sys` was never imported at module level in this script; argv
    # access only worked because one of the wildcard imports above
    # happened to expose it.  Import it explicitly so the script does
    # not depend on that accident.
    import sys
    if len(sys.argv) < 2:
        # Default to the single-compartment CA1 model.
        filename = "CA1soma.net.xml"
    else:
        filename = sys.argv[1]
    loadGran98NeuroML_L123(filename)
| {
"repo_name": "h-mayorquin/camp_india_2016",
"path": "tutorials/chemical switches/moose/neuroml/CA1PyramidalCell/CA1.py",
"copies": "1",
"size": "1710",
"license": "mit",
"hash": -5365642802375636000,
"line_mean": 32.5294117647,
"line_max": 100,
"alpha_frac": 0.6783625731,
"autogenerated": false,
"ratio": 2.789559543230016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3967922116330016,
"avg_score": null,
"num_lines": null
} |
## Aditya Gilra, NCBS, Bangalore, 2012
"""
Inside the .../moose-examples/GranuleCell/ directory supplied with MOOSE, run
python testNeuroML_Gran98.py
(other channels and morph xml files are already present in this same directory).
The soma name below is hard coded for gran98, else any other file can be used by modifying this script.
"""
import os
# Force single-threaded MOOSE before it is imported.
os.environ['NUMPTHREADS'] = '1'
import sys
# Locate the in-tree moose python package.
sys.path.append('../../../python')
import moose
from moose.utils import *
from moose.neuroml.NeuroML import NeuroML
from pylab import *
# All times are in seconds.
simdt = 25e-6 # simulation timestep
plotdt = 25e-6 # sampling interval for recorded tables
runtime = 0.7 # total simulated time
def loadGran98NeuroML_L123(filename):
    """Load the all-channels cell from `filename`, simulate it, and
    plot the somatic Vm, Ca concentration, and the KCa / KC channel
    conductances (one figure each).
    """
    reader = NeuroML()
    population_dict, projection_dict = reader.readNeuroMLFromFile(filename)
    # Soma segment name is hard-coded for the gran98 morphology files.
    soma_path = population_dict['Gran'][1][0].path + '/Soma_0'
    vm_table = setupTable('somaVm', moose.Compartment(soma_path), 'Vm')
    ca_table = setupTable('somaCa', moose.CaConc(soma_path+'/Gran_CaPool_98'), 'Ca')
    kc_table = setupTable('somaIKC', moose.HHChannel(soma_path+'/KC_CML'), 'Gk')
    kca_table = setupTable('somaIKCa', moose.HHChannel(soma_path+'/Gran_KCa_98'), 'Gk')
    soma = moose.Compartment(soma_path)
    print("Reinit MOOSE ... ")
    resetSim(['/elec', '/cells'], simdt, plotdt, simmethod='ee')  # from moose.utils
    print("Running ... ")
    moose.start(runtime)
    timepoints = arange(0.0, runtime, simdt)
    # One figure per recorded quantity, in the original plotting order.
    curves = [
        (vm_table, 'Soma Vm', 'Voltage (V)'),
        (ca_table, 'Soma Ca', 'Ca conc (mol/m^3)'),
        (kca_table, 'KCa current (A)', ''),
        (kc_table, 'KC current (A)', ''),
    ]
    for i, (table, plot_title, y_label) in enumerate(curves):
        if i:
            figure()
        plot(timepoints, table.vector[1:])
        title(plot_title)
        xlabel('time (s)')
        ylabel(y_label)
    print("Showing plots ...")
    show()
# NOTE(review): this module-level default is redundant -- it is
# unconditionally reassigned inside the __main__ block below.
filename = "allChannelsCell.net.xml"
if __name__ == "__main__":
    # Optional first argument overrides the model file.
    if len(sys.argv)<2:
        filename = "allChannelsCell.net.xml"
    else:
        filename = sys.argv[1]
    loadGran98NeuroML_L123(filename)
| {
"repo_name": "h-mayorquin/camp_india_2016",
"path": "tutorials/chemical switches/moose/neuroml/allChannelsCell/allChannelsCell.py",
"copies": "1",
"size": "2130",
"license": "mit",
"hash": -2201605458879197700,
"line_mean": 30.3235294118,
"line_max": 103,
"alpha_frac": 0.6530516432,
"autogenerated": false,
"ratio": 2.7806788511749345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39337304943749346,
"avg_score": null,
"num_lines": null
} |
## Aditya Gilra, NCBS, Bangalore, 201
"""
Inside the .../moose-examples/CA1PyramidalCell/ directory supplied with MOOSE, run
python testNeuroML_CA1.py
(other channels and morph xml files are already present in this same directory).
The soma name below is hard coded for CA1, else any other file can be used by modifying this script.
"""
import moose
from moose.utils import *
from moose.neuroml.NeuroML import NeuroML
from pylab import *
# All times are in seconds.
simdt = 10e-6 # simulation timestep
plotdt = 10e-6 # sampling interval for recorded tables
runtime = 0.2 # total simulated time
cells_path = '/cells' # neuromlR.readNeuroMLFromFile creates cells in '/cells'
def loadGran98NeuroML_L123(filename, params):
    """Load the CA1 cell from `filename`, run it with the hsolve
    integrator, and plot the somatic membrane potential.

    `params` is forwarded to NeuroML.readNeuroMLFromFile (e.g.
    {'combineSegments': ...}).
    """
    reader = NeuroML()
    population_dict, projection_dict = reader.readNeuroMLFromFile(
        filename, params=params)
    cell_path = population_dict['CA1group'][1][0].path
    print("Number of compartments = %d"
          % len(moose.Neuron(cell_path).children))
    # Soma segment name is hard-coded for the CA1 morphology files.
    soma_path = cell_path + '/Seg0_soma_0_0'
    vm_table = setupTable('somaVm', moose.Compartment(soma_path), 'Vm')
    soma = moose.Compartment(soma_path)
    print("Reinit MOOSE ... ")
    resetSim(['/elec', '/cells'], simdt, plotdt, simmethod='hsolve')  # from moose.utils
    print("Running ... ")
    moose.start(runtime)
    timepoints = arange(0.0, runtime, simdt)
    plot(timepoints, vm_table.vector[1:])
    title('Soma Vm')
    xlabel('time (s)')
    ylabel('Voltage (V)')
    print("Showing plots ...")
    show()
if __name__ == "__main__":
    # `sys` is never imported at module level in this file; import it
    # explicitly so argv access cannot raise NameError.
    import sys
    if len(sys.argv) < 2:
        filename = "CA1soma.net.xml"
        params = {}
    else:
        filename = sys.argv[1]
        params = {}
        if len(sys.argv) > 2:
            # BUG FIX: bool(sys.argv[2]) was True for ANY non-empty
            # string, including the literal "False".  Compare against
            # the documented literal instead.
            params = {'combineSegments': sys.argv[2] == 'True'}
            # sys.argv[2] should be True or False
    loadGran98NeuroML_L123(filename, params)
| {
"repo_name": "h-mayorquin/camp_india_2016",
"path": "tutorials/chemical switches/moose/neuroml/CA1PyramidalCell/CA1_hsolve.py",
"copies": "1",
"size": "1987",
"license": "mit",
"hash": 4347910332733903400,
"line_mean": 33.8596491228,
"line_max": 100,
"alpha_frac": 0.6597886261,
"autogenerated": false,
"ratio": 2.867243867243867,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4027032493343867,
"avg_score": null,
"num_lines": null
} |
def _ask(kind):
    """Prompt the player for one word of the requested kind."""
    return input("Please type %s: " % kind)

# Collect words and reveal the poem one stanza at a time.
adjective1 = _ask("an adjective")
object1 = _ask("an object")
pronoun1 = _ask("a pronoun")
body_part1 = _ask("a body part")
print(f"The trail of life persistently is {adjective1} without a {object1} to light my darkened way; instead {pronoun1} march, undaunted and unfazed, to follow paths that my {body_part1} surveys. ")
pronoun2 = _ask("a pronoun")
body_part2 = _ask("a body part")
adjective2 = _ask("an adjective")
print(f"{pronoun2} know not where this drumming in my {body_part2} will lead, for I'm {adjective2} of maps or charts.")
adjective3 = _ask("an adjective")
noun1 = _ask("a noun")
print(f"The only knowledge of this {adjective3} quest I hold is where the {noun1} starts.")
noun2 = _ask("a noun")
noun3 = _ask("a noun")
verb1 = _ask("a verb")
noun4 = _ask("a noun")
verb2 = _ask("a verb")
print(f"The {noun2} claim my life is tossed away on {noun3} conforming simpletons and fools. If Father Time should {verb1} me as they say, I will freely {noun4} and {verb2} to his rule.")
verb3 = _ask("a verb")
noun5 = _ask("a noun")
verb4 = _ask("a verb")
print(f"So {verb3} me not, I will {noun5} at any cost; for those who {verb4} are not always lost.")
| {
"repo_name": "glors131/Code-Projects",
"path": "Madlib1.py",
"copies": "1",
"size": "1544",
"license": "mit",
"hash": 7736748082466308000,
"line_mean": 47.8064516129,
"line_max": 221,
"alpha_frac": 0.6230569948,
"autogenerated": false,
"ratio": 2.7971014492753623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39201584440753623,
"avg_score": null,
"num_lines": null
} |
def ad2_qubo(qubo_out, adj_in):
    """Translate a comma-separated adjacency graph into a QUBO file
    encoding the 4-coloring problem (unary encoding, 4 qubits per node).

    Parameters:
        qubo_out -- path of the .qubo file to write
        adj_in   -- path of the adjacency file to read; each non-comment
                    line is "name,neighbor1,neighbor2,...", and lines
                    starting with 'c' or '#' are skipped

    Fixes over the original: the neighbor bookkeeping interleaved str
    and int elements and filtered them with `it > k`, which only worked
    because Python 2 allowed ordering comparisons between mismatched
    types (on Python 3 this raises TypeError).  Node indices are now
    resolved up front.  The fixed 1000-state limit, the unused
    `import csv`, and the dead scratch arrays are gone.  The emitted
    file is byte-identical to the original's output.
    """
    # -- read the adjacency description ---------------------------------
    adj_lines = []
    with open(adj_in, "r") as adj:
        for line in adj:
            if line.startswith(('c', '#')):
                continue
            adj_lines.append(line.strip('\n'))
    num_states = len(adj_lines)

    states = []          # node names, in file order
    states_adj = []      # raw neighbor count per node, as listed in the file
    neighbor_names = []  # per-node neighbor names, own name filtered out
    for line in adj_lines:
        record = line.split(',')
        name = record[0]
        states.append(name)
        states_adj.append(len(record) - 1)
        neighbor_names.append([n for n in record if n != name])

    # First occurrence wins, mirroring the original replacement order.
    index_of = {}
    for i, name in enumerate(states):
        if name not in index_of:
            index_of[name] = i

    # Keep only neighbors with a larger index so each edge is emitted once.
    states_records = []
    states_couplers = []
    total_ext = 0
    for i in range(num_states):
        rec = [index_of[n] for n in neighbor_names[i] if index_of[n] > i]
        states_records.append(rec)
        states_couplers.append(len(rec))
        total_ext += len(rec)

    # 4 qubits per node; 6 internal couplers per node plus 4 per edge.
    num_couplers = total_ext * 4 + num_states * 6
    num_nodes = num_states * 4

    # -- write the qubo --------------------------------------------------
    with open(qubo_out, "w") as qubo:
        qubo.write("c\n")
        qubo.write("c this qubo was created by adj2qubo.py for 4 color uninary encoding\n")
        qubo.write("c\n")
        qubo.write("p qubo 0 %d %d %d\n" % (num_nodes, num_nodes, num_couplers))
        # One -1 diagonal term per qubit (rewards picking a color).
        for st in range(num_states):
            qubo.write("c " + states[st] + "\n")
            for off in range(4):
                qubo.write(" %d %d -1 \n" % (st * 4 + off, st * 4 + off))
        qubo.write("c\n")
        qubo.write("c Couplers \n")
        qubo.write("c\n")
        for st in range(num_states):
            qubo.write("c %s %d neighbors %d external couplers\n"
                       % (states[st], states_adj[st], states_couplers[st] * 4))
            # Penalize choosing two colors at once (all qubit pairs of a node).
            for a in range(4):
                for b in range(a + 1, 4):
                    qubo.write(" %d %d 2 \n" % (st * 4 + a, st * 4 + b))
            # Penalize equal colors on adjacent nodes.
            for coupl_st in states_records[st]:
                qubo.write("c " + states[st] + " linked to " + states[coupl_st] + "\n")
                for off in range(4):
                    qubo.write(" %d %d 1 \n" % (st * 4 + off, coupl_st * 4 + off))
if __name__ == "__main__":
    # Command-line front end for ad2_qubo.  (Removed the unused
    # `import os`, and normalized required="True" -- a truthy string
    # that worked by accident -- to the boolean True.)
    import argparse

    parser = argparse.ArgumentParser(
        description='Read adj files and create a qubo 4 color map file')
    parser.add_argument("-i", "--adj",
                        help="Input adjacency graph file ", required=True)
    parser.add_argument("-o", "--qubo", type=str,
                        help="output .qubo file", required=True)
    parser.add_argument("-v", "--verbosity", action="store_true",
                        help="Verbosity level", default=0)
    args = parser.parse_args()
    ad2_qubo(args.qubo, args.adj)
| {
"repo_name": "tepl/qbsolv",
"path": "examples/mapColoringUSStates/adj2qubo.py",
"copies": "1",
"size": "4432",
"license": "apache-2.0",
"hash": 7245484955423801000,
"line_mean": 37.2068965517,
"line_max": 130,
"alpha_frac": 0.5435469314,
"autogenerated": false,
"ratio": 2.7786833855799373,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38222303169799376,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.