text stringlengths 0 1.05M | meta dict |
|---|---|
import sys
import os
from OMPython import OMCSession

# Start an OMC session and load the Modelica Standard Library.
omc = OMCSession()
omc.sendExpression("loadModel(Modelica)")

# Load OpenIPSL and every application example package.
# Raw strings are required here: without the r prefix, sequences such as
# "\N44" are a SyntaxError on Python 3 and the other backslashes only
# survive by accident of not matching an escape code.
package_files = [
    r'C:\dev\OSS\OpenIPSL_Repo\OpenIPSL\package.mo',
    r'C:\dev\OSS\OpenIPSL_Repo\Application Examples\_Tutorial\Tutorial\package.mo',
    r'C:\dev\OSS\OpenIPSL_Repo\Application Examples\AKD\package.mo',
    r'C:\dev\OSS\OpenIPSL_Repo\Application Examples\IEEE9\package.mo',
    r'C:\dev\OSS\OpenIPSL_Repo\Application Examples\IEEE14\package.mo',
    r'C:\dev\OSS\OpenIPSL_Repo\Application Examples\KundurSMIB\package.mo',
    r'C:\dev\OSS\OpenIPSL_Repo\Application Examples\N44\package.mo',
    r'C:\dev\OSS\OpenIPSL_Repo\Application Examples\PSAT_Systems\package.mo',
    r'C:\dev\OSS\OpenIPSL_Repo\Application Examples\SevenBus\package.mo',
    r'C:\dev\OSS\OpenIPSL_Repo\Application Examples\TwoAreas\package.mo',
]
for package_file in package_files:
    omc.sendExpression('loadFile("%s")' % package_file)

# Collect every class name (recursively) from each loaded package.
packages = ['OpenIPSL', 'Tutorial', 'AKD', 'IEEE9', 'IEEE14', 'KundurSMIB',
            'N44', 'PSAT_Systems', 'SevenBus', 'TwoAreas']
list_models = []
for package in packages:
    list_models += omc.sendExpression('getClassNames(%s,recursive=true)' % package)

# Read the copyright text that replaces each model's revisions annotation.
# The with-statement closes the file handle (the original leaked it).
with open('copyrightStatement') as newCopyrightFile:
    newCopyright = newCopyrightFile.read()

# Loop on all models: keep the existing info annotation, replace revisions.
for model in list_models:
    # Get the current annotation
    anno = omc.sendExpression("getDocumentationAnnotation(%s)" % (model))
    info = anno[0].replace('"', '\\"')  # escape quotes for the OMC command string
    cmdString = 'setDocumentationAnnotation(%s,info="%s",revisions="%s")' % (model, info, newCopyright)
    try:
        omc.sendExpression(cmdString)
        omc.sendExpression("save(%s)" % model)
        print('Model %s updated' % model)
    except Exception:
        # Dump the failing command and context for debugging, then abort.
        print(cmdString)
        print(model)
        print(info)
        raise
raise | {
"repo_name": "fran-jo/OpenIPSL",
"path": "Support/addCopyright.py",
"copies": "4",
"size": "2756",
"license": "mpl-2.0",
"hash": -6566674633318916000,
"line_mean": 49.1272727273,
"line_max": 169,
"alpha_frac": 0.7550798258,
"autogenerated": false,
"ratio": 3.149714285714286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5904794111514287,
"avg_score": null,
"num_lines": null
} |
"""Apply filters to a level or CustomObject."""
import os
import sys
import argparse
from contextlib import contextmanager
from distance import DefaultClasses
from distance.levelobjects import LevelObject
from distance.filter import getfilter
from distance.printing import PrintContext
level_objects = DefaultClasses.level_objects
def filterlevel_getfilter(name):
    """Resolve a filter class by name.

    The special name 'file' maps to the local FileFilter; everything else
    is delegated to distance.filter.getfilter.
    """
    return FileFilter if name == 'file' else getfilter(name)
@contextmanager
def optcontext(obj, func, *args, **kw):
    """Optionally enter a context manager method of obj.

    When obj is None, yields None without doing anything; otherwise calls
    obj.<func>(*args, **kw), enters the resulting context manager and
    yields whatever it produces.
    """
    if obj is None:
        yield None
        return
    manager = getattr(obj, func)(*args, **kw)
    with manager as entered:
        yield entered
def apply_filters(filters, content, p=None, **kw):
    """Run every filter in `filters` over `content`, in order.

    p is an optional PrintContext used for progress output; extra keyword
    arguments are forwarded to each filter's apply(). Returns False as soon
    as one filter reports failure, True when all succeed.
    """
    if p:
        p(f"Filters: {len(filters)}")
    # The tree_children context is only entered when a PrintContext exists.
    with optcontext(p, 'tree_children', count=len(filters)):
        for f in filters:
            if p:
                p.tree_next_child()
                p(f"Filter: {f.__def_string}")
            if not f.apply(content, p=p, **kw):
                return False
    return True
class FileFilter(object):
    """Filter that loads a chain of sub-filters from a definition file."""

    @classmethod
    def add_args(cls, parser):
        """Register the ':src' / ':relative_to' options on `parser`."""
        parser.add_argument(":src", help="File containing the filter definitions.")
        parser.add_argument(":relative_to", help="Path that src is relative to (used internally).")
        parser.description = "Load a filter chain from file."
        parser.epilog = """
Filter files consist any number of filters, one per line.
Filters are formatted as per the -o/--of/--objfilter argument.
Empty lines and lines starting with '#' are ignored.
"""

    def __init__(self, args):
        """Parse the file named by args.src and build the sub-filter chain."""
        src = args.src
        # Popped so relative_to is not forwarded to the sub-filters' defaults.
        relative_to = args.__dict__.pop('relative_to', None) or '.'
        if not src.startswith('/'):
            src = os.path.join(relative_to, src)
        abssrc = os.path.abspath(src)
        self.src = os.path.relpath(src)

        def create(l):
            # Nested filter files resolve their paths against this file's dir.
            defaults = dict(relative_to=os.path.dirname(abssrc), **args.__dict__)
            return create_filter(l, defaults)

        # One filter definition per non-empty, non-comment line.
        with open(abssrc, 'r') as f:
            self.filters = [create(l) for l in map(str.strip, f)
                            if l and not l.startswith('#')]
        self.aborted = False

    def apply(self, content, p=None, **kw):
        """Apply the loaded filter chain to `content`.

        NOTE(review): the result of apply_filters is discarded, so a failing
        sub-filter does not propagate failure — confirm this is intended.
        """
        if p:
            p(f"File: {self.src!r}")
        apply_filters(self.filters, content, p=p)
        return True
def make_arglist(s):
    """Split `s` on unescaped ':' separators into argparse-style tokens.

    A backslash escapes the next character (so '\\:' yields a literal
    colon inside a token). Each token is returned prefixed with ':' to
    match the filter parsers' prefix_chars. An empty string yields [].
    """
    if not s:
        return []
    parts = []
    current = []
    pending_escape = False
    for ch in s:
        if pending_escape:
            # Previous char was a backslash: take this char literally.
            current.append(ch)
            pending_escape = False
        elif ch == '\\':
            pending_escape = True
        elif ch == ':':
            parts.append(''.join(current))
            current = []
        else:
            current.append(ch)
    parts.append(''.join(current))
    return [':' + part for part in parts]
def create_filter(option, defaults):
    """Instantiate a filter from an option string like "name:arg1:arg2".

    `defaults` pre-populates the argparse namespace handed to the filter's
    constructor. The raw option string is stored on the instance for
    progress reporting.
    """
    name, sep, argstr = option.partition(':')
    cls = filterlevel_getfilter(name)
    # Filter arguments use ':' as the option prefix instead of '-'.
    parser = argparse.ArgumentParser(prog=name, prefix_chars=':',
                                     add_help=False)
    parser.add_argument(':help', action='help', default=argparse.SUPPRESS,
                        help='show this help message and exit')
    parser.set_defaults(**defaults)
    cls.add_args(parser)
    args = parser.parse_args(make_arglist(argstr))
    flt = cls(args)
    flt.__def_string = option
    return flt
def main():
    """Command-line entry point: read a level, apply filters, write result.

    Returns a process exit code (0 on success, 1 on any error).
    """
    parser = argparse.ArgumentParser(
        description=__doc__)
    parser.add_argument("-f", "--force", action='store_true',
                        help="Allow overwriting OUT file.")
    parser.add_argument("-l", "--maxrecurse", type=int, default=-1,
                        help="Maximum recursion depth.")
    parser.add_argument("-o", "--of", "--objfilter", dest='objfilters',
                        action='append', default=[],
                        help="Specify a filter option.")
    parser.add_argument("--list", action='store_true',
                        help="Dump result listing.")
    parser.add_argument("IN", nargs='?',
                        help="Level .bytes filename.")
    parser.add_argument("OUT", nargs='?',
                        help="output .bytes filename.")
    args = parser.parse_args()

    defaults = dict(maxrecurse=args.maxrecurse)
    filters = [create_filter(f, defaults) for f in args.objfilters]

    if args.IN is None:
        print(f"{parser.prog}: No input file specified.", file=sys.stderr)
        return 1
    if args.OUT is None:
        print(f"{parser.prog}: No output file specified.", file=sys.stderr)
        return 1

    # 'xb' refuses to overwrite an existing file unless -f was given.
    write_mode = 'xb'
    if args.force:
        write_mode = 'wb'
    elif args.OUT != '-' and os.path.exists(args.OUT):
        print(f"{parser.prog}: file {args.OUT} exists."
              " pass -f to force.", file=sys.stderr)
        return 1

    # '-' selects stdin/stdout as input/output streams.
    if args.IN == '-':
        from io import BytesIO
        srcarg = BytesIO(sys.stdin.buffer.read())
    else:
        srcarg = args.IN
    content = DefaultClasses.level_like.read(srcarg)

    # Wrap non-group objects in a Group so the filters see a uniform
    # container; the wrapper is unwrapped again before writing.
    is_wrapped = False
    if isinstance(content, LevelObject) and content.type != 'Group':
        is_wrapped = True
        content = DefaultClasses.level_objects.create('Group', children=[content])

    p = PrintContext(file=sys.stderr, flags=('groups', 'subobjects'))
    if not apply_filters(filters, content, p=p):
        return 1

    if is_wrapped and len(content.children) == 1:
        content = content.children[0]

    if args.list:
        p.print_object(content)

    print("writing...", file=sys.stderr)
    if args.OUT == '-':
        destarg = sys.stdout.buffer
    else:
        destarg = args.OUT
    n = content.write(destarg, write_mode=write_mode)
    print(f"{n} bytes written", file=sys.stderr)
    return 0
if __name__ == '__main__':
exit(main())
# vim:set sw=4 ts=8 sts=4 et:
| {
"repo_name": "ferreum/distanceutils",
"path": "distance_scripts/filterlevel.py",
"copies": "1",
"size": "5909",
"license": "mit",
"hash": -5196361615892881000,
"line_mean": 28.9949238579,
"line_max": 99,
"alpha_frac": 0.5716703334,
"autogenerated": false,
"ratio": 4.016995241332427,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005330505222398293,
"num_lines": 197
} |
# Applying Machine Learning to Sentiment Analysis
'''
In this chapter, we will delve into a subfield of natural language processing (NLP) called sentiment analysis
and learn how to use machine learning algorithms to classify documents based on their polarity: the attitude
of the writer. The topics we will cover in the following sections include:
. Cleaning and preparing text data
. Building feature vectors from text documents
. Training a machine learning model to classify positive and negative movie reviews
. Working with large text datasets using out-of-core learning
'''
import pyprind
import pandas as pd
import os

# Read the 50,000 aclImdb review files into a single DataFrame with a
# progress bar. Label mapping: positive review -> 1, negative -> 0.
pbar = pyprind.ProgBar(50000)
labels = {'pos':1, 'neg':0}
df = pd.DataFrame()
for s in ('test', 'train'):
    for l in ('pos', 'neg'):
        # NOTE(review): non-raw Windows path — "\U" in "C:\Users" is an
        # invalid escape on Python 3; this script only runs on Python 2.
        path = 'C:\Users\Wei\Downloads/aclImdb\%s/%s' % (s, l)
        for file in os.listdir(path):
            with open(os.path.join(path, file), 'r') as infile:
                txt = infile.read()
            df = df.append([[txt, labels[l]]], ignore_index=True)
            pbar.update()
df.columns = ['review', 'sentiment']

import numpy as np
# Shuffle the rows so later train/test splits are not ordered by class,
# then round-trip through CSV so subsequent runs can skip the slow load.
np.random.seed(0)
df = df.reindex(np.random.permutation(df.index))
df.to_csv('C:\Users\Wei\Downloads/aclImdb\movie_data.csv', index=False)
df = pd.read_csv('C:\Users\Wei\Downloads/aclImdb\movie_data.csv')
df.head(3)
# Introducing the bag-of-words model
# Transforming words into feature vectors
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
count = CountVectorizer()
# Tiny demo corpus ('sweer' is a typo for 'sweet' preserved from the source).
docs = np.array([
    'The sun is shining',
    'The weather is sweer',
    'The sun is shining and the weather is sweet'])
bag = count.fit_transform(docs)
# vocabulary_ maps each word to its column index; the dense array holds
# the raw term counts per document (Python 2 print statements).
print count.vocabulary_
print bag.toarray()

# Accessing word relevancy via term frequency-inverse document frequency
from sklearn.feature_extraction.text import TfidfTransformer
tfidf = TfidfTransformer()
np.set_printoptions(precision=2)
# Down-weight terms that occur in many documents (tf-idf re-weighting).
print tfidf.fit_transform(count.fit_transform(docs)).toarray()
# Cleaning text data
df.loc[0, 'review'][-50:]
import re
def preprocessor(text):
    """Clean one review: strip HTML, lowercase, keep emoticons at the end.

    Emoticons such as :) ;-( =D are collected before punctuation is
    removed and appended (with their '-' noses stripped) after the
    cleaned, lowercased text.
    """
    # Drop HTML tags first so tag characters cannot interfere.
    no_markup = re.sub('<[^>]*>', '', text)
    emoticons = re.findall('(?::|;|=)(?:-)?(?:\)|\(|D|P)', no_markup)
    # Lowercase and collapse every run of non-word characters to a space.
    cleaned = re.sub('[\W]+', ' ', no_markup.lower())
    return cleaned + ' '.join(emoticons).replace('-', '')
preprocessor(df.loc[0, 'review'][-50:])
preprocessor("</a>This :) is :( a test :-)!")
df['review'] = df['review'].apply(preprocessor)
# Processing documents into tokens
from nltk.stem.porter import PorterStemmer
porter = PorterStemmer()

def tokenizer(text):
    """Split text on whitespace."""
    return text.split()

def tokenizer_porter(text):
    """Split text on whitespace and reduce each token to its Porter stem."""
    return [porter.stem(word) for word in text.split()]

tokenizer('runners like running and thus they run')
tokenizer_porter('runners like running and thus they run')

import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
# English stop words, used to drop very common, low-information tokens.
stop = stopwords.words('english')
[w for w in tokenizer_porter('a runner likes running and runs a lot')[-10:]
 if w not in stop]

# Training a logistic regression model for document classification
# NOTE(review): .loc slicing is inclusive, so row 25000 appears in both
# the training and the test set (off-by-one carried over from the book).
X_train = df.loc[:25000, 'review'].values
y_train = df.loc[:25000, 'sentiment'].values
X_test = df.loc[25000:, 'review'].values
y_test = df.loc[25000:, 'sentiment'].values
# sklearn.grid_search is the pre-0.18 location; modern scikit-learn moved
# GridSearchCV to sklearn.model_selection.
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer

tfidf = TfidfVectorizer(strip_accents=None,
                        lowercase=False,
                        preprocessor=None)
# Two sub-grids: (1) default tf-idf weighting, (2) raw term frequencies
# (use_idf=False, norm=None). Both sweep tokenizer, stop words, penalty, C.
param_grid = [{'vect__ngram_range': [(1, 1)],
               'vect__stop_words': [stop, None],
               'vect__tokenizer': [tokenizer, tokenizer_porter],
               'clf__penalty': ['l1', 'l2'],
               'clf__C': [1.0, 10.0, 100.0]},
              {'vect__ngram_range': [(1, 1)],
               'vect__stop_words': [stop, None],
               'vect__tokenizer': [tokenizer, tokenizer_porter],
               'vect__use_idf':[False],
               'vect__norm':[None],
               'clf__penalty': ['l1', 'l2'],
               'clf__C': [1.0, 10.0, 100.0]},
              ]
lr_tfidf = Pipeline([('vect', tfidf),
                     ('clf', LogisticRegression(random_state=0))])
# 5-fold CV over the whole grid; n_jobs=-1 uses every available core.
gs_lr_tfidf = GridSearchCV(lr_tfidf, param_grid,
                           scoring='accuracy',
                           cv=5,
                           verbose=1,
                           n_jobs=-1)
gs_lr_tfidf.fit(X_train, y_train)
print('Best parameter set: %s ' % gs_lr_tfidf.best_params_)
print('CV Accuracy: %.3f' % gs_lr_tfidf.best_score_)
clf = gs_lr_tfidf.best_estimator_
print('Test Accuracy: %.3f' % clf.score(X_test, y_test))
# Working with bigger data - online algorithms and out-of-core learning
import numpy as np
import re
from nltk.corpus import stopwords

def tokenizer(text):
    """Clean one raw review and return its tokens.

    Strips HTML markup, keeps emoticons (noses removed), lowercases,
    replaces punctuation with spaces and drops stop words. Relies on the
    module-level `stop` list defined earlier.
    """
    text = re.sub('<[^>]*>', '', text)
    emoticons = re.findall('(?::|;|=)(?:-)?(?:\)|\(|D|P)', text.lower())
    text = re.sub('[\W]+', ' ', text.lower()) +\
        ' '.join(emoticons).replace('-', '')
    tokenized = [w for w in text.split() if w not in stop]
    return tokenized
def stream_docs(path):
    """Yield one (review_text, label) pair per line from the CSV at `path`.

    The header line is skipped. Each data line is expected to end with
    ',<digit>' plus the newline, so the text is everything except the last
    three characters and the label is the second-to-last character.
    """
    # The rest of this script targets Python 2, where the built-in open()
    # has no `encoding` parameter; io.open works identically on 2 and 3.
    import io
    with io.open(path, 'r', encoding='utf-8') as csv:
        next(csv)  # skip header
        for line in csv:
            text, label = line[:-3], int(line[-2])
            yield text, label
next(stream_docs(path='C:\Users\Wei\Downloads/aclImdb\movie_data.csv'))
def get_minibatch(doc_stream, size):
    """Pull up to `size` (text, label) pairs from `doc_stream`.

    Returns (texts, labels) lists of length `size`, or (None, None) when
    the stream runs out before a full batch is collected (any partial
    batch is discarded, matching the out-of-core training loop's needs).
    """
    texts, labels = [], []
    for _ in range(size):
        try:
            text, label = next(doc_stream)
        except StopIteration:
            return None, None
        texts.append(text)
        labels.append(label)
    return texts, labels
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
# The hashing trick needs no fitted vocabulary, so it works out-of-core.
vect = HashingVectorizer(decode_error='ignore',
                         n_features=2**21,
                         preprocessor=None,
                         tokenizer=tokenizer)
# loss='log' makes SGD fit a logistic regression model incrementally.
clf = SGDClassifier(loss='log', random_state=1, n_iter=1)
doc_stream = stream_docs(path='./movie_data.csv')

import pyprind
pbar = pyprind.ProgBar(45)
classes = np.array([0, 1])
# 45 minibatches of 1000 documents each = 45,000 training reviews.
for _ in range(45):
    X_train, y_train = get_minibatch(doc_stream, size=1000)
    if not X_train:
        break
    X_train = vect.transform(X_train)
    clf.partial_fit(X_train, y_train, classes=classes)
    pbar.update()
# The remaining 5000 documents serve as the held-out test set.
X_test, y_test = get_minibatch(doc_stream, size=5000)
X_test = vect.transform(X_test)
print('Accuracy: %.3f' % clf.score(X_test, y_test))
# Finally fold the test documents into the model as well (online update).
clf = clf.partial_fit(X_test, y_test)
| {
"repo_name": "wei-Z/Python-Machine-Learning",
"path": "self_practice/Chapter 8 Applying Machine Learning to Sentiment Analysis.py",
"copies": "1",
"size": "6663",
"license": "mit",
"hash": 6673447254264849000,
"line_mean": 31.9900990099,
"line_max": 110,
"alpha_frac": 0.621641903,
"autogenerated": false,
"ratio": 3.3753799392097266,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44970218422097263,
"avg_score": null,
"num_lines": null
} |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import cvxpy as cp
import networkx as nx
import pyprobml_utils as pml

# Flow-cytometry protein expression data (Elements of Statistical Learning).
url = 'https://raw.githubusercontent.com/empathy87/The-Elements-of-Statistical-Learning-Python-Notebooks/master/data/protein.data'
df = pd.read_csv(url, header=None, sep=' ')
X = df.to_numpy()
protein_names = ['Raf', 'Mek', 'Plcg', 'PIP2', 'PIP3', 'Erk', 'Akt', 'PKA', 'PKC', 'P38', 'Jnk']
p = len(protein_names)
# the empirical covariance matrix
S = np.cov(X, rowvar=False)/1000

lambdas = [36, 27, 7]
theta_estimates = []
# In practice it is informative to examine the different sets of graphs that are obtained as λ is varied. Figure shows 4 different
# solutions. The graph becomes more sparse as the penalty parameter is increased.
for lam in lambdas:
    # theta should be symmetric positive-definite
    theta = cp.Variable(shape=(p, p), PSD=True)
    # An alternative formulation of the problem can be posed,
    # where we don't penalize the diagonal of theta.
    l1_penalty = sum([cp.abs(theta[i, j])
                      for i in range(p)
                      for j in range(p) if i != j])
    # Penalized Gaussian log-likelihood of the precision matrix.
    objective = cp.Maximize(
        cp.log_det(theta) - cp.trace(theta@S) - lam*l1_penalty)
    problem = cp.Problem(objective)
    problem.solve()
    if problem.status != cp.OPTIMAL:
        raise Exception('CVXPY Error')
    theta_estimates.append(theta.value)

# λ = 0 corresponds to the unpenalized estimate, i.e. the inverse of S.
lambdas.append(0)
theta_estimates.append(np.linalg.inv(S))

# Four different graphical-lasso solutions for the flow-cytometry data.
tmp = {name: name for name in protein_names}
#fig, axarr = plt.subplots(2, 2, figsize=(6, 6), dpi=150)
#plt.subplots_adjust(wspace=0.1, hspace=0.1)
# Place the 11 protein nodes evenly on a circle, starting at the top.
angles = np.linspace(0, 1, p + 1)[:-1] * 2 * np.pi + np.pi/2
for plot_idx in range(4):
    # An edge exists wherever the precision entry is numerically non-zero.
    cons = np.argwhere(np.abs(theta_estimates[plot_idx]) > 0.00001)
    G, node_pos = nx.Graph(), {}
    for i, node in enumerate(protein_names):
        G.add_node(node)
        node_pos[node] = np.array([np.cos(angles[i]), np.sin(angles[i])])
    for i in range(cons.shape[0]):
        G.add_edge(protein_names[cons[i, 0]], protein_names[cons[i, 1]])
    #ax = axarr[plot_idx//2, plot_idx % 2]
    fig, ax = plt.subplots()
    nx.draw(G, node_pos, node_size=3, with_labels=False, ax=ax,
            edge_color='#174A7E', width=0.6, node_color='#174A7E')
    description = nx.draw_networkx_labels(G, node_pos, labels=tmp, ax=ax)
    # Nudge each label just outside its node and shrink the font.
    for (i, (node, t)) in enumerate(description.items()):
        t.set_position((np.cos(angles[i]), np.sin(angles[i])+0.08))
        t.set_fontsize(7)
    ax.set_xlim(-1.2, 1.2)
    ax.set_ylim(-1.2, 1.2)
    ax.text(0, 1.18, f'λ = {lambdas[plot_idx]}', fontsize=8)
    plt.tight_layout()
    pml.savefig(f'ggm_lasso{plot_idx}.pdf')
    plt.show()
| {
"repo_name": "probml/pyprobml",
"path": "scripts/ggm_lasso_demo.py",
"copies": "1",
"size": "3080",
"license": "mit",
"hash": -5806677321704314000,
"line_mean": 40.04,
"line_max": 152,
"alpha_frac": 0.6650422352,
"autogenerated": false,
"ratio": 2.8212648945921175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39863071297921177,
"avg_score": null,
"num_lines": null
} |
"""Apply K-folds CV for some model define in ch3 and ch4.
Note:All CV get approximately result as book, but most CV use one standard rule can not get same best alpha with book.
If I use CV with randomize train data, I can get the result as same as book most time.
"""
import numpy as np
from ..ch3.models import *
from ..ch4.models import *
DIRECTION_LEFT = 'left'
DIRECTION_RIGHT = 'right'
class BaseCV:
    """Base class for K-fold cross validation over one regularization field.

    Subclasses set `_bound_model` to the model class under validation and
    `_cv_field_name` to the keyword argument that receives each candidate
    value from `alphas`. After train(), `self.model` holds the bound model
    refit on the full training set with the chosen `best_alpha`.
    """

    _bound_model = None
    # name that model use. ex, PCR use `m`, Ridge use `alpha`.
    _cv_field_name = 'alpha'
    _inc_regularization_direction = DIRECTION_LEFT
    _one_standard_rule = True

    def __init__(self, train_x, train_y, features_name=None, do_standardization=True,
                 k_folds=10, alphas=None, random=False, select_train_method='step', **kwargs):
        self.train_x = train_x
        self.train_y = train_y
        self.k_folds = k_folds
        self.alphas = alphas
        self.random = random
        self.select_train_method = select_train_method
        # Remaining kwargs are forwarded verbatim to the bound model.
        self.kwargs = kwargs
        self.kwargs['features_name'] = features_name
        self.kwargs['do_standardization'] = do_standardization

    def pre_processing(self):
        """Provide same API as Model, we split data to K folds here.
        """
        if self.random:
            # Shuffle x and y with the same random permutation.
            mask = np.random.permutation(self.train_x.shape[0])
            train_x = self.train_x[mask]
            train_y = self.train_y[mask]
        else:
            train_x = self.train_x[:]
            train_y = self.train_y[:]
        if self.select_train_method == 'step':
            # Round-robin split: fold i takes every k-th row starting at i.
            self.x_folds = [train_x[i::self.k_folds] for i in range(0, self.k_folds)]
            self.y_folds = [train_y[i::self.k_folds] for i in range(0, self.k_folds)]
        else:
            # Contiguous split into k roughly equal chunks.
            self.x_folds = np.array_split(train_x, self.k_folds)
            self.y_folds = np.array_split(train_y, self.k_folds)
        # for i in range(self.k_folds):
        #     self.x_folds[i] = self.train_x[0] + self.x_folds[i] + self.train_x[-1]
        #     self.y_folds[i] = self.train_y[0] + self.y_folds[i] + self.train_y[-1]

    @staticmethod
    def combine_train_folds(folds, exclude):
        """
        :return a matrix combine folds exclude specify index
        """
        train_list = folds[:exclude]
        train_list.extend(folds[exclude + 1:])
        mat = np.concatenate(train_list)
        return mat

    def _model_test(self, model, cv_x, cv_y):
        # Validation metric: mean squared error (overridden by classifiers).
        return model.test(cv_x, cv_y).mse

    def train(self):
        """
        calculate cv error and cv std error, then use one standard error to choose best alpha.
        reference
        -----------
        http://www.stat.cmu.edu/~ryantibs/datamining/lectures/19-val2-marked.pdf
        http://www.stat.cmu.edu/~ryantibs/datamining/lectures/18-val1-marked.pdf
        """
        alphas = self.alphas
        alpha_errs = np.zeros((len(alphas), 1)).flatten()
        alpha_std_errs = np.zeros((len(alphas), 1)).flatten()
        for idx, alpha in enumerate(alphas):
            err = np.zeros((self.k_folds, 1)).flatten()
            foldwise_err = np.zeros((self.k_folds, 1)).flatten()
            for k in range(self.k_folds):
                # Train on every fold except k, validate on fold k.
                train_x = self.combine_train_folds(self.x_folds, exclude=k)
                train_y = self.combine_train_folds(self.y_folds, exclude=k)
                cv_x = self.x_folds[k]
                cv_y = self.y_folds[k]
                kwargs = self.kwargs.copy()
                kwargs[self._cv_field_name] = alpha
                model = self._bound_model(train_x, train_y, **kwargs)
                model.pre_processing()
                model.train()
                foldwise_err[k] = self._model_test(model, cv_x, cv_y)
                # Weight each fold's error by its size for the pooled mean.
                err[k] = self._model_test(model, cv_x, cv_y) * cv_x.shape[0]
            std_err = (np.var(foldwise_err) **0.5) / (self.k_folds**0.5)
            print('err', err)
            # std_err = foldwise_err.std() / (self.k_folds**0.5)
            # tot_err = sum(err) / (self.train_x.shape[0])
            tot_err = sum(err) / sum(len(x) for x in self.x_folds)
            alpha_std_errs[idx] = std_err
            alpha_errs[idx] = tot_err
        if self._one_standard_rule:
            # use one standard error rule to find best alpha
            alpha_hat_idx = alpha_errs.argmin()
            # we move alpha for cease the (cv)_alpha <= (cv)_alpha_hat + (cv_std)_alpha_hat
            cv_hat = alpha_errs[alpha_hat_idx] + alpha_std_errs[alpha_hat_idx]
            if self._inc_regularization_direction is DIRECTION_LEFT:
                move_direction = reversed(range(0, alpha_hat_idx+1))
            else:
                move_direction = range(alpha_hat_idx, len(alphas))
            print('alphas len', len(alphas))
            print('alpha_hat idx', alpha_hat_idx)
            print('cv hat', cv_hat)
            self.best_alpha = -1
            # find the best_alpha
            last_idx = None
            # NOTE(review): `last_idx` is truth-tested below, so it is falsy
            # when it equals index 0 — fold index 0 can never be selected
            # here; `last_idx is not None` was probably intended. Confirm.
            for idx in move_direction:
                if (alpha_errs[idx] > cv_hat) and (last_idx and alpha_errs[last_idx] <= cv_hat):
                    self.best_alpha = alphas[last_idx]
                    #break
                last_idx = idx
        else:
            self.best_alpha = alphas[alpha_errs.argmin()]
        self.alpha_errs = alpha_errs
        self.alpha_std_errs = alpha_std_errs
        # Refit the bound model on the full training set with the chosen alpha.
        kwargs = self.kwargs.copy()
        kwargs[self._cv_field_name] = self.best_alpha
        model = self._bound_model(self.train_x, self.train_y, **kwargs)
        model.pre_processing()
        model.train()
        self.model = model

    def __getattr__(self, name):
        # make a proxy for self.model, when call method not define in CV model, use the method find in self.model.
        return getattr(self.model, name)
class BaseLogisticCV(BaseCV):
    """CV base for classifiers: validates on error rate instead of MSE."""

    def _model_test(self, model, cv_x, cv_y):
        return model.test(cv_x, cv_y).error_rate
class RidgeCV(BaseCV):
    """K-fold CV for ridge regression over the `alpha` penalty."""
    _bound_model = RidgeModel
    _inc_regularization_direction = DIRECTION_LEFT


class PCRCV(BaseCV):
    """K-fold CV for principal components regression over component count `m`."""
    _bound_model = PrincipalComponentsRegression
    _cv_field_name = 'm'
    _inc_regularization_direction = DIRECTION_LEFT


class PartialLeastSquareCV(BaseCV):
    """K-fold CV for partial least squares over the direction count `M`."""
    _bound_model = PartialLeastSquare
    _cv_field_name = 'M'
    _inc_regularization_direction = DIRECTION_LEFT


class BestSubsetSelectionCV(BaseCV):
    """K-fold CV for best subset selection over the subset size `k`."""
    _bound_model = BestSubsetSelection
    _cv_field_name = 'k'
    _inc_regularization_direction = DIRECTION_LEFT
class RDACV(BaseLogisticCV):
    """K-fold CV for regularized discriminant analysis over `alpha`.

    Picks the alpha with minimum CV error directly (one-standard rule off).
    """
    _bound_model = RDAModel
    _cv_field_name = 'alpha'
    _one_standard_rule = False

    def _model_test(self, model, cv_x, cv_y):
        # Validation score: -2/N times the summed discriminant value of each
        # sample's true class (lower is better).
        X = model._pre_processing_x(cv_x)
        N = X.shape[0]
        err = 0
        for k in range(model.n_class):
            d = model.quadratic_discriminant_func(X, k)
            t = d.diagonal()
            # Class labels are 1..n_class, hence the k+1 offset.
            err += sum(t[cv_y ==(k+1)])
        return err*(-2) / N
| {
"repo_name": "littlezz/ESL-Model",
"path": "esl_model/ch7/models.py",
"copies": "1",
"size": "6907",
"license": "mit",
"hash": 6670187029120702000,
"line_mean": 33.3631840796,
"line_max": 118,
"alpha_frac": 0.5752135515,
"autogenerated": false,
"ratio": 3.3351038145823275,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44103173660823275,
"avg_score": null,
"num_lines": null
} |
"""Apply patches to a parsed json document.
Add { op: "add", path: "/a/b/c", value: v }
Add value v to /a/b at index c. If /a/b is a list, c must be an integer, and
v will be inserted at c. To append a value to list, use a trailing / in the
path.
Remove { op: "remove", path: "/a/b/c" }
Remove the key c from dictionary /a/b, or the item at index c from list /a/b.
Replace { op: "replace", path: "/a/b/c", value: v, src: k }
Replace the item at c on /a/b with v. If check is specified, the patch will
fail if the existing value at c is not k.
Merge { op: "merge", path: "/a/b/c", value: { k0: v0, k1: v1, ..., kn: vn } }
This will create or replace all k0 - kn on c. It can be used to group
multiple adds and replaces, or can act as an upsert.
Copy { op: "copy", path: "/b/c/d", src: "/a/b/c" }
Copy the value at /a/b/c to d on /b/c.
Move { op: "move", path: "/b/c/d", src: "/a/b/c" }
Equivalent to a copy from 'src' to 'path' followed by a remove at 'path'.
Test { op: "test", path: "/a/b/c" [, value: v] }
Test that the index c on /a/b is specified. If value is given, check
additionally that the value at c on /a/b equals v
Set Remove { op: "setremove", path: "/a/b", value: v }
Remove v from the list at /a/b, if v is in /a/b.
Set Add { op: "setadd", path: "/a/b", value: v }
Add v to the list at /a/b if it doesn't already exist in the list.
All paths must conform to RFC 6901.
Inspiration from http://jsonpatch.com with additional features to handle
certain types of concurrent editing.
"""
from collections import namedtuple
from bidon.util import try_parse_int
# Sentinel meaning "no value supplied"; distinct from None, which is a
# legal JSON value.
_NO_VAL = object()
_PATH_SEP = "/"

# One patch operation; `value` and `src` default to the _NO_VAL sentinel.
Patch = namedtuple("Patch", ["op", "path", "value", "src"])
Patch.__new__.__defaults__ = (_NO_VAL, _NO_VAL)


class JSONPathError(Exception):
    """Instances of this Error are thrown when json_patch cannot understand a path in a given context.
    """
    pass


class JSONPatchError(Exception):
    """Instances of this Error are thrown when json_patch is unable to perform a requested action."""
    pass
def add(parent, idx, value):
    """Insert `value` into `parent` (dict or list) at key/index `idx`.

    Dicts refuse to overwrite an existing key (JSONPatchError). For lists,
    an idx of "" or "~" appends; anything else is an insert position.
    Any other parent type raises JSONPathError.
    """
    if isinstance(parent, dict):
        if idx in parent:
            raise JSONPatchError("Item already exists")
        parent[idx] = value
        return
    if isinstance(parent, list):
        if idx in ("", "~"):
            parent.append(value)
        else:
            parent.insert(int(idx), value)
        return
    raise JSONPathError("Invalid path for operation")
def remove(parent, idx):
    """Delete the entry at `idx` from a dict or list `parent`.

    List indices are given as strings and converted; any other parent
    type raises JSONPathError.
    """
    if isinstance(parent, dict):
        parent.pop(idx)
    elif isinstance(parent, list):
        parent.pop(int(idx))
    else:
        raise JSONPathError("Invalid path for operation")
def replace(parent, idx, value, check_value=_NO_VAL):
    """Replace the value at `idx` in `parent` (dict or list) with `value`.

    Raises JSONPatchError when the dict key is missing, the list index is
    out of range, or `check_value` is given and does not equal the current
    value. Raises JSONPathError for any other parent type, mirroring
    add()/remove() (previously this fell through to an arbitrary
    TypeError on unsupported parents).
    """
    if isinstance(parent, dict):
        if idx not in parent:
            raise JSONPatchError("Item does not exist")
    elif isinstance(parent, list):
        idx = int(idx)
        if idx < 0 or idx >= len(parent):
            raise JSONPatchError("List index out of range")
    else:
        # Consistency fix: fail loudly like the sibling operations do.
        raise JSONPathError("Invalid path for operation")
    if check_value is not _NO_VAL:
        if parent[idx] != check_value:
            raise JSONPatchError("Check value did not pass")
    parent[idx] = value
def merge(parent, idx, value):
    """Merge a value.

    Creates or overwrites every key/value pair of `value` on the mapping
    found at parent[idx] (an upsert for multiple keys at once).
    """
    target = get_child(parent, idx)
    for key, val in value.items():
        target[key] = val
def copy(src_parent, src_idx, dest_parent, dest_idx):
    """Copy an item.

    Assigns the value at src_parent[src_idx] to dest_parent[dest_idx].
    Note this copies the reference, not a deep copy of the value.
    """
    if isinstance(dest_parent, list):
        dest_idx = int(dest_idx)
    dest_parent[dest_idx] = get_child(src_parent, src_idx)
def move(src_parent, src_idx, dest_parent, dest_idx):
    """Move an item.

    Implemented as a copy to the destination followed by removal of the
    source entry.
    """
    copy(src_parent, src_idx, dest_parent, dest_idx)
    remove(src_parent, src_idx)
def test(parent, idx, value=_NO_VAL):
    """Check to see if an item exists.

    Returns True when parent[idx] resolves; when `value` is supplied, the
    resolved value must also equal it. Never raises — any lookup failure
    is reported as False.
    """
    try:
        val = get_child(parent, idx)
    except Exception:
        return False
    if value is _NO_VAL:
        return True
    return val == value
def set_remove(parent, idx, value):
    """Remove `value` from the list at parent[idx], if present (no-op otherwise)."""
    lst = get_child(parent, idx)
    if value in lst:
        lst.remove(value)
def set_add(parent, idx, value):
    """Append `value` to the list at parent[idx] unless it is already present."""
    lst = get_child(parent, idx)
    if value not in lst:
        lst.append(value)
def get_children(parent, idx):
    """Yield the child at parent[idx]; an idx of "*" on a list yields every item.

    Raises JSONPathError for a missing dict key, an out-of-range or
    non-integer list index, or a parent type with no children.
    """
    if isinstance(parent, dict):
        if idx not in parent:
            raise JSONPathError("Invalid path at {0}".format(idx))
        yield parent[idx]
    elif isinstance(parent, list):
        if idx == "*":
            # Wildcard: every element of the list.
            yield from parent
        else:
            ok, pos = try_parse_int(idx)
            if not (ok and 0 <= pos < len(parent)):
                raise JSONPathError("Invalid list index: {0}".format(pos))
            yield parent[pos]
    else:
        raise JSONPathError("Type {0} does not have children".format(type(parent).__name__))
def get_child(parent, idx):
    """Get the first child according to idx.

    Propagates JSONPathError from get_children; raises StopIteration if
    the match set is empty (e.g. "*" on an empty list).
    """
    return next(get_children(parent, idx))
def parse_path(path):
    """Parse an RFC 6901 path into its component parts.

    Strings must start with "/" and are split on it (the leading empty
    component is dropped); tuples and lists are assumed pre-parsed and
    returned as-is. Raises ValueError for empty, bare-"/", non-absolute,
    or wrongly-typed paths.
    """
    if not path:
        raise ValueError("Invalid path")
    if isinstance(path, (tuple, list)):
        return path
    if isinstance(path, str):
        if path == "/" or not path.startswith("/"):
            raise ValueError("Invalid path")
        return path.split("/")[1:]
    raise ValueError("A path must be a string, tuple or list")
def resolve_path(root, path):
    """Resolve a rfc 6901 path, returning the parent and the last path part.

    Walks every part except the last down from `root`, decoding ~0/~1
    escapes at each step; the caller performs the final lookup/mutation
    with the returned (parent, last_part) pair.
    """
    path = parse_path(path)
    parent = root
    for part in path[:-1]:
        parent = get_child(parent, rfc_6901_replace(part))
    return (parent, rfc_6901_replace(path[-1]))
def find(root, path):
    """Get the (first) child at path from root."""
    return get_child(*resolve_path(root, path))


def find_all(root, path):
    """Get all children that satisfy the path.

    Recurses one segment at a time so "*" wildcards fan out over lists.
    NOTE(review): unlike resolve_path, segments here are not passed
    through rfc_6901_replace — escaped keys won't match. Confirm intended.
    """
    path = parse_path(path)
    if len(path) == 1:
        yield from get_children(root, path[0])
    else:
        for child in get_children(root, path[0]):
            yield from find_all(child, path[1:])
def rfc_6901_replace(path):
    """Decode RFC 6901 escape sequences in a path token.

    "~1" becomes "/" and "~0" becomes "~". The order matters: decoding
    "~1" first guarantees "~01" yields the literal string "~1" rather
    than "/".
    """
    decoded = path.replace("~1", "/")
    return decoded.replace("~0", "~")
def apply_patch(document, patch):
    """Apply a Patch object to a document."""
    # pylint: disable=too-many-return-statements
    op = patch.op
    parent, idx = resolve_path(document, patch.path)
    if op == "add":
        return add(parent, idx, patch.value)
    elif op == "remove":
        return remove(parent, idx)
    elif op == "replace":
        # For replace, the patch's `src` field carries the optional check value.
        return replace(parent, idx, patch.value, patch.src)
    elif op == "merge":
        return merge(parent, idx, patch.value)
    elif op == "copy":
        sparent, sidx = resolve_path(document, patch.src)
        return copy(sparent, sidx, parent, idx)
    elif op == "move":
        sparent, sidx = resolve_path(document, patch.src)
        return move(sparent, sidx, parent, idx)
    elif op == "test":
        return test(parent, idx, patch.value)
    elif op == "setremove":
        return set_remove(parent, idx, patch.value)
    elif op == "setadd":
        return set_add(parent, idx, patch.value)
    else:
        raise JSONPatchError("Invalid operator")
def apply_patches(document, patches):
    """Serially apply all patches to a document.

    A failing "test" patch, or any exception raised by an individual
    patch, aborts the run with a JSONPatchError chained to its cause.
    Patches applied before the failure are NOT rolled back.
    """
    for i, patch in enumerate(patches):
        try:
            result = apply_patch(document, patch)
            if patch.op == "test" and result is False:
                raise JSONPatchError("Test patch {0} failed. Cancelling entire set.".format(i + 1))
        except Exception as ex:
            raise JSONPatchError("An error occurred with patch {0}: {1}".format(i + 1, ex)) from ex
| {
"repo_name": "treycucco/bidon",
"path": "bidon/json_patch.py",
"copies": "1",
"size": "7542",
"license": "mit",
"hash": 628688902057338900,
"line_mean": 27.6768060837,
"line_max": 100,
"alpha_frac": 0.6453195439,
"autogenerated": false,
"ratio": 3.321003963011889,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4466323506911889,
"avg_score": null,
"num_lines": null
} |
# Apply Prewitt filter
#
import sys, os
import numpy as np
from scipy.ndimage import prewitt
#
# Import the module with the I/O scaffolding of the External Attribute
#
sys.path.insert(0, os.path.join(sys.path[0], '..'))
import extattrib as xa
#
# These are the attribute parameters
#
xa.params = {
'Input': 'Input',
'Output': ['Average gradient', 'In-line gradient', 'Cross-line gradient', 'Z gradient'],
'ZSampMargin' : {'Value': [-1,1], 'Hidden': True},
'StepOut' : {'Value': [1,1], 'Hidden': True},
'Help' : 'https://gist.github.com/waynegm/84f323ec4aab3961c23d'
}
#
# Define the compute function
#
def doCompute():
	"""Compute Prewitt gradients for the centre trace of each input block.

	Loops forever: each pass pulls the next input block via the extattrib
	scaffolding, applies the Prewitt operator along the inline, crossline
	and Z axes, and writes the three components plus their mean back out.
	"""
	# Index of the current trace position in the Input numpy array.
	inl_mid = xa.SI['nrinl']//2
	crl_mid = xa.SI['nrcrl']//2
	while True:
		xa.doInput()
		# Gradient component along each axis, evaluated at the centre trace.
		inline_grad = prewitt(xa.Input, axis=0)[inl_mid,crl_mid,:]
		crossline_grad = prewitt(xa.Input, axis=1)[inl_mid,crl_mid,:]
		z_grad = prewitt(xa.Input, axis=2)[inl_mid,crl_mid,:]
		xa.Output['In-line gradient'] = inline_grad
		xa.Output['Cross-line gradient'] = crossline_grad
		xa.Output['Z gradient'] = z_grad
		xa.Output['Average gradient'] = (inline_grad + crossline_grad + z_grad)/3
		xa.doOutput()
#
# Assign the compute function to the attribute
#
xa.doCompute = doCompute
#
# Do it
#
# xa.run enters the scaffolding's processing loop, forwarding any
# command-line arguments.
xa.run(sys.argv[1:])
| {
"repo_name": "waynegm/OpendTect-External-Attributes",
"path": "Python_3/tests/ex_prewitt_filter.py",
"copies": "1",
"size": "1272",
"license": "mit",
"hash": -8707223864004890000,
"line_mean": 24.9591836735,
"line_max": 89,
"alpha_frac": 0.6627358491,
"autogenerated": false,
"ratio": 2.6892177589852007,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8486234614469909,
"avg_score": 0.07314379872305846,
"num_lines": 49
} |
"""Apply simple tranformations to the views"""
from qtpy import QtWidgets
from .pdsspect_image_set import PDSSpectImageSetViewBase
class TransformsController(object):
    """Controller for :class:`Transforms`

    Parameters
    ----------
    image_set : :class:`~.pdsspect_image_set.PDSSpectImageSet`
        pdsspect model
    view : :class:`Transforms`
        View to control

    Attributes
    ----------
    image_set : :class:`~.pdsspect_image_set.PDSSpectImageSet`
        pdsspect model
    view : :class:`Transforms`
        View to control
    """

    def __init__(self, image_set, view):
        self.image_set = image_set
        self.view = view

    def _propagate(self, name, value):
        """Set attribute *name* to *value* on the model and all its subsets.

        The three public setters below were identical except for the
        attribute name; this helper holds the shared propagation logic.
        """
        setattr(self.image_set, name, value)
        for subset in self.image_set.subsets:
            setattr(subset, name, value)

    def set_flip_x(self, flip_x):
        """Set :attr:`~.pdsspect_image_set.PDSSpectImageSet.flip_x`

        Parameters
        ----------
        flip_x : :obj:`bool`
            True to flip x axis, otherwise, False
        """
        self._propagate('flip_x', flip_x)

    def set_flip_y(self, flip_y):
        """Set :attr:`~.pdsspect_image_set.PDSSpectImageSet.flip_y`

        Parameters
        ----------
        flip_y : :obj:`bool`
            True to flip y axis, otherwise, False
        """
        self._propagate('flip_y', flip_y)

    def set_swap_xy(self, swap_xy):
        """Set :attr:`~.pdsspect_image_set.PDSSpectImageSet.swap_xy`

        Parameters
        ----------
        swap_xy : :obj:`bool`
            True to swap x and y axis, otherwise, False
        """
        self._propagate('swap_xy', swap_xy)
class Transforms(QtWidgets.QDialog, PDSSpectImageSetViewBase):
    """Window to apply simple transformations

    Parameters
    ----------
    image_set : :class:`~.pdsspect_image_set.PDSSpectImageSet`
        pdsspect model
    view_canvas : :class:`.pds_image_view_canvas.PDSImageViewCanvas`
        The view canvas to apply transformations to

    Attributes
    ----------
    image_set : :class:`~.pdsspect_image_set.PDSSpectImageSet`
        pdsspect model
    view_canvas : :class:`.pds_image_view_canvas.PDSImageViewCanvas`
        The view canvas to apply transformations to
    controller : :class:`TransformsController`
        The view's controller
    flip_x_label : :class:`QtWidgets.QLabel <PySide.QtGui.QLabel>`
        Label for :attr:`flip_x_box`
    flip_x_box : :class:`QtWidgets.QCheckBox <PySide.QtGui.QCheckBox>`
        Flip x axis when checked
    flip_y_label : :class:`QtWidgets.QLabel <PySide.QtGui.QLabel>`
        Label for :attr:`flip_y_box`
    flip_y_box : :class:`QtWidgets.QCheckBox <PySide.QtGui.QCheckBox>`
        Flip y axis when checked
    swap_xy_label : :class:`QtWidgets.QLabel <PySide.QtGui.QLabel>`
        Label for :attr:`swap_xy_box`
    swap_xy_box : :class:`QtWidgets.QCheckBox <PySide.QtGui.QCheckBox>`
        Swap x and y axis when checked
    layout : :class:`QtWidgets.QGridLayout <PySide.QtGui.QGridLayout>`
        Layout for widget
    """

    def __init__(self, image_set, view_canvas):
        super(Transforms, self).__init__()
        self.image_set = image_set
        self.view_canvas = view_canvas
        # Register with the model so this view is notified of changes
        self.image_set.register(self)
        self.controller = TransformsController(image_set, self)
        # One label/checkbox pair per transformation
        self.flip_x_label = QtWidgets.QLabel("Flip X Axes")
        self.flip_x_box = QtWidgets.QCheckBox()
        self.flip_x_box.stateChanged.connect(self.flip_x_checked)
        self.flip_y_label = QtWidgets.QLabel("Flip Y Axes")
        self.flip_y_box = QtWidgets.QCheckBox()
        self.flip_y_box.stateChanged.connect(self.flip_y_checked)
        self.swap_xy_label = QtWidgets.QLabel("Swap X and Y Axes")
        self.swap_xy_box = QtWidgets.QCheckBox()
        self.swap_xy_box.stateChanged.connect(self.swap_xy_checked)
        self.layout = QtWidgets.QGridLayout()
        self.layout.addWidget(self.flip_x_label, 0, 0)
        self.layout.addWidget(self.flip_x_box, 0, 1)
        self.layout.addWidget(self.flip_y_label, 1, 0)
        self.layout.addWidget(self.flip_y_box, 1, 1)
        self.layout.addWidget(self.swap_xy_label, 2, 0)
        self.layout.addWidget(self.swap_xy_box, 2, 1)
        # Fixed typo in the window title ("Tranformations")
        self.setWindowTitle("Transformations")
        self.setLayout(self.layout)

    def flip_x_checked(self, state):
        """Flip x axis when checked

        Parameters
        ----------
        state : :obj:`int`
            The state of the checkbox (this argument is ignored and the state
            is checked in a more explicit way)
        """
        self.controller.set_flip_x(self.flip_x_box.isChecked())

    def flip_y_checked(self, state):
        """Flip y axis when checked

        Parameters
        ----------
        state : :obj:`int`
            The state of the checkbox (this argument is ignored and the state
            is checked in a more explicit way)
        """
        self.controller.set_flip_y(self.flip_y_box.isChecked())

    def swap_xy_checked(self, state):
        """Swap x and y axis when checked

        Parameters
        ----------
        state : :obj:`int`
            The state of the checkbox (this argument is ignored and the state
            is checked in a more explicit way)
        """
        self.controller.set_swap_xy(self.swap_xy_box.isChecked())
| {
"repo_name": "planetarypy/pdsspect",
"path": "pdsspect/transforms.py",
"copies": "1",
"size": "5701",
"license": "bsd-3-clause",
"hash": 2089581515304993000,
"line_mean": 31.209039548,
"line_max": 77,
"alpha_frac": 0.6018242414,
"autogenerated": false,
"ratio": 3.6875808538163,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47894050952163003,
"avg_score": null,
"num_lines": null
} |
#Apply the calibration fields to the RAW science data to produce
#reduced science data.
#Import whatever modules will be used
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
from astropy.io import fits, ascii
from scipy import stats
# Import AstroImage
import astroimage as ai
# Add the header handler to the BaseImage class
from PRISM_header_handler import PRISM_header_handler
ai.BaseImage.set_header_handler(PRISM_header_handler)
#Setup the path delimeter for this operating system
delim = os.path.sep
#==============================================================================
# *********************** CUSTOM USER CODE ************************************
# this is where the user specifies where the raw data is stored
# and some of the subdirectory structure to find the actual .FITS images
#==============================================================================
# This is the location of the raw data for the observing run
# NOTE(review): 'FITS Data' (space) here vs 'FITS_data' (underscore) below —
# one of the two is probably wrong; rawDir is also unused in this script.
rawDir = 'C:\\Users\\Jordan\\FITS Data\\PRISM_data\\raw_data\\201612\\'
# Define the path to the parent directory for all pyBDP products
pyBDP_data = 'C:\\Users\\Jordan\\FITS_data\\PRISM_data\\pyBDP_data\\201612\\'
# Define the directory into which the average calibration images will be placed
calibrationDir = os.path.join(pyBDP_data, 'master_calibration_images')
# Reduced directory (for saving the final images)
reducedDir = os.path.join(pyBDP_data, 'pyBDP_reduced_images')
if (not os.path.isdir(reducedDir)):
    os.mkdir(reducedDir, 0o755)
# Read the fileIndex back in as an astropy Table
print('\nReading file index from disk')
indexFile = os.path.join(pyBDP_data, 'rawFileIndex.csv')
fileIndex = Table.read(indexFile, format='csv')
# Boolean masks selecting each observation type from the index
biasBool = (fileIndex['OBSTYPE'] == 'BIAS')
darkBool = (fileIndex['OBSTYPE'] == 'DARK')
flatBool = (fileIndex['OBSTYPE'] == 'FLAT')
sciBool = (fileIndex['OBSTYPE'] == 'OBJECT')
# Per-file metadata columns used for grouping the calibration images below
waveBand = fileIndex['FILTER']
polAng = fileIndex['POLPOS']
binning = fileIndex['BINNING']
#==============================================================================
# ***************************** BIAS *****************************************
# Setup the paths to the bias images and compute the bias map
#==============================================================================
# Find the number of unique binnings used in biases
uniqBins = np.unique(binning)
# One master bias per binning level, keyed on the binning value
masterBiasDict = {}
for thisBin in uniqBins:
    # Construct the filename for this bias.
    masterBiasFilename = 'MasterBias{0:g}.fits'.format(thisBin)
    masterBiasFilename = os.path.join(calibrationDir, masterBiasFilename)
    # Read in the masterBias file
    print('\nLoading file into masterBias list')
    print(masterBiasFilename)
    masterBias = ai.MasterBias.read(masterBiasFilename)
    # Cast to float32 (presumably to limit memory use — TODO confirm)
    masterBias = masterBias.astype(np.float32)
    masterBiasDict.update({thisBin: masterBias})
#==============================================================================
# ***************************** DARKS *****************************************
# Setup the paths to the dark images and compute the dark current map
#==============================================================================
# One master dark per binning level, keyed on the binning value
masterDarkDict = {}
for thisBin in uniqBins:
    # Construct the filename for this dark.
    masterDarkFilename = 'MasterDark{0:g}.fits'.format(thisBin)
    masterDarkFilename = os.path.join(calibrationDir, masterDarkFilename)
    # Read in the file
    print('\nLoading file into masterDark list')
    print(masterDarkFilename)
    masterDark = ai.MasterDark.read(masterDarkFilename)
    masterDark = masterDark.astype(np.float32)
    masterDarkDict.update({thisBin: masterDark})
#==============================================================================
# ***************************** FLATS *****************************************
# Setup the paths to the flat images and compute the flat map
#==============================================================================
# Find the number of unique wavebands used in flats
uniqBands = np.unique(waveBand)
# Create an empty dictionary to store the masterFlatDict,
# keyed to each band/polAng/binning combination
masterFlatDict = {}
#Loop through each waveband
for thisBand in uniqBands:
    # Compute the unique values for the polaroid rotation angle
    thisFlatWaveBool = np.logical_and(flatBool, (waveBand == thisBand))
    thisFlatWaveInds = np.where(thisFlatWaveBool)
    uniqPolAngs = np.unique(polAng[thisFlatWaveInds])
    for thisAng in uniqPolAngs:
        # Compute the unique values for the binning level
        thisFlatAngBool = np.logical_and(thisFlatWaveBool, (polAng == thisAng))
        thisFlatAngInds = np.where(thisFlatAngBool)
        # NOTE(review): this rebinds the module-level uniqBins used by the
        # bias/dark sections above; harmless here since those loops have
        # finished, but a different name would be safer.
        uniqBins = np.unique(binning[thisFlatAngInds]).astype(int)
        for thisBin in uniqBins:
            # Construct the flatKey and filename for this image
            flatKey = (thisBand, thisAng, thisBin)
            flatKeyStr = '{0:s}_{1:g}_{2:g}'.format(*flatKey)
            masterFlatFilename = 'MasterFlat' + flatKeyStr + '.fits'
            masterFlatFilename = os.path.join(calibrationDir, masterFlatFilename)
            # Read in the masterFlat file
            print('\nLoading file into masterFlat list')
            print(masterFlatFilename)
            masterFlat = ai.MasterFlat.read(masterFlatFilename)
            masterFlat = masterFlat.astype(np.float32)
            masterFlatDict.update({flatKey: masterFlat})
#==============================================================================
# **************************** SCIENCE ****************************************
# Reduce each raw science frame with the matching bias/dark/flat
#==============================================================================
# Grab the indices of the science images
scienceInds = np.where(sciBool)
scienceImgFiles = fileIndex['Filename'][scienceInds]
print('\nBeginning to reduce science data.')
for filename in scienceImgFiles:
    # Read in the raw science image from disk
    rawScience = ai.RawScience.read(filename)
    # Extract the information on this file
    thisBand = rawScience.waveBand
    thisAng = rawScience.header['POLPOS']
    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented, behaviorally-identical replacement.
    thisBin = int(np.unique(rawScience.binning))
    # Construct the key for the flat dictionary
    thisFlatKey = (thisBand, thisAng, thisBin)
    # Process this raw science image using the relevant calibration files
    reducedScience = rawScience.process_image(
        bias=masterBiasDict[thisBin],
        dark=masterDarkDict[thisBin],
        flat=masterFlatDict[thisFlatKey]
    )
    # Write the file to disk
    reducedFileName = os.path.join(
        reducedDir,
        os.path.basename(rawScience.filename)
    )
    reducedScience.write(reducedFileName, dtype=np.float32, clobber=True)
# Let the user know everything is finished
print('\n..........')
print('Finished reducing science data.')
| {
"repo_name": "jmontgom10/pyBDP",
"path": "03_reduceScienceData.py",
"copies": "2",
"size": "6909",
"license": "mit",
"hash": 7142826908752526000,
"line_mean": 39.1686046512,
"line_max": 81,
"alpha_frac": 0.6031263569,
"autogenerated": false,
"ratio": 4.033274956217163,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5636401313117163,
"avg_score": null,
"num_lines": null
} |
""" Apply The Cannon to the AAOmega Spectra! """
import numpy as np
import matplotlib.pyplot as plt
import sys
from TheCannon import dataset
from TheCannon import model
# Root directory holding all of this run's .npz data products
DATA_DIR = '/Users/annaho/Data/AAOmega/Run_13_July'
# Effectively-zero inverse variance, used to mask out bad pixels
SMALL = 1.0 / 1000000000.0
def test_step_iteration(ds, md, starting_guess):
errs, chisq = md.infer_labels(ds, starting_guess)
return ds.test_label_vals, chisq, errs
def choose_reference_set():
    """Select the labelled reference objects used to train The Cannon.

    Loads the full labelled sample from DATA_DIR, keeps only objects whose
    labels pass conservative cuts (Teff, [Fe/H], logg, vrot) and whose
    spectral scatter is small, then saves the surviving IDs, fluxes,
    inverse variances and labels back to DATA_DIR as ref_*.npz files.
    """
    # NOTE(review): wl is loaded but never used in this function.
    wl = np.load("%s/wl.npz" %DATA_DIR)['arr_0']
    all_id = np.load("%s/ref_id_all.npz" %DATA_DIR)['arr_0']
    all_flux = np.load("%s/ref_flux_all.npz" %DATA_DIR)['arr_0']
    all_scat = np.load("%s/ref_spec_scat_all.npz" %DATA_DIR)['arr_0']
    all_label = np.load("%s/ref_label_all.npz" %DATA_DIR)['arr_0']
    all_ivar = np.load("%s/ref_ivar_corr.npz" %DATA_DIR)['arr_0']
    # choose reference objects
    # Label columns: 0=Teff, 1=logg, 2=FeH, 4=vrot (per the cut names;
    # column order assumed consistent with set_label_names in train()).
    good_teff = np.logical_and(
        all_label[:,0] > 4000, all_label[:,0] < 6000)
    good_feh = np.logical_and(
        all_label[:,2] > -2, all_label[:,2] < 0.3)
    good_logg = np.logical_and(
        all_label[:,1] > 1, all_label[:,1] < 3)
    good_vrot = all_label[:,4] < 20.0
    good_scat = all_scat < 0.1
    good1 = np.logical_and(good_teff, good_feh)
    good2 = np.logical_and(good_logg, good_vrot)
    good12 = np.logical_and(good1, good2)
    good = np.logical_and(good12, good_scat)
    ref_id = all_id[good]
    print("%s objects chosen for reference set" %len(ref_id))
    ref_flux = all_flux[good]
    ref_ivar = all_ivar[good]
    ref_label = all_label[good]
    np.savez("%s/ref_id.npz" %DATA_DIR, ref_id)
    np.savez("%s/ref_flux.npz" %DATA_DIR, ref_flux)
    np.savez("%s/ref_ivar.npz" %DATA_DIR, ref_ivar)
    np.savez("%s/ref_label.npz" %DATA_DIR, ref_label)
def update_cont():
    """Convert the legacy continuum-pixel wavelength list to a boolean mask.

    Clips the old wavelength list to the instrument's range, snaps each
    value to the nearest pixel of the wavelength grid, and saves the
    resulting boolean continuum mask to wl_contmask.npz.
    """
    # Bug fix: `wl` was an undefined name here (NameError when called).
    # Load the wavelength grid the same way every other function in this
    # module does.
    wl = np.load("%s/wl.npz" %DATA_DIR)['arr_0']
    contpix = np.load("wl_contpix_old.npz")['arr_0']
    # this array is a bit too long, clip it off
    contpix_new = contpix[np.logical_and(contpix>8420, contpix<8700)]
    inds = np.zeros(contpix_new.shape, dtype=int)
    for i,val in enumerate(contpix_new):
        # find the nearest pixel
        inds[i] = int(np.argmin(np.abs(wl-val)))
    contmask = np.zeros(len(wl), dtype=bool)
    contmask[inds] = 1
    np.savez("wl_contmask.npz", contmask)
    print("SAVED")
def normalize_ref_set():
    """Continuum-normalize the reference spectra and save the results.

    Fits a 3rd-order sinusoid continuum using the saved continuum mask and
    writes the normalized fluxes/ivars back to DATA_DIR.
    """
    wl = np.load("%s/wl.npz" %DATA_DIR)['arr_0']
    ref_id = np.load("%s/ref_id.npz" %DATA_DIR)['arr_0']
    ref_flux = np.load("%s/ref_flux.npz" %DATA_DIR)['arr_0']
    ref_ivar = np.load("%s/ref_ivar.npz" %DATA_DIR)['arr_0']
    ref_label = np.load("%s/ref_label.npz" %DATA_DIR)['arr_0']
    # The reference set is used as both the training and test half of the
    # Dataset here, since we only need the normalization machinery.
    ds = dataset.Dataset(
        wl, ref_id, ref_flux, ref_ivar, ref_label,
        ref_id, ref_flux, ref_ivar)
    contmask = np.load("%s/wl_contmask.npz" %DATA_DIR)['arr_0']
    ds.set_continuum(contmask)
    cont = ds.fit_continuum(3, "sinusoid")
    np.savez("%s/ref_cont.npz" %DATA_DIR, cont)
    norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar = \
            ds.continuum_normalize(cont)
    # Zero out the weight of unphysical flux values
    bad = np.logical_or(ref_flux <= 0, ref_flux > 1.1)
    norm_tr_ivar[bad] = 0.0
    np.savez("%s/ref_flux_norm.npz" %DATA_DIR, norm_tr_flux)
    np.savez("%s/ref_ivar_norm.npz" %DATA_DIR, norm_tr_ivar)
def normalize_test_set():
    """Continuum-normalize the test (science) spectra and save the results.

    Pixels flagged as continuum but deviating from unity by more than
    3 sigma are down-weighted before the continuum fit.
    """
    wl = np.load("%s/wl.npz" %DATA_DIR)['arr_0']
    test_id = np.load("%s/test_id.npz" %DATA_DIR)['arr_0']
    test_flux = np.load("%s/test_flux.npz" %DATA_DIR)['arr_0']
    test_ivar = np.load("%s/test_ivar_corr.npz" %DATA_DIR)['arr_0']
    test_scat = np.load("%s/test_spec_scat.npz" %DATA_DIR)['arr_0']
    contmask = np.load("%s/wl_contmask.npz" %DATA_DIR)['arr_0']
    # Only a 2-object dummy training half is needed; the test half holds
    # the spectra actually being normalized.
    ds = dataset.Dataset(
        wl, test_id[0:2], test_flux[0:2], test_ivar[0:2], wl,
        test_id, test_flux, test_ivar)
    ds.set_continuum(contmask)
    # For the sake of the normalization, no pixel with flux >= 3 sigma
    # should be continuum.
    for ii,spec in enumerate(ds.test_flux):
        err = test_scat[ii]
        bad = np.logical_and(
            ds.contmask == True, np.abs(1-spec) >= 3*err)
        ds.test_ivar[ii][bad] = SMALL
    cont = ds.fit_continuum(3, "sinusoid")
    np.savez("%s/test_cont.npz" %DATA_DIR, cont)
    norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar = \
            ds.continuum_normalize(cont)
    # Zero out the weight of unphysical flux values
    bad = np.logical_or(test_flux <= 0, test_flux > 1.1)
    norm_test_ivar[bad] = 0.0
    np.savez("%s/test_flux_norm.npz" %DATA_DIR, norm_test_flux)
    np.savez("%s/test_ivar_norm.npz" %DATA_DIR, norm_test_ivar)
def choose_training_set():
    """Randomly split the reference set into training (~80%) and validation.

    The split is saved to tr_*.npz and val_*.npz files in DATA_DIR.
    Note the split is random per call (no fixed seed), so repeated runs
    produce different partitions.
    """
    ref_id = np.load("%s/ref_id.npz" %DATA_DIR)['arr_0']
    ref_flux = np.load("%s/ref_flux_norm.npz" %DATA_DIR)['arr_0']
    ref_ivar = np.load("%s/ref_ivar_norm.npz" %DATA_DIR)['arr_0']
    ref_label = np.load("%s/ref_label.npz" %DATA_DIR)['arr_0']
    # randomly pick 80% of the objects to be the training set
    nobj = len(ref_id)
    assignments = np.random.randint(10, size=nobj)
    # if you're < 8, you're training
    choose = assignments < 8
    tr_id = ref_id[choose]
    tr_flux = ref_flux[choose]
    tr_ivar = ref_ivar[choose]
    tr_label = ref_label[choose]
    np.savez("%s/tr_id.npz" %DATA_DIR, tr_id)
    np.savez("%s/tr_flux_norm.npz" %DATA_DIR, tr_flux)
    np.savez("%s/tr_ivar_norm.npz" %DATA_DIR, tr_ivar)
    np.savez("%s/tr_label.npz" %DATA_DIR, tr_label)
    # Everything not chosen for training becomes the validation set
    val_id = ref_id[~choose]
    val_flux = ref_flux[~choose]
    val_ivar = ref_ivar[~choose]
    val_label = ref_label[~choose]
    np.savez("%s/val_id.npz" %DATA_DIR, val_id)
    np.savez("%s/val_flux_norm.npz" %DATA_DIR, val_flux)
    np.savez("%s/val_ivar_norm.npz" %DATA_DIR, val_ivar)
    np.savez("%s/val_label.npz" %DATA_DIR, val_label)
def train():
    """Train a quadratic Cannon model on the training split.

    Saves SNR/label diagnostics plots and the fitted model arrays
    (coeffs, scatters, chisqs, pivots) to DATA_DIR.
    """
    wl = np.load("%s/wl.npz" %DATA_DIR)['arr_0']
    tr_id = np.load("%s/tr_id.npz" %DATA_DIR)['arr_0']
    tr_flux = np.load("%s/tr_flux_norm.npz" %DATA_DIR)['arr_0']
    tr_ivar = np.load("%s/tr_ivar_norm.npz" %DATA_DIR)['arr_0']
    tr_label = np.load("%s/tr_label.npz" %DATA_DIR)['arr_0']
    val_id = np.load("%s/val_id.npz" %DATA_DIR)['arr_0']
    val_flux = np.load("%s/val_flux_norm.npz" %DATA_DIR)['arr_0']
    val_ivar = np.load("%s/val_ivar_norm.npz" %DATA_DIR)['arr_0']
    # Only the first four label columns are fit
    ds = dataset.Dataset(
        wl, tr_id, tr_flux, tr_ivar, tr_label[:,0:4],
        val_id, val_flux, val_ivar)
    ds.set_label_names(["Teff", "logg", "FeH", 'aFe'])
    np.savez("%s/tr_SNR.npz" %DATA_DIR, ds.tr_SNR)
    fig = ds.diagnostics_SNR()
    plt.savefig("%s/SNR_dist.png" %DATA_DIR)
    plt.close()
    fig = ds.diagnostics_ref_labels()
    plt.savefig("%s/ref_label_triangle.png" %DATA_DIR)
    plt.close()
    # Quadratic (order-2) model
    md = model.CannonModel(2)
    md.fit(ds)
    fig = md.diagnostics_leading_coeffs(ds)
    plt.savefig("%s/leading_coeffs.png" %DATA_DIR)
    plt.close()
    np.savez("%s/coeffs.npz" %DATA_DIR, md.coeffs)
    np.savez("%s/scatters.npz" %DATA_DIR, md.scatters)
    np.savez("%s/chisqs.npz" %DATA_DIR, md.chisqs)
    np.savez("%s/pivots.npz" %DATA_DIR, md.pivots)
def validate():
    """Infer labels for the validation split with the trained model.

    Re-assembles the model from the saved arrays, runs label inference
    from several random starting guesses, keeps the lowest-chi^2 result
    per object, and saves labels/errors/chisq plus a 1-to-1 diagnostic.
    """
    wl = np.load("%s/wl.npz" %DATA_DIR)['arr_0']
    tr_id = np.load("%s/tr_id.npz" %DATA_DIR)['arr_0']
    tr_flux = np.load("%s/tr_flux_norm.npz" %DATA_DIR)['arr_0']
    tr_ivar = np.load("%s/tr_ivar_norm.npz" %DATA_DIR)['arr_0']
    val_id = np.load("%s/val_id.npz" %DATA_DIR)['arr_0']
    val_flux = np.load("%s/val_flux_norm.npz" %DATA_DIR)['arr_0']
    val_ivar = np.load("%s/val_ivar_norm.npz" %DATA_DIR)['arr_0']
    val_label = np.load("%s/val_label.npz" %DATA_DIR)['arr_0']
    coeffs = np.load("%s/coeffs.npz" %DATA_DIR)['arr_0']
    scatters = np.load("%s/scatters.npz" %DATA_DIR)['arr_0']
    chisqs = np.load("%s/chisqs.npz" %DATA_DIR)['arr_0']
    pivots = np.load("%s/pivots.npz" %DATA_DIR)['arr_0']
    ds = dataset.Dataset(
        wl, tr_id, tr_flux, tr_ivar, val_label[:,0:4],
        val_id, val_flux, val_ivar)
    np.savez("%s/val_SNR.npz" %DATA_DIR, ds.test_SNR)
    ds.set_label_names(["Teff", "logg", "FeH", "aFe"])
    # Rebuild the trained model from the saved arrays
    md = model.CannonModel(2)
    md.coeffs = coeffs
    md.scatters = scatters
    md.chisqs = chisqs
    md.pivots = pivots
    md.diagnostics_leading_coeffs(ds)
    # Run the fit from several random starting guesses to avoid local minima
    nguesses = 7
    nobj = len(ds.test_ID)
    nlabels = ds.tr_label.shape[1]
    choose = np.random.randint(0,nobj,size=nguesses)
    starting_guesses = ds.tr_label[choose]-md.pivots
    labels = np.zeros((nguesses, nobj, nlabels))
    chisq = np.zeros((nguesses, nobj))
    errs = np.zeros(labels.shape)
    for ii,guess in enumerate(starting_guesses):
        a,b,c = test_step_iteration(ds,md,starting_guesses[ii])
        labels[ii,:] = a
        chisq[ii,:] = b
        errs[ii,:] = c
    np.savez("%s/val_labels_all_starting_vals.npz" %DATA_DIR, labels)
    np.savez("%s/val_chisq_all_starting_vals.npz" %DATA_DIR, chisq)
    np.savez("%s/val_errs_all_starting_vals.npz" %DATA_DIR, errs)
    # For each object keep the result from the lowest-chisq starting guess
    choose = np.argmin(chisq, axis=0)
    best_chisq = np.min(chisq, axis=0)
    best_labels = np.zeros((nobj, nlabels))
    best_errs = np.zeros(best_labels.shape)
    for jj,val in enumerate(choose):
        best_labels[jj,:] = labels[:,jj,:][val]
        best_errs[jj,:] = errs[:,jj,:][val]
    np.savez("%s/val_cannon_labels.npz" %DATA_DIR, best_labels)
    np.savez("%s/val_errs.npz" %DATA_DIR, best_errs)
    np.savez("%s/val_chisq.npz" %DATA_DIR, best_chisq)
    ds.test_label_vals = best_labels
    ds.diagnostics_1to1()
def test():
    """Infer labels for the full test (science) set with the trained model.

    Mirrors validate(): multiple random starting guesses, lowest-chi^2
    result kept per object, outputs saved to DATA_DIR.
    """
    wl = np.load("%s/wl.npz" %DATA_DIR)['arr_0']
    tr_id = np.load("%s/tr_id.npz" %DATA_DIR)['arr_0']
    tr_flux = np.load("%s/tr_flux_norm.npz" %DATA_DIR)['arr_0']
    tr_ivar = np.load("%s/tr_ivar_norm.npz" %DATA_DIR)['arr_0']
    test_id = np.load("%s/test_id.npz" %DATA_DIR)['arr_0']
    test_flux = np.load("%s/test_flux_norm.npz" %DATA_DIR)['arr_0']
    test_ivar = np.load("%s/test_ivar_norm.npz" %DATA_DIR)['arr_0']
    tr_label = np.load("%s/tr_label.npz" %DATA_DIR)['arr_0']
    coeffs = np.load("%s/coeffs.npz" %DATA_DIR)['arr_0']
    scatters = np.load("%s/scatters.npz" %DATA_DIR)['arr_0']
    chisqs = np.load("%s/chisqs.npz" %DATA_DIR)['arr_0']
    pivots = np.load("%s/pivots.npz" %DATA_DIR)['arr_0']
    ds = dataset.Dataset(
        wl, tr_id, tr_flux, tr_ivar, tr_label[:,0:4],
        test_id, test_flux, test_ivar)
    np.savez("%s/test_SNR.npz" %DATA_DIR, ds.test_SNR)
    ds.set_label_names(["Teff", "logg", "FeH", "aFe"])
    # Rebuild the trained model from the saved arrays
    md = model.CannonModel(2)
    md.coeffs = coeffs
    md.scatters = scatters
    md.chisqs = chisqs
    md.pivots = pivots
    md.diagnostics_leading_coeffs(ds)
    nguesses = 7
    nobj = len(ds.test_ID)
    nlabels = ds.tr_label.shape[1]
    # Starting guesses drawn from the training labels (before tr_label is
    # zeroed out below)
    choose = np.random.randint(0,nobj,size=nguesses)
    starting_guesses = ds.tr_label[choose]-md.pivots
    labels = np.zeros((nguesses, nobj, nlabels))
    chisq = np.zeros((nguesses, nobj))
    errs = np.zeros(labels.shape)
    ds.tr_label = np.zeros((nobj, nlabels))
    for ii,guess in enumerate(starting_guesses):
        a,b,c = test_step_iteration(ds,md,starting_guesses[ii])
        labels[ii,:] = a
        chisq[ii,:] = b
        errs[ii,:] = c
    np.savez("%s/labels_all_starting_vals.npz" %DATA_DIR, labels)
    np.savez("%s/chisq_all_starting_vals.npz" %DATA_DIR, chisq)
    np.savez("%s/errs_all_starting_vals.npz" %DATA_DIR, errs)
    # For each object keep the result from the lowest-chisq starting guess
    choose = np.argmin(chisq, axis=0)
    best_chisq = np.min(chisq, axis=0)
    best_labels = np.zeros((nobj, nlabels))
    best_errs = np.zeros(best_labels.shape)
    for jj,val in enumerate(choose):
        best_labels[jj,:] = labels[:,jj,:][val]
        best_errs[jj,:] = errs[:,jj,:][val]
    np.savez("%s/test_cannon_labels.npz" %DATA_DIR, best_labels)
    np.savez("%s/test_errs.npz" %DATA_DIR, best_errs)
    np.savez("%s/test_chisq.npz" %DATA_DIR, best_chisq)
    ds.test_label_vals = best_labels
if __name__=="__main__":
#choose_reference_set()
#normalize_ref_set()
#normalize_test_set()
#choose_training_set()
#train()
#validate()
test()
| {
"repo_name": "annayqho/TheCannon",
"path": "code/aaomega/aaomega_run_cannon.py",
"copies": "1",
"size": "11861",
"license": "mit",
"hash": -1011203198642544100,
"line_mean": 35.1615853659,
"line_max": 70,
"alpha_frac": 0.5991906247,
"autogenerated": false,
"ratio": 2.5092024539877302,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.360839307868773,
"avg_score": null,
"num_lines": null
} |
"""Apply the mask to the nifiti data
Usage: roinii_butterfly roiname newname
"""
import re
import os
import sys
from roi.pre import join_time
import nibabel as nb
from metaaccumulate.data.nii import findallnii, masknii
def create(mask, path, niipartial, newname, restart=False):
    """Create a roi dataset using all Ploran 2007 (i.e. butterfly)
    datasets.
    Input
    -----
    mask - name of a roi found in the Harvard Oxford atlas (see roi
        package for details)
    path - the base path where all the BOLD data lives
    niipartial - a string the matches all data of interest
    newname - a name for all the combined roi data. Note: all roi data is
        saved in ./roinii/
    restart - if True, masking is skipped for outputs that already exist
        (passed through to _mask_butterfly)
    """
    try:
        os.mkdir("./roinii")
    except OSError:
        # Directory already exists
        pass
    # Get and order the nii files
    print("Finding nii files.")
    niis = findallnii(path, niipartial)
    niis, bysub = _ordernii_butterfly(niis)
    # Mask and combine
    # roinames maps subject code -> per-scan list of masked-file names
    # (None placeholders preserved for missing scans)
    roinames = {}
    for sub in sorted(bysub.keys()):
        roinames[sub] = []
        for scan in bysub[sub]:
            if scan != None:
                roiname = _mask_butterfly(mask, scan, newname, restart)
                roinames[sub].append(roiname)
            else:
                roinames[sub].append(None)
    print("Combining all roi data by subject.")
    for sub, scans in roinames.items():
        scans = [scan for scan in scans if scan != None]
        ## Drop None
        # Init combinedniis then add the
        # remaining scans to it and save
        # the result
        print("Combining {0}.".format(sub))
        combinedniis = nb.load(scans.pop(0))
        for scan in scans:
            combinedniis = join_time(combinedniis, nb.load(scan))
        print("Saving {0}.".format(sub))
        nb.save(combinedniis,
                os.path.join(
                    "./roinii", "{0}_{1}.nii.gz".format(newname, sub)))
    # [os.remove(scan) for scan in scans]
    ## Clean up old scans
def _mask_butterfly(mask, nii, newname, restart):
    """Apply mask to nii, save as newname.

    Returns the name of the (compressed) masked file.

    NOTE(review): this function reads the module-level global `path`,
    which is only bound inside the __main__ block — calling it any other
    way raises NameError.  It also writes into path/roinii while create()
    makes ./roinii; confirm these are intended to be the same directory.
    """
    print("Masking {0} with {1}.".format(os.path.basename(nii), mask))
    # Create a name for the temp roi data
    # then use the mask, saving with the tmpname
    name = newname + "_" + os.path.basename(nii)
    name = os.path.join(path, "roinii", name)
    # Ensure we are compressing the data
    fullname = os.path.basename(name)
    filename, fileextension = os.path.splitext(fullname)
    name = os.path.join(
            os.path.dirname(name), "{0}.nii.gz".format(filename))
    # If the file exists and we are
    # restarting continue on.
    if restart and os.path.isfile(name):
        print("{0} exists, continuing on.".format(name))
    else:
        # Mask! Save as name
        masknii(mask, nii, save=name)
    return name
def _ordernii_butterfly(niis):
"""Order a the provided list of nifti1 (.nii) files as appropriate for the
Ploran 2007 dataset (a.k.a butterfly).
"""
scanmap = {}
## Keyed on scode, values are a list of scans
for fipath in niis:
fi = os.path.basename(fipath)
fi_name_parts = fi.split('_')
scode = fi_name_parts[0]
scode = int(scode[1:])
## strip 's' from subcode, e.g 's4' -> 4
# Parse the scancode, looking for the scan number
scancode = fi_name_parts[1]
mat = re.match("^b\d+", scancode)
scannum = int(mat.group()[1:]) - 1
## mat.group() should contain, for example, 'b2'
## so we drop the first letter and
## cast to an int then offset by 1 so it can
## be used as an index into a list (e.g. 'b2' -> 1)
# Debug:
# print("{0} match: {2}, scan: {1}".format(fi, scannum, mat.group()))
# If scode is in scanmap add fipath (not fi)
# otherwise init scode first
max_num_scans = 10 ## ....for Ploran 2007
if scode in scanmap.keys():
scanmap[scode][scannum] = fipath
else:
scanmap[scode] = [None, ] * max_num_scans
scanmap[scode][scannum] = fipath
# Use scanmap to create an ordered list of niis
orderedniis = []
[orderedniis.extend(scanmap[sub]) for sub in sorted(scanmap.keys())]
## Want a 1d list thus .extend()
orderedniis = [nii for nii in orderedniis if nii != None]
## Drop Nones
return orderedniis, scanmap
if __name__ == '__main__':
    """Command line invocation setup."""
    # ----
    # User parameters
    niipartial = "MNI152_3mm.4dfp.nii"
    restart = True
    # ----
    # Create a place for the roi data to live
    # if necessary.
    try:
        os.mkdir("./roinii")
    except OSError:
        # Directory already exists
        pass
    # Process argv
    if len(sys.argv[1:]) != 2:
        raise ValueError("Two arguments are required.")
    mask = sys.argv[1]
    newname = sys.argv[2]
    # path is also read as a module global by _mask_butterfly
    path = os.getcwd()
    # Go!
    create(mask, path, niipartial, newname, restart)
| {
"repo_name": "parenthetical-e/wheelerdata",
"path": "preprocess/archive/roinii_butterfly.py",
"copies": "1",
"size": "5115",
"license": "bsd-2-clause",
"hash": 1263939706202087700,
"line_mean": 28.9122807018,
"line_max": 79,
"alpha_frac": 0.5704789834,
"autogenerated": false,
"ratio": 3.544698544698545,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46151775280985446,
"avg_score": null,
"num_lines": null
} |
"""Apply the mask to the nifiti data
Usage: roinii_fh_custom path/to/mask.hdr
"""
import os
import sys
import nibabel as nb
from roi.pre import mask as masknii
if __name__ == "__main__":
# ----
# Create a place for the roi data to
# live if necessary.
# ----
# Process argv
if len(sys.argv[1:]) != 1:
raise ValueError("One argument are required.")
mask = sys.argv[1]
try:
os.mkdir("./roinii")
except OSError:
pass
basepath = "/data/data2/meta_accumulate/fh"
# ----
# Link subjects with paths to data
subdatatable = {9 : os.path.join(basepath, 'fh09', 'warfh.nii'),
11 : os.path.join(basepath, 'fh11', 'warfh.nii'),
13 : os.path.join(basepath, 'fh13', 'warfh.nii'),
14 : os.path.join(basepath, 'fh14', 'warfh.nii'),
15 : os.path.join(basepath, 'fh15', 'warfh.nii'),
17 : os.path.join(basepath, 'fh17', 'warfh.nii'),
19 : os.path.join(basepath, 'fh19', 'warfh.nii'),
21 : os.path.join(basepath, 'fh21', 'warfh.nii'),
23 : os.path.join(basepath, 'fh23', 'warfh.nii'),
24 : os.path.join(basepath, 'fh24', 'warfh.nii'),
25 : os.path.join(basepath, 'fh25', 'warfh.nii'),
26 : os.path.join(basepath, 'fh26', 'warfh.nii'),
27 : os.path.join(basepath, 'fh27', 'warfh.nii'),
28 : os.path.join(basepath, 'fh28', 'warfh.nii')}
for scode, nii in subdatatable.items():
print("Masking {0} with {1}.".format(nii, mask))
# Isolate the maskname from its path
_, maskname = os.path.split(mask)
maskname, _ = os.path.splitext(maskname)
nii_data = nb.load(nii)
mask_data = nb.load(mask)
masked_nii_data = masknii(nii_data, mask_data, standard=False)
## Even though we're in MNI152 the q_form
## for the fidl converted data is not set correctly
## the s_form is what the q should be
## thus standard=False
saveas = os.path.join(
basepath, "roinii", "{0}_{1}.nii.gz".format(maskname, scode))
print("Saving {0}.".format(saveas))
nb.save(masked_nii_data, saveas)
| {
"repo_name": "parenthetical-e/wheelerdata",
"path": "preprocess/archive/roinii_fh_custom.py",
"copies": "1",
"size": "2279",
"license": "bsd-2-clause",
"hash": -5611509460986953000,
"line_mean": 33.0149253731,
"line_max": 73,
"alpha_frac": 0.5454146556,
"autogenerated": false,
"ratio": 3.1304945054945055,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8970299205311691,
"avg_score": 0.04112199115656279,
"num_lines": 67
} |
"""Apply the mask to the nifiti data
Usage: roinii_fh roifile
"""
import re
import os
import sys
import nibabel as nb
from roi.pre import join_time
from fmrilearn.load import load_roifile
from fmrilearn.preprocess.nii import findallnii
from fmrilearn.preprocess.nii import masknii
def create(args):
    """Create a roi dataset using all Ploran 2007 (i.e. butterfly)
    datasets.

    Parameters
    ----------
    args : tuple
        (mask, newname, subdatatable, basepath) packed into one argument
        so the function is compatible with pool.map():

        mask - name of a roi found in the Harvard Oxford atlas (see roi
            package for details)
        newname - what (if anything) to rename the roi data as,
            often the default name (i.e. mask) are rather long
        subdatatable - a dictionary whose keys are subject numbers
            and whose values are absolute paths to that Ss
            whole brain (functional) data.
        basepath - the top-level directory where all the Ss BOLD
            (and other) data lives.
    """
    # Unpack args; pool.map() only passes a single positional argument.
    mask, newname, subdatatable, basepath = args
    print("Mask is {0}.".format(mask))
    # (Removed unused local `maskednames`, which was initialized but
    # never appended to or returned.)
    for s in sorted(subdatatable.keys()):
        print("Running subject {0}.".format(s))
        # Look up the location of that Ss data,
        # and mask it, finally save the masked
        # file to disk
        datadir = subdatatable[s]
        saveas = os.path.join(basepath,
                'roinii', "{0}_{1}.nii.gz".format(newname, s))
        masknii(mask, datadir, save=saveas)
if __name__ == '__main__':
    """Command line invocation setup."""
    from multiprocessing import Pool
    # ----
    # User parameters
    basepath = os.getcwd()
    ncore = 3
    ## Set ncore > 1 if you want to
    ## parallelize the roi extractions
    # ----
    # Create a place for the roi data to
    # live if necessary.
    try:
        os.mkdir("./roinii")
    except OSError:
        # Directory already exists
        pass
    # ----
    # Process argv
    if len(sys.argv[1:]) != 1:
        raise ValueError("One argument are required.")
    rois, names = load_roifile(sys.argv[1])
    # ----
    # Link subjects with paths to data
    subdatatable = {9 : os.path.join(basepath, 'fh09', 'warfh.nii'),
            11 : os.path.join(basepath, 'fh11', 'warfh.nii'),
            13 : os.path.join(basepath, 'fh13', 'warfh.nii'),
            14 : os.path.join(basepath, 'fh14', 'warfh.nii'),
            15 : os.path.join(basepath, 'fh15', 'warfh.nii'),
            17 : os.path.join(basepath, 'fh17', 'warfh.nii'),
            19 : os.path.join(basepath, 'fh19', 'warfh.nii'),
            21 : os.path.join(basepath, 'fh21', 'warfh.nii'),
            23 : os.path.join(basepath, 'fh23', 'warfh.nii'),
            24 : os.path.join(basepath, 'fh24', 'warfh.nii'),
            25 : os.path.join(basepath, 'fh25', 'warfh.nii'),
            26 : os.path.join(basepath, 'fh26', 'warfh.nii'),
            27 : os.path.join(basepath, 'fh27', 'warfh.nii'),
            28 : os.path.join(basepath, 'fh28', 'warfh.nii')}
    # subdatatable = {
    # 14 : os.path.join(basepath, 'fh14', 'warfh.nii')}
    # Build up 4-tuples that contain all the args
    # create needs, iterate over all the entries
    # in the roifile
    arglist = []
    for roi, name in zip(rois, names):
        arglist.append((roi, name, subdatatable, basepath))
    # ---
    # Go!
    # Parallelize using the arglist and Pool
    pool = Pool(ncore)
    pool.map(create, arglist)
| {
"repo_name": "parenthetical-e/wheelerdata",
"path": "preprocess/archive/rfroinii_fh.py",
"copies": "1",
"size": "3479",
"license": "bsd-2-clause",
"hash": 6098314850761418000,
"line_mean": 30.9174311927,
"line_max": 69,
"alpha_frac": 0.5855130785,
"autogenerated": false,
"ratio": 3.404109589041096,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4489622667541096,
"avg_score": null,
"num_lines": null
} |
# applytheme.py
"""
Apply Theme
This script will apply the specified theme to all webs in the
specified web application.
Usage:
ipy applytheme.py --url http://myserver --theme Petal
Arguments:
--url - web application url
--theme - theme to apply
[--force] - apply the theme even if it is already applied
[--help] - display this message
"""
#
import sys
import sp
import scriptutil
__all__ = ["apply_theme"]
def main(argv):
    """Parse the command line and apply the requested theme.

    :param argv: argument list (excluding the program name)
    """
    args = scriptutil.getargs(argv, ["url=", "theme="], ["force"], __doc__, True)
    # 'in' membership works in both Python 2 and 3; dict.has_key() was
    # removed in Python 3 and is discouraged in Python 2.
    apply_theme(args["url"], args["theme"], "force" in args)
def apply_theme(url, theme, force=False):
"""Applies the theme to all webs within the web application"""
# always compare to lower case
theme = theme.lower()
def do_work(web):
# make sure we're comparing to lower case
wtheme = web.Theme.lower()
if (theme == "none" and wtheme != "") or (theme != "none" and wtheme != theme) or force == True:
print "Applying theme to", web.Url
web.ApplyTheme(theme)
# iterate over all sites, then all webs
sp.enum_sites(url,
lambda s:
sp.enum_all_webs(s, do_work))
if __name__ == '__main__':
    # Script entry point: pass CLI args (minus the program name) to main().
    main(sys.argv[1:])
| {
"repo_name": "glenc/sp.py",
"path": "src/applytheme.py",
"copies": "1",
"size": "1226",
"license": "bsd-3-clause",
"hash": -4057796243761332700,
"line_mean": 20.7037037037,
"line_max": 98,
"alpha_frac": 0.6247960848,
"autogenerated": false,
"ratio": 3.1435897435897435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42683858283897436,
"avg_score": null,
"num_lines": null
} |
# Apply the models to predict what ratings I will give for books on my To-Read list
from goodreads import client
import pandas as pd
import time
import pickle
from my_settings import api_key, api_secret, username # loading private information
from sklearn.externals import joblib
from sklearn.preprocessing import Imputer
gc = client.GoodreadsClient(api_key, api_secret)
user = gc.user(username=username)

# Get shelf: page through the user's 'to-read' shelf via the raw API.
page = 1
data = list()
try:
    while True:
        # NOTE(review): assumes each page returns a dict with a 'book'
        # entry; the loop terminates via the KeyError handler below when
        # a page lacks it — TODO confirm against the Goodreads API.
        r = gc.request("/review/list/{}.xml".format(user.gid), {'shelf': 'to-read', 'page': page}).get('books')
        for book in r['book']:
            data.append([book['title'],
                         book['authors']['author']['name'],
                         book['publication_year'],
                         book['average_rating'],
                         book['num_pages'],
                         book['ratings_count']])
        page += 1
        time.sleep(1)  # throttle requests to the API
except KeyError:  # done with reviews
    print('Could not grab additional reviews')
    pass
columns = ['title', 'author', 'publication_year', 'average_rating', 'number_pages', 'number_ratings']
df_shelf = pd.DataFrame(data, columns=columns)

# Populate with author information (one API lookup per distinct author).
author_data = list()
for name in set(df_shelf['author']):
    author = gc.find_author(name)
    print(name)
    try:
        # NOTE(review): Python 2 print statement — this script runs on
        # Python 2 only.
        print author._author_dict
        works = author.works_count
        fans = author.fans_count()['#text']
        gender = author.gender
    except:
        # Any missing author metadata falls back to empty strings.
        works, fans, gender = '', '', ''
    author_data.append([name, works, fans, gender])
    time.sleep(1)  # throttle requests to the API
columns = ['author', 'works', 'fans', 'gender']
df_author = pd.DataFrame(author_data, columns=columns)
# Merge with to-read books
df_shelf = pd.merge(df_shelf, df_author, how='left', left_on='author', right_on='author')
# Save for future use
with open('data/to-read_shelf.pkl', 'w') as f:
    pickle.dump(df_shelf, f)
# ==========================================================
# Alternative: load up existing shelf (skip the API calls above)
with open('data/to-read_shelf.pkl', 'r') as f:
    df_shelf = pickle.load(f)
# Load up my prior reviews
with open('data/reviews.pkl', 'r') as f:
    reviews = pickle.load(f)
# Process authors: encode each author as 0 (already read) or 1 (new).
data = df_shelf.copy()
read_authors = set(reviews['author'])
for author in set(data['author']):
    if author in read_authors:
        # print '{}: READ'.format(author.encode('utf-8').decode('ascii', errors='ignore'))
        data.replace(author, 0, inplace=True)
    else:
        # print '{}: NEW'.format(author.encode('utf-8').decode('ascii', errors='ignore'))
        data.replace(author, 1, inplace=True)
# Set gender to be 0/1. Nones are assumed male (0)
gender = {'male': 0, 'female': 1}
data.replace(to_replace=gender, inplace=True)
data.loc[data['gender'].isnull(), 'gender'] = 0
cols = ['author', 'publication_year', 'average_rating', 'number_pages', 'works', 'fans', 'number_ratings', 'gender']
data = data[cols].copy()
data.columns = ['Single-Read Authors'] + cols[1:]
# data.dropna(axis=0, inplace=True) # eliminate missing values
# Impute missing values column-wise with the mean
imp = Imputer(missing_values='NaN', strategy='mean', axis=0).fit(data)
data = pd.DataFrame(imp.transform(data), columns=data.columns)
# Load up models and apply them
# NOTE(review): models are unpickled from disk — presumably trained by a
# sibling script in this repo; verify the feature columns match.
rf = joblib.load('data/model/ratings_model.pkl')
rf_reg = joblib.load('data/model/positivity_model.pkl')
rating = rf.predict(data)
positivity = rf_reg.predict(data)
# Examine predictions
data['rating'] = rating
data['positivity'] = positivity
final = df_shelf[['title', 'author']].join(data).dropna(axis=0)
final.sort_values(by=['rating', 'average_rating'], axis=0, ascending=False).head(5)
final.sort_values(by=['positivity', 'average_rating'], axis=0, ascending=False).head(5)
# Intersection of top-rated, top-positive
pos_10 = final['positivity'].sort_values(ascending=False)[int(len(final)*0.1)+1]  # 10%
best = final[(final['rating'] == 2) & (final['positivity'] > pos_10)]  # 1 if binary 5-star/not, 2 if 5/4/less, or 5
cols = ['title', 'author', 'average_rating', 'positivity']
best['title'] = best['title'].apply(lambda x: x.split('(')[0].strip())  # eliminate series designation. Gives a warning
final_best = best[cols].sort_values(by=['positivity', 'average_rating'], axis=0, ascending=False)
nice_cols = ['Title', 'Author', 'Average Rating', 'Positivity']
final_best.columns = nice_cols
print(final_best)
final_best.to_html() | {
"repo_name": "dr-rodriguez/Exploring-Goodreads",
"path": "prediction.py",
"copies": "1",
"size": "4402",
"license": "mit",
"hash": -784526257825142100,
"line_mean": 35.0901639344,
"line_max": 119,
"alpha_frac": 0.6453884598,
"autogenerated": false,
"ratio": 3.2826249067859807,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4428013366585981,
"avg_score": null,
"num_lines": null
} |
"""Apply the same action to the simulated and real A1 robot.
As a basic debug tool, this script allows you to execute the same action
(which you choose from the pybullet GUI) on the simulation and real robot
simultaneouly. Make sure to put the real robbot on rack before testing.
"""
from absl import app
from absl import logging
import numpy as np
import time
from tqdm import tqdm
import pybullet # pytype:disable=import-error
import pybullet_data
from pybullet_utils import bullet_client
from motion_imitation.robots import a1_robot
from motion_imitation.robots import robot_config
FREQ = 0.5
def main(_):
  """Drive identical position commands on the A1 robot.

  Ramps the motors from their current pose to a nominal standing pose,
  then tracks a sinusoidal hip/calf trajectory while printing the base
  velocity each step.
  """
  logging.info(
      "WARNING: this code executes low-level controller on the robot.")
  logging.info("Make sure the robot is hang on rack before proceeding.")
  input("Press enter to continue...")

  # Set up the pybullet client and the robot interface.
  client = bullet_client.BulletClient(connection_mode=pybullet.DIRECT)
  client.setAdditionalSearchPath(pybullet_data.getDataPath())
  robot = a1_robot.A1Robot(pybullet_client=client, action_repeat=1)

  # Slowly blend from the current pose to the nominal standing pose.
  robot.ReceiveObservation()
  start_pose = np.array(robot.GetMotorAngles())
  target_pose = np.array([0., 0.9, -1.8] * 4)
  for step in tqdm(range(300)):
    alpha = np.minimum(step / 200., 1)
    command = (1 - alpha) * start_pose + alpha * target_pose
    robot.Step(command, robot_config.MotorControlMode.POSITION)
    time.sleep(0.005)

  # Track a sinusoidal curve with the legs.
  for step in tqdm(range(1000)):
    hip = 0.9 + 0.2 * np.sin(2 * np.pi * FREQ * 0.01 * step)
    calf = -2 * hip
    command = np.array([0., hip, calf] * 4)
    robot.Step(command, robot_config.MotorControlMode.POSITION)
    time.sleep(0.007)
    # print(robot.GetFootContacts())
    print(robot.GetBaseVelocity())

  robot.Terminate()
if __name__ == '__main__':
  # absl entry point: parses flags, then invokes main().
  app.run(main)
| {
"repo_name": "google-research/motion_imitation",
"path": "motion_imitation/examples/a1_robot_exercise.py",
"copies": "1",
"size": "1931",
"license": "apache-2.0",
"hash": -5347985609148930000,
"line_mean": 32.2931034483,
"line_max": 73,
"alpha_frac": 0.7120662869,
"autogenerated": false,
"ratio": 3.245378151260504,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4457444438160504,
"avg_score": null,
"num_lines": null
} |
"""Apply thresholds to arrays.
The following work on 2D arrays.
"""
from skimage import filter
def binary(parameters):
    """Threshold an array into a 0/1 uint8 mask.

    :param parameters['data'][0]: input array
    :type parameters['data'][0]: numpy.array
    :param parameters['threshold']: the threshold value
    :type parameters['threshold']: integer or float
    :return: numpy.array, with dtype('uint8') containing 0 or 1 values
    """
    arr = parameters['data'][0]
    mask = arr > parameters['threshold']
    return mask.astype('uint8')
def otsu(parameters):
    """Binarise an array using the automatically computed Otsu threshold.

    Wraps `skimage.filter.threshold_otsu`; the `nbins` option is not
    supported.

    :param parameters['data'][0]: input array
    :type parameters['data'][0]: numpy.array
    :return: numpy.array, with dtype('uint8') containing 0 or 1 values
    """
    arr = parameters['data'][0]
    cutoff = filter.threshold_otsu(arr)
    return (arr > cutoff).astype('uint8')
| {
"repo_name": "cpsaltis/pythogram-core",
"path": "src/gramcore/filters/thresholds.py",
"copies": "1",
"size": "1084",
"license": "mit",
"hash": -3915828157225141000,
"line_mean": 21.5833333333,
"line_max": 71,
"alpha_frac": 0.6697416974,
"autogenerated": false,
"ratio": 4.059925093632959,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5229666791032959,
"avg_score": null,
"num_lines": null
} |
"""Apply transformations that work on (almost) all entities."""
import itertools
from collections import defaultdict
from srctools.bsp_transform import trans, Context
from srctools.logger import get_logger
from srctools import Output
from srctools.packlist import FileType
LOGGER = get_logger(__name__)
@trans('Attachment Points')
def att_points(ctx: Context) -> None:
    """Let a separate keyvalue specify the attachment point, merged into parentname."""
    for ent in ctx.vmf.entities:
        attach = ent['parent_attachment_point']
        if not attach:
            continue
        base_parent = ent['parentname'].rsplit(',', 1)[0]
        if base_parent:
            ent['parentname'] = base_parent + ',' + attach
        else:
            LOGGER.warning(
                'No parent, but attachment point set for "{}"? ({})',
                ent['targetname'],
                ent['origin'],
            )
@trans('VScript Init Code')
def vscript_init_code(ctx: Context) -> None:
    """Support vscript_init_code keyvalues (plus numbered _code2, _code3, ...).

    The collected code is appended as a script file at the end of the
    entity's scripts, joined in numeric order.
    """
    for ent in ctx.vmf.entities:
        first = ent.keys.pop('vscript_init_code', '')
        if not first:
            continue
        chunks = [first]
        for num in itertools.count(2):
            chunk = ent.keys.pop('vscript_init_code' + str(num), '')
            if not chunk:
                break
            chunks.append(chunk)
        ctx.add_code(ent, '\n'.join(chunks))
@trans('VScript RunScript Inputs')
def vscript_runscript_inputs(ctx: Context) -> None:
    """Post-process RunScript* inputs.

    RunScriptFile parameters are packed into the map.  RunScriptCode
    parameters may use ` as a stand-in for double quotes; such code is
    written to a synthesised script file and the output rewritten to
    RunScriptFile.
    """
    for ent in ctx.vmf.entities:
        for out in ent.outputs:
            inp = out.input.casefold()
            if inp == 'runscriptfile':
                ctx.pack.pack_file('scripts/vscripts/' + out.params, FileType.VSCRIPT_SQUIRREL)
            elif inp == 'runscriptcode' and '`' in out.params:
                out.params = ctx.pack.inject_vscript(out.params.replace('`', '"'))
                out.input = 'RunScriptFile'
@trans('Optimise logic_auto', priority=50)
def optimise_logic_auto(ctx: Context) -> None:
    """Collapse redundant logic_auto entities into one per global state."""
    recognised = {'origin', 'angles', 'spawnflags', 'globalstate'}
    # Maps (global state, only_once) -> collected outputs.
    merged: dict[tuple[str, bool], list[Output]] = defaultdict(list)
    for auto in ctx.vmf.by_class['logic_auto']:
        # Any unrecognised keyvalue (a name, VScript, template hijinks...)
        # means something else may reference this auto — leave it alone.
        has_extra = any(
            value and key.casefold() not in recognised
            for key, value in auto.keys.items()
        )
        if has_extra:
            continue
        auto.remove()
        state = auto['globalstate', '']
        fire_once = auto['spawnflags', '0'] == '1'
        for out in auto.outputs:
            # OnMapSpawn is known to fire exactly once.
            if fire_once or out.output.casefold() == 'onmapspawn':
                out.only_once = True
            merged[state, out.only_once].append(out)
    for (state, fire_once), outputs in merged.items():
        ctx.vmf.create_ent(
            classname='logic_auto',
            globalstate=state,
            origin='0 0 0',
            spawnflags=fire_once,
        ).outputs = outputs
@trans('Strip Entities', priority=50)
def strip_ents(ctx: Context) -> None:
    """Strip useless entities from the map."""
    # None of these classnames are defined by the engine itself, so any
    # instances left in the map are dead weight.
    for clsname in (
        'hammer_notes',
        'func_instance_parms',
        'func_instance_origin',
    ):
        for ent in ctx.vmf.by_class[clsname]:
            ent.remove()
    # Drop the empty '...divider...' keys added in the engine.
    for ent in ctx.vmf.entities:
        doomed = [
            key for key, value in ent.keys.items()
            if 'divider' in key and value == ""
        ]
        for key in doomed:
            del ent.keys[key]
| {
"repo_name": "TeamSpen210/srctools",
"path": "srctools/bsp_transform/globals.py",
"copies": "1",
"size": "4472",
"license": "unlicense",
"hash": -1881577694851794700,
"line_mean": 31.8823529412,
"line_max": 95,
"alpha_frac": 0.5843023256,
"autogenerated": false,
"ratio": 3.9331574318381706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00016626315160210426,
"num_lines": 136
} |
# Apply White or Black Background Mask
import cv2
from . import print_image
from . import plot_image
from . import fatal_error
def apply_mask(img, mask, mask_color, device, debug=None):
    """Apply white image mask to image, with bitwise AND operator bitwise NOT operator and ADD operator.

    Inputs:
    img        = image object, color(RGB)
    mask       = image object, binary (black background with white object)
    mask_color = white or black
    device     = device number. Used to count steps in the pipeline
    debug      = None, print, or plot. Print = save to file, Plot = print to screen.

    Returns:
    device     = device number
    masked_img = masked image

    :param img: numpy array
    :param mask: numpy array
    :param mask_color: str
    :param device: int
    :param debug: str
    :return device: int
    :return masked_img: numpy array
    """
    device += 1
    if mask_color == 'white':
        # Mask image
        masked_img = cv2.bitwise_and(img, img, mask=mask)
        # Create inverted mask for background
        mask_inv = cv2.bitwise_not(mask)
        # Invert the background so that it is white, but apply mask_inv so you don't white out the plant
        white_mask = cv2.bitwise_not(masked_img, mask=mask_inv)
        # Add masked image to white background (can't just use mask_inv because that is a binary)
        white_masked = cv2.add(masked_img, white_mask)
        if debug == 'print':
            print_image(white_masked, (str(device) + '_wmasked.png'))
        elif debug == 'plot':
            plot_image(white_masked)
        return device, white_masked
    elif mask_color == 'black':
        masked_img = cv2.bitwise_and(img, img, mask=mask)
        if debug == 'print':
            print_image(masked_img, (str(device) + '_bmasked.png'))
        elif debug == 'plot':
            plot_image(masked_img)
        return device, masked_img
    else:
        # Bug fix: added the missing space after 'Mask Color' so the error
        # message reads correctly (matches the newer plantcv implementation).
        fatal_error('Mask Color ' + str(mask_color) + ' is not "white" or "black"!')
| {
"repo_name": "AntonSax/plantcv",
"path": "plantcv/apply_mask.py",
"copies": "2",
"size": "1999",
"license": "mit",
"hash": 8563320097765894000,
"line_mean": 35.3454545455,
"line_max": 104,
"alpha_frac": 0.6233116558,
"autogenerated": false,
"ratio": 3.8076190476190477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5430930703419047,
"avg_score": null,
"num_lines": null
} |
# Apply White or Black Background Mask
import os
import cv2
import numpy as np
from plantcv.plantcv import params
from plantcv.plantcv import plot_image
from plantcv.plantcv import print_image
from plantcv.plantcv import fatal_error
from plantcv.plantcv.transform import rescale
def apply_mask(img, mask, mask_color):
    """Paint the masked-out pixels of an image white or black.

    Inputs:
    img = RGB image data
    mask = Binary mask image data
    mask_color = 'white' or 'black'

    Returns:
    masked_img = masked image data

    :param img: numpy.ndarray
    :param mask: numpy.ndarray
    :param mask_color: str
    :return masked_img: numpy.ndarray
    """
    params.device += 1
    color_key = mask_color.upper()
    if color_key == "WHITE":
        color_val = 255
    elif color_key == "BLACK":
        color_val = 0
    else:
        fatal_error('Mask Color ' + str(mask_color) + ' is not "white" or "black"!')
    # Overwrite every pixel where the mask is zero with the fill value.
    array_data = img.copy()
    array_data[np.where(mask == 0)] = color_val
    # Check the array data format
    if len(np.shape(array_data)) > 2 and np.shape(array_data)[-1] > 3:
        # Multispectral input: build a pseudo-RGB preview from the first,
        # middle and last bands for the debug output.
        # Replace this part with _make_pseudo_rgb
        band_total = np.shape(array_data)[2]
        mid_band = int(band_total / 2)
        saved_debug = params.debug
        params.debug = None
        pseudo_rgb = cv2.merge((rescale(array_data[:, :, 0]),
                                rescale(array_data[:, :, mid_band]),
                                rescale(array_data[:, :, band_total - 1])))
        params.debug = saved_debug
        if params.debug == 'print':
            print_image(pseudo_rgb, os.path.join(params.debug_outdir, str(params.device) + '_masked.png'))
        elif params.debug == 'plot':
            plot_image(pseudo_rgb)
    else:
        if params.debug == 'print':
            print_image(array_data, os.path.join(params.debug_outdir, str(params.device) + '_masked.png'))
        elif params.debug == 'plot':
            plot_image(array_data)
    return array_data
| {
"repo_name": "stiphyMT/plantcv",
"path": "plantcv/plantcv/apply_mask.py",
"copies": "2",
"size": "2068",
"license": "mit",
"hash": -8752908898814727000,
"line_mean": 30.3333333333,
"line_max": 106,
"alpha_frac": 0.6039651838,
"autogenerated": false,
"ratio": 3.640845070422535,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5244810254222535,
"avg_score": null,
"num_lines": null
} |
"""App middleware."""
import httplib
import logging
import sys
import webapp2
from webapp2_extras import securecookie
from webapp2_extras import security
from webapp2_extras import sessions
from . import error
from error import Abort
from . import model
from . import settings
from . import shared
from google.appengine.api import users
# session key to store the anonymous user object
_ANON_USER_KEY = u'anon_user_key'
# AngularJS XSRF Cookie, see http://docs.angularjs.org/api/ng.$http
_XSRF_TOKEN_COOKIE = 'XSRF-TOKEN'
# AngularJS XSRF HTTP Header, see http://docs.angularjs.org/api/ng.$http
_XSRF_TOKEN_HEADER = 'HTTP_X_XSRF_TOKEN'
def MakeCookieHeader(name, value, cookie_args=None):
    """Build a ('set-cookie', ...) WSGI header tuple.

    cookie_args, when provided, is a dict whose boolean 'secure' and
    'httponly' entries control the corresponding cookie attributes.
    """
    parts = ['{}={}'.format(name, value), 'Path=/']
    if cookie_args:
        if cookie_args['secure']:
            parts.append('secure')
        if cookie_args['httponly']:
            parts.append('HttpOnly')
    return ('set-cookie', '; '.join(parts))
# TODO: use datastore sequence instead
def MakeAnonUserKey():
    """Generate a random 'user_xxxxxxxxxx' key for an anonymous user."""
    rand = security.generate_random_string(
        length=10, pool=security.LOWERCASE_ALPHANUMERIC)
    return 'user_' + rand
def AdoptAnonymousProjects(dest_user_key, source_user_key):
    """Transfer the anonymous user's projects to the signed-in user (thin wrapper)."""
    model.AdoptProjects(dest_user_key, source_user_key)
def GetOrMakeSession(request):
    """Fetch the current session, initialising XSRF/anonymous state as needed."""
    store = sessions.get_store(request=request)
    session = store.get_session()
    if not session:
        # Brand-new session: seed it with an XSRF token.
        session['xsrf'] = security.generate_random_string(entropy=128)
    user = users.get_current_user()
    if user:
        if _ANON_USER_KEY in session:
            # Just signed in with leftover anonymous state: migrate the
            # anonymous user's projects, then drop the anonymous key.
            AdoptAnonymousProjects(user.email(), session[_ANON_USER_KEY])
            del session[_ANON_USER_KEY]
    elif _ANON_USER_KEY not in session:
        session[_ANON_USER_KEY] = MakeAnonUserKey()
    return session
def GetUserKey(session):
    """Return the signed-in user's email, or the session's anonymous user key."""
    user = users.get_current_user()
    return user.email() if user else session[_ANON_USER_KEY]
def _PerformCsrfRequestValidation(session, environ):
    """Abort with 401 unless the client's XSRF token matches the session's."""
    expected = session['xsrf']
    received = environ.get(_XSRF_TOKEN_HEADER)
    if not received:
        Abort(httplib.UNAUTHORIZED, 'Missing client XSRF token.')
    if received != expected:
        # do not log tokens in production
        if shared.IsDevMode():
            logging.error('Client XSRF token={0!r}, session XSRF token={1!r}'
                          .format(received, expected))
        Abort(httplib.UNAUTHORIZED,
              'Client XSRF token does not match session XSRF token.')
class Session(object):
    """WSGI middleware which adds user/project sessions.

    Adds the following keys to the environ:
    - environ['app.session'] contains a webapp2 session
    - environ['app.user'] contains the current user entity
    """
    def __init__(self, app, config):
        # app    = the wrapped WSGI application
        # config = webapp2 config; must contain the session secret key
        self.app = app
        self.app.config = webapp2.Config(config)
        secret_key = config['webapp2_extras.sessions']['secret_key']
        self.serializer = securecookie.SecureCookieSerializer(secret_key)
    def MakeSessionCookieHeader(self, session):
        """Serialize the session into a signed set-cookie header tuple."""
        value = self.serializer.serialize(settings.SESSION_COOKIE_NAME,
                                          dict(session))
        # Wrap the serialized value in double quotes.
        value = '"{}"'.format(value)
        return MakeCookieHeader(settings.SESSION_COOKIE_NAME, value,
                                settings.SESSION_COOKIE_ARGS)
    def MakeXsrfCookieHeader(self, session):
        """Build the AngularJS-style XSRF-TOKEN cookie header tuple."""
        return MakeCookieHeader(_XSRF_TOKEN_COOKIE, session['xsrf'],
                                settings.XSRF_COOKIE_ARGS)
    def __call__(self, environ, start_response):
        # Headers appended lazily by the start_response wrapper below;
        # populated only when the session was modified this request.
        additional_headers = []
        # pylint:disable-msg=invalid-name
        def custom_start_response(status, headers, exc_info=None):
            headers.extend(additional_headers)
            # keep session cookies private
            headers.extend([
                # Note App Engine automatically sets a 'Date' header for us. See
                # https://developers.google.com/appengine/docs/python/runtime#Responses
                ('Expires', settings.LONG_AGO),
                ('Cache-Control', 'private, max-age=0'),
            ])
            return start_response(status, headers, exc_info)
        # 1. ensure we have a session
        request = webapp2.Request(environ, app=self.app)
        session = environ['app.session'] = GetOrMakeSession(request)
        if session.modified:
            additional_headers.extend([
                self.MakeSessionCookieHeader(session),
                self.MakeXsrfCookieHeader(session),
            ])
        # 2. ensure we have an user entity
        user_key = GetUserKey(session)
        assert user_key
        # TODO: avoid creating a datastore entity on every anonymous request
        environ['app.user'] = model.GetOrCreateUser(user_key)
        # 3. perform CSRF checks (only for state-changing HTTP methods)
        if not shared.IsHttpReadMethod(environ):
            _PerformCsrfRequestValidation(session, environ)
        return self.app(environ, custom_start_response)
class PlaintextSecretExtractor(object):
    """WSGI middleware which extracts plaintext secrets.

    Copies the 'plaintext_secret' request parameter, when present, into
    the WSGI environ so downstream handlers can read it.
    """
    def __init__(self, app):
        self.app = app
    def __call__(self, environ, start_response):
        request = webapp2.Request(environ, app=self.app)
        plaintext_secret = request.get('plaintext_secret')
        if plaintext_secret:
            # Bug fix: the original assigned the literal string
            # 'plaintext_secret' to the environ instead of the extracted
            # secret value, so downstream code never saw the secret.
            environ['plaintext_secret'] = plaintext_secret
        return self.app(environ, start_response)
class ErrorHandler(object):
    """WSGI middleware which adds AppError handling.

    Catches any exception raised by the wrapped app and converts it to a
    well-formed error response via error.MakeErrorResponse().
    """
    def __init__(self, app, debug):
        # app   = the wrapped WSGI application
        # debug = whether error responses should include debug detail
        self.app = app
        self.debug = debug
    def __call__(self, environ, start_response):
        if shared.IsDevMode():
            # Emit a blank line as a visual separator between requests
            # in the dev-server log.
            logging.info('\n' * 1)
        try:
            return self.app(environ, start_response)
        except Exception, e:  # pylint:disable-msg=broad-except
            # Convert the failure to an HTTP error response; exc_info is
            # forwarded to start_response per the WSGI spec.
            status, headers, body = error.MakeErrorResponse(e, self.debug)
            start_response(status, headers, sys.exc_info())
            return body
| {
"repo_name": "fredsa/instant-tty",
"path": "gae/middleware.py",
"copies": "1",
"size": "5826",
"license": "apache-2.0",
"hash": 4269562291080496000,
"line_mean": 29.8253968254,
"line_max": 81,
"alpha_frac": 0.6850326124,
"autogenerated": false,
"ratio": 3.6641509433962263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9804191642990294,
"avg_score": 0.008998382561186187,
"num_lines": 189
} |
# app/miembros/forms.py
# coding: utf-8
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, HiddenField, SelectField
from wtforms import DateField, BooleanField
from wtforms.validators import InputRequired, Length, Optional, DataRequired
class MiembroForm(FlaskForm):
    """Form for creating/editing a member.

    NOTE(review): the original docstring said "Formulario para familias"
    ("form for families"), which looks like a copy/paste slip — the
    fields below describe an individual member (Miembro).
    """
    # Hidden keys: the member record id and its linked address id.
    id = HiddenField("id")
    id_direccion = HiddenField("idDir")
    # Member model fields (original comment said "Modelo Familia").
    nombres = StringField(u'Nombre/s',
                          validators=[DataRequired(),
                                      Length(min=1, max=100)])
    apellidos = StringField(u'Apellido/s',
                            validators=[DataRequired(),
                                        Length(min=1, max=100)])
    dni_doc = StringField(u'DNI/Doc.',
                          validators=[Length(min=0, max=20)])
    email = StringField(u'Email (si son niños poner el email\
                        de alguno de los padres/tutores)',
                        validators=[DataRequired(),
                                    Length(min=0, max=60)])
    telefono_movil = StringField(u'Móvil',
                                 validators=[Length(min=0, max=15)])
    telefono_fijo = StringField(u'Fijo',
                                validators=[Length(min=0, max=15)])
    fecha_nac = DateField(u'Fecha de Nacimiento', validators=[DataRequired()])
    fecha_inicio_icecha = DateField(u'Fecha de Inicio en Iglesia',
                                    validators=[Optional()])
    fecha_miembro = DateField(u'Fecha de Membresía', validators=[Optional()])
    fecha_bautismo = DateField(u'Fecha de Bautismo', validators=[Optional()])
    lugar_bautismo = StringField(u'Lugar de Bautismo',
                                 validators=[Length(min=0, max=50)])
    hoja_firmada = BooleanField(u'¿Tiene firmada la hoja de membresía?')
    nro_hoja = StringField(u'# de Hoja de Membresía (formato AAAA-NRO)')
    observaciones = StringField(u'Observaciones',
                                validators=[Length(min=0, max=500)])
    # Select fields: choices are populated per-request in the view
    # (see crear_miembro), hence coerce=int with no static options here.
    EstadoCivil = SelectField(u'Estado Civil', coerce=int)
    TipoMiembro = SelectField(u'Tipo de Miembro', coerce=int)
    RolFamiliar = SelectField(u'Rol Familiar', coerce=int)
    Familia = SelectField(u'Familia', coerce=int)
    GrupoCasero = SelectField(u'Grupo Casero', coerce=int)
    submit = SubmitField(u'Aceptar')
class DireccionModalForm(FlaskForm):
    """Modal form for creating an address (Direccion model)."""
    # Address model fields
    tipo_via = StringField(u'Tipo de vía',
                           validators=[InputRequired(),
                                       Length(min=1, max=20)])
    nombre_via = StringField(u'Nombre de la vía',
                             validators=[InputRequired(),
                                         Length(min=1, max=100)])
    nro_via = StringField(u'Nro',
                          validators=[InputRequired(),
                                      Length(min=1, max=10)])
    portalescalotros_via = StringField(u'Portal/Esc/Otro')
    piso_nroletra_via = StringField(u'Nro/Letra del Piso')
    cp_via = StringField(u'CP',
                         validators=[InputRequired(),
                                     Length(min=1, max=10)])
    ciudad_via = StringField(u'Ciudad',
                             validators=[InputRequired(),
                                         Length(min=1, max=50)])
    provincia_via = StringField(u'Provincia',
                                validators=[InputRequired(),
                                            Length(min=1, max=50)])
    pais_via = StringField(u'País',
                           validators=[InputRequired(),
                                       Length(min=1, max=50)])
    submit = SubmitField(u'Crear Dirección')
class AsignarRolesForm(FlaskForm):
    """Form for assigning roles to a person."""
    # Hidden: target person id and a flag distinguishing the role flow.
    id_persona = HiddenField("id")
    flag_rol = HiddenField("flag")
    # Pre-selected role ids; choices are filled in by the view.
    preselectedoptions = SelectField('Ids IN')
    btnAsignarRoles = SubmitField(u'Aceptar')
| {
"repo_name": "originaltebas/chmembers",
"path": "app/miembros/forms.py",
"copies": "1",
"size": "4059",
"license": "mit",
"hash": -8986223490170150000,
"line_mean": 39.3163265306,
"line_max": 78,
"alpha_frac": 0.5381575698,
"autogenerated": false,
"ratio": 3.787652011225444,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4825809581025444,
"avg_score": null,
"num_lines": null
} |
# app/miembros/views.py
# coding: utf-8
from flask import abort, flash, jsonify
from flask import redirect, render_template, url_for, request
from flask_login import current_user, login_required
from sqlalchemy import func

from app import db
from app.miembros import miembros
from app.miembros.forms import MiembroForm, DireccionModalForm
from app.miembros.forms import AsignarRolesForm
from app.models import Miembro, Direccion, relacion_miembros_roles
from app.models import Rol, EstadoCivil, TipoMiembro, RolFamiliar
from app.models import Familia, GrupoCasero
def check_edit_or_admin():
    """Send users without editor/admin rights back to the home hub.

    Bug fix: the original *returned* the redirect response, but every
    caller invokes this as a bare statement and discards the return
    value, so access control was silently skipped.  Raising the redirect
    through flask.abort() (which accepts a Response object) actually
    enforces the check while keeping the bare-call usage pattern.
    """
    if not current_user.get_urole() >= 1:
        abort(redirect(url_for("home.hub")))
@miembros.route('/miembros', defaults={'cadena': ""}, methods=['GET'])
@miembros.route('/miembros/<string:cadena>', methods=['GET'])
@login_required
def ver_miembros(cadena):
    """List all members, optionally filtered by a name substring (cadena)."""
    # Restrict to editors/admins.
    check_edit_or_admin()
    flag_listar = True
    # Subquery: per-member count of assigned roles ('contar').
    nro_roles = db.session.query(Miembro.id,
                                 func.count(Rol.id).label('contar'))\
                          .outerjoin(relacion_miembros_roles,
                                     Miembro.id ==
                                     relacion_miembros_roles.c.id_miembro)\
                          .outerjoin(Rol,
                                     Rol.id ==
                                     relacion_miembros_roles.c.id_rol)\
                          .group_by(Miembro).subquery()
    # Main query: members joined with address, member type and role count.
    query_miembros = db.session.query(Miembro)\
                               .outerjoin(Direccion,
                                          Miembro.id_direccion ==
                                          Direccion.id)\
                               .outerjoin(TipoMiembro,
                                          Miembro.id_tipomiembro ==
                                          TipoMiembro.id)\
                               .outerjoin(nro_roles,
                                          Miembro.id ==
                                          nro_roles.c.id)\
                               .add_columns(
                                   Miembro.id,
                                   Miembro.apellidos,
                                   Miembro.nombres,
                                   Miembro.email,
                                   Miembro.telefono_fijo,
                                   Miembro.telefono_movil,
                                   Miembro.id_familia,
                                   Miembro.id_grupocasero,
                                   TipoMiembro.nombre_tipomiembro,
                                   Direccion.tipo_via,
                                   Direccion.nombre_via,
                                   Direccion.nro_via,
                                   Direccion.portalescalotros_via,
                                   Direccion.cp_via,
                                   Direccion.ciudad_via,
                                   Direccion.provincia_via,
                                   Direccion.pais_via,
                                   nro_roles.c.contar)
    if cadena != "":
        # Substring match on the member's full name.
        # NOTE(review): Miembro.fullname is presumably a hybrid/computed
        # attribute on the model — confirm it supports .like().
        cadena = "%{}%".format(cadena)
        query_miembros = query_miembros.filter(Miembro.fullname.like(cadena))
    return render_template('miembros/base_miembros.html',
                           miembros=query_miembros,
                           flag_listar=flag_listar)
@miembros.route('/miembros/crear', methods=['GET', 'POST'])
@login_required
def crear_miembro():
    """
    Create a new member.

    GET renders the empty form; POST validates it and inserts the row,
    answering JSON {status, url[, errors]} for the AJAX caller.
    """
    check_edit_or_admin()
    # Template flags: tells the shared template this is "create" mode.
    flag_crear = True
    flag_listar = False
    form = MiembroForm()
    # Populate the select widgets from the lookup tables.
    form.EstadoCivil.choices = [(row.id, row.nombre_estado)
                                for row in EstadoCivil.query.all()]
    form.TipoMiembro.choices = [(row.id, row.nombre_tipomiembro)
                                for row in TipoMiembro.query.all()]
    form.RolFamiliar.choices = [(row.id, row.nombre_rolfam)
                                for row in RolFamiliar.query.all()]
    form.Familia.choices = [(row.id, row.apellidos_familia)
                            for row in Familia.query.all()]
    form.GrupoCasero.choices = [(row.id, row.nombre_grupo)
                                for row in GrupoCasero.query.all()]
    if request.method == "POST":
        if form.validate_on_submit():
            # Map form fields onto a new Miembro row.
            obj_miembro = Miembro(nombres=form.nombres.data,
                                  apellidos=form.apellidos.data,
                                  dni_doc=form.dni_doc.data,
                                  email=form.email.data,
                                  telefono_movil=form.telefono_movil.data,
                                  telefono_fijo=form.telefono_fijo.data,
                                  fecha_nac=form.fecha_nac.data,
                                  fecha_inicio_icecha=form.fecha_inicio_icecha.data,
                                  fecha_miembro=form.fecha_miembro.data,
                                  fecha_bautismo=form.fecha_bautismo.data,
                                  lugar_bautismo=form.lugar_bautismo.data,
                                  hoja_firmada=form.hoja_firmada.data,
                                  nro_hoja=form.nro_hoja.data,
                                  observaciones=form.observaciones.data,
                                  id_estadocivil=form.EstadoCivil.data,
                                  id_tipomiembro=form.TipoMiembro.data,
                                  id_rolfamiliar=form.RolFamiliar.data,
                                  id_familia=form.Familia.data,
                                  id_grupocasero=form.GrupoCasero.data,
                                  id_direccion=form.id_direccion.data)
            try:
                db.session.add(obj_miembro)
                db.session.commit()
                flash('Has guardado los datos correctamente', 'success')
                status = 'ok'
            except Exception as e:
                flash('Error: ' + str(e), 'danger')
                status = 'ko'
            # submitted and validated: client redirects to the list
            url = url_for('miembros.ver_miembros')
            return jsonify(status=status, url=url)
        else:
            # validation error: build an HTML summary of field errors
            status = 'val'
            url = url_for('miembros.crear_miembro')
            er = ""
            for field, errors in form.errors.items():
                for error in errors:
                    er = er + "Campo: " +\
                         getattr(form, field).label.text +\
                         " - Error: " +\
                         error + "<br/>"
            return jsonify(status=status, url=url, errors=er)
    else:
        # GET: render the empty create form
        return render_template('miembros/base_miembros.html',
                               flag_crear=flag_crear,
                               flag_listar=flag_listar, form=form)
@miembros.route('/miembros/modificar/<int:id>',
                methods=['GET', 'POST'])
@login_required
def modif_miembro(id):
    """
    Edit an existing member.

    GET pre-fills the member form (plus the address modal form); POST
    validates and updates the row, answering JSON for the AJAX caller.

    :param id: primary key of the Miembro to edit (404 if missing).
    """
    check_edit_or_admin()
    flag_crear = False
    flag_listar = False
    # Loaded field-by-field so the address can be swapped for a new one
    # created from the modal without reloading the member.
    obj_miembro = Miembro.query.get_or_404(id)
    form_miembro = MiembroForm(obj=obj_miembro)
    # Populate the select widgets from the lookup tables.
    form_miembro.EstadoCivil.choices = [(row.id, row.nombre_estado)
                                        for row in EstadoCivil.query.all()]
    form_miembro.TipoMiembro.choices = [(row.id, row.nombre_tipomiembro)
                                        for row in TipoMiembro.query.all()]
    form_miembro.RolFamiliar.choices = [(row.id, row.nombre_rolfam)
                                        for row in RolFamiliar.query.all()]
    form_miembro.Familia.choices = [(row.id, row.apellidos_familia)
                                    for row in Familia.query.all()]
    form_miembro.GrupoCasero.choices = [(row.id, row.nombre_grupo)
                                        for row in GrupoCasero.query.all()]
    if request.method == 'GET':
        # Pre-select the current foreign-key values in the form.
        obj_dir = Direccion.query.get_or_404(obj_miembro.id_direccion)
        form_dir = DireccionModalForm(obj=obj_dir)
        form_miembro.id_direccion.data = obj_miembro.id_direccion
        form_miembro.EstadoCivil.data = obj_miembro.id_estadocivil
        form_miembro.TipoMiembro.data = obj_miembro.id_tipomiembro
        form_miembro.RolFamiliar.data = obj_miembro.id_rolfamiliar
        form_miembro.Familia.data = obj_miembro.id_familia
        form_miembro.GrupoCasero.data = obj_miembro.id_grupocasero
        return render_template('miembros/base_miembros.html',
                               flag_crear=flag_crear,
                               flag_listar=flag_listar,
                               form=form_miembro,
                               form_dir=form_dir)
    elif request.method == "POST":
        if form_miembro.validate_on_submit():
            # Copy every editable field back onto the loaded row.
            obj_miembro.nombres = form_miembro.nombres.data
            obj_miembro.apellidos = form_miembro.apellidos.data
            obj_miembro.dni_doc = form_miembro.dni_doc.data
            obj_miembro.email = form_miembro.email.data
            obj_miembro.telefono_movil = form_miembro.telefono_movil.data
            obj_miembro.telefono_fijo = form_miembro.telefono_fijo.data
            obj_miembro.fecha_nac = form_miembro.fecha_nac.data
            obj_miembro.fecha_inicio_icecha = form_miembro.fecha_inicio_icecha.data
            obj_miembro.fecha_miembro = form_miembro.fecha_miembro.data
            obj_miembro.fecha_bautismo = form_miembro.fecha_bautismo.data
            obj_miembro.lugar_bautismo = form_miembro.lugar_bautismo.data
            obj_miembro.hoja_firmada = form_miembro.hoja_firmada.data
            obj_miembro.nro_hoja = form_miembro.nro_hoja.data
            obj_miembro.observaciones = form_miembro.observaciones.data
            obj_miembro.id_estadocivil = form_miembro.EstadoCivil.data
            obj_miembro.id_tipomiembro = form_miembro.TipoMiembro.data
            obj_miembro.id_rolfamiliar = form_miembro.RolFamiliar.data
            obj_miembro.id_familia = form_miembro.Familia.data
            obj_miembro.id_grupocasero = form_miembro.GrupoCasero.data
            obj_miembro.id_direccion = form_miembro.id_direccion.data
            try:
                # flush the pending updates to the database
                db.session.commit()
                flash('Has guardado los datos correctamente', 'success')
                status = 'ok'
            except Exception as e:
                flash('Error: ' + str(e), 'danger')
                status = 'ko'
            # submitted and validated: client redirects to the list
            url = url_for('miembros.ver_miembros')
            return jsonify(status=status, url=url)
        else:
            # validation error
            status = 'val'
            # NOTE(review): crear_miembro points back at itself here,
            # while this sends the client to the list — confirm intent.
            url = url_for('miembros.ver_miembros')
            er = ""
            for field, errors in form_miembro.errors.items():
                for error in errors:
                    er = er + "Campo: " +\
                         getattr(form_miembro, field).label.text +\
                         " - Error: " +\
                         error + "<br/>"
            return jsonify(status=status, url=url, errors=er)
@miembros.route('/miembros/borrar/<int:id>',
                methods=['GET'])
@login_required
def borrar_miembro(id):
    """Delete the member with the given id, then return to the list view.

    :param id: primary key of the Miembro to delete (404 if missing).
    """
    check_edit_or_admin()
    miembro = Miembro.query.get_or_404(id)
    try:
        db.session.delete(miembro)
        db.session.commit()
        flash('Has borrado los datos correctamente.', 'success')
    except Exception as e:
        # Deletion failed (e.g. FK constraint) — surface the error.
        flash('Error: ' + str(e), 'danger')
    return redirect(url_for('miembros.ver_miembros'))
@miembros.route('/miembros/asignar/rol/listar',
                methods=['GET'])
@login_required
def listar_asignar_roles():
    """
    Show the role-assignment overview: every member with contact data,
    plus a query of (member id, role name, role type) pairs.
    """
    check_edit_or_admin()
    # All role assignments: member id with role name/type.  Outer joins
    # keep roles that are not assigned to anyone.
    roles = db.session.query(Rol).outerjoin(relacion_miembros_roles,
                                            relacion_miembros_roles.c.id_rol ==
                                            Rol.id)\
                      .outerjoin(Miembro,
                                 Miembro.id ==
                                 relacion_miembros_roles.c.id_miembro)\
                      .add_columns(
                            Miembro.id,
                            Rol.nombre_rol,
                            Rol.tipo_rol
                      )
    # Member listing with address and member type for the template.
    query_miembros = db.session.query(Miembro)\
        .outerjoin(Direccion,
                   Miembro.id_direccion ==
                   Direccion.id)\
        .outerjoin(TipoMiembro,
                   Miembro.id_tipomiembro ==
                   TipoMiembro.id)\
        .add_columns(Miembro.id,
                     Miembro.fullname,
                     Miembro.email,
                     Miembro.telefono_fijo,
                     Miembro.telefono_movil,
                     TipoMiembro.nombre_tipomiembro,
                     Direccion.tipo_via,
                     Direccion.nombre_via,
                     Direccion.nro_via,
                     Direccion.portalescalotros_via,
                     Direccion.cp_via,
                     Direccion.ciudad_via,
                     Direccion.provincia_via,
                     Direccion.pais_via)
    return render_template('miembros/listado_asignar_roles.html',
                           informes=query_miembros, roles=roles)
@miembros.route('/miembros/asignar/rol/<string:flag>/<int:id>',
                methods=['GET'])
@login_required
def asignar_roles(flag, id):
    """
    Render the page to assign roles of one type to one member.

    :param flag: role type code — 'R' (responsibility), 'M' (ministry),
                 anything else is treated as a class ('Clase').
    :param id: primary key of the member being edited.
    """
    check_edit_or_admin()
    form = AsignarRolesForm()
    # Human-readable singular/plural labels for the selected role type.
    if flag == 'R':
        tiporol_s = 'Responsabilidad'
        tiporol_p = 'Responsabilidades'
    elif flag == 'M':
        tiporol_s = 'Ministerio'
        tiporol_p = 'Ministerios'
    else:
        tiporol_s = 'Clase'
        tiporol_p = 'Clases'
    # All roles of this type (the selectable options).
    roles = db.session.query(Rol)\
              .filter(Rol.tipo_rol == flag)\
              .add_columns(Rol.id,
                           Rol.nombre_rol).all()
    # The member being edited (raises if the id does not exist).
    persona = db.session.query(Miembro)\
                .filter(Miembro.id == id)\
                .add_columns(Miembro.id,
                             Miembro.fullname)\
                .one()
    # Roles of this type already assigned to the member (pre-selection).
    roles_persona = db.session.query(relacion_miembros_roles)\
                      .join(Rol,
                            Rol.id ==
                            relacion_miembros_roles.c.id_rol)\
                      .join(Miembro,
                            Miembro.id ==
                            relacion_miembros_roles.c.id_miembro)\
                      .filter(Miembro.id == id)\
                      .filter(Rol.tipo_rol == flag)\
                      .add_columns(relacion_miembros_roles.c.id_rol,
                                   relacion_miembros_roles.c.id_miembro)\
                      .all()
    return render_template('miembros/asignar_roles_a_persona.html',
                           roles=roles, persona=persona,
                           roles_persona=roles_persona,
                           tiporol_s=tiporol_s, tiporol_p=tiporol_p,
                           form=form, flag=flag)
@miembros.route('/miembros/asignar/rol/guardar',
                methods=['GET', 'POST'])
@login_required
def guardar_roles():
    """
    Persist the role assignment posted from the assign-roles page.

    Replaces the member's roles of the posted type (``flag_rol``) with
    the role ids submitted in 'preselectedoptions', answering JSON
    {status, url[, errors]} for the AJAX caller.
    """
    check_edit_or_admin()
    form = AsignarRolesForm()
    if request.method == "POST":
        # NOTE(review): is_submitted() skips validators; the other views
        # use validate_on_submit() — confirm this is intentional.
        if form.is_submitted():
            persona = db.session.query(Miembro)\
                        .filter(Miembro.id ==
                                form.id_persona.data).one()
            # Current roles of this type — removed before re-adding.
            objdel = db.session.query(Rol)\
                       .join(relacion_miembros_roles,
                             Rol.id ==
                             relacion_miembros_roles.c.id_rol)\
                       .filter(relacion_miembros_roles.c.id_miembro ==
                               form.id_persona.data)\
                       .filter(Rol.tipo_rol == form.flag_rol.data)\
                       .all()
            ids = request.form.getlist('preselectedoptions')
            # Newly selected roles to attach.
            objadd = db.session.query(Rol)\
                       .filter(Rol.id.in_(ids)).all()
            for o in objdel:
                persona.roles.remove(o)
            # Bug fix: the original called db.session.delete(persona)
            # here, scheduling the member row itself for deletion; it
            # only worked because a later session.add() cancelled it.
            # Mutating persona.roles on a persistent instance is enough
            # — the session already tracks the change.
            for i in objadd:
                persona.roles.append(i)
            try:
                db.session.commit()
                status = 'ok'
            except Exception as e:
                flash('Error: ' + str(e), 'danger')
                status = 'ko'
            url = url_for('miembros.listar_asignar_roles')
            return jsonify(status=status, url=url)
        else:
            # validation error
            status = 'val'
            # Bug fix: url_for('miembros.asignar_roles') raised
            # BuildError because that route requires flag and id;
            # send the client back to the listing instead.
            url = url_for('miembros.listar_asignar_roles')
            er = ""
            for field, errors in form.errors.items():
                for error in errors:
                    er = er + "Campo: " +\
                         getattr(form, field).label.text +\
                         " - Error: " +\
                         error + "<br/>"
            return jsonify(status=status, url=url, errors=er)
| {
"repo_name": "originaltebas/chmembers",
"path": "app/miembros/views.py",
"copies": "1",
"size": "19032",
"license": "mit",
"hash": 6993028745897006000,
"line_mean": 41.056561086,
"line_max": 84,
"alpha_frac": 0.4598287005,
"autogenerated": false,
"ratio": 3.6367284540416587,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4596557154541659,
"avg_score": null,
"num_lines": null
} |
# app/models.py
from app import db
from sqlalchemy import Column, DateTime
from datetime import datetime
class Blog(db.Model):
    """A blog article stored in the ``articles`` table."""

    __tablename__ = "articles"

    id = db.Column(db.Integer, primary_key=True)
    article_title = db.Column(db.String, nullable=False)
    article_author = db.Column(db.String, nullable=False)
    # Defaults to the creation time; not settable via __init__.
    pub_date = db.Column(DateTime, default=datetime.utcnow)
    article_content = db.Column(db.String, nullable=False)

    def __init__(self, title, author, content):
        self.article_title = title
        self.article_author = author
        self.article_content = content

    def __repr__(self):
        # Bug fix: the model has no ``name`` attribute (AttributeError);
        # use the article title instead.
        return 'title {}'.format(self.article_title)
class User(db.Model):
    """An application user account in the ``users`` table."""

    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    username = db.Column(db.String, unique=True, nullable=False)
    first_name = db.Column(db.String, nullable=False)
    last_name = db.Column(db.String, nullable=True)
    email = db.Column(db.String, unique=True, nullable=False)
    # SECURITY: stores the raw password. The original author flagged
    # this as temporary — hash (e.g. werkzeug/bcrypt) before shipping.
    password_plaintext = db.Column(db.String, nullable=False)

    def __init__(self, username, first_name, last_name, email, password_plaintext):
        self.username = username
        self.first_name = first_name
        self.last_name = last_name
        self.email = email
        self.password_plaintext = password_plaintext

    def __repr__(self):
        # Bug fix: the model has no ``name`` attribute (AttributeError);
        # use the username instead.
        return '<User {0}>'.format(self.username)
| {
"repo_name": "weldon0405/weldon-blog",
"path": "app/models.py",
"copies": "1",
"size": "1493",
"license": "mit",
"hash": -1141020366648931200,
"line_mean": 29.4897959184,
"line_max": 114,
"alpha_frac": 0.6644340255,
"autogenerated": false,
"ratio": 3.650366748166259,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.975062722169324,
"avg_score": 0.012834710394603916,
"num_lines": 49
} |
# app/models.py
from colorfield.fields import ColorField
from django.db import models
nullable = {'blank': True, 'null': True}
class SourceFile(models.Model):
    """One source file: its project, location, language and line count."""

    project = models.CharField(max_length=200, **nullable)
    name = models.CharField(max_length=200)
    path = models.CharField(max_length=200)
    language = models.CharField(max_length=12)
    num_lines = models.IntegerField()

    @classmethod
    def projects(cls):
        """Return the distinct project names, sorted alphabetically."""
        # pylint: disable=no-member
        names = (cls.objects.values('project')
                 .distinct()
                 .order_by('project')
                 .values_list('project', flat=True))
        return list(names)
# Example of the ctags-style record these fields were derived from:
# {'kind': 'function', 'end': '384', 'language': 'Python',
# 'access': 'private', 'file': '', 'signature': '(x, y)',
# 'scope': 'function:TestRoutes.invoke.create_app', 'line': '383'}
class Symbol(models.Model):
    """A named definition (symbol) located inside a SourceFile."""
    source_file = models.ForeignKey(SourceFile)
    label = models.CharField(max_length=200)
    line_number = models.IntegerField()
    # Fields present in the record above but not (yet) stored:
    # kind = models.CharField(max_length=12)
    # length = models.IntegerField()
# DEPRECATED: superseded by SourceFile/Symbol above.
class SourceLine(models.Model):
    """Legacy model: one tagged symbol occurrence in a source file."""

    project = models.CharField(max_length=200, **nullable)
    name = models.CharField(max_length=200)
    path = models.CharField(max_length=200)
    line_number = models.IntegerField()
    kind = models.CharField(max_length=12)
    length = models.IntegerField()

    def __unicode__(self):
        return '<{} {} {}:{}>'.format(
            type(self).__name__, self.name,
            self.path, self.line_number)

    @classmethod
    def projects(cls):
        """Return the distinct non-empty project names, sorted."""
        # pylint: disable=no-member
        names = cls.objects.values('project').distinct().values_list(
            'project', flat=True)
        return sorted(name for name in names if name)

    @classmethod
    def paths(cls, project):
        """Return the distinct file paths recorded for *project*."""
        # pylint: disable=no-member
        # X: sort?
        rows = cls.objects.filter(project=project).values('path')
        return rows.distinct().values_list('path', flat=True)
class ProgRadon(models.Model):
    """Complexity measurement attached to one SourceLine."""
    sourceline = models.OneToOneField(
        SourceLine, on_delete=models.CASCADE,
        **nullable)
    # Single-letter block type — presumably radon's F/M/C codes; confirm.
    kind = models.CharField(max_length=1)
    complexity = models.IntegerField()
class ProgPmccabe(models.Model):
    """pmccabe complexity metrics attached to one SourceLine."""
    sourceline = models.OneToOneField(
        SourceLine, on_delete=models.CASCADE,
        **nullable)
    modified_mccabe = models.IntegerField()
    mccabe = models.IntegerField()
    num_statements = models.IntegerField()
    first_line = models.IntegerField()
    num_lines = models.IntegerField()
    definition_line = models.IntegerField()
class Skeleton(models.Model):
    """A colored (x, y) point at a position, linked to a SourceLine."""
    sourceline = models.ForeignKey(SourceLine, on_delete=models.CASCADE,
                                   **nullable)
    position = models.IntegerField()
    x = models.IntegerField()
    y = models.IntegerField()
    color = ColorField()
| {
"repo_name": "johntellsall/shotglass",
"path": "shotglass/app/models.py",
"copies": "1",
"size": "2877",
"license": "mit",
"hash": -9017397917596020000,
"line_mean": 28.0606060606,
"line_max": 72,
"alpha_frac": 0.6478971151,
"autogenerated": false,
"ratio": 3.8308921438082555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9976264006383003,
"avg_score": 0.000505050505050505,
"num_lines": 99
} |
# app/models.py
import datetime
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.sql.functions import sum
from . import bcrypt
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
tag_categories_tags = db.Table('tag_categories_tags',
db.Column('tag_category_id',
db.Integer(),
db.ForeignKey('tag_category.id')
),
db.Column('tag_id', db.Integer(),
db.ForeignKey('tag.id')))
components_tags = db.Table('components_tags',
db.Column('component_id',
db.Integer(),
db.ForeignKey('component.id')),
db.Column('tag_id',
db.Integer(),
db.ForeignKey('tag.id')))
components_pictures = db.Table('components_pictures',
db.Column('component_id',
db.Integer(),
db.ForeignKey('component.id')),
db.Column('picture_id',
db.Integer(),
db.ForeignKey('picture.id')))
class Base(db.Model):
    """Abstract base adding an id plus created/modified timestamps."""
    __abstract__ = True
    id = db.Column(db.Integer, primary_key=True)
    # Set once at insert time (database-side current timestamp).
    date_create = db.Column(db.DateTime, default=db.func.current_timestamp())
    # Refreshed automatically on every UPDATE.
    date_modified = db.Column(db.DateTime,
                              default=db.func.current_timestamp(),
                              onupdate=db.func.current_timestamp())
class User(db.Model):
    """Application account; the password is stored as a bcrypt hash."""
    __tablename__ = "user"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    email = db.Column(db.String(255), unique=True, nullable=False)
    password = db.Column(db.String(255), nullable=False)
    registered_on = db.Column(db.DateTime, nullable=False)
    admin = db.Column(db.Boolean, nullable=False, default=False)
    transactions = db.relationship('Transaction', backref='user',
                                   lazy='joined')
    purchase_orders = db.relationship('PurchaseOrder', backref='user',
                                      lazy='joined')

    def __init__(self, email, password, admin=False):
        """Hash the plaintext password and stamp the registration time."""
        self.email = email
        self.password = bcrypt.generate_password_hash(password)
        self.registered_on = datetime.datetime.now()
        self.admin = admin

    # Flask-Login protocol: every stored account is active and real.
    def is_authenticated(self):
        return True

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        # NOTE(review): Flask-Login documents get_id() as returning a
        # unicode string; this returns the integer PK — confirm.
        return self.id

    def __repr__(self):
        return '<User {0}>'.format(self.email)
class Vendor(db.Model):
    """A supplier with contact details and an optional address."""
    __tablename__ = "vendor"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(120), index=True, unique=True)
    contact = db.Column(db.String(120))
    phone = db.Column(db.String(15))
    website = db.Column(db.String(120))
    address_id = db.Column(db.Integer, db.ForeignKey('address.id'))

    def __repr__(self):
        return '<Vendor {0}>'.format(self.name)

    def as_dict(self):
        """Plain-dict view of the mapped columns (for JSON responses)."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Address(db.Model):
    """A postal address; vendors point here via Vendor.address_id."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    line1 = db.Column(db.String(120))
    line2 = db.Column(db.String(120))
    city = db.Column(db.String(120))
    state = db.Column(db.String(2))
    zipcode = db.Column(db.String(16))
    vendor = db.relationship("Vendor", backref='address')
class PurchaseOrder(db.Model):
    """A purchase order: header data plus derived price totals."""

    __tablename__ = "purchase_order"

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    created_on = db.Column(db.DateTime, nullable=False)
    vendor_id = db.Column(db.Integer, db.ForeignKey('vendor.id'),
                          nullable=False)
    vendor = db.relationship('Vendor', backref="purchase_order",
                             lazy="joined")
    shipping = db.Column(db.Numeric(12, 2), nullable=False, default=0.00)
    tax = db.Column(db.Numeric(12, 2), nullable=False, default=0.00)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'),
                        nullable=False)

    @hybrid_property
    def sub_total(self):
        """Sum of the line-item totals (0 for an empty order)."""
        price = 0
        for line in self.line_item:
            price += line.total_price
        return price

    @hybrid_property
    def total(self):
        """Grand total: line items plus shipping and tax."""
        return self.sub_total + self.shipping + self.tax

    def as_dict(self):
        """Plain-dict view of the mapped columns (for JSON responses)."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}

    def __repr__(self):
        # Bug fix: the original returned dir(self), a list —
        # __repr__ must return a string.
        return '<PurchaseOrder {0}>'.format(self.id)
class VendorComponent(db.Model):
    """A vendor's catalog entry (their SKU) for a component."""
    __tablename__ = "vendor_component"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    sku = db.Column(db.String(5), unique=False, nullable=False)
    description = db.Column(db.String(), nullable=False)
    # NOTE(review): backref='vendor' here shadows the relationship name
    # used elsewhere (Vendor is reached as .vendor on this class too) —
    # confirm the backref name is intended.
    vendor = db.relationship("Vendor", backref='vendor',
                             lazy="joined")
    vendor_id = db.Column(db.Integer, db.ForeignKey('vendor.id'),
                          nullable=False)
class LineItem(db.Model):
    """One line of a purchase order: a quantity of a vendor component."""
    __tablename__ = "line_item"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    quantity = db.Column(db.Integer, nullable=False)
    # Price for the whole line; the per-unit price derives from it below.
    total_price = db.Column(db.Numeric(12, 2), nullable=False)
    purchase_order_id = db.Column(db.Integer,
                                  db.ForeignKey('purchase_order.id'),
                                  nullable=False)
    vendor_component_id= db.Column(db.Integer,
                                   db.ForeignKey('vendor_component.id'),
                                   nullable=False)
    purchase_order = db.relationship("PurchaseOrder", backref='line_item',
                                     lazy="joined")
    vendor_component = db.relationship("VendorComponent", uselist=False)

    @hybrid_property
    def unit_price(self):
        # NOTE(review): raises ZeroDivisionError if quantity is 0 —
        # quantity is nullable=False but never checked against zero.
        return self.total_price / self.quantity

    def as_dict(self):
        """Plain-dict view of the mapped columns (for JSON responses)."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Component(db.Model):
    """An inventory component with tags, pictures and stock movements."""
    __tablename__ = "component"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    sku = db.Column(db.String(5), unique=True, nullable=False)
    description = db.Column(db.String(), nullable=False)
    tags = db.relationship("Tag",
                           secondary=components_tags,
                           backref='component')
    pictures = db.relationship("Picture",
                               secondary=components_pictures,
                               backref='component')

    # @hybrid_property
    @property
    def qty(self):
        """Stock on hand: the sum of all transaction quantities."""
        qty_available = [x.qty for x in self.transactions]
        s = 0
        for i in qty_available:
            s += i
        return s

    def tag_with(self, tag, cat=None):
        """Attach *tag* (optionally under category *cat*) to this component.

        Tag and category names are upper-cased and created on demand;
        commits only when something actually changed.  Returns the Tag
        when a new link/object was created, otherwise None.
        """
        tag = tag.strip().upper()
        if cat: cat = cat.strip().upper()
        tag_obj = Tag.query.filter_by(name=tag).first()
        cat_obj = TagCategory.query.filter_by(name=cat).first() if cat else None
        # Track whether anything needs to be persisted.
        commit = False
        if cat and not cat_obj:
            cat_obj = TagCategory(cat)
            db.session.add(cat_obj)
            commit = True
        if not tag_obj:
            tag_obj = Tag(tag)
            db.session.add(tag_obj)
            commit = True
        if cat_obj and tag_obj not in cat_obj.tags:
            cat_obj.tags.append(tag_obj)
            commit = True
        if tag_obj not in self.tags:
            self.tags.append(tag_obj)
            commit = True
        if commit:
            db.session.commit()
            return tag_obj
        # Nothing changed: the tag was already attached.
        return None

    def remove_tag(self, tag):
        """Detach the tag named *tag* from this component (if present)."""
        for idx, tag_obj in enumerate(self.tags):
            if tag_obj.name == tag:
                self.tags.pop(idx)
                break
        db.session.commit()
        return self
class Transaction(Base):
    """A stock movement for a component (qty may be +/-), by a user."""
    __tablename__ = "transaction"
    component_id = db.Column(db.Integer, db.ForeignKey('component.id'),
                             nullable=False)
    component = db.relationship("Component", backref="transactions")
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    notes = db.Column(db.String(40))
    qty = db.Column(db.Integer, nullable=False)
class TagCategory(db.Model):
    """A grouping of tags (many-to-many via tag_categories_tags)."""
    __tablename__ = "tag_category"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(25), unique=True, nullable=False)
    tags = db.relationship("Tag",
                           secondary=tag_categories_tags,
                           backref='categories')

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        # NOTE(review): a relationship list is never None, so the
        # ``self.tags is None`` guard looks unreachable — confirm.
        return "Category Name: %s, Tags: %s" % (self.name, str(
            0) if self.tags is None else ",".join([x.name for x in self.tags]))
class Tag(db.Model):
    """A label attachable to components and groupable into categories."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(25), unique=True, nullable=False)

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        # NOTE(review): the ``self.categories is None`` guard looks
        # unreachable (relationship lists are never None) — confirm.
        return "%s in: %s" % (self.name, str(
            None) if self.categories is None else ",".join([x.name for x in self.categories]))
class TagManager():
    """Helper for creating tags/categories outside a component context."""

    @staticmethod
    def new_tag(tag, cat=None):
        """Get-or-create a Tag (optionally linked to category *cat*).

        Names are upper-cased; commits only when something changed.
        Always returns the Tag object.

        NOTE(review): this duplicates most of Component.tag_with —
        consider sharing one implementation.
        """
        tag = tag.strip().upper()
        if cat: cat = cat.strip().upper()
        cat_obj = TagCategory.query.filter_by(name=cat).first() if cat else None
        tag_obj = Tag.query.filter_by(name=tag).first()
        # Track whether anything needs to be persisted.
        commit = False
        if not cat_obj and cat:
            cat_obj = TagCategory(cat)
            db.session.add(cat_obj)
            commit = True
        if not tag_obj:
            tag_obj = Tag(tag)
            db.session.add(tag_obj)
            commit = True
        if cat_obj and tag_obj not in cat_obj.tags:
            cat_obj.tags.append(tag_obj)
            commit = True
        if commit: db.session.commit()
        return tag_obj
class Picture(db.Model):
    """An image file linkable to components (via components_pictures)."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    filename = db.Column(db.String(25), unique=True, nullable=False)
    components = db.relationship("Component",
                                 secondary=components_pictures,
                                 backref='picture')

    def __repr__(self):
        components_list = ','.join([x.sku for x in self.components])
        return "<Picture: filename:%s, for items(%s): [%s] >" % (
            self.filename,
            len(self.components),
            components_list) | {
"repo_name": "cisko3000/flask-inventory",
"path": "app/models.py",
"copies": "2",
"size": "10733",
"license": "mit",
"hash": 4303363038070691300,
"line_mean": 33.7378640777,
"line_max": 94,
"alpha_frac": 0.557439672,
"autogenerated": false,
"ratio": 3.9722427831236122,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5529682455123612,
"avg_score": null,
"num_lines": null
} |
"""app/models.py: Tutorial IV - Databases.
database models: collection of classes whose purpose is to represent the
data that we will store in our database.
The ORM layer (SQLAlchemy) will do the translations required to map
objects created from these classes into rows in the proper database table.
- ORM: Object Relational Mapper; links b/w tables corresp. to objects.
"""
from deepchat import db
import json
class User(db.Model):
    """A model that represents our users.

    Jargon/Parameters:
        - primary key: unique id given to each user.
        - db.Column parameter info:
            - index=True: allows for faster queries by associating a given
              column with its own index. Use for values frequently looked up.
            - unique=True: don't allow duplicate values in this column.

    Fields:
        id: (db.Integer) primary_key for identifying a user in the table.
        name: (str) unique display name.
        conversations: (db.relationship) this user's conversations.
    """
    # Fields are defined as class variables, but are used by super() in init.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), index=True, unique=True)
    # Relationships are not actual database fields (not shown on a db diagram).
    # - backref: *defines* a 'user' field added to Conversation instances
    #   that points back to this user.
    # - lazy='dynamic': instead of loading the items, return another query
    #   object which we can refine before loading items.
    conversations = db.relationship('Conversation', backref='user', lazy='dynamic')

    def __repr__(self):
        return "<User {0}>".format(self.name)
class Chatbot(db.Model):
    """Chatbot. Fields mirror the yaml config files."""

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), index=True, unique=True)  # TODO: make unique?
    dataset = db.Column(db.String(64))
    base_cell = db.Column(db.String(64))
    encoder = db.Column(db.String(64))
    decoder = db.Column(db.String(64))
    learning_rate = db.Column(db.Float)
    num_layers = db.Column(db.Integer)
    state_size = db.Column(db.Integer)
    conversations = db.relationship('Conversation', backref='chatbot', lazy='dynamic')

    def __init__(self, name, **bot_kwargs):
        """Build a chatbot row from its config keyword arguments.

        Raises KeyError if any expected config key is missing.
        """
        self.name = (name or 'Unknown Bot')
        self.dataset = bot_kwargs['dataset']
        self.base_cell = bot_kwargs['base_cell']
        self.encoder = bot_kwargs['encoder']
        self.decoder = bot_kwargs['decoder']
        self.learning_rate = bot_kwargs['learning_rate']
        self.num_layers = bot_kwargs['num_layers']
        self.state_size = bot_kwargs['state_size']

    def __repr__(self):
        # Bug fix: json.dumps() wrapped the repr in extra quotes
        # ('"<Chatbot x>"'); return the plain string like the sibling
        # models do.
        return '<Chatbot {0}>'.format(self.name)
class Conversation(db.Model):
    """A chat session between one User and one Chatbot, made of Turns."""
    id = db.Column(db.Integer, primary_key=True)
    start_time = db.Column(db.DateTime, index=True, unique=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    chatbot_id = db.Column(db.Integer, db.ForeignKey('chatbot.id'))
    turns = db.relationship('Turn', backref='conversation', lazy='dynamic')

    def __repr__(self):
        return '<Conversation between {0} and {1}>'.format(self.user_id, self.chatbot_id)
class Turn(db.Model):
    """One exchange in a conversation: a user message and the bot reply."""
    id = db.Column(db.Integer, primary_key=True)
    user_message = db.Column(db.Text)
    chatbot_message = db.Column(db.Text)
    conversation_id = db.Column(db.Integer, db.ForeignKey('conversation.id'))

    def __repr__(self):
        return 'User: {0}\nChatBot: {1}'.format(
            self.user_message, self.chatbot_message)
| {
"repo_name": "mckinziebrandon/DeepChatModels",
"path": "webpage/deepchat/models.py",
"copies": "1",
"size": "3794",
"license": "mit",
"hash": -4557536129941017000,
"line_mean": 38.5208333333,
"line_max": 89,
"alpha_frac": 0.651555087,
"autogenerated": false,
"ratio": 3.6621621621621623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48137172491621627,
"avg_score": null,
"num_lines": null
} |
# Per-package identity constants — the only section meant to be edited;
# everything below is shared boilerplate for all Divio django app packages.
APP_NAME = 'cronjobs'
PACKAGE_NAME = 'django-%s' % APP_NAME
DESCRIPTION = 'django cronjobs app'
PROJECT_URL = 'http://github.com/divio/%s/' % PACKAGE_NAME
AUTHOR="Patrick Lauber"
EXTRA_CLASSIFIERS = []
INSTALL_REQUIRES = []
VERSION = '0.1.7'
# DO NOT EDIT ANYTHING DOWN HERE... this should be common to all django app packages
from setuptools import setup, find_packages
import os
# Trove classifiers shared by all releases of this package.
classifiers = [
    'Environment :: Web Environment',
    'Framework :: Django',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: BSD License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    'Topic :: Software Development',
    'Topic :: Software Development :: Libraries :: Application Frameworks',
]

# Derive the development status from the version string:
# 'a' marks an alpha, 'b' a beta, neither means production/stable.
# (Idiom fix: 'x in s' / if-elif-else instead of 'not ... in' chains.)
if 'a' in VERSION:
    classifiers.append('Development Status :: 3 - Alpha')
elif 'b' in VERSION:
    classifiers.append('Development Status :: 4 - Beta')
else:
    classifiers.append('Development Status :: 5 - Production/Stable')

# Merge any package-specific classifiers, avoiding duplicates.
for c in EXTRA_CLASSIFIERS:
    if c not in classifiers:
        classifiers.append(c)
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Bug fix: the original leaked the file handle; the context manager
    closes it deterministically.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as f:
        return f.read()
setup(
    author=AUTHOR,
    name=PACKAGE_NAME,
    version=VERSION,
    url=PROJECT_URL,
    description=DESCRIPTION,
    long_description=read('README.rst') + '\n\n\n' + read('HISTORY'),
    platforms=['OS Independent'],
    classifiers=classifiers,
    # Bug fix: ``requires`` is legacy distutils metadata and does not
    # install anything; ``install_requires`` is the setuptools keyword
    # that pip actually acts on.
    install_requires=INSTALL_REQUIRES,
    packages=find_packages(),
    zip_safe=False
)
"repo_name": "divio/django-cronjobs",
"path": "setup.py",
"copies": "1",
"size": "1620",
"license": "bsd-3-clause",
"hash": 1135350465895205100,
"line_mean": 28.4727272727,
"line_max": 111,
"alpha_frac": 0.6802469136,
"autogenerated": false,
"ratio": 3.6902050113895215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4870451924989521,
"avg_score": null,
"num_lines": null
} |
# Distribution metadata for the FlyingRobots game, consumed by packaging
# and the in-game about/help screens.
app_name = 'flying-robots'
version = '0.2.0'
description = 'A three-dimensional clone of the classic bsd-robots game.'
author = 'Alan Bunbury'
homepage_url = 'http://bunburya.github.com/FlyingRobots'
download_url = 'https://github.com/bunburya/FlyingRobots'
license_name = 'MIT'
# Full MIT license text; the {} placeholder is filled with the author
# name by the .format(author) call at the end of the literal.
license_text = """Copyright (C) 2012 {}
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
""".format(author)
| {
"repo_name": "bunburya/FlyingRobots",
"path": "flying_robots/metadata.py",
"copies": "1",
"size": "1362",
"license": "mit",
"hash": 1465222703128971300,
"line_mean": 49.4444444444,
"line_max": 79,
"alpha_frac": 0.7856093979,
"autogenerated": false,
"ratio": 4.041543026706232,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 27
} |
# appname/forms.py
from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from registerApp.models import CustomUser
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
class SignupForm(forms.Form):
    """Signup details; the plan must be one of Free, Gold or Platinum."""

    email = forms.EmailField(label='Email', required=True)
    plan = forms.CharField(label='Plan', required=True)
    first_name = forms.CharField(label='first name', required=True)
    last_name = forms.CharField(label='last name', required=True)

    def clean(self):
        """Reject any plan value outside the three supported tiers."""
        # Idiom fix: membership test instead of three chained != checks
        # (behavior unchanged, including rejection of a missing plan).
        if self.cleaned_data.get('plan') not in ('Free', 'Gold', 'Platinum'):
            raise ValidationError(
                "Invalid Plan value entered"
            )
        return self.cleaned_data

    class Meta:
        # NOTE(review): forms.Form ignores Meta.model (a ModelForm
        # feature) — confirm whether ModelForm was intended here.
        model = get_user_model()

    def save(self, user):
        """Copy the cleaned fields onto *user* and persist it."""
        try:
            user.email = self.cleaned_data['email']
            user.plan = self.cleaned_data['plan']
            user.first_name = self.cleaned_data['first_name']
            user.last_name = self.cleaned_data['last_name']
            user.save()
        except ValidationError:
            # NOTE(review): Model.save() does not normally raise
            # ValidationError — confirm this branch is reachable.
            return 'Invalid Plan value entered'
class CustomUserCreationForm(UserCreationForm):
    """
    A form that creates a user, with no privileges, from the given email and
    password.
    """
    def __init__(self, *args, **kargs):
        super(CustomUserCreationForm, self).__init__(*args, **kargs)
        # del self.fields['username']

    class Meta:
        # Only the email is collected here; the password fields come
        # from the UserCreationForm base class.
        model = CustomUser
        fields = ("email",)
class CustomUserChangeForm(UserChangeForm):
    """Update form for CustomUser exposing every model field; the password
    field is rendered as the admin's read-only hash display."""

    class Meta:
        model = CustomUser
        fields = "__all__"

    def __init__(self, *args, **kwargs):
        super(CustomUserChangeForm, self).__init__(*args, **kwargs)
        # del self.fields['username']
| {
"repo_name": "siddver007/project_django_allauth_slate_razorpay",
"path": "registerApp/forms.py",
"copies": "1",
"size": "2075",
"license": "mit",
"hash": -7088288106352688000,
"line_mean": 29.9701492537,
"line_max": 143,
"alpha_frac": 0.6308433735,
"autogenerated": false,
"ratio": 4.117063492063492,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0031625392664623754,
"num_lines": 67
} |
# Frappe app metadata and hook registrations (4.0.0 work-in-progress).
# Purely declarative: the framework imports this module and resolves the
# dotted-path strings at install / request / scheduler time.
app_name = "frappe"
app_title = "Frappe Framework"
app_publisher = "Web Notes Technologies Pvt. Ltd. and Contributors"
# Fixed typo "Framwork" -> "Framework" (matches the later-release hooks files).
app_description = "Full Stack Web Application Framework in Python"
app_icon = "assets/frappe/images/frappe.svg"
app_version = "4.0.0-wip"
app_color = "#3498db"

# Install lifecycle hooks.
before_install = "frappe.utils.install.before_install"
after_install = "frappe.utils.install.after_install"

# website
# Bundled assets injected into desk ("app") and public website ("web") pages.
app_include_js = "assets/js/frappe.min.js"
app_include_css = [
    "assets/frappe/css/splash.css",
    "assets/css/frappe.css"
]
web_include_js = [
    "assets/js/frappe-web.min.js",
    "website_script.js"
]
web_include_css = [
    "assets/css/frappe-web.css",
    "style_settings.css"
]

website_clear_cache = "frappe.templates.generators.website_group.clear_cache"
write_file_keys = ["file_url", "file_name"]
notification_config = "frappe.core.notifications.get_notification_config"
before_tests = "frappe.utils.install.before_tests"

# permissions
# Per-DocType hooks restricting queries / record access for the current user.
permission_query_conditions = {
    "Event": "frappe.core.doctype.event.event.get_permission_query_conditions",
    "ToDo": "frappe.core.doctype.todo.todo.get_permission_query_conditions"
}
has_permission = {
    "Event": "frappe.core.doctype.event.event.has_permission",
    "ToDo": "frappe.core.doctype.todo.todo.has_permission"
}

# bean
# Document lifecycle callbacks; "*" applies to every DocType.
doc_events = {
    "*": {
        "on_update": "frappe.core.doctype.notification_count.notification_count.clear_doctype_notifications",
        "on_cancel": "frappe.core.doctype.notification_count.notification_count.clear_doctype_notifications",
        "on_trash": "frappe.core.doctype.notification_count.notification_count.clear_doctype_notifications"
    },
    "User Vote": {
        "after_insert": "frappe.templates.generators.website_group.clear_cache_on_doc_event"
    },
    "Website Route Permission": {
        "on_update": "frappe.templates.generators.website_group.clear_cache_on_doc_event"
    }
}

# Background jobs grouped by scheduler frequency.
scheduler_events = {
    "all": ["frappe.utils.email_lib.bulk.flush"],
    "daily": [
        "frappe.utils.email_lib.bulk.clear_outbox",
        "frappe.core.doctype.notification_count.notification_count.delete_event_notification_count",
        "frappe.core.doctype.event.event.send_event_digest",
        "frappe.sessions.clear_expired_sessions",
    ],
    "hourly": [
        "frappe.templates.generators.website_group.clear_event_cache"
    ]
}
| {
"repo_name": "cadencewatches/frappe",
"path": "frappe/hooks.py",
"copies": "1",
"size": "2265",
"license": "mit",
"hash": -7576793671107759000,
"line_mean": 29.6081081081,
"line_max": 104,
"alpha_frac": 0.7311258278,
"autogenerated": false,
"ratio": 2.9530638852672753,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.8907244796579958,
"avg_score": 0.05538898329746329,
"num_lines": 74
} |
# Frappe app metadata and hook registrations for release 4.4.1.
# Purely declarative: the framework imports this module and resolves the
# dotted-path strings at install / request / scheduler time.
app_name = "frappe"
app_title = "Frappe Framework"
app_publisher = "Web Notes Technologies Pvt. Ltd."
app_description = "Full Stack Web Application Framework in Python"
app_icon = "assets/frappe/images/frappe.svg"
app_version = "4.4.1"
app_color = "#3498db"
app_email = "support@frappe.io"

# Install lifecycle hooks.
before_install = "frappe.utils.install.before_install"
after_install = "frappe.utils.install.after_install"

# website
# Bundled assets injected into desk ("app") and public website ("web") pages.
app_include_js = "assets/js/frappe.min.js"
app_include_css = [
    "assets/frappe/css/splash.css",
    "assets/css/frappe.css"
]
web_include_js = [
    "assets/js/frappe-web.min.js",
    "website_script.js"
]
web_include_css = [
    "assets/css/frappe-web.css",
    "style_settings.css"
]

website_clear_cache = "frappe.website.doctype.website_group.website_group.clear_cache"
write_file_keys = ["file_url", "file_name"]
notification_config = "frappe.core.notifications.get_notification_config"
before_tests = "frappe.utils.install.before_tests"
# DocTypes whose documents generate public website routes.
website_generators = ["Web Page", "Blog Post", "Website Group", "Blog Category", "Web Form"]

# permissions
# Per-DocType hooks restricting queries / record access for the current user.
permission_query_conditions = {
    "Event": "frappe.core.doctype.event.event.get_permission_query_conditions",
    "ToDo": "frappe.core.doctype.todo.todo.get_permission_query_conditions",
    "User": "frappe.core.doctype.user.user.get_permission_query_conditions"
}
has_permission = {
    "Event": "frappe.core.doctype.event.event.has_permission",
    "ToDo": "frappe.core.doctype.todo.todo.has_permission",
    "User": "frappe.core.doctype.user.user.has_permission"
}

# Document lifecycle callbacks; "*" applies to every DocType.
doc_events = {
    "*": {
        "after_insert": "frappe.core.doctype.email_alert.email_alert.trigger_email_alerts",
        "validate": "frappe.core.doctype.email_alert.email_alert.trigger_email_alerts",
        "on_update": [
            "frappe.core.doctype.notification_count.notification_count.clear_doctype_notifications",
            "frappe.core.doctype.email_alert.email_alert.trigger_email_alerts"
        ],
        "after_rename": "frappe.core.doctype.notification_count.notification_count.clear_doctype_notifications",
        "on_submit": "frappe.core.doctype.email_alert.email_alert.trigger_email_alerts",
        "on_cancel": [
            "frappe.core.doctype.notification_count.notification_count.clear_doctype_notifications",
            "frappe.core.doctype.email_alert.email_alert.trigger_email_alerts"
        ],
        "on_trash": "frappe.core.doctype.notification_count.notification_count.clear_doctype_notifications"
    },
    "Website Route Permission": {
        "on_update": "frappe.website.doctype.website_group.website_group.clear_cache_on_doc_event"
    }
}

# Background jobs grouped by scheduler frequency.
scheduler_events = {
    "all": ["frappe.utils.email_lib.bulk.flush"],
    "daily": [
        "frappe.utils.email_lib.bulk.clear_outbox",
        "frappe.core.doctype.notification_count.notification_count.clear_notifications",
        "frappe.core.doctype.event.event.send_event_digest",
        "frappe.sessions.clear_expired_sessions",
        "frappe.core.doctype.email_alert.email_alert.trigger_daily_alerts",
    ],
    "hourly": [
        "frappe.website.doctype.website_group.website_group.clear_event_cache"
    ]
}

# Hook returning the standard footer appended to outgoing mail.
mail_footer = "frappe.core.doctype.outgoing_email_settings.outgoing_email_settings.get_mail_footer"
| {
"repo_name": "rohitwaghchaure/frappe-alec",
"path": "frappe/hooks.py",
"copies": "1",
"size": "3073",
"license": "mit",
"hash": -6850745078752930000,
"line_mean": 34.3218390805,
"line_max": 106,
"alpha_frac": 0.742271396,
"autogenerated": false,
"ratio": 3.0009765625,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.42432479585,
"avg_score": null,
"num_lines": null
} |
# LetzERP (frappe fork) app metadata and hook registrations, v5.0.0-alpha.
# Purely declarative: the framework imports this module and resolves the
# dotted-path strings at install / session / scheduler / doc-event time.
app_name = "frappe"
# app_title = "Frappe Framework"
app_title = "LetzERP"
# app_publisher = "Web Notes Technologies Pvt. Ltd."
app_publisher = "LetzERP Pvt. Ltd."
app_description = "Full Stack Web Application Framework in Python"
app_icon = "assets/frappe/images/LetzERP.svg"
app_version = "5.0.0-alpha"
app_color = "#3498db"
app_email = "support@frappe.io"

# Install lifecycle hooks.
before_install = "frappe.utils.install.before_install"
after_install = "frappe.utils.install.after_install"

# website
# Bundled assets injected into desk ("app") and public website ("web") pages.
app_include_js = "assets/js/frappe.min.js"
app_include_css = [
    "assets/frappe/css/splash.css",
    "assets/css/frappe.css"
]
web_include_js = [
    "assets/js/frappe-web.min.js",
    "website_script.js"
]
web_include_css = [
    "assets/css/frappe-web.css",
    "style_settings.css"
]

write_file_keys = ["file_url", "file_name"]
notification_config = "frappe.core.notifications.get_notification_config"
before_tests = "frappe.utils.install.before_tests"
# DocTypes whose documents generate public website routes.
website_generators = ["Web Page", "Blog Post", "Blog Category", "Web Form"]

# login
# Hook run right after a desk session is created.
on_session_creation = "frappe.desk.doctype.feed.feed.login_feed"

# permissions
# Per-DocType hooks restricting queries / record access for the current user.
permission_query_conditions = {
    "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
    "ToDo": "frappe.desk.doctype.todo.todo.get_permission_query_conditions",
    "User": "frappe.core.doctype.user.user.get_permission_query_conditions",
    "Feed": "frappe.desk.doctype.feed.feed.get_permission_query_conditions",
    "Note": "frappe.desk.doctype.note.note.get_permission_query_conditions"
}
has_permission = {
    "Event": "frappe.desk.doctype.event.event.has_permission",
    "ToDo": "frappe.desk.doctype.todo.todo.has_permission",
    "User": "frappe.core.doctype.user.user.has_permission",
    "Feed": "frappe.desk.doctype.feed.feed.has_permission",
    "Note": "frappe.desk.doctype.note.note.has_permission"
}

# Document lifecycle callbacks; "*" applies to every DocType.
doc_events = {
    "*": {
        "after_insert": "frappe.email.doctype.email_alert.email_alert.trigger_email_alerts",
        "validate": "frappe.email.doctype.email_alert.email_alert.trigger_email_alerts",
        "on_update": [
            "frappe.desk.notifications.clear_doctype_notifications",
            "frappe.email.doctype.email_alert.email_alert.trigger_email_alerts",
            "frappe.desk.doctype.feed.feed.update_feed"
        ],
        "after_rename": "frappe.desk.notifications.clear_doctype_notifications",
        "on_submit": [
            "frappe.email.doctype.email_alert.email_alert.trigger_email_alerts",
            "frappe.desk.doctype.feed.feed.update_feed"
        ],
        "on_cancel": [
            "frappe.desk.notifications.clear_doctype_notifications",
            "frappe.email.doctype.email_alert.email_alert.trigger_email_alerts"
        ],
        "on_trash": "frappe.desk.notifications.clear_doctype_notifications"
    }
}

# Background jobs grouped by scheduler frequency.
scheduler_events = {
    "all": [
        "frappe.email.bulk.flush",
        "frappe.email.doctype.email_account.email_account.pull"
    ],
    "daily": [
        "frappe.email.bulk.clear_outbox",
        "frappe.desk.notifications.clear_notifications",
        "frappe.desk.doctype.event.event.send_event_digest",
        "frappe.sessions.clear_expired_sessions",
        "frappe.email.doctype.email_alert.email_alert.trigger_daily_alerts",
    ]
}
| {
"repo_name": "gangadharkadam/letzfrappe",
"path": "frappe/hooks.py",
"copies": "1",
"size": "3050",
"license": "mit",
"hash": 1983688000791899600,
"line_mean": 31.4468085106,
"line_max": 86,
"alpha_frac": 0.728852459,
"autogenerated": false,
"ratio": 2.913085004775549,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.4141937463775549,
"avg_score": null,
"num_lines": null
} |
# letzERP (frappe fork) app metadata and hook registrations, v5.0.0-alpha.
# Purely declarative: the framework imports this module and resolves the
# dotted-path strings at install / session / scheduler / doc-event time.
app_name = "frappe"
# app_title = "Frappe Framework"
app_title = "letzERP"
# app_publisher = "Web Notes Technologies Pvt. Ltd."
app_publisher = "letzERP Pvt. Ltd."
app_description = "Full Stack Web Application Framework in Python"
app_icon = "assets/frappe/images/LetzERP.svg"
app_version = "5.0.0-alpha"
app_color = "#3498db"
app_email = "support@frappe.io"

# Install lifecycle hooks.
before_install = "frappe.utils.install.before_install"
after_install = "frappe.utils.install.after_install"

# website
# Bundled assets injected into desk ("app") and public website ("web") pages.
app_include_js = "assets/js/frappe.min.js"
app_include_css = [
    "assets/frappe/css/splash.css",
    "assets/css/frappe.css"
]
web_include_js = [
    "assets/js/frappe-web.min.js",
    "website_script.js"
]
web_include_css = [
    "assets/css/frappe-web.css",
    "style_settings.css"
]

write_file_keys = ["file_url", "file_name"]
notification_config = "frappe.core.notifications.get_notification_config"
before_tests = "frappe.utils.install.before_tests"
# DocTypes whose documents generate public website routes.
website_generators = ["Web Page", "Blog Post", "Blog Category", "Web Form"]

# login
# Hook run right after a desk session is created.
on_session_creation = "frappe.desk.doctype.feed.feed.login_feed"

# permissions
# Per-DocType hooks restricting queries / record access for the current user.
permission_query_conditions = {
    "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
    "ToDo": "frappe.desk.doctype.todo.todo.get_permission_query_conditions",
    "User": "frappe.core.doctype.user.user.get_permission_query_conditions",
    "Feed": "frappe.desk.doctype.feed.feed.get_permission_query_conditions",
    "Note": "frappe.desk.doctype.note.note.get_permission_query_conditions"
}
has_permission = {
    "Event": "frappe.desk.doctype.event.event.has_permission",
    "ToDo": "frappe.desk.doctype.todo.todo.has_permission",
    "User": "frappe.core.doctype.user.user.has_permission",
    "Feed": "frappe.desk.doctype.feed.feed.has_permission",
    "Note": "frappe.desk.doctype.note.note.has_permission"
}

# Document lifecycle callbacks; "*" applies to every DocType.
doc_events = {
    "*": {
        "after_insert": "frappe.email.doctype.email_alert.email_alert.trigger_email_alerts",
        "validate": "frappe.email.doctype.email_alert.email_alert.trigger_email_alerts",
        "on_update": [
            "frappe.desk.notifications.clear_doctype_notifications",
            "frappe.email.doctype.email_alert.email_alert.trigger_email_alerts",
            "frappe.desk.doctype.feed.feed.update_feed"
        ],
        "after_rename": "frappe.desk.notifications.clear_doctype_notifications",
        "on_submit": [
            "frappe.email.doctype.email_alert.email_alert.trigger_email_alerts",
            "frappe.desk.doctype.feed.feed.update_feed"
        ],
        "on_cancel": [
            "frappe.desk.notifications.clear_doctype_notifications",
            "frappe.email.doctype.email_alert.email_alert.trigger_email_alerts"
        ],
        "on_trash": "frappe.desk.notifications.clear_doctype_notifications"
    }
}

# Background jobs grouped by scheduler frequency.
scheduler_events = {
    "all": [
        "frappe.email.bulk.flush",
        "frappe.email.doctype.email_account.email_account.pull"
    ],
    "daily": [
        "frappe.email.bulk.clear_outbox",
        "frappe.desk.notifications.clear_notifications",
        "frappe.desk.doctype.event.event.send_event_digest",
        "frappe.sessions.clear_expired_sessions",
        "frappe.email.doctype.email_alert.email_alert.trigger_daily_alerts",
    ]
}
| {
"repo_name": "letzerp/framework",
"path": "frappe/hooks.py",
"copies": "1",
"size": "3050",
"license": "mit",
"hash": -2013207959912437200,
"line_mean": 31.4468085106,
"line_max": 86,
"alpha_frac": 0.728852459,
"autogenerated": false,
"ratio": 2.913085004775549,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.8943970683454794,
"avg_score": 0.03959335606415089,
"num_lines": 94
} |
# LXDUI application metadata and packaging constants (read by setup tooling
# and the app at runtime).
APP_NAME = 'LXDUI'
APP_CLI_CMD = 'lxdui'
VERSION = '2.1.2'
GIT_URL = 'https://github.com/AdaptiveScale/lxdui.git'
LXD_URL = 'http://localhost:8443'
LICENSE = 'Apache 2.0'
AUTHOR = 'AdaptiveScale, Inc.'
AUTHOR_URL = 'http://www.adaptivescale.com'
AUTHOR_EMAIL = 'info@adaptivescale.com'
KEYWORDS = 'lxc lxc-containers lxd'
'''
The following section is for the default configuration
that will be written to the lxdui.conf file if the file
does not already exist.
'''
# When True, __default_config__ below is written out if lxdui.conf is missing.
AUTO_LOAD_CONFIG = True
DEFAULT_CONFIG_FORMAT = 'ini'
# Template for conf/lxdui.conf.  {{app_root}} is substituted by the app;
# ${...} references are ini-style interpolation resolved at load time.
# The string content is data and must not be reformatted.
__default_config__ = """
[LXDUI]
lxdui.port = 15151
lxdui.images.remote = https://images.linuxcontainers.org
lxdui.jwt.token.expiration = 1200
lxdui.jwt.secret.key = AC8d83&21Almnis710sds
lxdui.jwt.auth.url.rule = /api/user/login
lxdui.admin.user = admin
lxdui.conf.dir = {{app_root}}/conf
lxdui.conf.file = ${lxdui.conf.dir}/lxdui.conf
lxdui.auth.conf = ${lxdui.conf.dir}/auth.conf
lxdui.ssl.cert = ${lxdui.conf.dir}/client.crt
lxdui.ssl.key = ${lxdui.conf.dir}/client.key
lxdui.log.dir = {{app_root}}/logs
lxdui.log.file = ${lxdui.log.dir}/lxdui.log
lxdui.log.conf = ${lxdui.conf.dir}/log.conf
#lxdui.log.rotate = true
#lxdui.log.max = 10M
#lxdui.log.keep.generations = 5
lxdui.profiles = ${lxdui.conf.dir}/profiles
lxdui.zfs.pool.name = lxdpool
lxdui.app.alias = LXDUI
lxdui.cli = cli
[LXDUI_CERT]
lxdui.cert.country = US
lxdui.cert.state = Texas
lxdui.cert.locale = Dallas
lxdui.cert.org = AdaptiveScale, Inc.
lxdui.cert.ou = OU=AdaptiveScale, DN=com
[LXD]
lxd.bridge.enabled = true
lxd.bridge.name = lxdbr0
lxd.dns.conf.file =
lxd.dns.domain = lxd
lxd.ipv4.addr = 10.5.5.1
lxd.ipv4.netmask = 255.255.255.0
lxd.ipv4.network = 10.5.5.0/24
lxd.ipv4.dhcp.range = 253
lxd.ipv4.dhcp.max = 10.5.5.2,10.5.5.254
lxd.ipv4.nat = true
lxd.ipv6.addr = 2001:470:b368:4242::1
lxd.ipv6.mask = 255.255.255.0
lxd.ipv6.network = 2001:470:b368:4242::/64
lxd.ipv6.nat = false
lxd.ipv6.proxy = false
"""
| {
"repo_name": "AdaptiveScale/lxdui",
"path": "app/__metadata__.py",
"copies": "1",
"size": "1925",
"license": "apache-2.0",
"hash": -4546222546446831600,
"line_mean": 26.8985507246,
"line_max": 56,
"alpha_frac": 0.7153246753,
"autogenerated": false,
"ratio": 2.264705882352941,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3480030557652941,
"avg_score": null,
"num_lines": null
} |
# Static couch-based indicator definitions for births in the MVP project.
# Each leaf dict configures one indicator: description/title are report
# labels; indicator_key selects the matching couch-view rows.
APP_NAME = "mvp_births"

BIRTH_INDICATORS = dict(
    app=APP_NAME,
    indicators=dict(
        child_cases_by_status=dict(
            num_births_registered=dict(
                description="No. of Births Registered",
                title="# Births Registered",
                indicator_key="dob delivered"
            ),
            num_births_registered_in_facility=dict(
                description="No. of Births delivered in a Health Facility during the time period",
                title="# Births delivered in Health Facility",
                indicator_key="dob delivered_in_facility"
            ),
        ),
    )
)
# No unique-count birth indicators are currently defined; kept as an empty
# scaffold so downstream indicator loaders still find the expected keys.
COUNT_UNIQUE_BIRTH_INDICATORS = {
    "app": APP_NAME,
    "indicators": {},
}
# These indicators use MVPChildCasesByAgeIndicatorDefinition
# Child-case counts bucketed by age window.  min/max_age_in_days bound the
# child's age; fixed_datespan_* overrides the report datespan;
# show_active_only=False also counts closed cases.
# NOTE: "occured"/"occured_on" spellings are data keys consumed elsewhere —
# do not correct them.
ACTIVE_CHILD_CASES_BY_AGE_INDICATORS = dict(
    app=APP_NAME,
    indicators=dict(
        child_cases_by_status=dict(
            under1_cases=dict(
                description="No. of children Under 1 year of age.",
                title="# Under-1s",
                indicator_key="",
                max_age_in_days=365,
            ),
            under1_cases_6weeks=dict(
                description="No. of children Under 1 year of age.",
                title="# Under-1s",
                indicator_key="",
                fixed_datespan_days=42,
                max_age_in_days=365,
            ),
            num_births_occured=dict(
                description="Number of births that occured during the time period.",
                title="# Births",
                max_age_in_days=31,
                show_active_only=False,
                indicator_key="occured_on"
            ),
            num_births_recorded=dict(
                description="Number of births recorded during the time period.",
                title="# Births",
                max_age_in_days=31,
                show_active_only=False,
                indicator_key="opened_on"
            ),
            num_children_6to59months=dict(
                description="No. of Children 6 to 59 Months of Age during this timespan",
                title="# Under-5s 6-59 Months",
                max_age_in_days=1825,
                min_age_in_days=180,
                indicator_key="",
            ),
            under5_cases_30days=dict(
                description="No. of Under-5 Children in the past 30 days",
                title="# Under-5s",
                max_age_in_days=1825,
                indicator_key="",
                fixed_datespan_months=1,
            ),
            neonate_cases_7days=dict(
                description="No. of Neonate Newborns in the past 7 days",
                title="# Under-5s",
                max_age_in_days=31,
                indicator_key="",
                fixed_datespan_days=7,
            ),
            num_newborns=dict(
                description="No. of newborns",
                title="# Newborns",
                indicator_key="opened_on",
                is_dob_in_datespan=True,
                show_active_only=False,
            ),
            birth_weight_registration=dict(
                description="Number of births reported with weight recorded during time period",
                title="# birth registrations w/ weight recorded",
                indicator_key="opened_on weight_recorded",
                is_dob_in_datespan=True,
                show_active_only=False,
            ),
            low_birth_weight=dict(
                description="No. of low birth weight (<2.5 kg) babies born during the time period",
                title="# low birth weight (<2.5 kg) births",
                indicator_key="opened_on low_birth_weight",
                is_dob_in_datespan=True,
                show_active_only=False,
            ),
        )
    )
)
| {
"repo_name": "SEL-Columbia/commcare-hq",
"path": "custom/_legacy/mvp/static_definitions/couch/births.py",
"copies": "1",
"size": "3805",
"license": "bsd-3-clause",
"hash": 6033289475864329000,
"line_mean": 35.9417475728,
"line_max": 99,
"alpha_frac": 0.5024967148,
"autogenerated": false,
"ratio": 4.167579408543264,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006372017908479155,
"num_lines": 103
} |
# Static couch indicator definitions for CHW urgent-referral follow-up.
APP_NAME = "mvp_chw_referrals"

# Plain-count indicators over the urgent_referrals_by_case couch view;
# description/title are report labels, indicator_key selects the view rows.
CHW_REFERRAL_INDICATORS = {
    "app": APP_NAME,
    "indicators": {
        "urgent_referrals_by_case": {
            "urgent_referral_followups": {
                "description": (
                    "No. urgent referrals (codes A, E, B) or treatment "
                    "receiving CHW follow-up within 2 days of referral / "
                    "treatment during the time period"
                ),
                "title": "# Urgent Referrals w/ Followup within 2 days",
                "indicator_key": "urgent_referral_followup",
            },
            "num_late_followups": {
                "description": "# Referred / Treated receiving LATE follow-up (within 3-7 days)",
                "title": "# Referred / Treated receiving LATE follow-up (within 3-7 days)",
                "indicator_key": "urgent_referral_followup_late",
            },
            "num_none_followups": {
                "description": "# Referred / Treated receiving NO follow-up",
                "title": "# Referred / Treated receiving NO follow-up",
                "indicator_key": "urgent_referral_followup_none",
            },
            "num_urgent_referrals": {
                "description": "No. of Urgent Referrals",
                "title": "# Urgent Referrals",
                "indicator_key": "urgent_or_treatment",
            },
        },
    },
}
# Indicators below use MedianCouchIndicatorDef
# Median (not count) indicator over the same couch view: the view rows hold
# day counts ("urgent_referral_followup_days") and the definition reports
# their median.
MEDIAN_CHW_REFERRAL_INDICATORS = dict(
    app=APP_NAME,
    indicators=dict(
        urgent_referrals_by_case=dict(
            median_days_referral_followup=dict(
                description="Median number of days to follow-up referral / treatment for "\
                            "urgent referrals (codes A, E, B) or treatment ",
                title="Median # Days to follow up urgent referral",
                indicator_key="urgent_referral_followup_days",
            )
        )
    )
)
| {
"repo_name": "qedsoftware/commcare-hq",
"path": "custom/_legacy/mvp/static_definitions/couch/chw_referrals.py",
"copies": "4",
"size": "1853",
"license": "bsd-3-clause",
"hash": 5554367663020233000,
"line_mean": 39.2826086957,
"line_max": 120,
"alpha_frac": 0.5650296816,
"autogenerated": false,
"ratio": 3.8364389233954452,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6401468604995446,
"avg_score": null,
"num_lines": null
} |
# Static couch indicator definitions for MVP CHW (community health worker)
# visits.  Leaf dicts configure one indicator each: description/title are
# report labels; case_type/indicator_key select couch rows;
# fixed_datespan_* overrides the report datespan.
APP_NAME = "mvp_chw_visits"

# Active-case counts (households / pregnancies) over several time windows.
CHW_VISIT_ACTIVE_CASES_INDICATORS = dict(
    app=APP_NAME,
    indicators=dict(
        all_cases=dict(
            household_cases_90days=dict(
                description="No. of active households in the past 90 days",
                title="# Households in past 90 days",
                indicator_key="",
                case_type="household",
                fixed_datespan_months=3,
            ),
            household_cases_30days=dict(
                description="No. of Households in the past 30 days",
                title="# Households in the past 30 days",
                indicator_key="",
                case_type="household",
                fixed_datespan_months=1,
            ),
            household_cases=dict(
                description="No. of Active Households ",
                title="# Households in specified time period",
                indicator_key="",
                case_type="household",
            ),
            pregnancy_cases_6weeks=dict(
                description="No. of Active Pregnancies in the Past 6 Weeks",
                title="# pregnancies in last 6 weeks",
                indicator_key="",
                case_type="pregnancy",
                fixed_datespan_days=42,
            ),
            pregnancy_cases_30days=dict(
                description="No. of Active Pregnancies in the Past 30 Days",
                title="# pregnancies in the last 30 days",
                indicator_key="",
                case_type="pregnancy",
                fixed_datespan_months=1,
            ),
            pregnancy_cases=dict(
                description="No. of Active Pregnancies",
                title="# Pregnancies in specified time period",
                indicator_key="",
                case_type="pregnancy",
            ),
        )
    )
)
# Raw visit-count indicator over the all_visit_forms couch view.
CHW_VISIT_INDICATORS = {
    "app": APP_NAME,
    "indicators": {
        "all_visit_forms": {
            "household_visits": {
                "description": "No. of household visits",
                "title": "# Household Visits in specified time period",
                "indicator_key": "household",
            },
        },
    },
}
# Unique-count visit indicators over the all_visit_forms view (each case is
# counted once per window, unlike the raw counts above).
CHW_VISITS_UNIQUE_COUNT_INDICATORS = dict(
    app=APP_NAME,
    indicators=dict(
        all_visit_forms=dict(
            household_visits_90days=dict(
                description="No. of household visits in the past 90 days.",
                title="# household visits in past 90 days",
                indicator_key="household",
                fixed_datespan_months=3,
            ),
            household_visits_30days=dict(
                description="No. of household visits",
                title="# Household Visits",
                indicator_key="household",
                fixed_datespan_months=1,
            ),
            pregnancy_visits_6weeks=dict(
                description="No. of pregnancy visits in the past 6 weeks",
                title="# Pregnancy Visits in Past 6 Weeks",
                indicator_key="pregnancy",
                fixed_datespan_days=42,
            ),
            pregnancy_visits_30days=dict(
                description="No. of Pregnancy Visits in the last 30 days",
                title="# Pregnancy Visits in Past 30 days",
                indicator_key="pregnancy",
                fixed_datespan_months=1,
            ),
            under5_visits_30days=dict(
                description="No. of Under5 visits",
                title="# Under5 Visits",
                indicator_key="child under5",
                fixed_datespan_months=1,
            ),
            neonate_visits_7days=dict(
                description="No. of Neonate visits",
                title="# Neonate Visits",
                indicator_key="child neonate",
                fixed_datespan_days=7,
            ),
            under1_visits=dict(
                description="No. of children Under-1 receiving on-time scheduled check-ups during the time period",
                title="# Under-1 receiving check-ups",
                indicator_key="child under1",
            ),
            under1_visits_6weeks=dict(
                description="No. of children Under-1 receiving on-time scheduled check-ups during the time period",
                title="# Under-1 receiving check-ups",
                fixed_datespan_days=42,
                indicator_key="child under1",
            ),
            under1_immunization_up_to_date=dict(
                description="No. of children Under-1 with up-to-date immunizations at visit during this time period",
                title="# Under-1 up-to-date immunizations",
                indicator_key="child under1 immunized",
            ),
            newborn_visits=dict(
                description="No. of newborns visited 7 days after birth",
                title="# Newborns visited 7 days after birth",
                indicator_key="child 7days",
            ),
            under6month_exclusive_breastfeeding=dict(
                description="No. of children under 6 months reported as exclusively breast-fed during visit",
                title="# Under-6-Months reported as exclusively breast-fed during visit",
                indicator_key="child under6mo_ex_breast",
            ),
            under6month_visits=dict(
                description="No. of children receiving visit who were under 6 months",
                title="# Under-6-Month Visits",
                indicator_key="child under6mo",
            ),
            household_num_func_bednets=dict(
                description="No. of households ASSESSED with at least one functioning bednet per sleeping site",
                title="# of households with at least one functioning bednet per sleeping site",
                indicator_key="household atleastonebednet",
            ),
            household_num_bednets=dict(
                description="No. of households ASSESSED for functioning bednet",
                title="# of households ASSESSED for functioning bednet",
                indicator_key="household bednet",
            ),
            num_handwashing_latrine=dict(
                description="No. of households ASSESSED with handwashing station within 10m of the latrine",
                title="# of households with handwashing station within 10m of the latrine",
                indicator_key="household handwashing10metres",
            ),
            num_handwashing=dict(
                description="No. of households ASSESSED with handwashing station",
                title="# of households with handwashing station",
                indicator_key="household handwashing",
            ),
        )
    )
)
| {
"repo_name": "benrudolph/commcare-hq",
"path": "custom/_legacy/mvp/static_definitions/couch/chw_visits.py",
"copies": "4",
"size": "6673",
"license": "bsd-3-clause",
"hash": -7878150383016447000,
"line_mean": 41.5031847134,
"line_max": 117,
"alpha_frac": 0.5330436086,
"autogenerated": false,
"ratio": 4.347231270358306,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6880274878958307,
"avg_score": null,
"num_lines": null
} |
# Static couch indicator definitions for MVP maternal health.
APP_NAME = "mvp_maternal_health"

# Counts over the pregnancy_danger_signs couch view; description/title are
# report labels, indicator_key selects the view rows.
MATERNAL_HEALTH_INDICATORS = {
    "app": APP_NAME,
    "indicators": {
        "pregnancy_danger_signs": {
            "pregnancy_visit_danger_sign": {
                "description": "No. of Pregnant Women With Danger Sign Recorded During Visit",
                "title": "# Pregnant Women w/ Danger Signs",
                "indicator_key": "danger_sign",
            },
            "pregnancy_visit_danger_sign_referral": {
                "description": "No. of Pregnant Women Referred for Danger Signs",
                "title": "# Pregnant Women Referred for Danger Signs",
                "indicator_key": "danger_sign referred",
            },
        },
    },
}
# use CountUniqueIndicatorDef
# Unique-count ANC (antenatal care) visit indicators: each pregnant woman is
# counted once per indicator window.
COUNT_UNIQUE_MATERNAL_HEALTH_INDICATORS = dict(
    app=APP_NAME,
    indicators=dict(
        anc_visits=dict(
            edd_soon_anc4=dict(
                description="No. of Pregnant women reporting at least four (4) Antenatal Care "
                            "visit by 8 months of gestation this time period",
                title="Pregnant receiving 4 ANC visits to facility by 8 months gestation",
                indicator_key="anc4"
            ),
            no_anc=dict(
                description="No. of Pregnant women who did not have an ANC visit by 4 months of gestation time",
                title="# Pregnant women who did not have an ANC visit by 4 months of gestation time",
                indicator_key="no_anc"
            ),
            anc_visit_120=dict(
                description="No. of Pregnant women who received a visit by 4 months of gestation",
                title="# Pregnant women who received a visit by 4 months of gestation",
                indicator_key="anc_visit_120"
            ),
            edd_soon_visit=dict(
                description="No. of Pregnant women who have at least one visit by 8 months of gestation",
                title="Pregnant receiving at least one ANC visit by 8 months gestation",
                indicator_key="visit"
            )
        ),
    )
)
# Use SumLastEmittedCouchIndicatorDef
# Family-planning indicators that sum the last value emitted per household,
# rather than counting rows.
SUM_LAST_UNIQUE_MATERNAL_HEALTH_INDICATORS = dict(
    app=APP_NAME,
    indicators=dict(
        family_planning=dict(
            household_num_fp=dict(
                description="No. households using family planning",
                title="# Households Using Family Planning",
                indicator_key="num_fp"
            ),
            household_num_ec=dict(
                description="No. of Households Seen for Family Planning",
                title="# of Women 15-49 Seen for Family Planning",
                indicator_key="num_ec"
            )
        ),
    )
)
| {
"repo_name": "qedsoftware/commcare-hq",
"path": "custom/_legacy/mvp/static_definitions/couch/maternal_health.py",
"copies": "4",
"size": "2681",
"license": "bsd-3-clause",
"hash": 7936399941027489000,
"line_mean": 37.3,
"line_max": 112,
"alpha_frac": 0.5676986199,
"autogenerated": false,
"ratio": 3.8686868686868685,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001329475510697107,
"num_lines": 70
} |
# Packaging metadata for PyWright (Python 2 era setup script).  The cfg dict
# drives the setup()/freeze branches below; values are read via cfg['...'].
APP_NAME = 'PyWright'

cfg = {
    'name':APP_NAME,
    'version':'beta1',
    'description':'',
    'author':'',
    'author_email':'',
    'url':'',
    # Per-target settings: py2exe (Windows), py2app (macOS), cx_freeze (Linux).
    'py2exe.target':'',
    'py2exe.icon':'bb.ico', #64x64
    'py2exe.binary':APP_NAME, #leave off the .exe, it will be added
    'py2app.target':'',
    'py2app.icon':'icon.icns', #128x128
    'cx_freeze.cmd':'~/src/cx_Freeze-3.0.3/FreezePython',
    'cx_freeze.target':'',
    'cx_freeze.binary':APP_NAME,
}
# usage: python setup.py command
#
# sdist - build a source dist
# py2exe - build an exe
# py2app - build an app
# cx_freeze - build a linux binary (not implemented)
#
# the goods are placed in the dist dir for you to .zip up or whatever...
from distutils.core import setup, Extension
# py2exe exists only on Windows; it is needed solely for the 'py2exe'
# build target, so a failed import is ignored on other platforms.
try:
    import py2exe
except ImportError:  # narrowed from bare except: don't mask unrelated errors
    pass
import sys
import glob
import os
import shutil
# The first CLI argument selects the build target; exit with usage if absent.
try:
    cmd = sys.argv[1]
except IndexError:
    print 'Usage: setup.py py2exe|py2app|cx_freeze'
    raise SystemExit
# utility for adding subdirectories
def add_files(dest, generator, ignorefiles=None, ignorefolders=None):
    """Collect shippable data paths from an os.walk()-style *generator*
    into the list *dest* (mutated in place).

    Directory entries are appended as "dirpath/name"; file entries via
    os.path.join.  Skips VCS folders (CVS, .svn), backup files containing
    '~', compiled Python (.pyc/.pyo), dot-files, plus any caller-supplied
    *ignorefiles* suffixes and *ignorefolders* names.

    The original used mutable default arguments (ignorefiles=[],
    ignorefolders=[]) — replaced with None sentinels per standard Python
    practice; behavior is unchanged.
    """
    ignorefiles = [] if ignorefiles is None else ignorefiles
    ignorefolders = [] if ignorefolders is None else ignorefolders
    for dirpath, dirnames, filenames in generator:
        # Prune ignored folders in place so os.walk does not descend into them.
        for name in ['CVS', '.svn'] + ignorefolders:
            if name in dirnames:
                dirnames.remove(name)
        dest.extend([dirpath + "/" + x for x in dirnames])
        for name in filenames:
            if '~' in name: continue
            suffix = os.path.splitext(name)[1]
            if suffix in ['.pyc', '.pyo'] + ignorefiles: continue
            if name[0] == '.': continue
            filename = os.path.join(dirpath, name)
            dest.append(filename)
# define what is our data
# Feature switches for what gets bundled (music is currently excluded;
# bart/bmusic are not read below in this section — presumably used further
# down or historical; TODO confirm).
bexe = 1
bart = 1
bmusic = 0
# 'data' accumulates every non-source file/folder to ship with the binary.
data = []
if bexe:
    add_files(data,os.walk('fonts'))
    add_files(data,os.walk('sfx'))
    add_files(data,os.walk('art/general'))
    # include engine scripts but not the generated cache contents
    add_files(data,os.walk("core"),["cache"])
    data+=["core/cache"]
    data+=["games/","music/","movies/","downloads/"]
    data+=["doc.txt","changelog.txt","data.txt"]
    data+=["art/ev/","art/port/","art/fg/","art/bg/"]
    data+=["art/bg/"+x for x in os.listdir("art/bg/") if x.endswith(".png")]
    data+=["art/fg/"+x for x in os.listdir("art/fg/") if x.endswith(".png") or x.endswith(".gif") or x.endswith(".txt")]
# build the sdist target
# Writes MANIFEST.in from the collected data list, then runs a plain
# metadata-only distutils setup().
if cmd == 'sdist' and bexe:
    f = open("MANIFEST.in","w")
    for l in data: f.write("include "+l+"\n")
    # NOTE(review): 'src' is never defined anywhere in this script — this
    # branch raises NameError if actually run with 'sdist'; confirm what
    # source-file list was intended here.
    for l in src: f.write("include "+l+"\n")
    f.close()
    setup(
        name=cfg['name'],
        version=cfg['version'],
        description=cfg['description'],
        author=cfg['author'],
        author_email=cfg['author_email'],
        url=cfg['url'],
    )
# build the py2exe target
# Pull in py2exe's build command class so it can be subclassed below; on
# platforms without py2exe fall back to an empty stub so the Py2exe class
# definition still parses (it is only used when cmd == 'py2exe').
try:
    from py2exe.build_exe import py2exe
except ImportError:  # narrowed from bare except: don't mask unrelated errors
    class py2exe:
        pass
class Py2exe(py2exe):
    """py2exe build command extended with optional UPX compression of the
    copied DLL/PYD binaries (enabled via the added 'upx' option; shells out
    to the external 'upx' tool)."""
    def initialize_options(self):
        # Add a new "upx" option for compression with upx
        py2exe.initialize_options(self)
        self.upx = 0
    def copy_file(self, *args, **kwargs):
        # Override to UPX copied binaries.
        (fname, copied) = result = py2exe.copy_file(self, *args, **kwargs)
        basename = os.path.basename(fname)
        # python.dll is skipped here so patch_python_dll_winver can still
        # edit it; it is UPX'd afterwards in that method instead.
        if (copied and self.upx and
                (basename[:6]+basename[-4:]).lower() != 'python.dll' and
                fname[-4:].lower() in ('.pyd', '.dll')):
            os.system('upx --best "%s"' % os.path.normpath(fname))
        return result
    def patch_python_dll_winver(self, dll_name, new_winver=None):
        # Override this to first check if the file is upx'd and skip if so
        # ('upx -qt' tests the file; exit status 0 means already packed).
        if not self.dry_run:
            if not os.system('upx -qt "%s" >nul' % dll_name):
                if self.verbose:
                    print "Skipping setting sys.winver for '%s' (UPX'd)" % \
                          dll_name
            else:
                py2exe.patch_python_dll_winver(self, dll_name, new_winver)
                # We UPX this one file here rather than in copy_file so
                # the version adjustment can be successful
                if self.upx:
                    os.system('upx --best "%s"' % os.path.normpath(dll_name))
# 'script' target: no freezing — just stage sources plus data into scriptdist/
# (the actual copying happens in the shared copy loop at the bottom).
if cmd in ('script',) and bexe:
    dist_dir = "scriptdist"
    data_dir = dist_dir
    data+=["updater.py","PyWright.py"]
# build the py2exe target
if cmd in ('py2exe',) and bexe:
    dist_dir = os.path.join('dist',cfg['py2exe.target'])
    data_dir = dist_dir
    dest = cfg['py2exe.binary']+'.py'
    setup(
        #zipfile=None,
        # use the UPX-aware subclass defined above
        cmdclass = {"py2exe":Py2exe},
        options={'py2exe':{
            'dist_dir':dist_dir,
            'dll_excludes':['_dotblas.pyd',"cdrom.pyd"],
            'packages':['encodings','pygame','numpy'],
            'includes':['__future__'],
            'ignores':['numpy.distutils.tests'],
            # trim unused stdlib/third-party modules to shrink the bundle
            'excludes':['curses','email','logging','numarray',
                'Tkinter','tcl',"ssl",
                "stringprep","StringIO","bz2","_ssl",
                "doctest","optparse","popen2","Numeric","OpenGL",
                "multiprocessing","compiler","distutils",
                "setuptools","psyco"],
            'compressed':1,
            'bundle_files':2,
            'ascii':1
        }},
        # two GUI executables: the game itself and its updater
        windows=[{
            'script':"PyWright.py",
            'icon_resources':[(1,"art/general/bb.ico")],
        },
        {
            "script":"updater.py"}],
    )
# build the py2app target
if cmd == 'py2app' and bexe:
    dist_dir = os.path.join('dist',cfg['py2app.target']+'.app')
    #data_dir = os.path.join(dist_dir,'Contents','Resources')
    # Data is staged next to the .app bundle rather than inside Resources.
    data_dir = 'dist'
    # py2app requires the setuptools setup(), not the distutils one imported
    # earlier in this script.
    from setuptools import setup
    OPTIONS = {'argv_emulation': True}#, 'iconfile':cfg['py2app.icon']}
    setup(
        app=['PyWright_run.py'],
        data_files=[],
        options={'py2app': OPTIONS},
        setup_requires=['py2app'],
    )
# make the cx_freeze target
if cmd == 'cx_freeze' and bexe:
    dist_dir = os.path.join('dist',cfg['cx_freeze.target'])
    data_dir = dist_dir
    # BUG FIX: the original passed cfg['cx_freeze.binary'] (the executable
    # name) as --install-dir and dist_dir (the output directory) as
    # --target-name, i.e. the two values were swapped relative to what the
    # cxfreeze command-line script expects.
    os.system('%s --install-dir %s --target-name %s run_game.py'%(cfg['cx_freeze.cmd'],dist_dir,cfg['cx_freeze.binary']))
# recursively make a bunch of folders
def make_dirs(dname_):
    """Create directory ``dname_``, including any missing parent directories.

    BUG FIX: the original used ``os.path.split``, which only separates the
    *last* path component, so for a path nested more than one level below an
    existing directory the intermediate ``os.mkdir`` call raised ``OSError``
    (the parent did not exist yet).  ``os.makedirs`` creates the whole chain.
    An empty path or an already-existing directory is a no-op.
    """
    if dname_ and not os.path.isdir(dname_):
        os.makedirs(dname_)
# copy data into the binaries
if cmd in ('py2exe','cx_freeze','script', 'py2app'):
    dest = data_dir
    # Mirror every data file into the staging directory, creating the
    # destination directory tree first; directory entries in the list are
    # skipped (only their contents, listed separately, are copied).
    for fname in data:
        print fname
        dname = os.path.join(dest,os.path.dirname(fname))
        make_dirs(dname)
        if not os.path.isdir(fname):
            print "copy",fname,dname
            shutil.copy(fname,dname)
    if cmd == "py2exe":
        # Stage the extra runtime DLLs alongside the frozen build.
        # NOTE(review): os.mkdir fails if "library" already exists -- this
        # appears to assume a clean build directory; confirm.
        os.mkdir("library")
        for fname in os.listdir("extradlls"):
            shutil.copy("extradlls/"+fname,"library/"+fname)
if cmd=="py2exe":
    # Build the [Files]/[Dirs] entries of an Inno Setup installer script from
    # the data list plus whatever py2exe left at the top level of dist/.
    # Paths are rewritten with backslashes because Inno Setup runs on Windows.
    files = []
    dirs = []
    for fname in data+[x for x in os.listdir("dist") if not os.path.isdir("dist/"+x)]:
        inno = fname.replace("/","\\")
        if os.path.isdir("dist\\"+inno):
            dirs.append("Name: {app}\\"+fname+"\n")
        else:
            # DestDir is the file's containing directory under {app}.
            dest = ""
            if "\\" in inno:
                dest = inno[:inno.rfind("\\")]
            d = (inno,dest)
            files.append("Source: dist\%s; DestDir: {app}\%s; Flags: overwritereadonly\n"%d)
    # The actual writing of setup.iss is currently disabled; the generated
    # "files"/"dirs" lists are unused while this stays commented out.
    #~ inno = open("setup.iss","w")
    #~ inno.write("""[Setup]
    #~ AppName=PyWright
    #~ AppVerName=PyWright Beta6
    #~ DefaultDirName={pf}\PyWright
    #~ DefaultGroupName=PyWright
    #~ Compression=lzma
    #~ OutputBaseFilename=pywright-beta6-setup
    #~ PrivilegesRequired=none
    #~ UninstallDisplayIcon={app}\PyWright.exe
    #~ SetupIconFile=C:\Users\saluk\Desktop\dev\pyphoenix\PyWright_trunk\\bb.ico
    #~ InternalCompressLevel=ultra64
    #~ [Icons]
    #~ Name: {group}\PyWright; Filename: {app}\PyWright.exe; WorkingDir: {app}
    #~ Name: {group}\updater; Filename: {app}\updater.exe
    #~ Name: {group}\uninstall; Filename: {app}\unins000.exe""")
    #~ inno.write("\n\n[Files]\n")
    #~ inno.writelines(files)
    #~ inno.write("\n[Dirs]\n")
    #~ inno.writelines(dirs)
    #~ inno.close()
| {
"repo_name": "crxtrdude/pywright",
"path": "setup.py",
"copies": "1",
"size": "8188",
"license": "bsd-3-clause",
"hash": -9022603167092121000,
"line_mean": 31.752,
"line_max": 121,
"alpha_frac": 0.5615534929,
"autogenerated": false,
"ratio": 3.364009860312243,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9310156052407124,
"avg_score": 0.02308146016102374,
"num_lines": 250
} |
# Per-app configuration: only this section is edited for each packaged app;
# everything below the marker comment is shared boilerplate.
APP_NAME = 'sitepermissions'
PACKAGE_NAME = 'django-%s' % APP_NAME
DESCRIPTION = 'django sitepermissions app'
PROJECT_URL = 'http://github.com/ojii/%s/' % PACKAGE_NAME
INSTALL_REQUIRES = [
]
AUTHOR = "Jonas Obrist"
EXTRA_CLASSIFIERS = [
]
# DO NOT EDIT ANYTHING DOWN HERE... this should be common to all django app packages
from setuptools import setup, find_packages
import os

# Pull the version string from the app package itself; the app must therefore
# be importable and define __version__.
version = __import__(APP_NAME).__version__
classifiers = [
    'Environment :: Web Environment',
    'Framework :: Django',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: BSD License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    'Topic :: Software Development',
    'Topic :: Software Development :: Libraries :: Application Frameworks',
]
# Derive the Development Status classifier from pre-release markers in the
# version string ('a' -> alpha, 'b' -> beta, neither -> stable).
if not 'a' in version and not 'b' in version: classifiers.append('Development Status :: 5 - Production/Stable')
elif 'a' in version: classifiers.append('Development Status :: 3 - Alpha')
elif 'b' in version: classifiers.append('Development Status :: 4 - Beta')
for c in EXTRA_CLASSIFIERS:
    if not c in classifiers:
        classifiers.append(c)
media_files = []

setup(
    author=AUTHOR,
    name=PACKAGE_NAME,
    version=version,
    url=PROJECT_URL,
    description=DESCRIPTION,
    long_description=DESCRIPTION,
    platforms=['OS Independent'],
    classifiers=classifiers,
    # NOTE(review): 'requires' is the legacy distutils metadata field and does
    # not install anything; with setuptools this should almost certainly be
    # install_requires=INSTALL_REQUIRES -- confirm before changing.
    requires=INSTALL_REQUIRES,
    packages=find_packages(),
    package_dir={
        APP_NAME: APP_NAME,
    },
    zip_safe = False
) | {
"repo_name": "czpython/django-sitepermissions",
"path": "setup.py",
"copies": "2",
"size": "1566",
"license": "bsd-3-clause",
"hash": -2377350033671704000,
"line_mean": 26.4912280702,
"line_max": 111,
"alpha_frac": 0.6794380587,
"autogenerated": false,
"ratio": 3.764423076923077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5443861135623077,
"avg_score": null,
"num_lines": null
} |
APP_NAME = 'Stripe Growl'  # name registered with Growl and used in notification titles
class StripeNotifier(object):
NOTIFICATIONS = [
# App events
'info',
'error',
# Stripe event types
'charge.succeeded',
]
def __init__(self, api_key, max_events, since_id=None):
"""
max_events: if None, defaults to 4. This is the maximum number of event
notifications that Growl will display during a single poll.
since_id: a POSIX timestamp in UTC that is used to filter Stripe
events. If None, defaults to the current time (in UTC).
"""
# Setup Stripe
import stripe
self.stripe = stripe
self.stripe.api_key = api_key
self.max_events = max_events
# Specified 'latest known' event (or right now)
from time import time
self.since_id = since_id or int(time())
# Setup Growl notifier
import gntp.notifier
self.notifier = gntp.notifier.GrowlNotifier(
applicationName = APP_NAME,
notifications = self.NOTIFICATIONS,
defaultNotifications = self.NOTIFICATIONS,
)
try:
self.notifier.register()
except:
from sys import exit, stderr
stderr.write("ERR: Unable to register %s with Growl.\n" % APP_NAME)
exit(1)
def _notify(self, noteType, title, message):
"""
Use the Growl notifier to send an event, where note type is in
StripeNotifier.NOTIFICATIONS.
"""
self.notifier.notify(
noteType = noteType,
title = title,
description = message,
)
def _handle_event(self, event):
"""
Displays a notification regarding the event based on the event type.
Currently, only 'charge.succeeded' is supported.
"""
if event.type == 'charge.succeeded':
from datetime import datetime
dformat = '%b. %d, %I:%M %p'
charge = event.data.object.to_dict()
created = datetime.fromtimestamp(charge['created'])
title = 'New Charge! (%s)' % created.strftime(dformat)
message = "$%(amount).2f - %(description)s" % {
'amount': charge['amount'] / 100.,
'description': charge.get('description', '(no description)'),
}
self._notify(event.type, title, message)
else: # Only handle charge.succeeded right now
pass
def poll(self):
"""
Return a boolean indicating success (True) or failure (False). Updates
StripeNotifier's state to only retrieve new events.
"""
# Retrieve events
try:
rsp = self.stripe.Event.all(
created = { 'gt': self.since_id },
type = 'charge.succeeded',
count = self.max_events,
)
except self.stripe.StripeError, e:
msg = e.json_body['error']['message']
self._notify('error', '%s: Error' % APP_NAME, msg)
return False
except:
self._notify(
'error', '%s: Error' % APP_NAME, 'Unable to retrieve Events.'
)
return False
else:
count = rsp.count
events = rsp.data
if not len(events): # No new events (still a success)
return True
# No errors; handle events
self.since_id = events[0].created
for event in events:
self._handle_event(event)
# Notify of any hidden events
rem = count - self.max_events
if rem > 0:
self._notify('info', APP_NAME, '%d events not shown.' % rem)
return True
if __name__ == '__main__':
    # Setup argument parser
    import argparse
    parser = argparse.ArgumentParser(
        description='Receive Growl notifications of Stripe events',
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument('key', type=str, help='Your Stripe secret API key.')
    parser.add_argument(
        '--poll-events', type=int, default=4,
        help='Maximum number of events to display at a time.'
    )
    parser.add_argument(
        '--poll-interval', type=int, default=5,
        help='Delay (in minutes) between polls for Stripe events (minimum: 5)'
    )
    args = vars(parser.parse_args())
    # Start notifier
    sn = StripeNotifier(
        api_key = args['key'],
        max_events = args['poll_events'],
    )
    from time import sleep
    # NOTE(review): the help text above says "minimum: 5" but only intervals
    # under 1 minute are rejected here -- the 5-minute limit is merely
    # encouraged by the error message.
    if args['poll_interval'] < 1:
        import sys
        sys.stderr.write('ERR: You cannot provide a polling interval under'
            ' 1 minute. The preference is that you keep it at or above 5'
            ' minutes.\n')
        sys.exit(1)
    # Poll forever; each successful poll advances sn.since_id.
    while True:
        sn.poll()
        sleep(args['poll_interval'] * 60)
| {
"repo_name": "michaelschade/stripe_growl",
"path": "stripe_growl.py",
"copies": "1",
"size": "5011",
"license": "mit",
"hash": 5305138244858627000,
"line_mean": 34.7928571429,
"line_max": 79,
"alpha_frac": 0.5388146079,
"autogenerated": false,
"ratio": 4.349826388888889,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5388640996788889,
"avg_score": null,
"num_lines": null
} |
# app
from app import app
from helpers import stripped, name_image_file
# flickr_api
import flickr_api, json, urllib
from flickr_api.api import flickr
flickr_key = app.config['FLICKR_API_KEY']
flickr_secret = app.config['FLICKR_API_SECRET']
# authorize access to flickr
flickr_api.set_keys(api_key = flickr_key, api_secret = flickr_secret)
# amazon s3: module-level resource + low-level client (used for presigned URLs)
import boto3
s3 = boto3.resource('s3')
client = boto3.client('s3')
# flask_mail
from flask_mail import Mail, Message
mail = Mail(app)
# mgmt pkgs
import os  # file and dir mgmt
import shutil  # path disintegration
from bs4 import BeautifulSoup  # parse xml
import requests  # fetch web content
from PIL import Image  # process images
from StringIO import StringIO  # glue requests and PIL
from ratelimit import rate_limited  # comply with Flickr's API policy
# Python 2 idiom: force UTF-8 as the process-wide default encoding so photo
# titles containing non-ASCII characters don't raise UnicodeDecodeError.
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def set_up_local_bucket(path):
    """Ensure the local staging directory *path* exists.

    Creates the shared foodstuff/ parent under APP_ROOT first (if missing),
    then the request-specific directory itself.
    """
    foodstuff_root = os.path.join(app.config['APP_ROOT'], 'foodstuff')
    for directory in (foodstuff_root, path):
        if not os.path.exists(directory):
            os.mkdir(directory)
@rate_limited(1)
def get_image_page(tag, per_page, page):
    """Fetch one page of Flickr search results for *tag*, parsed as XML.

    Rate-limited to one call per second per Flickr's API policy.
    """
    raw_xml = flickr.photos.search(tags=tag, per_page=per_page, page=page)
    return BeautifulSoup(raw_xml, 'lxml-xml')
@rate_limited(1)
def get_image_sizes(image_id):
    """Return all available <size> elements for the Flickr photo *image_id*.

    Rate-limited to one call per second per Flickr's API policy.
    """
    raw_xml = flickr.photos.getSizes(photo_id=image_id)
    return BeautifulSoup(raw_xml, 'lxml-xml').find_all('size')
def fill_up(tag, bucketname, path, amount):
    """Download up to *amount* Flickr images tagged *tag* into *path*,
    uploading each one to the S3 bucket *bucketname* and deleting the local
    copy as it goes.  amount <= 0 (or more than exist) means "all of them".
    """
    silo = get_image_page(tag, 100, 1)
    total = int(silo.photos['total'])
    if amount > total or amount <= 0:
        amount = total
    # NOTE(review): integer division here, and xrange(1, total_pages) below,
    # look like they skip the final partial page of results -- confirm.
    total_pages = total / 100 + 1
    image_num = 1
    for page in xrange(1, total_pages):
        for image in silo.find_all('photo'):
            try:
                image_id = image['id']
                sizes = get_image_sizes(image_id)
                image_source = None
                image_source = sizes[-1]['source'] # always grab biggest img
                if image_source:
                    name = name_image_file(image_id, image['title'])
                    r = requests.get(image_source)
                    try:
                        r.raise_for_status()
                    except Exception as exc:
                        print("There was a problem: {0}".format(exc))
                    # Stream the download to disk in ~100KB chunks, push the
                    # file to S3, then drop the local copy to save disk space.
                    image_file = open(os.path.join(path, name), 'wb')
                    for chunk in r.iter_content(100000):
                        image_file.write(chunk)
                    image_file.close()
                    s3.Object(bucketname, name).put(Body=open(os.path.join(path, name), 'rb'))
                    os.remove(os.path.join(path, name))
            except Exception as exc:
                # Best-effort: a single bad photo shouldn't abort the batch.
                print("There was a problem: {0}".format(exc))
            # NOTE(review): image_num also counts failed downloads, so fewer
            # than *amount* images may actually be delivered -- confirm intent.
            image_num += 1
            if image_num > amount:
                return
        # Advance to the next page of search results.
        silo = get_image_page(tag, 100, page+1)
import zipfile
def zipper(email, tag, bucket, path, bucketname):
    """Zip every jpg/jpeg object in *bucket*, upload the archive back to S3,
    and email the user a time-limited presigned download link.

    The zip and the per-file downloads are created in the current working
    directory; the caller (get_food) chdirs into *path* first, which is why
    os.path.join(path, zippy) later resolves to the same file.
    """
    with app.app_context():
        zippy = '.'.join([tag, 'zip'])
        with zipfile.ZipFile(zippy, 'w', allowZip64=True) as z:
            for key in bucket.objects.all():
                # NOTE(review): split('.')[1] takes the segment after the
                # first dot, not the real extension -- fragile for names
                # containing multiple dots; confirm upstream naming.
                ext = key.key.split('.')[1]
                if ext not in ('jpg', 'jpeg'):
                    key.delete()
                else:
                    # Round-trip each object through the local disk into the
                    # zip, then remove both the local file and the S3 object.
                    bucket.download_file(key.key, key.key)
                    z.write(key.key)
                    os.remove(key.key)
                    key.delete()
        s3.Object(bucketname, zippy).put(Body=open(os.path.join(path, zippy), 'rb'))
        url = client.generate_presigned_url(
            ClientMethod='get_object',
            Params={
                'Bucket': bucketname,
                'Key': zippy
            },
            ExpiresIn=3600*24*3 # three days
        )
        email_zips(email, url)
        os.remove(os.path.join(path, zippy))
def email_zips(email, url):
    """Email *email* a time-limited download link (*url*) for their zip."""
    with app.app_context():
        body = ("Use this link to download the images you requested: {0}\n\n"
                "Note: this link will only be valid for three days.".format(url))
        message = Message(subject="Tell your neural nets, dinner is served!",
                          sender="no-reply@feedingtube.host",
                          recipients=[email],
                          bcc=['phraznikov+ft@gmail.com'])
        message.body = body
        mail.send(message)
# process user request for images
def get_food(email, tag, amount):
    """End-to-end pipeline for one request: pull *amount* images tagged *tag*
    from Flickr into a per-request S3 bucket, zip them, and email *email* a
    download link.  Cleans up the local staging directory afterwards.
    """
    with app.app_context():
        if type(amount) is not int:
            amount = int(amount)
        # Strip whitespace from the tag and build request-scoped identifiers
        # (local staging dir name and S3 bucket name) from email + tag.
        clean_tag = ''.join(tag.split())
        container = stripped(email + clean_tag)
        bucketname = 'feedingtube-a-' + stripped(email) + '-' + clean_tag
        path = os.path.join(app.config['APP_ROOT'], 'foodstuff', container)
        # create fresh s3 bucket
        bucket = s3.create_bucket(Bucket=bucketname)
        # nav to tmp dir to process file downloads
        set_up_local_bucket(path)
        # plumb images from flickr into local dir, then to s3
        fill_up(tag, bucketname, path, amount)
        # zipper() builds the archive in the cwd, so chdir into the staging
        # dir first and restore APP_ROOT when done.
        os.chdir(path)
        zipper(email, tag, bucket, path, bucketname)
        os.chdir(app.config['APP_ROOT'])
        shutil.rmtree(path)
| {
"repo_name": "phrazzld/feedingtube",
"path": "feedtube.py",
"copies": "1",
"size": "5318",
"license": "mit",
"hash": -8536054303716623000,
"line_mean": 35.4246575342,
"line_max": 142,
"alpha_frac": 0.5665663783,
"autogenerated": false,
"ratio": 3.6474622770919067,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4714028655391907,
"avg_score": null,
"num_lines": null
} |
import sys, os # Python path kludge - omit these 2 lines if BrickPython is installed.
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))))
from BrickPython.TkApplication import TkApplication
from BrickPython.Sensor import Sensor, TouchSensor, LightSensor,\
UltrasonicSensor
import logging
class App(TkApplication):
    '''Demo BrickPython application: spins all four motors at a constant
    speed and logs every sensor change (Python 2 codebase).
    '''
    def __init__(self):
        # Sensor ports 1-3 are configured with their sensor types; the dict
        # keys double as the sensor ids used below.
        settings = {'1': LightSensor, '2': TouchSensor, '3': UltrasonicSensor }
        TkApplication.__init__(self, settings)
        self.root.wm_title("Trial running")
        for c in "ABCD":
            self.motor(c).zeroPosition()
            # Keep each motor turning at 180 (units per the framework).
            self.addActionCoroutine(self.motor(c).runAtConstantSpeed(180))
        for c in settings:
            self.addSensorCoroutine(self.showChanges(c))
    def showChanges(self, sensorId):
        # Coroutine: print the sensor each time its value changes.
        sensor = self.sensor(sensorId)
        while True:
            for i in sensor.waitForChange(): yield
            print sensor
    def showSensorValues(self, sensorId):
        # Coroutine: print the sensor once a second.
        # NOTE(review): defined but never scheduled by __init__ -- dead code
        # or intended as an alternative to showChanges; confirm.
        sensor = self.sensor(sensorId)
        while True:
            for i in self.waitMilliseconds(1000): yield
            print sensor
if __name__ == "__main__":
    logging.basicConfig(format='%(message)s', level=logging.DEBUG) # All log messages printed to console.
    logging.info( "Starting" )
    # Run the Tk event loop until the window is closed.
    app = App()
    app.mainloop()
| {
"repo_name": "charlesweir/BrickPython",
"path": "ExamplePrograms/TrialApp.py",
"copies": "1",
"size": "1443",
"license": "mit",
"hash": -5511892959312622000,
"line_mean": 32.5581395349,
"line_max": 105,
"alpha_frac": 0.6507276507,
"autogenerated": false,
"ratio": 3.738341968911917,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.98475779359138,
"avg_score": 0.008298336739623281,
"num_lines": 43
} |
"""App."""
import os
import altair as alt
import streamlit as st
import i18n
lang = os.environ.get('LANG') or 'en'
i18n.set('filename_format', '{locale}.{format}')
i18n.set('locale', lang)
i18n.set('fallback', 'en')
i18n.load_path.append(os.path.dirname(__file__) + '/../locales')
from ..model.parameters import Parameters
from ..model.sir import Sir
from ..model.ppe import PPE
from .charts import (
build_admits_chart,
build_census_chart,
build_sim_sir_w_date_chart,
)
from .st_display import (
display_download_link,
display_excel_download_link,
display_footer,
display_header,
display_sidebar,
hide_menu_style,
)
from ..constants import (
DOCS_URL,
)
def main():
    """Render the CHIME Streamlit page: sidebar inputs, model charts
    (admissions, census, SIR), CSV download links, and PPE resources."""
    # This is somewhat dangerous:
    # Hide the main menu with "Rerun", "run on Save", "clear cache", and "record a screencast"
    # This should not be hidden in prod, but removed
    # In dev, this should be shown
    st.markdown(hide_menu_style, unsafe_allow_html=True)

    # Defaults come from the environment; the sidebar lets the user override.
    d = Parameters.create(os.environ, [])
    ppe = PPE(os.environ)
    p = display_sidebar(st, d)
    m = Sir(p)

    # Bail out early on parameter combinations the model flags as unusable.
    if not m.reasonable_model_parameters:
        st.subheader("Based on the current parameters, estimated pre-mitigation doubling time is greater than 15 days. Try different parameters.")
        return

    display_header(st, m, p)

    # --- New admissions chart + CSV download ---
    st.subheader(i18n.t("app-new-admissions-title"))
    st.markdown(i18n.t("app-new-admissions-text"))
    admits_chart = build_admits_chart(alt=alt, admits_floor_df=m.admits_floor_df, max_y_axis=p.max_y_axis, use_log_scale=p.use_log_scale)
    st.altair_chart(admits_chart, use_container_width=True)
    display_download_link(
        st,
        p,
        filename=f"{p.current_date}_projected_admits.csv",
        df=m.admits_df,
    )

    # --- Census (admitted patients) chart + CSV download ---
    st.subheader(i18n.t("app-admitted-patients-title"))
    st.markdown(i18n.t("app-admitted-patients-text"))
    census_chart = build_census_chart(alt=alt, census_floor_df=m.census_floor_df, max_y_axis=p.max_y_axis, use_log_scale=p.use_log_scale)
    st.altair_chart(census_chart, use_container_width=True)
    display_download_link(
        st,
        p,
        filename=f"{p.current_date}_projected_census.csv",
        df=m.census_df,
    )

    # --- PPE calculator spreadsheet, census export, and documentation links ---
    st.subheader(i18n.t("app-PPE-title"))
    st.markdown(i18n.t("app-PPE-text"))
    display_excel_download_link(st, ppe.filename, ppe.src)
    st.markdown(i18n.t("app-PPE-text2"))
    display_download_link(
        st,
        p,
        filename=f"{p.current_date}_projected_census_for_ppe_calculator.csv",
        df=m.ppe_df,
    )
    st.markdown(
        i18n.t("app-PPE-documentation").format(
            link_to_docs="{docs_url}/ppe-calculator".format(docs_url=DOCS_URL),
            link_to_tutorial="{docs_url}/ppe-calculator/ppe-basic-tutorial".format(docs_url=DOCS_URL),
        ),
        unsafe_allow_html=True
    )

    # --- Raw SIR curves chart + CSV download ---
    st.subheader(i18n.t("app-SIR-title"))
    st.markdown(i18n.t("app-SIR-text"))
    sim_sir_w_date_chart = build_sim_sir_w_date_chart(alt=alt, sim_sir_w_date_floor_df=m.sim_sir_w_date_floor_df, use_log_scale=p.use_log_scale)
    st.altair_chart(sim_sir_w_date_chart, use_container_width=True)
    display_download_link(
        st,
        p,
        filename=f"{p.current_date}_sim_sir_w_date.csv",
        df=m.sim_sir_w_date_df,
    )

    display_footer(st)
| {
"repo_name": "CodeForPhilly/chime",
"path": "src/penn_chime/view/st_app.py",
"copies": "1",
"size": "3332",
"license": "mit",
"hash": 7359756714632330000,
"line_mean": 30.1401869159,
"line_max": 146,
"alpha_frac": 0.6494597839,
"autogenerated": false,
"ratio": 2.8333333333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8957121335409111,
"avg_score": 0.005134356364844523,
"num_lines": 107
} |
'''App
========
This module provides an App class used to run a Moa experiment.
'''
import os
from os import path
import tempfile
import json
import kivy
from kivy.properties import StringProperty, ObjectProperty
from kivy.app import App
from kivy.lang import Builder
from kivy import resources
import moa.factory_registers
from moa.compat import decode_dict, PY2
from moa.logger import Logger
from moa.base import MoaBase
__all__ = ('MoaApp', )
class MoaApp(MoaBase, App):
    '''App class runs Moa experiments as well as the Kivy GUI.
    '''

    root = ObjectProperty(None, allownone=True, rebind=True)
    '''The root GUI widget used by Kivy for the UI. Read only.
    '''

    root_stage = ObjectProperty(None, allownone=True, rebind=True)
    ''' The root :class:`~moa.stage.MoaStage` that contains the experiment.
    Must be manually set.
    '''

    data_directory = StringProperty('')
    ''' The directory where application data files are stored. This path
    is automatically added to the kivy search path.
    It must be set by the application if used.
    '''

    recovery_directory = StringProperty('')
    '''The recovery directory to use with :meth:`dump_recovery` if
    not empty. This is where recovery files are saved by default.
    '''

    recovery_filename = StringProperty('')
    '''The filename of the last recovery file written. It is automatically
    set by :meth:`dump_recovery`.
    '''

    def __init__(self, **kw):
        super(MoaApp, self).__init__(**kw)
        # Load the Moa widget styles shipped alongside this module.
        Builder.load_file(path.join(path.dirname(__file__),
                                    'data', 'moa_style.kv'))

        # Keep the kivy resource search path in sync with data_directory,
        # both now and whenever the property changes later.
        def add_data_directory(*largs):
            if not self.data_directory:
                return
            resources.resource_add_path(path.expanduser(self.data_directory))
        self.fbind('data_directory', add_data_directory)
        add_data_directory()

    def dump_recovery(self, stage=None, save_unnamed_stages=True,
                      include_knsname=True, prefix='', directory=''):
        '''Dumps the name and value for all the properties listed in
        :attr:`~moa.stage.MoaStage.restore_properties` for all the stages
        starting with and descending from ``stage`` into a
        uniquely named json file.

        The output file extension is `mrec`.

        :Parameters:

            `stage`: :class:`~moa.stage.MoaStage`
                The root stage from where to start to recursively dump the
                properties. If None, :attr:`root_stage` is used.
            `save_unnamed_stages`: bool
                Whether to also save stages with no knsname. Defaults to True.
            `include_knsname`: bool
                Whether the knsname should be dumped along with the properties
                even if not provided in the
                :attr:`~moa.stage.MoaStage.restore_properties` list. Defaults
                to True.
            `prefix`: str
                The prefix to use for the output filename.
            `directory`: str
                The directory in which to save the output file. If empty,
                :attr:`recovery_directory` is used.

        :Returns:
            The filename of the created recovery file.

        .. note::
            Unnamed stages (with empty knsname) will be included, but will be
            given a empty dict.

        For example::

            >>> app = MoaApp()
            >>> stage = MoaStage(knsname='stage1',
            ...     restore_properties=['started', 'finished'])
            >>> stage.add_stage(MoaStage(knsname='child1',
            ...     restore_properties=['count']))
            >>> print(app.dump_recovery(stage, prefix='example_',
            ...     directory='/'))
            'E:\\example_sh8aui.mrec'

        Its contents is::

            [
              {
                "finished": false,
                "knsname": "stage1",
                "started": false
              },
              [
                {
                  "count": 0,
                  "knsname": "child1"
                }
              ]
            ]
        '''
        if not stage:
            stage = self.root_stage
        if not stage:
            raise ValueError('A root stage was not provided')
        if not directory:
            directory = self.recovery_directory
        if not directory or not path.isdir(directory):
            raise ValueError(
                'A valid recovery directory path was not provided')

        def walk_stages(stage):
            '''Returns a list, where at each level, starting from the root,
            there's a dict describing the recoverable states of the stage
            followed by a list of the recoverable states of the children, for
            each child. E.g::

                [root,
                 [child1,
                  [child1.1,
                   [child1.1.1], [child1.1.2]]],
                 [child2,
                  [child2.1,
                   [child2.1.1]],
                  [child2.2]],
                 ...]
            '''
            d = {}
            if stage.knsname or save_unnamed_stages:
                # Prefer values already waiting in restored_properties over
                # the live property values.
                restored = stage.restored_properties
                d = {k: restored.get(k, getattr(stage, k))
                     for k in stage.restore_properties}
                if include_knsname:
                    d['knsname'] = stage.knsname
            state = [d]
            children = []
            for child in stage.stages:
                if child.knsname or save_unnamed_stages:
                    children.append(walk_stages(child))
                else:
                    # Skipped child: keep a placeholder so positions line up
                    # with stage.stages on recovery.
                    children.append([{}])
            if children:
                state.extend(children)
            return state

        directory = path.abspath(path.expanduser(directory))
        # mkstemp guarantees a unique filename; we only want the name, so
        # close the OS-level handle and rewrite the file through json.dump.
        fh, fn = tempfile.mkstemp(suffix='.mrec', prefix=prefix,
                                  dir=directory)
        os.close(fh)
        d = {'encoding': 'utf-8'} if PY2 else {}
        with open(fn, 'w') as fh:
            json.dump(walk_stages(stage), fh, indent=2, sort_keys=True,
                      separators=(',', ': '), **d)
        self.recovery_filename = fn
        return fn

    def load_recovery(
            self, filename='', stage=None, recover_unnamed_stages=True,
            verify=True):
        '''Recovers the properties from a json file created by
        :meth:`dump_recovery` and restores them to ``stage`` and it's children
        stages recursively.

        For each stage, the recovered dict is stored to
        :attr:`~moa.stage.MoaStage.restored_properties`. `knsname` if present
        is always removed before recovering.

        :Parameters:

            `filename`: str
                The full filename of the json file. If empty it uses
                :attr:`recovery_filename`. Defaults to empty string.
            `stage`: :class:`~moa.stage.MoaStage`
                The root stage to which the attributes will be restored.
                If None, :attr:`root_stage` is used. Defaults to None.
            `recover_unnamed_stages`: bool
                Whether to recover stages that have no knsname. Defaults to
                True.
            `verify`: bool
                Whether to verify that the recovered stage structure match the
                structure of the ``stage`` passed in. If True, the knsnames of
                all the stages must match and the number of stages must match.

        Matching and recovery is performed using position in the file
        and in :attr:`~moa.stage.MoaStage.stages`.

        For example, recovering using the example file generated in
        :meth:`dump_recovery` ::

            >>> from moa.stage import MoaStage
            >>> from moa.app import MoaApp
            >>> app = MoaApp()
            >>> stage = MoaStage(knsname='stage1')
            >>> child1 = MoaStage(knsname='child1')
            >>> stage.add_stage(child1)
            >>> app.load_recovery('E:\\example_sh8aui.mrec', stage=stage)
            >>> print(stage.restored_properties, child1.restored_properties)
            ({'started': False, 'finished': False}, {'count': 0})
        '''
        if stage is None:
            stage = self.root_stage
        if stage is None:
            raise ValueError('Root stage was not provided')
        if not filename:
            filename = self.recovery_filename
        if not filename or not path.isfile(filename):
            raise ValueError(
                'A valid recovery filename was not provided')
        with open(filename) as fh:
            decode = decode_dict if PY2 else None
            state = json.load(fh, object_hook=decode)

        def apply_state(stage, state):
            '''Function called recursively to apply the recovery properties
            list of dicts to the stage and substages.
            '''
            if not recover_unnamed_stages and not stage.knsname:
                return
            if not len(state):
                Logger.debug(
                    "Cannot find recovery info for stage {}".format(stage))
                return
            # First element is this stage's dict; the remainder (if any) are
            # the per-child lists, positionally aligned with stage.stages.
            root_state = state.pop(0)
            if not isinstance(root_state, dict):
                raise Exception('Cannot recover from "{}"'.format(root_state))
            if (verify and 'knsname' in root_state and
                    root_state['knsname'] != stage.knsname):
                raise Exception(
                    'Recovered knsname "{}" and stage knsname "{}", are not '
                    "the same".format(root_state['knsname'], stage.knsname))
            if 'knsname' in root_state:
                del root_state['knsname']
            if not len(state):
                if len(stage.stages):
                    Logger.debug(
                        "Cannot find recovery info for children of {}".
                        format(stage))
            elif len(stage.stages) != len(state):
                raise Exception(
                    "The number of children stages ({}) for {} "
                    "doesn't match the number of stages recovered ({})"
                    .format(len(stage.stages), stage, len(state)))
            else:
                for child_stage, child_state in zip(stage.stages, state):
                    apply_state(child_stage, child_state)
            stage.restored_properties = root_state
        apply_state(stage, state)
| {
"repo_name": "matham/moa",
"path": "moa/app.py",
"copies": "1",
"size": "10612",
"license": "mit",
"hash": -7219055871107948000,
"line_mean": 36.3661971831,
"line_max": 78,
"alpha_frac": 0.5397663023,
"autogenerated": false,
"ratio": 4.550600343053174,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 284
} |
"""App
The Sebureem web-server based on Bottle
"""
from flask import url_for, redirect, request, render_template, json
from playhouse.shortcuts import model_to_dict
from sebureem import app, db
from sebureem.models import Sebura, Sebuks
@app.route('/comments/<topic>', methods=['GET', 'POST'])
def comment(topic):
    """Render the comment page for *topic*; a POST stores the submitted
    comment first (creating the topic on demand), then falls through to the
    normal listing."""
    if request.method == 'POST':
        print(request.form)
        text = request.form['text']
        print("Adding comment to topic {} : {}".format(topic, text))
        db.connect()
        topic_row, _created = Sebuks.get_or_create(name=topic)
        Sebura.create(topic=topic_row, text=text)
        db.close()
    db.connect()
    comments = Sebura.select().join(Sebuks).where(Sebuks.name == topic)
    db.close()
    return render_template('sebureem.html', topic=topic, comments=comments)
# Routes for managing comments
@app.route('/api/<topic>', methods=['GET'])
def get_comments(topic):
    """Get all comments for a given subject
    """
    print("Fetching comments for topic {}".format(topic))
    db.connect()
    record = Sebuks.get(Sebuks.name == topic)
    db.close()
    # Serialize the topic together with its comments (backrefs=True).
    return json.jsonify(model_to_dict(record, backrefs=True))
@app.route('/api/<topic>', methods=['POST'])
def post_comment(topic):
    """Post a comment to a given subject
    """
    print(request.form)
    text = request.form['text']
    print("Adding comment to topic {} : {}".format(topic, text))
    db.connect()
    # Unlike the /comments view, the topic must already exist here.
    topic_row = Sebuks.get(Sebuks.name == topic)
    Sebura.create(topic=topic_row, text=text)
    db.close()
    return redirect(url_for('get_comments', topic=topic))
@app.route('/api/<topic>/<id>', methods=['PUT'])
def edit_comment(topic, id):
    """Edit a given comment.

    BUG FIX: this view was registered for GET on the exact same rule as
    delete_comment below, so the two routes collided and one of them could
    never be dispatched.  Editing is now registered under PUT, which matches
    REST conventions and removes the conflict.  The edit itself is still
    unimplemented; the view only redirects to the topic's comment listing.
    """
    return redirect(url_for('get_comments', topic=topic))
@app.route('/api/<topic>/<id>', methods=['DELETE'])
def delete_comment(topic, id):
    """Delete a given comment.

    BUG FIX: this view was registered for GET on the exact rule and method
    already used by edit_comment above, making this endpoint unreachable.
    DELETE matches the intent and removes the conflict.  Deletion itself is
    still unimplemented; the view only redirects to the comment listing.
    """
    return redirect(url_for('get_comments', topic=topic))
| {
"repo_name": "Erwhann-Rouge/sebureem",
"path": "sebureem/views.py",
"copies": "1",
"size": "2062",
"license": "bsd-3-clause",
"hash": -8559075031679863000,
"line_mean": 25.4358974359,
"line_max": 76,
"alpha_frac": 0.6304558681,
"autogenerated": false,
"ratio": 3.4139072847682117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45443631528682116,
"avg_score": null,
"num_lines": null
} |
"""AppointmentGroups API Tests for Version 1.0.
This is a testing template for the generated AppointmentGroupsAPI Class.
"""
import unittest
import requests
import secrets
from py3canvas.apis.appointment_groups import AppointmentGroupsAPI
from py3canvas.apis.appointment_groups import Appointmentgroup
from py3canvas.apis.appointment_groups import Appointment
class TestAppointmentGroupsAPI(unittest.TestCase):
    """Tests for the AppointmentGroupsAPI.

    Generated integration-test template: tests with ``id = None  # Change
    me!!`` placeholders must be filled in with real ids before they can pass,
    and mutating (POST/PUT) tests are intentionally left as ``pass`` so the
    suite never alters the Canvas instance by default.
    """
    def setUp(self):
        # Fresh authenticated client per test, pointed at the instance
        # configured in the local secrets module.
        self.client = AppointmentGroupsAPI(secrets.instance_address, secrets.access_token)
    def test_list_appointment_groups(self):
        """Integration test for the AppointmentGroupsAPI.list_appointment_groups method."""
        r = self.client.list_appointment_groups(context_codes=None, include=None, include_past_appointments=None, scope=None)
    def test_create_appointment_group(self):
        """Integration test for the AppointmentGroupsAPI.create_appointment_group method."""
        # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
        pass
    def test_get_single_appointment_group(self):
        """Integration test for the AppointmentGroupsAPI.get_single_appointment_group method."""
        id = None # Change me!!
        r = self.client.get_single_appointment_group(id, include=None)
    def test_update_appointment_group(self):
        """Integration test for the AppointmentGroupsAPI.update_appointment_group method."""
        # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.
        pass
    def test_delete_appointment_group(self):
        """Integration test for the AppointmentGroupsAPI.delete_appointment_group method."""
        id = None # Change me!!
        r = self.client.delete_appointment_group(id, cancel_reason=None)
    def test_list_user_participants(self):
        """Integration test for the AppointmentGroupsAPI.list_user_participants method."""
        id = None # Change me!!
        r = self.client.list_user_participants(id, registration_status=None)
    def test_list_student_group_participants(self):
        """Integration test for the AppointmentGroupsAPI.list_student_group_participants method."""
        id = None # Change me!!
        r = self.client.list_student_group_participants(id, registration_status=None)
    def test_get_next_appointment(self):
        """Integration test for the AppointmentGroupsAPI.get_next_appointment method."""
        r = self.client.get_next_appointment(appointment_group_ids=None)
| {
"repo_name": "tylerclair/py3canvas",
"path": "py3canvas/tests/appointment_groups.py",
"copies": "1",
"size": "2679",
"license": "mit",
"hash": -8697798763275353000,
"line_mean": 41.2096774194,
"line_max": 126,
"alpha_frac": 0.7118327734,
"autogenerated": false,
"ratio": 4.153488372093023,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5365321145493023,
"avg_score": null,
"num_lines": null
} |
"""AppointmentGroups API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class AppointmentGroupsAPI(BaseCanvasAPI):
    """AppointmentGroups API Version 1.0.

    Thin client over the Canvas ``/api/v1/appointment_groups`` endpoints.
    Each method assembles ``path``/``data``/``params`` dicts and delegates the
    HTTP work to :meth:`BaseCanvasAPI.generic_request`.
    """

    def __init__(self, *args, **kwargs):
        """Init method for AppointmentGroupsAPI."""
        super(AppointmentGroupsAPI, self).__init__(*args, **kwargs)
        self.logger = logging.getLogger("py3canvas.AppointmentGroupsAPI")

    def list_appointment_groups(self, context_codes=None, include=None, include_past_appointments=None, scope=None):
        """
        List appointment groups.

        Retrieve the list of appointment groups that can be reserved or managed by
        the current user.
        """
        path = {}
        data = {}
        params = {}

        # OPTIONAL - scope
        """Defaults to 'reservable'"""
        if scope is not None:
            self._validate_enum(scope, ["reservable", "manageable"])
            params["scope"] = scope

        # OPTIONAL - context_codes
        """Array of context codes used to limit returned results."""
        if context_codes is not None:
            params["context_codes"] = context_codes

        # OPTIONAL - include_past_appointments
        """Defaults to false. If true, includes past appointment groups"""
        if include_past_appointments is not None:
            params["include_past_appointments"] = include_past_appointments

        # OPTIONAL - include
        """Array of additional information to include.

        "appointments":: calendar event time slots for this appointment group
        "child_events":: reservations of those time slots
        "participant_count":: number of reservations
        "reserved_times":: the event id, start time and end time of reservations
        the current user has made)
        "all_context_codes":: all context codes associated with this appointment group"""
        if include is not None:
            self._validate_enum(include, ["appointments", "child_events", "participant_count", "reserved_times", "all_context_codes"])
            params["include"] = include

        self.logger.debug("GET /api/v1/appointment_groups with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("GET", "/api/v1/appointment_groups".format(**path), data=data, params=params, no_data=True)

    def create_appointment_group(self, appointment_group_title, appointment_group_context_codes, appointment_group_description=None, appointment_group_location_address=None, appointment_group_location_name=None, appointment_group_max_appointments_per_participant=None, appointment_group_min_appointments_per_participant=None, appointment_group_new_appointments_X=None, appointment_group_participant_visibility=None, appointment_group_participants_per_appointment=None, appointment_group_publish=None, appointment_group_sub_context_codes=None):
        """
        Create an appointment group.

        Create and return a new appointment group. If new_appointments are
        specified, the response will return a new_appointments array (same format
        as appointments array, see "List appointment groups" action)
        """
        path = {}
        data = {}
        params = {}

        # REQUIRED - appointment_group[context_codes]
        """Array of context codes (courses, e.g. course_1) this group should be
        linked to (1 or more). Users in the course(s) with appropriate permissions
        will be able to sign up for this appointment group."""
        data["appointment_group[context_codes]"] = appointment_group_context_codes

        # OPTIONAL - appointment_group[sub_context_codes]
        """Array of sub context codes (course sections or a single group category)
        this group should be linked to. Used to limit the appointment group to
        particular sections. If a group category is specified, students will sign
        up in groups and the participant_type will be "Group" instead of "User"."""
        if appointment_group_sub_context_codes is not None:
            data["appointment_group[sub_context_codes]"] = appointment_group_sub_context_codes

        # REQUIRED - appointment_group[title]
        """Short title for the appointment group."""
        data["appointment_group[title]"] = appointment_group_title

        # OPTIONAL - appointment_group[description]
        """Longer text description of the appointment group."""
        if appointment_group_description is not None:
            data["appointment_group[description]"] = appointment_group_description

        # OPTIONAL - appointment_group[location_name]
        """Location name of the appointment group."""
        if appointment_group_location_name is not None:
            data["appointment_group[location_name]"] = appointment_group_location_name

        # OPTIONAL - appointment_group[location_address]
        """Location address."""
        if appointment_group_location_address is not None:
            data["appointment_group[location_address]"] = appointment_group_location_address

        # OPTIONAL - appointment_group[publish]
        """Indicates whether this appointment group should be published (i.e. made
        available for signup). Once published, an appointment group cannot be
        unpublished. Defaults to false."""
        if appointment_group_publish is not None:
            data["appointment_group[publish]"] = appointment_group_publish

        # OPTIONAL - appointment_group[participants_per_appointment]
        """Maximum number of participants that may register for each time slot.
        Defaults to null (no limit)."""
        if appointment_group_participants_per_appointment is not None:
            data["appointment_group[participants_per_appointment]"] = appointment_group_participants_per_appointment

        # OPTIONAL - appointment_group[min_appointments_per_participant]
        """Minimum number of time slots a user must register for. If not set, users
        do not need to sign up for any time slots."""
        if appointment_group_min_appointments_per_participant is not None:
            data["appointment_group[min_appointments_per_participant]"] = appointment_group_min_appointments_per_participant

        # OPTIONAL - appointment_group[max_appointments_per_participant]
        """Maximum number of time slots a user may register for."""
        if appointment_group_max_appointments_per_participant is not None:
            data["appointment_group[max_appointments_per_participant]"] = appointment_group_max_appointments_per_participant

        # OPTIONAL - appointment_group[new_appointments][X]
        """Nested array of start time/end time pairs indicating time slots for this
        appointment group. Refer to the example request."""
        if appointment_group_new_appointments_X is not None:
            data["appointment_group[new_appointments][X]"] = appointment_group_new_appointments_X

        # OPTIONAL - appointment_group[participant_visibility]
        """"private":: participants cannot see who has signed up for a particular
        time slot
        "protected":: participants can see who has signed up. Defaults to
        "private"."""
        if appointment_group_participant_visibility is not None:
            self._validate_enum(appointment_group_participant_visibility, ["private", "protected"])
            data["appointment_group[participant_visibility]"] = appointment_group_participant_visibility

        self.logger.debug("POST /api/v1/appointment_groups with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("POST", "/api/v1/appointment_groups".format(**path), data=data, params=params, no_data=True)

    def get_single_appointment_group(self, id, include=None):
        """
        Get a single appointment group.

        Returns information for a single appointment group
        """
        path = {}
        data = {}
        params = {}

        # REQUIRED - PATH - id
        """ID"""
        path["id"] = id

        # OPTIONAL - include
        """Array of additional information to include. See include[] argument of
        "List appointment groups" action.

        "child_events":: reservations of time slots time slots
        "appointments":: will always be returned
        "all_context_codes":: all context codes associated with this appointment group"""
        if include is not None:
            self._validate_enum(include, ["child_events", "appointments", "all_context_codes"])
            params["include"] = include

        self.logger.debug("GET /api/v1/appointment_groups/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("GET", "/api/v1/appointment_groups/{id}".format(**path), data=data, params=params, no_data=True)

    def update_appointment_group(self, id, appointment_group_context_codes, appointment_group_description=None, appointment_group_location_address=None, appointment_group_location_name=None, appointment_group_max_appointments_per_participant=None, appointment_group_min_appointments_per_participant=None, appointment_group_new_appointments_X=None, appointment_group_participant_visibility=None, appointment_group_participants_per_appointment=None, appointment_group_publish=None, appointment_group_sub_context_codes=None, appointment_group_title=None):
        """
        Update an appointment group.

        Update and return an appointment group. If new_appointments are specified,
        the response will return a new_appointments array (same format as
        appointments array, see "List appointment groups" action).
        """
        path = {}
        data = {}
        params = {}

        # REQUIRED - PATH - id
        """ID"""
        path["id"] = id

        # REQUIRED - appointment_group[context_codes]
        """Array of context codes (courses, e.g. course_1) this group should be
        linked to (1 or more). Users in the course(s) with appropriate permissions
        will be able to sign up for this appointment group."""
        data["appointment_group[context_codes]"] = appointment_group_context_codes

        # OPTIONAL - appointment_group[sub_context_codes]
        """Array of sub context codes (course sections or a single group category)
        this group should be linked to. Used to limit the appointment group to
        particular sections. If a group category is specified, students will sign
        up in groups and the participant_type will be "Group" instead of "User"."""
        if appointment_group_sub_context_codes is not None:
            data["appointment_group[sub_context_codes]"] = appointment_group_sub_context_codes

        # OPTIONAL - appointment_group[title]
        """Short title for the appointment group."""
        if appointment_group_title is not None:
            data["appointment_group[title]"] = appointment_group_title

        # OPTIONAL - appointment_group[description]
        """Longer text description of the appointment group."""
        if appointment_group_description is not None:
            data["appointment_group[description]"] = appointment_group_description

        # OPTIONAL - appointment_group[location_name]
        """Location name of the appointment group."""
        if appointment_group_location_name is not None:
            data["appointment_group[location_name]"] = appointment_group_location_name

        # OPTIONAL - appointment_group[location_address]
        """Location address."""
        if appointment_group_location_address is not None:
            data["appointment_group[location_address]"] = appointment_group_location_address

        # OPTIONAL - appointment_group[publish]
        """Indicates whether this appointment group should be published (i.e. made
        available for signup). Once published, an appointment group cannot be
        unpublished. Defaults to false."""
        if appointment_group_publish is not None:
            data["appointment_group[publish]"] = appointment_group_publish

        # OPTIONAL - appointment_group[participants_per_appointment]
        """Maximum number of participants that may register for each time slot.
        Defaults to null (no limit)."""
        if appointment_group_participants_per_appointment is not None:
            data["appointment_group[participants_per_appointment]"] = appointment_group_participants_per_appointment

        # OPTIONAL - appointment_group[min_appointments_per_participant]
        """Minimum number of time slots a user must register for. If not set, users
        do not need to sign up for any time slots."""
        if appointment_group_min_appointments_per_participant is not None:
            data["appointment_group[min_appointments_per_participant]"] = appointment_group_min_appointments_per_participant

        # OPTIONAL - appointment_group[max_appointments_per_participant]
        """Maximum number of time slots a user may register for."""
        if appointment_group_max_appointments_per_participant is not None:
            data["appointment_group[max_appointments_per_participant]"] = appointment_group_max_appointments_per_participant

        # OPTIONAL - appointment_group[new_appointments][X]
        """Nested array of start time/end time pairs indicating time slots for this
        appointment group. Refer to the example request."""
        if appointment_group_new_appointments_X is not None:
            data["appointment_group[new_appointments][X]"] = appointment_group_new_appointments_X

        # OPTIONAL - appointment_group[participant_visibility]
        """"private":: participants cannot see who has signed up for a particular
        time slot
        "protected":: participants can see who has signed up. Defaults to "private"."""
        if appointment_group_participant_visibility is not None:
            self._validate_enum(appointment_group_participant_visibility, ["private", "protected"])
            data["appointment_group[participant_visibility]"] = appointment_group_participant_visibility

        self.logger.debug("PUT /api/v1/appointment_groups/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("PUT", "/api/v1/appointment_groups/{id}".format(**path), data=data, params=params, no_data=True)

    def delete_appointment_group(self, id, cancel_reason=None):
        """
        Delete an appointment group.

        Delete an appointment group (and associated time slots and reservations)
        and return the deleted group
        """
        path = {}
        data = {}
        params = {}

        # REQUIRED - PATH - id
        """ID"""
        path["id"] = id

        # OPTIONAL - cancel_reason
        """Reason for deleting/canceling the appointment group."""
        if cancel_reason is not None:
            params["cancel_reason"] = cancel_reason

        self.logger.debug("DELETE /api/v1/appointment_groups/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("DELETE", "/api/v1/appointment_groups/{id}".format(**path), data=data, params=params, no_data=True)

    def list_user_participants(self, id, registration_status=None):
        """
        List user participants.

        List users that are (or may be) participating in this appointment group.
        Refer to the Users API for the response fields. Returns no results for
        appointment groups with the "Group" participant_type.
        """
        path = {}
        data = {}
        params = {}

        # REQUIRED - PATH - id
        """ID"""
        path["id"] = id

        # OPTIONAL - registration_status
        """Limits results to the a given participation status, defaults to 'all'"""
        if registration_status is not None:
            # BUGFIX: the generated enum duplicated "registered"; the Canvas API
            # documents the allowed values as all/registered/unregistered.
            self._validate_enum(registration_status, ["all", "registered", "unregistered"])
            params["registration_status"] = registration_status

        self.logger.debug("GET /api/v1/appointment_groups/{id}/users with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("GET", "/api/v1/appointment_groups/{id}/users".format(**path), data=data, params=params, no_data=True)

    def list_student_group_participants(self, id, registration_status=None):
        """
        List student group participants.

        List student groups that are (or may be) participating in this appointment
        group. Refer to the Groups API for the response fields. Returns no results
        for appointment groups with the "User" participant_type.
        """
        path = {}
        data = {}
        params = {}

        # REQUIRED - PATH - id
        """ID"""
        path["id"] = id

        # OPTIONAL - registration_status
        """Limits results to the a given participation status, defaults to 'all'"""
        if registration_status is not None:
            # BUGFIX: the generated enum duplicated "registered"; the Canvas API
            # documents the allowed values as all/registered/unregistered.
            self._validate_enum(registration_status, ["all", "registered", "unregistered"])
            params["registration_status"] = registration_status

        self.logger.debug("GET /api/v1/appointment_groups/{id}/groups with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("GET", "/api/v1/appointment_groups/{id}/groups".format(**path), data=data, params=params, no_data=True)

    def get_next_appointment(self, appointment_group_ids=None):
        """
        Get next appointment.

        Return the next appointment available to sign up for. The appointment
        is returned in a one-element array. If no future appointments are
        available, an empty array is returned.
        """
        path = {}
        data = {}
        params = {}

        # OPTIONAL - appointment_group_ids
        """List of ids of appointment groups to search."""
        if appointment_group_ids is not None:
            params["appointment_group_ids"] = appointment_group_ids

        self.logger.debug("GET /api/v1/appointment_groups/next_appointment with query params: {params} and form data: {data}".format(params=params, data=data, **path))
        return self.generic_request("GET", "/api/v1/appointment_groups/next_appointment".format(**path), data=data, params=params, all_pages=True)
class Appointmentgroup(BaseModel):
    """Appointmentgroup Model.

    Plain data holder mirroring the Canvas AppointmentGroup JSON object.
    Every field is exposed as a property whose setter only mutates the local
    object; no change is propagated back to the Canvas instance (the setters
    log a warning to that effect). Note: ``Logger.warning`` is used instead of
    the deprecated ``Logger.warn`` alias.
    """

    def __init__(self, participant_visibility=None, updated_at=None, context_codes=None, participant_type=None, end_at=None, id=None, participants_per_appointment=None, title=None, new_appointments=None, min_appointments_per_participant=None, appointments_count=None, start_at=None, description=None, participant_count=None, workflow_state=None, html_url=None, location_address=None, appointments=None, reserved_times=None, location_name=None, max_appointments_per_participant=None, url=None, created_at=None, sub_context_codes=None, requiring_action=None):
        """Init method for Appointmentgroup class."""
        self._participant_visibility = participant_visibility
        self._updated_at = updated_at
        self._context_codes = context_codes
        self._participant_type = participant_type
        self._end_at = end_at
        self._id = id
        self._participants_per_appointment = participants_per_appointment
        self._title = title
        self._new_appointments = new_appointments
        self._min_appointments_per_participant = min_appointments_per_participant
        self._appointments_count = appointments_count
        self._start_at = start_at
        self._description = description
        self._participant_count = participant_count
        self._workflow_state = workflow_state
        self._html_url = html_url
        self._location_address = location_address
        self._appointments = appointments
        self._reserved_times = reserved_times
        self._location_name = location_name
        self._max_appointments_per_participant = max_appointments_per_participant
        self._url = url
        self._created_at = created_at
        self._sub_context_codes = sub_context_codes
        self._requiring_action = requiring_action

        self.logger = logging.getLogger('py3canvas.Appointmentgroup')

    @property
    def participant_visibility(self):
        """'private' means participants cannot see who has signed up for a particular time slot, 'protected' means that they can."""
        return self._participant_visibility

    @participant_visibility.setter
    def participant_visibility(self, value):
        """Setter for participant_visibility property."""
        self.logger.warning("Setting values on participant_visibility will NOT update the remote Canvas instance.")
        self._participant_visibility = value

    @property
    def updated_at(self):
        """When the appointment group was last updated."""
        return self._updated_at

    @updated_at.setter
    def updated_at(self, value):
        """Setter for updated_at property."""
        self.logger.warning("Setting values on updated_at will NOT update the remote Canvas instance.")
        self._updated_at = value

    @property
    def context_codes(self):
        """The context codes (i.e. courses) this appointment group belongs to. Only people in these courses will be eligible to sign up."""
        return self._context_codes

    @context_codes.setter
    def context_codes(self, value):
        """Setter for context_codes property."""
        self.logger.warning("Setting values on context_codes will NOT update the remote Canvas instance.")
        self._context_codes = value

    @property
    def participant_type(self):
        """Indicates how participants sign up for the appointment group, either as individuals ('User') or in student groups ('Group'). Related to sub_context_codes (i.e. 'Group' signups always have a single group category)."""
        return self._participant_type

    @participant_type.setter
    def participant_type(self, value):
        """Setter for participant_type property."""
        self.logger.warning("Setting values on participant_type will NOT update the remote Canvas instance.")
        self._participant_type = value

    @property
    def end_at(self):
        """The end of the last time slot in the appointment group."""
        return self._end_at

    @end_at.setter
    def end_at(self, value):
        """Setter for end_at property."""
        self.logger.warning("Setting values on end_at will NOT update the remote Canvas instance.")
        self._end_at = value

    @property
    def id(self):
        """The ID of the appointment group."""
        return self._id

    @id.setter
    def id(self, value):
        """Setter for id property."""
        self.logger.warning("Setting values on id will NOT update the remote Canvas instance.")
        self._id = value

    @property
    def participants_per_appointment(self):
        """Maximum number of participants that may register for each time slot, or null if no limit."""
        return self._participants_per_appointment

    @participants_per_appointment.setter
    def participants_per_appointment(self, value):
        """Setter for participants_per_appointment property."""
        self.logger.warning("Setting values on participants_per_appointment will NOT update the remote Canvas instance.")
        self._participants_per_appointment = value

    @property
    def title(self):
        """The title of the appointment group."""
        return self._title

    @title.setter
    def title(self, value):
        """Setter for title property."""
        self.logger.warning("Setting values on title will NOT update the remote Canvas instance.")
        self._title = value

    @property
    def new_appointments(self):
        """Newly created time slots (same format as appointments above). Only returned in Create/Update responses where new time slots have been added."""
        return self._new_appointments

    @new_appointments.setter
    def new_appointments(self, value):
        """Setter for new_appointments property."""
        self.logger.warning("Setting values on new_appointments will NOT update the remote Canvas instance.")
        self._new_appointments = value

    @property
    def min_appointments_per_participant(self):
        """Minimum number of time slots a user must register for. If not set, users do not need to sign up for any time slots."""
        return self._min_appointments_per_participant

    @min_appointments_per_participant.setter
    def min_appointments_per_participant(self, value):
        """Setter for min_appointments_per_participant property."""
        self.logger.warning("Setting values on min_appointments_per_participant will NOT update the remote Canvas instance.")
        self._min_appointments_per_participant = value

    @property
    def appointments_count(self):
        """Number of time slots in this appointment group."""
        return self._appointments_count

    @appointments_count.setter
    def appointments_count(self, value):
        """Setter for appointments_count property."""
        self.logger.warning("Setting values on appointments_count will NOT update the remote Canvas instance.")
        self._appointments_count = value

    @property
    def start_at(self):
        """The start of the first time slot in the appointment group."""
        return self._start_at

    @start_at.setter
    def start_at(self, value):
        """Setter for start_at property."""
        self.logger.warning("Setting values on start_at will NOT update the remote Canvas instance.")
        self._start_at = value

    @property
    def description(self):
        """The text description of the appointment group."""
        return self._description

    @description.setter
    def description(self, value):
        """Setter for description property."""
        self.logger.warning("Setting values on description will NOT update the remote Canvas instance.")
        self._description = value

    @property
    def participant_count(self):
        """The number of participant who have reserved slots (see include[] argument)."""
        return self._participant_count

    @participant_count.setter
    def participant_count(self, value):
        """Setter for participant_count property."""
        self.logger.warning("Setting values on participant_count will NOT update the remote Canvas instance.")
        self._participant_count = value

    @property
    def workflow_state(self):
        """Current state of the appointment group ('pending', 'active' or 'deleted'). 'pending' indicates that it has not been published yet and is invisible to participants."""
        return self._workflow_state

    @workflow_state.setter
    def workflow_state(self, value):
        """Setter for workflow_state property."""
        self.logger.warning("Setting values on workflow_state will NOT update the remote Canvas instance.")
        self._workflow_state = value

    @property
    def html_url(self):
        """URL for a user to view this appointment group."""
        return self._html_url

    @html_url.setter
    def html_url(self, value):
        """Setter for html_url property."""
        self.logger.warning("Setting values on html_url will NOT update the remote Canvas instance.")
        self._html_url = value

    @property
    def location_address(self):
        """The address of the appointment group's location."""
        return self._location_address

    @location_address.setter
    def location_address(self, value):
        """Setter for location_address property."""
        self.logger.warning("Setting values on location_address will NOT update the remote Canvas instance.")
        self._location_address = value

    @property
    def appointments(self):
        """Calendar Events representing the time slots (see include[] argument) Refer to the Calendar Events API for more information."""
        return self._appointments

    @appointments.setter
    def appointments(self, value):
        """Setter for appointments property."""
        self.logger.warning("Setting values on appointments will NOT update the remote Canvas instance.")
        self._appointments = value

    @property
    def reserved_times(self):
        """The start and end times of slots reserved by the current user as well as the id of the calendar event for the reservation (see include[] argument)."""
        return self._reserved_times

    @reserved_times.setter
    def reserved_times(self, value):
        """Setter for reserved_times property."""
        self.logger.warning("Setting values on reserved_times will NOT update the remote Canvas instance.")
        self._reserved_times = value

    @property
    def location_name(self):
        """The location name of the appointment group."""
        return self._location_name

    @location_name.setter
    def location_name(self, value):
        """Setter for location_name property."""
        self.logger.warning("Setting values on location_name will NOT update the remote Canvas instance.")
        self._location_name = value

    @property
    def max_appointments_per_participant(self):
        """Maximum number of time slots a user may register for, or null if no limit."""
        return self._max_appointments_per_participant

    @max_appointments_per_participant.setter
    def max_appointments_per_participant(self, value):
        """Setter for max_appointments_per_participant property."""
        self.logger.warning("Setting values on max_appointments_per_participant will NOT update the remote Canvas instance.")
        self._max_appointments_per_participant = value

    @property
    def url(self):
        """URL for this appointment group (to update, delete, etc.)."""
        return self._url

    @url.setter
    def url(self, value):
        """Setter for url property."""
        self.logger.warning("Setting values on url will NOT update the remote Canvas instance.")
        self._url = value

    @property
    def created_at(self):
        """When the appointment group was created."""
        return self._created_at

    @created_at.setter
    def created_at(self, value):
        """Setter for created_at property."""
        self.logger.warning("Setting values on created_at will NOT update the remote Canvas instance.")
        self._created_at = value

    @property
    def sub_context_codes(self):
        """The sub-context codes (i.e. course sections and group categories) this appointment group is restricted to."""
        return self._sub_context_codes

    @sub_context_codes.setter
    def sub_context_codes(self, value):
        """Setter for sub_context_codes property."""
        self.logger.warning("Setting values on sub_context_codes will NOT update the remote Canvas instance.")
        self._sub_context_codes = value

    @property
    def requiring_action(self):
        """Boolean indicating whether the current user needs to sign up for this appointment group (i.e. it's reservable and the min_appointments_per_participant limit has not been met by this user)."""
        return self._requiring_action

    @requiring_action.setter
    def requiring_action(self, value):
        """Setter for requiring_action property."""
        self.logger.warning("Setting values on requiring_action will NOT update the remote Canvas instance.")
        self._requiring_action = value
class Appointment(BaseModel):
    """Appointment Model.

    Date and time for an appointment. Setters only mutate the local object and
    warn that nothing is written back to Canvas; ``Logger.warning`` is used
    instead of the deprecated ``Logger.warn`` alias.
    """

    def __init__(self, start_at=None, id=None, end_at=None):
        """Init method for Appointment class."""
        self._start_at = start_at
        self._id = id
        self._end_at = end_at

        self.logger = logging.getLogger('py3canvas.Appointment')

    @property
    def start_at(self):
        """Start time for the appointment."""
        return self._start_at

    @start_at.setter
    def start_at(self, value):
        """Setter for start_at property."""
        self.logger.warning("Setting values on start_at will NOT update the remote Canvas instance.")
        self._start_at = value

    @property
    def id(self):
        """The appointment identifier."""
        return self._id

    @id.setter
    def id(self, value):
        """Setter for id property."""
        self.logger.warning("Setting values on id will NOT update the remote Canvas instance.")
        self._id = value

    @property
    def end_at(self):
        """End time for the appointment."""
        return self._end_at

    @end_at.setter
    def end_at(self, value):
        """Setter for end_at property."""
        self.logger.warning("Setting values on end_at will NOT update the remote Canvas instance.")
        self._end_at = value
| {
"repo_name": "tylerclair/py3canvas",
"path": "py3canvas/apis/appointment_groups.py",
"copies": "1",
"size": "33026",
"license": "mit",
"hash": 5549870121494199000,
"line_mean": 45.1902097902,
"line_max": 557,
"alpha_frac": 0.6678980197,
"autogenerated": false,
"ratio": 4.266373853507298,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5434271873207298,
"avg_score": null,
"num_lines": null
} |
"""AppointmentGroups API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from base import BaseCanvasAPI
from base import BaseModel
class AppointmentGroupsAPI(BaseCanvasAPI):
"""AppointmentGroups API Version 1.0."""
def __init__(self, *args, **kwargs):
    """Init method for AppointmentGroupsAPI.

    Forwards all positional/keyword arguments (instance address, access
    token, ...) to the BaseCanvasAPI constructor and attaches a logger.
    """
    super(AppointmentGroupsAPI, self).__init__(*args, **kwargs)
    # NOTE(review): logger name uses the legacy "pycanvas" namespace rather
    # than "py3canvas" — confirm whether this is intentional for this copy.
    self.logger = logging.getLogger("pycanvas.AppointmentGroupsAPI")
def list_appointment_groups(self, context_codes=None, include=None, include_past_appointments=None, scope=None):
    """
    List appointment groups.

    Retrieve the list of appointment groups that can be reserved or managed by
    the current user.
    """
    path = {}
    data = {}
    params = {}

    # OPTIONAL - scope
    # BUGFIX: the generated pseudo-docstring ended in four quote characters
    # ("""Defaults to "reservable""""), which leaves an unterminated string
    # literal and is a SyntaxError; the inner word is now single-quoted.
    """Defaults to 'reservable'"""
    if scope is not None:
        self._validate_enum(scope, ["reservable", "manageable"])
        params["scope"] = scope

    # OPTIONAL - context_codes
    """Array of context codes used to limit returned results."""
    if context_codes is not None:
        params["context_codes"] = context_codes

    # OPTIONAL - include_past_appointments
    """Defaults to false. If true, includes past appointment groups"""
    if include_past_appointments is not None:
        params["include_past_appointments"] = include_past_appointments

    # OPTIONAL - include
    """Array of additional information to include.

    "appointments":: calendar event time slots for this appointment group
    "child_events":: reservations of those time slots
    "participant_count":: number of reservations
    "reserved_times":: the event id, start time and end time of reservations
    the current user has made)
    "all_context_codes":: all context codes associated with this appointment group"""
    if include is not None:
        self._validate_enum(include, ["appointments", "child_events", "participant_count", "reserved_times", "all_context_codes"])
        params["include"] = include

    self.logger.debug("GET /api/v1/appointment_groups with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/appointment_groups".format(**path), data=data, params=params, no_data=True)
def create_appointment_group(self, appointment_group_title, appointment_group_context_codes, appointment_group_description=None, appointment_group_location_address=None, appointment_group_location_name=None, appointment_group_max_appointments_per_participant=None, appointment_group_min_appointments_per_participant=None, appointment_group_new_appointments_X=None, appointment_group_participant_visibility=None, appointment_group_participants_per_appointment=None, appointment_group_publish=None, appointment_group_sub_context_codes=None):
"""
Create an appointment group.
Create and return a new appointment group. If new_appointments are
specified, the response will return a new_appointments array (same format
as appointments array, see "List appointment groups" action)
"""
path = {}
data = {}
params = {}
# REQUIRED - appointment_group[context_codes]
"""Array of context codes (courses, e.g. course_1) this group should be
linked to (1 or more). Users in the course(s) with appropriate permissions
will be able to sign up for this appointment group."""
data["appointment_group[context_codes]"] = appointment_group_context_codes
# OPTIONAL - appointment_group[sub_context_codes]
"""Array of sub context codes (course sections or a single group category)
this group should be linked to. Used to limit the appointment group to
particular sections. If a group category is specified, students will sign
up in groups and the participant_type will be "Group" instead of "User"."""
if appointment_group_sub_context_codes is not None:
data["appointment_group[sub_context_codes]"] = appointment_group_sub_context_codes
# REQUIRED - appointment_group[title]
"""Short title for the appointment group."""
data["appointment_group[title]"] = appointment_group_title
# OPTIONAL - appointment_group[description]
"""Longer text description of the appointment group."""
if appointment_group_description is not None:
data["appointment_group[description]"] = appointment_group_description
# OPTIONAL - appointment_group[location_name]
"""Location name of the appointment group."""
if appointment_group_location_name is not None:
data["appointment_group[location_name]"] = appointment_group_location_name
# OPTIONAL - appointment_group[location_address]
"""Location address."""
if appointment_group_location_address is not None:
data["appointment_group[location_address]"] = appointment_group_location_address
# OPTIONAL - appointment_group[publish]
"""Indicates whether this appointment group should be published (i.e. made
available for signup). Once published, an appointment group cannot be
unpublished. Defaults to false."""
if appointment_group_publish is not None:
data["appointment_group[publish]"] = appointment_group_publish
# OPTIONAL - appointment_group[participants_per_appointment]
"""Maximum number of participants that may register for each time slot.
Defaults to null (no limit)."""
if appointment_group_participants_per_appointment is not None:
data["appointment_group[participants_per_appointment]"] = appointment_group_participants_per_appointment
# OPTIONAL - appointment_group[min_appointments_per_participant]
"""Minimum number of time slots a user must register for. If not set, users
do not need to sign up for any time slots."""
if appointment_group_min_appointments_per_participant is not None:
data["appointment_group[min_appointments_per_participant]"] = appointment_group_min_appointments_per_participant
# OPTIONAL - appointment_group[max_appointments_per_participant]
"""Maximum number of time slots a user may register for."""
if appointment_group_max_appointments_per_participant is not None:
data["appointment_group[max_appointments_per_participant]"] = appointment_group_max_appointments_per_participant
# OPTIONAL - appointment_group[new_appointments][X]
"""Nested array of start time/end time pairs indicating time slots for this
appointment group. Refer to the example request."""
if appointment_group_new_appointments_X is not None:
data["appointment_group[new_appointments][X]"] = appointment_group_new_appointments_X
# OPTIONAL - appointment_group[participant_visibility]
""""private":: participants cannot see who has signed up for a particular
time slot
"protected":: participants can see who has signed up. Defaults to
"private"."""
if appointment_group_participant_visibility is not None:
self._validate_enum(appointment_group_participant_visibility, ["private", "protected"])
data["appointment_group[participant_visibility]"] = appointment_group_participant_visibility
self.logger.debug("POST /api/v1/appointment_groups with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/appointment_groups".format(**path), data=data, params=params, no_data=True)
def get_single_appointment_group(self, id, include=None):
"""
Get a single appointment group.
Returns information for a single appointment group
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - include
"""Array of additional information to include. See include[] argument of
"List appointment groups" action.
"child_events":: reservations of time slots time slots
"appointments":: will always be returned
"all_context_codes":: all context codes associated with this appointment group"""
if include is not None:
self._validate_enum(include, ["child_events", "appointments", "all_context_codes"])
params["include"] = include
self.logger.debug("GET /api/v1/appointment_groups/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/appointment_groups/{id}".format(**path), data=data, params=params, no_data=True)
def update_appointment_group(self, id, appointment_group_context_codes, appointment_group_description=None, appointment_group_location_address=None, appointment_group_location_name=None, appointment_group_max_appointments_per_participant=None, appointment_group_min_appointments_per_participant=None, appointment_group_new_appointments_X=None, appointment_group_participant_visibility=None, appointment_group_participants_per_appointment=None, appointment_group_publish=None, appointment_group_sub_context_codes=None, appointment_group_title=None):
"""
Update an appointment group.
Update and return an appointment group. If new_appointments are specified,
the response will return a new_appointments array (same format as
appointments array, see "List appointment groups" action).
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# REQUIRED - appointment_group[context_codes]
"""Array of context codes (courses, e.g. course_1) this group should be
linked to (1 or more). Users in the course(s) with appropriate permissions
will be able to sign up for this appointment group."""
data["appointment_group[context_codes]"] = appointment_group_context_codes
# OPTIONAL - appointment_group[sub_context_codes]
"""Array of sub context codes (course sections or a single group category)
this group should be linked to. Used to limit the appointment group to
particular sections. If a group category is specified, students will sign
up in groups and the participant_type will be "Group" instead of "User"."""
if appointment_group_sub_context_codes is not None:
data["appointment_group[sub_context_codes]"] = appointment_group_sub_context_codes
# OPTIONAL - appointment_group[title]
"""Short title for the appointment group."""
if appointment_group_title is not None:
data["appointment_group[title]"] = appointment_group_title
# OPTIONAL - appointment_group[description]
"""Longer text description of the appointment group."""
if appointment_group_description is not None:
data["appointment_group[description]"] = appointment_group_description
# OPTIONAL - appointment_group[location_name]
"""Location name of the appointment group."""
if appointment_group_location_name is not None:
data["appointment_group[location_name]"] = appointment_group_location_name
# OPTIONAL - appointment_group[location_address]
"""Location address."""
if appointment_group_location_address is not None:
data["appointment_group[location_address]"] = appointment_group_location_address
# OPTIONAL - appointment_group[publish]
"""Indicates whether this appointment group should be published (i.e. made
available for signup). Once published, an appointment group cannot be
unpublished. Defaults to false."""
if appointment_group_publish is not None:
data["appointment_group[publish]"] = appointment_group_publish
# OPTIONAL - appointment_group[participants_per_appointment]
"""Maximum number of participants that may register for each time slot.
Defaults to null (no limit)."""
if appointment_group_participants_per_appointment is not None:
data["appointment_group[participants_per_appointment]"] = appointment_group_participants_per_appointment
# OPTIONAL - appointment_group[min_appointments_per_participant]
"""Minimum number of time slots a user must register for. If not set, users
do not need to sign up for any time slots."""
if appointment_group_min_appointments_per_participant is not None:
data["appointment_group[min_appointments_per_participant]"] = appointment_group_min_appointments_per_participant
# OPTIONAL - appointment_group[max_appointments_per_participant]
"""Maximum number of time slots a user may register for."""
if appointment_group_max_appointments_per_participant is not None:
data["appointment_group[max_appointments_per_participant]"] = appointment_group_max_appointments_per_participant
# OPTIONAL - appointment_group[new_appointments][X]
"""Nested array of start time/end time pairs indicating time slots for this
appointment group. Refer to the example request."""
if appointment_group_new_appointments_X is not None:
data["appointment_group[new_appointments][X]"] = appointment_group_new_appointments_X
# OPTIONAL - appointment_group[participant_visibility]
""""private":: participants cannot see who has signed up for a particular
time slot
"protected":: participants can see who has signed up. Defaults to "private"."""
if appointment_group_participant_visibility is not None:
self._validate_enum(appointment_group_participant_visibility, ["private", "protected"])
data["appointment_group[participant_visibility]"] = appointment_group_participant_visibility
self.logger.debug("PUT /api/v1/appointment_groups/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/appointment_groups/{id}".format(**path), data=data, params=params, no_data=True)
def delete_appointment_group(self, id, cancel_reason=None):
"""
Delete an appointment group.
Delete an appointment group (and associated time slots and reservations)
and return the deleted group
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - cancel_reason
"""Reason for deleting/canceling the appointment group."""
if cancel_reason is not None:
params["cancel_reason"] = cancel_reason
self.logger.debug("DELETE /api/v1/appointment_groups/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/appointment_groups/{id}".format(**path), data=data, params=params, no_data=True)
def list_user_participants(self, id, registration_status=None):
"""
List user participants.
List users that are (or may be) participating in this appointment group.
Refer to the Users API for the response fields. Returns no results for
appointment groups with the "Group" participant_type.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - registration_status
"""Limits results to the a given participation status, defaults to "all""""
if registration_status is not None:
self._validate_enum(registration_status, ["all", "registered", "registered"])
params["registration_status"] = registration_status
self.logger.debug("GET /api/v1/appointment_groups/{id}/users with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/appointment_groups/{id}/users".format(**path), data=data, params=params, no_data=True)
def list_student_group_participants(self, id, registration_status=None):
"""
List student group participants.
List student groups that are (or may be) participating in this appointment
group. Refer to the Groups API for the response fields. Returns no results
for appointment groups with the "User" participant_type.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - registration_status
"""Limits results to the a given participation status, defaults to "all""""
if registration_status is not None:
self._validate_enum(registration_status, ["all", "registered", "registered"])
params["registration_status"] = registration_status
self.logger.debug("GET /api/v1/appointment_groups/{id}/groups with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/appointment_groups/{id}/groups".format(**path), data=data, params=params, no_data=True)
def get_next_appointment(self, appointment_group_ids=None):
"""
Get next appointment.
Return the next appointment available to sign up for. The appointment
is returned in a one-element array. If no future appointments are
available, an empty array is returned.
"""
path = {}
data = {}
params = {}
# OPTIONAL - appointment_group_ids
"""List of ids of appointment groups to search."""
if appointment_group_ids is not None:
params["appointment_group_ids"] = appointment_group_ids
self.logger.debug("GET /api/v1/appointment_groups/next_appointment with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/appointment_groups/next_appointment".format(**path), data=data, params=params, all_pages=True)
class Appointmentgroup(BaseModel):
    """Appointmentgroup Model.

    Plain data holder mirroring the Canvas AppointmentGroup JSON object.
    Every remote field is exposed as a read/write property backed by a
    ``_name`` attribute; the setter only updates the local copy and logs a
    warning that the remote Canvas instance is NOT changed.
    """

    def __init__(self, participant_visibility=None, updated_at=None, context_codes=None, participant_type=None, end_at=None, id=None, participants_per_appointment=None, title=None, new_appointments=None, min_appointments_per_participant=None, appointments_count=None, start_at=None, description=None, participant_count=None, workflow_state=None, html_url=None, location_address=None, appointments=None, reserved_times=None, location_name=None, max_appointments_per_participant=None, url=None, created_at=None, sub_context_codes=None, requiring_action=None):
        """Init method for Appointmentgroup class."""
        self._participant_visibility = participant_visibility
        self._updated_at = updated_at
        self._context_codes = context_codes
        self._participant_type = participant_type
        self._end_at = end_at
        self._id = id
        self._participants_per_appointment = participants_per_appointment
        self._title = title
        self._new_appointments = new_appointments
        self._min_appointments_per_participant = min_appointments_per_participant
        self._appointments_count = appointments_count
        self._start_at = start_at
        self._description = description
        self._participant_count = participant_count
        self._workflow_state = workflow_state
        self._html_url = html_url
        self._location_address = location_address
        self._appointments = appointments
        self._reserved_times = reserved_times
        self._location_name = location_name
        self._max_appointments_per_participant = max_appointments_per_participant
        self._url = url
        self._created_at = created_at
        self._sub_context_codes = sub_context_codes
        self._requiring_action = requiring_action
        self.logger = logging.getLogger('pycanvas.Appointmentgroup')


def _appointmentgroup_property(name, doc):
    """Build a read/write property for remote field *name*.

    The getter returns the backing ``_name`` attribute; the setter updates it
    after logging the standard "will NOT update the remote Canvas instance"
    warning (identical message text to the original hand-written setters).
    """
    attr = '_' + name

    def fget(self):
        return getattr(self, attr)

    def fset(self, value):
        # logger.warning: Logger.warn is a deprecated alias of warning.
        self.logger.warning(
            "Setting values on %s will NOT update the remote Canvas instance." % name)
        setattr(self, attr, value)

    return property(fget, fset, None, doc)


# Attach one property per remote field. This replaces ~200 lines of
# copy-pasted @property/@x.setter boilerplate; names, behaviour and
# docstrings are unchanged.
for _name, _doc in [
    ('participant_visibility', "'private' means participants cannot see who has signed up for a particular time slot, 'protected' means that they can."),
    ('updated_at', "When the appointment group was last updated."),
    ('context_codes', "The context codes (i.e. courses) this appointment group belongs to. Only people in these courses will be eligible to sign up."),
    ('participant_type', "Indicates how participants sign up for the appointment group, either as individuals ('User') or in student groups ('Group'). Related to sub_context_codes (i.e. 'Group' signups always have a single group category)."),
    ('end_at', "The end of the last time slot in the appointment group."),
    ('id', "The ID of the appointment group."),
    ('participants_per_appointment', "Maximum number of participants that may register for each time slot, or null if no limit."),
    ('title', "The title of the appointment group."),
    ('new_appointments', "Newly created time slots (same format as appointments above). Only returned in Create/Update responses where new time slots have been added."),
    ('min_appointments_per_participant', "Minimum number of time slots a user must register for. If not set, users do not need to sign up for any time slots."),
    ('appointments_count', "Number of time slots in this appointment group."),
    ('start_at', "The start of the first time slot in the appointment group."),
    ('description', "The text description of the appointment group."),
    ('participant_count', "The number of participant who have reserved slots (see include[] argument)."),
    ('workflow_state', "Current state of the appointment group ('pending', 'active' or 'deleted'). 'pending' indicates that it has not been published yet and is invisible to participants."),
    ('html_url', "URL for a user to view this appointment group."),
    ('location_address', "The address of the appointment group's location."),
    ('appointments', "Calendar Events representing the time slots (see include[] argument) Refer to the Calendar Events API for more information."),
    ('reserved_times', "The start and end times of slots reserved by the current user as well as the id of the calendar event for the reservation (see include[] argument)."),
    ('location_name', "The location name of the appointment group."),
    ('max_appointments_per_participant', "Maximum number of time slots a user may register for, or null if no limit."),
    ('url', "URL for this appointment group (to update, delete, etc.)."),
    ('created_at', "When the appointment group was created."),
    ('sub_context_codes', "The sub-context codes (i.e. course sections and group categories) this appointment group is restricted to."),
    ('requiring_action', "Boolean indicating whether the current user needs to sign up for this appointment group (i.e. it's reservable and the min_appointments_per_participant limit has not been met by this user)."),
]:
    setattr(Appointmentgroup, _name, _appointmentgroup_property(_name, _doc))
del _name, _doc
class Appointment(BaseModel):
    """Appointment Model.

    Date and time for an appointment. Like the other pycanvas models, the
    setters only change the local object and warn that the remote Canvas
    instance is not updated.
    """

    def __init__(self, start_at=None, id=None, end_at=None):
        """Init method for Appointment class."""
        self._start_at = start_at
        self._id = id
        self._end_at = end_at
        self.logger = logging.getLogger('pycanvas.Appointment')

    @property
    def start_at(self):
        """Start time for the appointment."""
        return self._start_at

    @start_at.setter
    def start_at(self, value):
        """Setter for start_at property."""
        # logger.warning: Logger.warn is a deprecated alias of warning.
        self.logger.warning("Setting values on start_at will NOT update the remote Canvas instance.")
        self._start_at = value

    @property
    def id(self):
        """The appointment identifier."""
        return self._id

    @id.setter
    def id(self, value):
        """Setter for id property."""
        self.logger.warning("Setting values on id will NOT update the remote Canvas instance.")
        self._id = value

    @property
    def end_at(self):
        """End time for the appointment."""
        return self._end_at

    @end_at.setter
    def end_at(self, value):
        """Setter for end_at property."""
        self.logger.warning("Setting values on end_at will NOT update the remote Canvas instance.")
        self._end_at = value
| {
"repo_name": "PGower/PyCanvas",
"path": "pycanvas/apis/appointment_groups.py",
"copies": "1",
"size": "33736",
"license": "mit",
"hash": 5976230408918029000,
"line_mean": 45.1832167832,
"line_max": 557,
"alpha_frac": 0.6537526678,
"autogenerated": false,
"ratio": 4.355280144590757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5509032812390757,
"avg_score": null,
"num_lines": null
} |
#app = publications
import models
from django.contrib import admin
from django import forms
from django.core import urlresolvers
from widgetry.tabs.placeholderadmin import ModelAdminWithTabsAndCMSPlaceholder
from arkestra_utilities.admin_mixins import AutocompleteMixin, ButtonLinkWidget
from contacts_and_people.admin import PersonAdmin
from contacts_and_people.models import Person
class ResearcherForm(forms.ModelForm):
    """Admin form for Researcher.

    Refuses to enable ``symplectic_access`` until the linked Person record
    has the surname, institutional username and email address that the
    Symplectic integration requires.
    """

    class Meta:
        model = models.Researcher

    # research_synopsis = forms.CharField(widget=WYMEditor, required=False)
    # research_description = forms.CharField(widget=WYMEditor, required=False)

    def clean(self):
        """Cross-field validation for the symplectic_access flag."""
        # Use .get(): a key is absent from cleaned_data when its own
        # field-level validation failed, and direct indexing would raise
        # KeyError instead of producing a form error.
        if self.cleaned_data.get("symplectic_access") and \
                self.cleaned_data.get("person"):
            person = self.cleaned_data["person"]
            # `not x` already covers both None and '' — the original's extra
            # `== ''` comparisons were redundant.
            if not person.surname:
                raise forms.ValidationError("""
                Symplectic will not allow access until this Researcher has
                a Surname
                """)
            if not person.institutional_username:
                raise forms.ValidationError("""
                Symplectic will not allow access until this Researcher has
                a Username
                """)
            if not person.email:
                raise forms.ValidationError("""
                Symplectic will not allow access until this Researcher has
                an email address
                """)
        return self.cleaned_data
class ResearcherAdmin(AutocompleteMixin, ModelAdminWithTabsAndCMSPlaceholder):
    """Admin for Researcher, combining autocomplete and tabbed-placeholder
    behaviour from its two bases."""

    def _media(self):
        # Combine the Media (JS/CSS) contributed by classes further up the
        # MRO: each super(Base, self).media call resolves `media` starting
        # *after* that base, so both contributions are summed.
        # NOTE(review): result depends on the exact MRO of the two bases —
        # confirm against widgetry/arkestra versions before changing.
        return super(
            AutocompleteMixin,
            self).media + super(
            ModelAdminWithTabsAndCMSPlaceholder,
            self).media
    media = property(_media)

    # Disable bulk admin actions for this model.
    actions = None

    # Fieldsets are assembled into tabs below; the 'plugin-holder' classes
    # mark CMS placeholder fields, 'xcollapsed' starts a section collapsed.
    basic_fieldset = [None, {'fields': ['publishes']}]
    synopsis_fieldset = ['Brief synopsis of research', {
        'fields': ['synopsis'],
        'classes': ['plugin-holder', 'plugin-holder-nopage']
    }]
    description_fieldset = ['Fuller research description', {
        'fields': ['description'],
        'classes': ['plugin-holder', 'plugin-holder-nopage']
    }]
    advanced_fieldset = ['Symplectic [Advanced Options]', {
        'fields': ['symplectic_access', 'symplectic_id', 'person'],
        'classes': ['xcollapsed'],
    }]
    tabs = [
        ['Research', {'fieldsets': [
            basic_fieldset, synopsis_fieldset, description_fieldset
        ]}],
        ['Advanced Options', {'fieldsets': [advanced_fieldset]}],
    ]
    # readonly_fields=["person",]
    list_display = ('person', 'publishes', 'symplectic_access')
    list_editable = ('publishes', 'symplectic_access')
    list_filter = ('publishes', 'symplectic_access')
    form = ResearcherForm
    ordering = ('person__surname',)
    search_fields = (
        'person__surname',
        'person__given_name',
        'person__institutional_username'
    )
    # Fields the autocomplete widget searches on the related Person.
    related_search_fields = {'person': ('surname', 'given_name')}
admin.site.register(models.Researcher, ResearcherAdmin)
class SupervisionInline(AutocompleteMixin, admin.TabularInline):
    """Tabular inline editing Supervision rows (student/supervisor pairs)
    with autocomplete on the related people."""
    model = models.Supervision
    search_fields = [
        'researcher__person__surname',
        'researcher__person__given_name'
    ]
    # it doesn't seem to be necessary to specify the search fields:
    related_search_fields = {
        "student": [],
        "supervisor": [],
    }
class SupervisionAdmin(AutocompleteMixin, admin.ModelAdmin):
    """Standalone admin for Supervision records, searchable by the names of
    both the student and the supervisor."""
    model = models.Supervision
    search_fields = [
        'student__researcher__person__surname',
        'student__researcher__person__given_name',
        'supervisor__researcher__person__surname',
        'supervisor__researcher__person__given_name'
    ]
    list_display = ['student', 'supervisor']
    # it doesn't seem to be necessary to specify the search fields:
    related_search_fields = {
        'student': [],
        'supervisor': []
    }
    ordering = ('student__researcher__person__surname',)
# this admin class is not registered - here for debugging convenience
# admin.site.register(models.Supervision, SupervisionAdmin)
class StudentAdmin(AutocompleteMixin, admin.ModelAdmin):
    """Admin for Student with supervision rows edited inline."""
    search_fields = (
        'researcher__person__surname',
        'researcher__person__given_name',
        'researcher__person__institutional_username'
    )
    # Autocomplete on the related researcher by person name.
    related_search_fields = {'researcher': ('surname', 'given_name')}
    inlines = [SupervisionInline]
    ordering = ('researcher__person__surname',)
admin.site.register(models.Student, StudentAdmin)
class SupervisorAdmin(AutocompleteMixin, admin.ModelAdmin):
    """Admin for Supervisor with supervision rows edited inline."""
    search_fields = (
        'researcher__person__surname', 'researcher__person__given_name',
        'researcher__person__institutional_username'
    )
    # Autocomplete on the related researcher by person name.
    related_search_fields = {'researcher': ('surname', 'given_name')}
    inlines = [SupervisionInline]
    ordering = ('researcher__person__surname',)
admin.site.register(models.Supervisor, SupervisorAdmin)
class ResearcherInlineForm(forms.ModelForm):
    """Inline form for Researcher shown on the Person admin page.

    Adds a pseudo-field rendered as a button that links to the full
    Researcher change view once the record has been saved.
    """

    class Meta:
        model = models.Researcher

    # a button to link to admin:publications_researcher_change for this person
    buttonlink = forms.Field(
        widget=ButtonLinkWidget,
        required=False,
        label="Research profile",
        help_text="""
        Once this Person has been saved, research-related information can
        be edited.
        """)

    def __init__(self, *args, **kwargs):
        """Point the button at this researcher's change view once saved."""
        super(ResearcherInlineForm, self).__init__(*args, **kwargs)
        # Only a saved instance has a change view to link to.
        if self.instance.pk:
            # Use self.instance (bound by super().__init__) rather than
            # kwargs['instance']: the instance may be supplied positionally,
            # in which case kwargs['instance'] raises KeyError.
            instance = self.instance
            # NOTE(review): the URL is reversed with instance.person.id, not
            # the Researcher's own pk — confirm the two always coincide
            # before relying on this.
            link = urlresolvers.reverse(
                'admin:publications_researcher_change',
                args=[instance.person.id]
            )
            self.fields["buttonlink"].widget.attrs["link"] = link
            text = "Edit %s's research profile" % unicode(instance.person)
            self.initial['buttonlink'] = text
            help_text = "Edit research-related information in a new window."
            self.fields["buttonlink"].help_text = help_text
class ResearcherInline(admin.StackedInline):
    """Stacked inline for the Researcher record attached to a Person.

    get_formset() mutates the class-level ``fields`` list so the buttonlink
    field only appears when the Person is a publishing Researcher.
    """
    def __init__(self, attrs=None, *args, **kwargs):
        super(ResearcherInline, self).__init__(attrs, *args, **kwargs)
    def get_formset(self, request, obj=None, **kwargs):
        # first test to see if the Person is also a Researcher
        try:
            researcher = obj.researcher
        # can't get researcher? (obj may be None on add pages -> AttributeError)
        except (models.Researcher.DoesNotExist, AttributeError):
            # then remove the buttonlink if it's present
            if 'buttonlink' in self.fields:
                self.fields.remove("buttonlink")
        # but if we can get a researcher
        else:
            # researcher.publishes but no buttonlink? add a buttonlink
            if not 'buttonlink' in self.fields and researcher.publishes:
                self.fields.append('buttonlink')
            # researcher doesn't publish, but there is a buttonlink hanging
            # around? delete it
            elif 'buttonlink' in self.fields and not obj.researcher.publishes:
                self.fields.remove("buttonlink")
        # NOTE(review): obj=None is passed to super() here, discarding the obj
        # argument received above -- confirm this is intentional.
        formset = super(ResearcherInline, self).get_formset(
            request, obj=None, **kwargs
        )
        return formset
    fields = ['publishes']
    form = ResearcherInlineForm
    model = models.Researcher
# unregister and then re-register the PersonAdmin, to accommodate our messing
# about above
admin.site.unregister(Person)
# add a Research tab carrying the ResearcherInline defined in this module
PersonAdmin.tabs.append(('Research', {'inlines': [ResearcherInline]}))
admin.site.register(Person, PersonAdmin)
| {
"repo_name": "evildmp/arkestra-publications",
"path": "publications/admin.py",
"copies": "1",
"size": "8117",
"license": "bsd-2-clause",
"hash": 8005268742710844000,
"line_mean": 33.9159292035,
"line_max": 79,
"alpha_frac": 0.6100776149,
"autogenerated": false,
"ratio": 4.361633530360021,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5471711145260021,
"avg_score": null,
"num_lines": null
} |
# app.py created from http://charlesleifer.com/blog/how-to-make-a-flask-blog-in-one-hour-or-less/
import datetime
import functools
import os
import re
import urllib
from flask import (Flask, abort, flash, Markup, redirect, render_template,
request, Response, session, url_for)
from markdown import markdown
from markdown.extensions.codehilite import CodeHiliteExtension
from markdown.extensions.extra import ExtraExtension
from micawber import bootstrap_basic, parse_html
from micawber.cache import Cache as OEmbedCache
from peewee import *
from playhouse.flask_utils import FlaskDB, get_object_or_404, object_list
from playhouse.sqlite_ext import *
ADMIN_PASSWORD = 'secret'  # NOTE(review): hard-coded credential; move to env/config for real use
APP_DIR = os.path.dirname(os.path.realpath(__file__))
DATABASE = 'sqliteext:///%s' % os.path.join(APP_DIR, 'blog.db')  # peewee sqlite-ext URL
DEBUG = False
SECRET_KEY = 'shhh, secret!'  # Used by Flask to encrypt session cookie.
SITE_WIDTH = 800  # max width (px) passed to the oEmbed providers
app = Flask(__name__)
app.config.from_object(__name__)  # the UPPERCASE names above become Flask config
flask_db = FlaskDB(app)
database = flask_db.database
oembed_providers = bootstrap_basic(OEmbedCache())
# database structure code
class Entry(flask_db.Model):
    """A blog post stored via peewee, with a full-text-search shadow row."""
    title = CharField()
    slug = CharField(unique=True)  # URL identifier derived from the title
    content = TextField()          # raw Markdown source
    published = BooleanField(index=True)
    timestamp = DateTimeField(default=datetime.datetime.now, index=True)
    @property
    def html_content(self):
        """Render the Markdown content to embeddable HTML (code highlighting,
        Markdown extras, oEmbed-expanded links)."""
        hilite = CodeHiliteExtension(linenums=False, css_class='highlight')
        extras = ExtraExtension()
        markdown_content = markdown(self.content, extensions=[hilite, extras])
        oembed_content = parse_html(
            markdown_content,
            oembed_providers,
            urlize_all=True,
            maxwidth=app.config['SITE_WIDTH'])
        return Markup(oembed_content)
    def save(self, *args, **kwargs):
        """Save the entry, generating a slug on first save and keeping the
        search index in sync."""
        if not self.slug:
            # raw string fixes the invalid '\w' escape warning under Python 3
            self.slug = re.sub(r'[^\w]+', '-', self.title.lower())
        ret = super(Entry, self).save(*args, **kwargs)
        # Store search content.
        self.update_search_index()
        return ret
    def update_search_index(self):
        """Create or update this entry's FTSEntry row (title + body text)."""
        try:
            fts_entry = FTSEntry.get(FTSEntry.entry_id == self.id)
        except FTSEntry.DoesNotExist:
            fts_entry = FTSEntry(entry_id=self.id)
            force_insert = True
        else:
            force_insert = False
        fts_entry.content = '\n'.join((self.title, self.content))
        fts_entry.save(force_insert=force_insert)
    @classmethod
    def public(cls):
        # '== True' builds a peewee SQL expression; do not change to 'is True'
        return Entry.select().where(Entry.published == True)
    @classmethod
    def drafts(cls):
        return Entry.select().where(Entry.published == False)
    @classmethod
    def search(cls, query):
        """Full-text search over published entries, best matches first."""
        words = [word.strip() for word in query.split() if word.strip()]
        if not words:
            # Return empty query.
            return Entry.select().where(Entry.id == 0)
        else:
            search = ' '.join(words)
            return (FTSEntry
                    .select(
                        FTSEntry,
                        Entry,
                        FTSEntry.rank().alias('score'))
                    .join(Entry, on=(FTSEntry.entry_id == Entry.id).alias('entry'))
                    .where(
                        (Entry.published == True) &
                        (FTSEntry.match(search)))
                    .order_by(SQL('score').desc()))
class FTSEntry(FTSModel):
    """Full-text-search shadow table: one row per Entry."""
    entry_id = IntegerField()  # id of the Entry this row indexes
    content = TextField()      # title + content joined by Entry.update_search_index
    class Meta:
        database = database
def login_required(fn):
    """Decorator: redirect anonymous visitors to the login page."""
    @functools.wraps(fn)
    def inner(*args, **kwargs):
        if not session.get('logged_in'):
            return redirect(url_for('login', next=request.path))
        return fn(*args, **kwargs)
    return inner
@app.route('/login/', methods=['GET', 'POST'])
def login():
    """Log the admin in when the submitted password matches the config."""
    next_url = request.args.get('next') or request.form.get('next')
    password = request.form.get('password')
    if request.method == 'POST' and password:
        if password == app.config['ADMIN_PASSWORD']:
            session['logged_in'] = True
            session.permanent = True  # Use cookie to store session.
            flash('You are now logged in.', 'success')
            return redirect(next_url or url_for('index'))
        flash('Incorrect password.', 'danger')
    return render_template('login.html', next_url=next_url)
@app.route('/logout/', methods=['GET', 'POST'])
def logout():
    """POST clears the session; GET shows a confirmation page."""
    if request.method != 'POST':
        return render_template('logout.html')
    session.clear()
    return redirect(url_for('login'))
@app.route('/')
def index():
    """Front page: public entries, or full-text results when ?q= is given."""
    search_query = request.args.get('q')
    if not search_query:
        entries = Entry.public().order_by(Entry.timestamp.desc())
    else:
        entries = Entry.search(search_query)
    return object_list('index.html', entries, search=search_query)
def _create_or_edit(entry, template):
    """Shared POST handler for create() and edit().

    On success redirects to the entry's detail page (published) or its edit
    page (draft); on validation/integrity failure falls through and re-renders
    the form template with flash messages.
    """
    if request.method == 'POST':
        entry.title = request.form.get('title') or ''
        entry.content = request.form.get('content') or ''
        # checkbox: the raw form value is truthy when ticked, False otherwise
        entry.published = request.form.get('published') or False
        if not (entry.title and entry.content):
            flash('Title and Content are required.', 'danger')
        else:
            # Wrap the call to save in a transaction so we can roll it back
            # cleanly in the event of an integrity error.
            try:
                with database.atomic():
                    entry.save()
            except IntegrityError:
                # slug column is unique; duplicate titles produce duplicate slugs
                flash('Error: this title is already in use.', 'danger')
            else:
                flash('Entry saved successfully.', 'success')
                if entry.published:
                    return redirect(url_for('detail', slug=entry.slug))
                else:
                    return redirect(url_for('edit', slug=entry.slug))
    return render_template(template, entry=entry)
@app.route('/create/', methods=['GET', 'POST'])
@login_required
def create():
    """Show/handle the new-entry form, starting from a blank Entry."""
    blank = Entry(title='', content='')
    return _create_or_edit(blank, 'create.html')
@app.route('/drafts/')
@login_required
def drafts():
    """List unpublished entries, newest first (admin only)."""
    draft_entries = Entry.drafts().order_by(Entry.timestamp.desc())
    return object_list('index.html', draft_entries, check_bounds=False)
@app.route('/<slug>/')
def detail(slug):
    """Show a single entry; drafts are visible only when logged in."""
    if not session.get('logged_in'):
        query = Entry.public()
    else:
        query = Entry.select()
    entry = get_object_or_404(query, Entry.slug == slug)
    return render_template('detail.html', entry=entry)
@app.route('/<slug>/edit/', methods=['GET', 'POST'])
@login_required
def edit(slug):
    """Edit an existing entry looked up by slug (admin only)."""
    existing = get_object_or_404(Entry, Entry.slug == slug)
    return _create_or_edit(existing, 'edit.html')
@app.template_filter('clean_querystring')
def clean_querystring(request_args, *keys_to_remove, **new_values):
    """Template filter: rebuild the query string without *keys_to_remove*
    and with **new_values merged in.

    Uses a dict comprehension rather than dict(generator) for clarity.
    """
    querystring = {key: value for key, value in request_args.items()}
    for key in keys_to_remove:
        querystring.pop(key, None)
    querystring.update(new_values)
    return urllib.urlencode(querystring)
@app.errorhandler(404)
def not_found(exc):
    """Render a minimal 404 response."""
    body = '<h3>Still not working!!!</h3>'
    return Response(body), 404
def main():
    """Create the tables if needed and run the Flask development server."""
    database.create_tables([Entry, FTSEntry], safe=True)  # safe=True: no-op if present
    app.run(debug=True)
if __name__ == '__main__':
    main()
| {
"repo_name": "artopping/nyu-python",
"path": "course3/assignments/app/app.py",
"copies": "1",
"size": "7205",
"license": "mit",
"hash": 4548576947512361000,
"line_mean": 32.3564814815,
"line_max": 97,
"alpha_frac": 0.6154059681,
"autogenerated": false,
"ratio": 3.733160621761658,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4848566589861658,
"avg_score": null,
"num_lines": null
} |
import os
import json
from flask import Flask, request, redirect, url_for, render_template, send_from_directory
from werkzeug.utils import secure_filename
from nltk.tokenize import RegexpTokenizer
from collections import Counter
from pymongo import MongoClient
import datetime
import hashlib
client = MongoClient('localhost', 27017)
db = client.analysisWebApp # Database
# NOTE(review): machine-specific absolute path; duplicated in several
# functions below instead of being reused from app.config
UPLOAD_FOLDER = 'C:/Users/danha/Desktop/webapp/uploads/'
ALLOWED_EXTENSIONS = set(['txt'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
##Make sure file has an allowed extension.
def allowed_file(filename, allowed_extensions=None):
    """Return True when *filename* has an extension in *allowed_extensions*.

    Generalized: the extension set is now a parameter, defaulting to the
    module-level ALLOWED_EXTENSIONS so existing callers are unaffected.
    The comparison is case-insensitive.
    """
    if allowed_extensions is None:
        allowed_extensions = ALLOWED_EXTENSIONS
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in allowed_extensions
##Homepage.
@app.route('/', methods=['GET', 'POST'])
def home():
    """Render the landing page."""
    return render_template('home.html', title='Data Analysis App')
##Analyse page, taken to after form is submitted.
@app.route('/analyse', methods=['GET', 'POST'])
def upload_file():
    """Accept an uploaded text file, tokenize it, look each token up in the
    thesaurus, and render the results (cached in Mongo by file hash).

    NOTE(review): flash() is used below but is not in the flask import at the
    top of this module -- those branches raise NameError at runtime.
    NOTE(review): on a plain GET (or a disallowed file) `filename` is never
    bound, so checkHistory(filename) below raises NameError -- confirm this
    route is only ever reached via the upload form POST.
    """
    tokens=0
    results=0
    token=0
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # if user does not select file, browser also
        # submit a empty part without filename
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            tokens=tokenize(filename)
    #results=check_data(tokens)
    resultsA=[]
    notFound=[]
    endL=[]
    tokensFound=[]
    # 0 means "no cached analysis for this file hash" -- do the work now
    if checkHistory(filename)==0:
        dataTakenFromVar="FROM NEW FILE"
        counts=Counter(tokens)
        for x in sorted(counts.keys()):
            try:
                results=check_data(x)
                token=x
                resultsA.append(results)
                # NOTE(review): O(n^2) -- rescans all counts per token; the
                # inner loop only ever matches k == token
                for k, v in sorted(counts.items()):
                    if k==token:
                        l=[]
                        l.append(k)
                        l.append(v)
                        l.append(results)
                        endL.append(l)
            # bare except: any failure in check_data (KeyError for unknown
            # tokens, but also I/O errors) routes the token to notFound
            except:
                for k, v in sorted(counts.items()):
                    if k==x:
                        l=[]
                        l.append(k)
                        l.append(v)
                        notFound.append(l)
        tokensFound = [x for x in tokens if x not in notFound]
        insertToDatabase(filename,endL,notFound)
    else:
        # cached: re-read the stored results instead of re-analysing
        dataTakenFromVar ="FROM DATABASE"
        dbResult = checkHistory(filename)
        endL = dbResult.distinct("found")
        notFound = dbResult.distinct("notfound")
        tokensFound=[]
    return render_template('results.html',
                            title='Data Analysis App',
                            tokens=tokens,
                            token=token,
                            endL=endL,
                            notFound=notFound,
                            dataTakenFromVar=dataTakenFromVar)
##Insert new analysis into database
def insertToDatabase(filename,endL,notFound):
    """Store one analysis run (hash, filename, results, timestamp) in Mongo."""
    document = {
        "hash": hashFileContents(filename),
        "filename": filename,
        "found": endL,
        "notfound": notFound,
        "date": datetime.datetime.utcnow(),
    }
    db.files.insert_one(document)
    return 0
##Check whether this file's hash is already in the database (avoids re-analysis)
def checkHistory(filename):
    """Return a cursor over prior analyses for this file's hash, or 0 if none."""
    hashOfFile = hashFileContents(filename)
    query = {"hash": hashOfFile}
    if db.files.find(query).count() >= 1:
        return db.files.find(query)
    return 0
##Get the Hash
def hashFileContents(filename):
    """Return the SHA-256 hex digest of the named file in the uploads folder.

    Fixes: the original leaked the file handle (never closed) and kept a
    redundant ``hashed = sha_hash = ...`` alias.
    """
    with open("C:/Users/danha/Desktop/webapp/uploads/"+filename, "r") as f:
        data = f.read()
    return hashlib.sha256(bytes(data, encoding='utf-8')).hexdigest()
##Use nltk to tokenize the input file.
def tokenize(filename):
    """Lower-case and tokenize the uploaded file into a list of word tokens.

    Fixes: use a ``with`` block so the file handle is closed even when
    tokenization raises (the original only closed it on success).
    """
    tokenizer = RegexpTokenizer(r'\w+')
    tokens = []
    with open("C:/Users/danha/Desktop/webapp/uploads/"+filename, "r") as f:
        for line in f:
            tokens += tokenizer.tokenize(line.lower())
    return tokens
##Check one token against the thesaurus JSON file.
def check_data(tokens):
    """Look up a token in the thesaurus JSON; return its first three entries.

    Raises KeyError when the token is not in the thesaurus (callers rely on
    this to route tokens into the not-found list).
    """
    with open('C:/Users/danha/Desktop/webapp/ea-thesaurus-lower.json') as normsf:
        norms = json.load(normsf)
    return norms[tokens][0:3]
##History.
@app.route('/history')
def history1():
    """List every stored analysis document."""
    dbFiles = list(db.files.find())
    return render_template('history.html',
                           title='Data Analysis App',
                           dbFiles=dbFiles)
@app.route('/history/<filename>')
def history(filename):
    """Re-render stored analysis results for a previously analysed file."""
    dbResult = checkHistory(filename)
    context = {
        'title': 'Data Analysis App',
        'tokens': [],
        'token': [],
        'endL': dbResult.distinct("found"),
        'notFound': dbResult.distinct("notfound"),
        'dataTakenFromVar': "FROM DATABASE",
    }
    return render_template('results.html', **context)
if __name__ == '__main__':
    # run the Flask development server when executed directly
    app.run(debug=True)
| {
"repo_name": "Sledro/College-Programs",
"path": "Year-3/Python/FileAnalysisApp/app.py",
"copies": "1",
"size": "5905",
"license": "mit",
"hash": 6782828170468656000,
"line_mean": 31.9888268156,
"line_max": 89,
"alpha_frac": 0.5544453853,
"autogenerated": false,
"ratio": 4.303935860058309,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.023683876095182903,
"num_lines": 179
} |
# app.py for Change4Change
import flask
from flask import Flask, request, url_for, jsonify
import json
import logging
# Date handling
import arrow
###
# Globals
###
app = flask.Flask(__name__)
# NOTE(review): mid-file imports; this setup is repeated in __main__ below
import CONFIG
import uuid
app.secret_key = str(uuid.uuid4())  # random per-process secret; sessions reset on restart
app.debug = CONFIG.DEBUG
app.logger.setLevel(logging.DEBUG)
#############
####Pages####
#############
### Home Page ###
@app.route("/")
@app.route("/map")
def index():
    """Serve the map page (also acts as the home page)."""
    app.logger.debug("Main page entry")
    return flask.render_template('map.html')
@app.route("/_submitReport")
def getReport():
    """AJAX endpoint: read a submitted incident report from the query string.

    NOTE(review): type=bool coerces any non-empty string (even "false") to
    True -- confirm the client sends a value Flask coerces as intended.
    NOTE(review): only the latitude is echoed back; description/atype/along
    are currently unused.
    """
    description = request.args.get('description',0, type=str)
    atype = request.args.get('type',0, type=str)
    alat = request.args.get('lat',0, type=float)
    along = request.args.get('long',0, type=float)
    anon = request.args.get('anonymous',0, type=bool)
    # Python 2 print statement -- this module predates py3
    print anon
    return jsonify(result = alat)
@app.route("/_getMarkers")
def getMarkers():
    """Return a hard-coded list of demo map markers as JSON."""
    markers = [
        [44.052, -123.086, "This is a test event", True, 1591709677],
        [44.042, -123.084, "A murder happened here", False, 1490709677],
    ]
    return jsonify(result=markers)
if __name__ == "__main__":
    # NOTE(review): this repeats the module-level setup above verbatim
    import uuid
    app.secret_key = str(uuid.uuid4())
    app.debug = CONFIG.DEBUG
    app.logger.setLevel(logging.DEBUG)
    app.run(port=CONFIG.PORT,threaded=True)
| {
"repo_name": "civiclee/Hack4Cause2017",
"path": "src/quack4acause/apptest.py",
"copies": "1",
"size": "1319",
"license": "mit",
"hash": 854296896280158000,
"line_mean": 20.6229508197,
"line_max": 75,
"alpha_frac": 0.6429112964,
"autogenerated": false,
"ratio": 3.2014563106796117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43443676070796117,
"avg_score": null,
"num_lines": null
} |
# app.py for Change4Change
import flask
from flask import Flask, request, url_for, jsonify, render_template
from flask.ext.sqlalchemy import SQLAlchemy
from datetime import datetime
import json
import logging
import CONFIG
import uuid
###
# Globals
###
app = flask.Flask(__name__)
# Database Globals
SQLALCHEMY_DATABASE_URI = "mysql+mysqlconnector://{username}:{password}@{hostname}/{databasename}".format(
    username="change4change",
    # NOTE(review): credentials are hard-coded in source; move to env/config
    password="noSleep69",
    hostname="change4change.mysql.pythonanywhere-services.com",
    databasename="change4change$reports",
)
app.config["SQLALCHEMY_DATABASE_URI"] = SQLALCHEMY_DATABASE_URI
# recycle pooled connections before PythonAnywhere's 5-minute idle timeout
app.config["SQLALCHEMY_POOL_RECYCLE"] = 299
db = SQLAlchemy(app)
app.secret_key = str(uuid.uuid4())  # random per-process secret; sessions reset on restart
app.debug = CONFIG.DEBUG
app.logger.setLevel(logging.DEBUG)
#############
####Pages####
#############
### Home Page ###
@app.route("/", methods=['GET','POST'])
@app.route("/index", methods=['GET','POST'])
def index():
    """Serve the home page.

    Fix: the route accepts POST as well as GET, but the original only
    returned a response for GET -- a POST fell through and returned None,
    which makes Flask raise a 500. Render the page for both methods.
    """
    app.logger.debug("Main page entry")
    return render_template('index.html')
# Test function to test database interaction
# LATER: Will have admin authentication, then various
# queries to show reports.
@app.route("/displayReports", methods=['GET','POST'])
def displayReports():
    """Render every Report row into the DB test template."""
    all_reports = Report.query.all()
    return render_template('DBtest.html', results=all_reports)
@app.route("/report", methods=['GET','POST'])
def report():
    """Accept a posted incident report and persist it as a Report row.

    NOTE(review): the GET branch is a bare ``pass`` (the render is commented
    out), so a GET returns None and Flask raises a 500 -- confirm whether
    report.html exists and should be rendered.
    """
    # If user is directed to the report page
    if (request.method == 'GET'):
        pass
        # return render_template('report.html')
    # If the user has posted a report form
    else:
        latitude = request.form.get('latitude')
        longitude = request.form.get('longitude')
        reportText = request.form.get('reportText')
        # NOTE(review): raw form value (a string or None) is stored into a
        # Boolean column -- verify the driver's coercion is what's intended
        isEmergency = request.form.get('isEmergency')
        newReport = Report(
            latitude = latitude,
            longitude = longitude,
            event_dt = datetime.now(),  # server-side submission timestamp
            text = reportText,
            isEmergency = isEmergency
        )
        db.session.add(newReport)
        db.session.commit()
        return render_template('index.html')
# Database model declaration for report data
class Report(db.Model):
    """One incident report submitted from the report form."""
    __tablename__ = "reports"
    id = db.Column(db.Integer, primary_key = True)
    latitude = db.Column(db.Float)     # coordinates of the incident
    longitude = db.Column(db.Float)
    event_dt = db.Column(db.DateTime)  # set server-side at submission time
    text = db.Column(db.String(4096))  # free-form report text
    isEmergency = db.Column(db.Boolean)
if __name__ == "__main__":
    # threaded dev server on the configured port
    app.run(port=CONFIG.PORT,threaded=True)
| {
"repo_name": "civiclee/Hack4Cause2017",
"path": "src/quack4acause/DonationApp/app.py",
"copies": "1",
"size": "2427",
"license": "mit",
"hash": 3916433023359697400,
"line_mean": 24.8191489362,
"line_max": 106,
"alpha_frac": 0.6975690152,
"autogenerated": false,
"ratio": 3.293080054274084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9366931770489906,
"avg_score": 0.024743459796835558,
"num_lines": 94
} |
# App.py
# Application stuff.
# The application is responsible for managing the main frame window.
#
# We also grab the FileOpen command, to invoke our Python editor
" The PythonWin application code. Manages most aspects of MDI, etc "
import win32con
import win32api
import win32ui
import sys
import string
import os
from pywin.mfc import window, dialog, afxres
from pywin.mfc.thread import WinApp
import traceback
import regutil
import scriptutils
## NOTE: App and AppBuild should NOT be used - instead, you should contruct your
## APP class manually whenever you like (just ensure you leave these 2 params None!)
## Whoever wants the generic "Application" should get it via win32iu.GetApp()
# These are "legacy"
AppBuilder = None
App = None # default - if used, must end up a CApp derived class.
# Helpers that should one day be removed!
def AddIdleHandler(handler):
    # Deprecated shim: forwards to the real application object.
    print "app.AddIdleHandler is deprecated - please use win32ui.GetApp().AddIdleHandler() instead."
    return win32ui.GetApp().AddIdleHandler(handler)
def DeleteIdleHandler(handler):
    # Deprecated shim: forwards to the real application object.
    print "app.DeleteIdleHandler is deprecated - please use win32ui.GetApp().DeleteIdleHandler() instead."
    return win32ui.GetApp().DeleteIdleHandler(handler)
# Helper for writing a Window position by name, and later loading it.
def SaveWindowSize(section,rect,state=""):
    """ Writes a rectangle to an INI file
    Args: section = section name in the applications INI file
          rect = a rectangle in a (cy, cx, y, x) tuple
          (same format as CREATESTRUCT position tuples)."""
    left, top, right, bottom = rect
    if state: state = state + " "
    edges = (("left", left), ("top", top), ("right", right), ("bottom", bottom))
    for name, value in edges:
        win32ui.WriteProfileVal(section, state + name, value)
def LoadWindowSize(section, state=""):
    """ Loads a section from an INI file, and returns a rect in a tuple (see SaveWindowSize)"""
    if state: state = state + " "
    values = [win32ui.GetProfileVal(section, state + name, 0)
              for name in ("left", "top", "right", "bottom")]
    return tuple(values)
def RectToCreateStructRect(rect):
    """Convert an (left, top, right, bottom) rect into the (cy, cx, y, x)
    tuple used by CREATESTRUCT position fields."""
    left, top, right, bottom = rect
    return (bottom - top, right - left, top, left)
# Define FrameWindow and Application objects
#
# The Main Frame of the application.
class MainFrame(window.MDIFrameWnd):
    """Main MDI frame: owns the status bar and persists its screen position
    between sessions via the profile (INI/registry)."""
    sectionPos = "Main Window"
    statusBarIndicators = ( afxres.ID_SEPARATOR,       #// status line indicator
                            afxres.ID_INDICATOR_CAPS,
                            afxres.ID_INDICATOR_NUM,
                            afxres.ID_INDICATOR_SCRL,
                            win32ui.ID_INDICATOR_LINENUM,
                            win32ui.ID_INDICATOR_COLNUM )
    def OnCreate(self, cs):
        self._CreateStatusBar()
        return 0
    def _CreateStatusBar(self):
        self.statusBar = win32ui.CreateStatusBar(self)
        self.statusBar.SetIndicators(self.statusBarIndicators)
        # keep the line/column panes updated from the active edit control
        self.HookCommandUpdate(self.OnUpdatePosIndicator, win32ui.ID_INDICATOR_LINENUM)
        self.HookCommandUpdate(self.OnUpdatePosIndicator, win32ui.ID_INDICATOR_COLNUM)
    def OnUpdatePosIndicator(self, cmdui):
        editControl = scriptutils.GetActiveEditControl()
        value = " " * 5
        if editControl is not None:
            try:
                startChar, endChar = editControl.GetSel()
                lineNo = editControl.LineFromChar(startChar)
                colNo = endChar - editControl.LineIndex(lineNo)
                # both panes share this handler; dispatch on the pane id
                if cmdui.m_nID==win32ui.ID_INDICATOR_LINENUM:
                    value = "%0*d" % (5, lineNo + 1)
                else:
                    value = "%0*d" % (3, colNo + 1)
            except win32ui.error:
                pass
        cmdui.SetText(value)
        cmdui.Enable()
    def PreCreateWindow(self, cc):
        cc = self._obj_.PreCreateWindow(cc)
        pos = LoadWindowSize(self.sectionPos)
        self.startRect = pos
        # only apply a saved position when a non-zero width was stored
        if pos[2] - pos[0]:
            rect = RectToCreateStructRect(pos)
            cc = cc[0], cc[1], cc[2], cc[3], rect, cc[5], cc[6], cc[7], cc[8]
        return cc
    def OnDestroy(self, msg):
        # use GetWindowPlacement(), as it works even when min'd or max'd
        rectNow = self.GetWindowPlacement()[4]
        # only write the profile when the position actually changed
        if rectNow != self.startRect:
            SaveWindowSize(self.sectionPos, rectNow)
        return 0
class CApp(WinApp):
    """ A class for the application.

    Owns the main frame window, the idle-handler list, and the standard
    file/help command hooks.
    """
    def __init__(self):
        self.oldCallbackCaller = None
        WinApp.__init__(self, win32ui.GetApp() )
        self.idleHandlers = []
    def InitInstance(self):
        " Called to crank up the app "
        HookInput()
        numMRU = win32ui.GetProfileVal("Settings","Recent File List Size", 10)
        win32ui.LoadStdProfileSettings(numMRU)
#        self._obj_.InitMDIInstance()
        # pre-Win95/NT4 needs the 3D control styling enabled explicitly
        if win32api.GetVersionEx()[0]<4:
            win32ui.SetDialogBkColor()
            win32ui.Enable3dControls()
        # install a "callback caller" - a manager for the callbacks
#        self.oldCallbackCaller = win32ui.InstallCallbackCaller(self.CallbackManager)
        self.LoadMainFrame()
        self.SetApplicationPaths()
    def ExitInstance(self):
        " Called as the app dies - too late to prevent it here! "
        win32ui.OutputDebug("Application shutdown\n")
        # Restore the callback manager, if any.
        try:
            win32ui.InstallCallbackCaller(self.oldCallbackCaller)
        except AttributeError:
            pass
        if self.oldCallbackCaller:
            del self.oldCallbackCaller
        self.frame=None # clean Python references to the now destroyed window object.
        self.idleHandlers = []
        # Attempt cleanup if not already done!
        if self._obj_: self._obj_.AttachObject(None)
        self._obj_ = None
        global App
        global AppBuilder
        App = None
        AppBuilder = None
        return 0
    def HaveIdleHandler(self, handler):
        return handler in self.idleHandlers
    def AddIdleHandler(self, handler):
        self.idleHandlers.append(handler)
    def DeleteIdleHandler(self, handler):
        self.idleHandlers.remove(handler)
    def OnIdle(self, count):
        # Run every registered idle handler; a handler that raises is removed
        # so one broken handler cannot wedge the idle loop.
        try:
            ret = 0
            handlers = self.idleHandlers[:] # copy list, as may be modified during loop
            for handler in handlers:
                try:
                    thisRet = handler(handler, count)
                except:
                    print "Idle handler %s failed" % (repr(handler))
                    traceback.print_exc()
                    print "Idle handler removed from list"
                    try:
                        self.DeleteIdleHandler(handler)
                    except ValueError: # Item not in list.
                        pass
                    thisRet = 0
                ret = ret or thisRet
            return ret
        except KeyboardInterrupt:
            pass
    def CreateMainFrame(self):
        # factory method; subclasses may return a different frame class
        return MainFrame()
    def LoadMainFrame(self):
        " Create the main applications frame "
        self.frame = self.CreateMainFrame()
        self.SetMainFrame(self.frame)
        self.frame.LoadFrame(win32ui.IDR_MAINFRAME, win32con.WS_OVERLAPPEDWINDOW)
        self.frame.DragAcceptFiles() # we can accept these.
        self.frame.ShowWindow(win32ui.GetInitialStateRequest())
        self.frame.UpdateWindow()
        self.HookCommands()
    def OnHelp(self,id, code):
        # Shared handler for both help commands; dispatches on the command id.
        try:
            if id==win32ui.ID_HELP_GUI_REF:
                helpFile = regutil.GetRegisteredHelpFile("Pythonwin Reference")
                helpCmd = win32con.HELP_CONTENTS
            else:
                helpFile = regutil.GetRegisteredHelpFile("Main Python Documentation")
                helpCmd = win32con.HELP_FINDER
            if helpFile is None:
                win32ui.MessageBox("The help file is not registered!")
            else:
                import help
                help.OpenHelpFile(helpFile, helpCmd)
        except:
            t, v, tb = sys.exc_info()
            win32ui.MessageBox("Internal error in help file processing\r\n%s: %s" % (t,v))
            tb = None # Prevent a cycle
    def DoLoadModules(self, modules):
        # XXX - this should go, but the debugger uses it :-(
        # dont do much checking!
        for module in modules:
            __import__(module)
    def HookCommands(self):
        # Wire up the standard file/help commands and drag-drop.
        self.frame.HookMessage(self.OnDropFiles,win32con.WM_DROPFILES)
        self.HookCommand(self.HandleOnFileOpen,win32ui.ID_FILE_OPEN)
        self.HookCommand(self.HandleOnFileNew,win32ui.ID_FILE_NEW)
        self.HookCommand(self.OnFileMRU,win32ui.ID_FILE_MRU_FILE1)
        self.HookCommand(self.OnHelpAbout,win32ui.ID_APP_ABOUT)
        self.HookCommand(self.OnHelp, win32ui.ID_HELP_PYTHON)
        self.HookCommand(self.OnHelp, win32ui.ID_HELP_GUI_REF)
        # Hook for the right-click menu.
        self.frame.GetWindow(win32con.GW_CHILD).HookMessage(self.OnRClick,win32con.WM_RBUTTONDOWN)
    def SetApplicationPaths(self):
        # Load the users/application paths
        new_path = []
        apppath=win32ui.GetProfileVal('Python','Application Path','').split(';')
        for path in apppath:
            if len(path)>0:
                new_path.append(win32ui.FullPath(path))
        # additional numbered path entries: "Application Path 1" .. "Application Path 10"
        for extra_num in range(1,11):
            apppath=win32ui.GetProfileVal('Python','Application Path %d'%extra_num,'').split(';')
            if len(apppath) == 0:
                break
            for path in apppath:
                if len(path)>0:
                    new_path.append(win32ui.FullPath(path))
        sys.path = new_path + sys.path
    def OnRClick(self,params):
        " Handle right click message "
        # put up the entire FILE menu!
        menu = win32ui.LoadMenu(win32ui.IDR_TEXTTYPE).GetSubMenu(0)
        menu.TrackPopupMenu(params[5]) # track at mouse position.
        return 0
    def OnDropFiles(self,msg):
        " Handle a file being dropped from file manager "
        hDropInfo = msg[2]
        self.frame.SetActiveWindow() # active us
        nFiles = win32api.DragQueryFile(hDropInfo)
        try:
            for iFile in range(0,nFiles):
                fileName = win32api.DragQueryFile(hDropInfo, iFile)
                win32ui.GetApp().OpenDocumentFile( fileName )
        finally:
            win32api.DragFinish(hDropInfo);
        return 0
# No longer used by Pythonwin, as the C++ code has this same basic functionality
# but handles errors slightly better.
# It all still works, tho, so if you need similar functionality, you can use it.
# Therefore I havent deleted this code completely!
#    def CallbackManager( self, ob, args = () ):
#        """Manage win32 callbacks. Trap exceptions, report on them, then return 'All OK'
#        to the frame-work. """
#        import traceback
#        try:
#            ret = apply(ob, args)
#            return ret
#        except:
#            # take copies of the exception values, else other (handled) exceptions may get
#            # copied over by the other fns called.
#            win32ui.SetStatusText('An exception occured in a windows command handler.')
#            t, v, tb = sys.exc_info()
#            traceback.print_exception(t, v, tb.tb_next)
#            try:
#                sys.stdout.flush()
#            except (NameError, AttributeError):
#                pass
# Command handlers.
    def OnFileMRU( self, id, code ):
        " Called when a File 1-n message is recieved "
        fileName = win32ui.GetRecentFileList()[id - win32ui.ID_FILE_MRU_FILE1]
        win32ui.GetApp().OpenDocumentFile(fileName)
    def HandleOnFileOpen( self, id, code ):
        " Called when FileOpen message is received "
        win32ui.GetApp().OnFileOpen()
    def HandleOnFileNew( self, id, code ):
        " Called when FileNew message is received "
        win32ui.GetApp().OnFileNew()
    def OnHelpAbout( self, id, code ):
        " Called when HelpAbout message is received.  Displays the About dialog. "
        win32ui.InitRichEdit()
        dlg=AboutBox()
        dlg.DoModal()
def _GetRegistryValue(key, val, default = None):
    """Read a registry value, trying HKEY_CURRENT_USER then
    HKEY_LOCAL_MACHINE; return *default* when neither hive has it.
    (val is the registry value name - None for the default value.)"""
    for root in (win32con.HKEY_CURRENT_USER, win32con.HKEY_LOCAL_MACHINE):
        try:
            hkey = win32api.RegOpenKey(root, key)
            return win32api.RegQueryValueEx(hkey, val)[0]
        except win32api.error:
            pass
    return default
# Credit strings assembled into the About box text.
scintilla = "Scintilla is Copyright 1998-2008 Neil Hodgson (http://www.scintilla.org)"
idle = "This program uses IDLE extensions by Guido van Rossum, Tim Peters and others."
contributors = "Thanks to the following people for making significant contributions: Roger Upole, Sidnei da Silva, Sam Rushing, Curt Hagenlocher, Dave Brennan, Roger Burnham, Gordon McMillan, Neil Hodgson, Laramie Leavitt. (let me know if I have forgotten you!)"
# The About Box
class AboutBox(dialog.Dialog):
    """PythonWin About dialog: fills in credits, version info, and hooks the
    home-page button."""
    def __init__(self, idd=win32ui.IDD_ABOUTBOX):
        dialog.Dialog.__init__(self, idd)
    def OnInitDialog(self):
        text = "Pythonwin - Python IDE and GUI Framework for Windows.\n\n%s\n\nPython is %s\n\n%s\n\n%s\n\n%s" % (win32ui.copyright, sys.copyright, scintilla, idle, contributors)
        self.SetDlgItemText(win32ui.IDC_EDIT1, text)
        # Get the build number - written by installers.
        # For distutils build, read pywin32.version.txt
        import distutils.sysconfig
        site_packages = distutils.sysconfig.get_python_lib(plat_specific=1)
        try:
            build_no = open(os.path.join(site_packages, "pywin32.version.txt")).read().strip()
            ver = "pywin32 build %s" % build_no
        except EnvironmentError:
            ver = None
        if ver is None:
            # See if we are Part of Active Python
            ver = _GetRegistryValue("SOFTWARE\\ActiveState\\ActivePython", "CurrentVersion")
            if ver is not None:
                ver = "ActivePython build %s" % (ver,)
        if ver is None:
            ver = ""
        self.SetDlgItemText(win32ui.IDC_ABOUT_VERSION, ver)
        self.HookCommand(self.OnButHomePage, win32ui.IDC_BUTTON1)
    def OnButHomePage(self, id, code):
        # BN_CLICKED: open the project home page in the default browser
        if code == win32con.BN_CLICKED:
            win32api.ShellExecute(0, "open", "http://starship.python.net/crew/mhammond/win32", None, "", 1)
def Win32RawInput(prompt=None):
    """Provide raw_input() for gui apps: prompts via a dialog box.

    Raises KeyboardInterrupt when the user cancels the dialog, matching
    console raw_input() behaviour on Ctrl+C.
    """
    # flush stderr/out first, so pending output appears before the dialog.
    try:
        sys.stdout.flush()
        sys.stderr.flush()
    except:
        # best-effort: stdout/stderr may be replaced objects without flush()
        pass
    if prompt is None: prompt = ""
    ret=dialog.GetSimpleInput(prompt)
    # identity test instead of '== None': GetSimpleInput returns None on cancel
    if ret is None:
        raise KeyboardInterrupt("operation cancelled")
    return ret
def Win32Input(prompt=None):
    "Provide input() for gui apps"
    # NOTE(review): eval of user-supplied text mirrors Python 2 input();
    # inherently unsafe on untrusted input, by design of the py2 API.
    return eval(raw_input(prompt))
def HookInput():
    """Replace the interactive input builtins so console prompts become GUI
    dialogs instead of blocking on a console that may not exist."""
    try:
        raw_input
        # must be py2x...
        sys.modules['__builtin__'].raw_input=Win32RawInput
        sys.modules['__builtin__'].input=Win32Input
    except NameError:
        # must be py3k
        # NOTE(review): 'import code' appears unused in this branch -- confirm
        import code
        sys.modules['builtins'].input=Win32RawInput
def HaveGoodGUI():
    """Returns true if we currently have a good gui available.
    (True when the pywin startup framework module has been imported.)"""
    loaded_modules = sys.modules
    return "pywin.framework.startup" in loaded_modules
def CreateDefaultGUI( appClass = None):
    """Creates a default GUI environment
    (defaults to the interactive Python app when no class is given).
    """
    if appClass is None:
        import intpyapp # Bring in the default app - could be param'd later.
        appClass = intpyapp.InteractivePythonApp
    # Create and init the app.
    appClass().InitInstance()
def CheckCreateDefaultGUI():
    """Checks and creates if necessary a default GUI environment.
    Returns whether a good GUI already existed before the call."""
    had_gui = HaveGoodGUI()
    if not had_gui:
        CreateDefaultGUI()
    return had_gui
| {
"repo_name": "Mj258/weiboapi",
"path": "srapyDemo/envs/Lib/site-packages/pythonwin/pywin/framework/app.py",
"copies": "12",
"size": "13974",
"license": "mit",
"hash": -3339061744887958000,
"line_mean": 33.25,
"line_max": 262,
"alpha_frac": 0.7196937169,
"autogenerated": false,
"ratio": 3.0638017978513483,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9783495514751347,
"avg_score": null,
"num_lines": null
} |
# app.py
# AtomicPuppy uses asyncio coroutines for concurrent IO
import asyncio
import signal
from atomicpuppy import AtomicPuppy
import sys
from decouple import config
from apps.readaccount.logger import logging
from apps.readaccount.processors import process_deposit, process_transfer
logger = logging.getLogger(__name__)
# AtomicPuppy needs a callback to pass you messages.
def handle(event):
    """Dispatch accounts-stream events to their processors; log anything else."""
    on_accounts = event.stream == 'accounts'
    if on_accounts and event.type == 'created-deposit':
        return process_deposit(event)
    if on_accounts and event.type == 'created-transfer':
        return process_transfer(event)
    logger.info('handle event: {}'.format(vars(event)))
    return
cfg = {
    'atomicpuppy': {
        'host': config('EVENTSTORE_HOST', default='eventstore'),
        'port': config('EVENTSTORE_PORT', default=2113, cast=int),
        'streams': ['accounts']  # subscribe to the accounts stream only
    }
}
# AtomicPuppy is constructed with the config and the per-event callback.
ap = AtomicPuppy(cfg, handle)
loop = asyncio.get_event_loop()
# to kill the puppy, call stop()
def stop():
    """SIGINT handler: stop AtomicPuppy and exit the process."""
    logger.debug("SIGINT received, shutting down")
    ap.stop()
    sys.exit()
loop.add_signal_handler(signal.SIGINT, stop)
# and to start it call start.
loop.run_until_complete(ap.start()) | {
"repo_name": "ferstdigital/cqrs-python-demo",
"path": "apps/readaccount/app.py",
"copies": "1",
"size": "1224",
"license": "mit",
"hash": 2360747732311586300,
"line_mean": 27.488372093,
"line_max": 73,
"alpha_frac": 0.7034313725,
"autogenerated": false,
"ratio": 3.4971428571428573,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47005742296428576,
"avg_score": null,
"num_lines": null
} |
# app.py
# Creator: Max Greenwald
# Updated: 3/20/17
# Purpose: Create a Flask app and define the necessary API routes
from flask import Flask, jsonify, request, abort
from flask_sqlalchemy import SQLAlchemy
from models import (Doctor, Review)
# Initialize Flask app with SQLAlchemy
app = Flask(__name__)
# Database URI and other settings live in config.py.
app.config.from_pyfile('config.py')
db = SQLAlchemy(app)
@app.route('/')
def main_page():
    """Landing page pointing at the project documentation."""
    landing = ("<html><head></head><body>A RESTful API in Flask using SQLAlchemy. "
               "For more info on usage, go to <a href>https://github.com/mgreenw/flask-restapi-example</a>.</body></html>")
    return landing
# Doctor Routes
@app.route('/api/v1/doctors/<id>')
def show_doctor(id):
    """Return one doctor as JSON, or a 404 error payload.

    Fixed: the old bare ``except`` swallowed every error (including real
    database failures); the missing-row case is now handled explicitly.
    """
    doctor = Doctor.query.filter_by(id=id).first()
    if doctor is None:
        return not_found("Doctor does not exist")
    return jsonify(doctor.serialize)
@app.route('/api/v1/doctors', methods=['POST'])
def create_doctor():
    """Create a doctor from a JSON payload containing 'name'."""
    if not request.is_json or 'name' not in request.get_json():
        return bad_request('Missing required data.')
    new_doctor = Doctor(request.get_json()['name'])
    db.session.add(new_doctor)
    db.session.commit()
    # 201 Created with the stored representation.
    return jsonify({'doctor': new_doctor.serialize}), 201
#Review Routes
@app.route('/api/v1/reviews/<id>')
def show_review(id):
    """Return one review (with its doctor) as JSON, or a 404 payload.

    Fixed: the old code called ``first_or_404`` inside a bare ``except``
    that caught the resulting 404 along with every other error; the
    missing-row case is now handled explicitly.
    """
    review = Review.query.filter_by(id=id).first()
    if review is None:
        return not_found("Review does not exist.")
    return jsonify(id=review.id,
                   doctor_id=review.doctor_id,
                   description=review.description,
                   doctor=dict(id=review.doctor.id,
                               name=review.doctor.name))
@app.route('/api/v1/reviews', methods=['POST'])
def create_review():
    """Create a review for a doctor from a JSON payload."""
    payload = request.get_json()
    if not request.is_json or 'doctor_id' not in payload or 'description' not in payload:
        return bad_request('Missing required data.')
    # An invalid doctor_id surfaces as a DB error on commit; map it to 400.
    try:
        review = Review(doctor_id=payload['doctor_id'],
                        description=payload['description'])
        db.session.add(review)
        db.session.commit()
    except:
        return bad_request('Given doctor_id does not exist.')
    return jsonify({'review': review.serialize}), 201
# Custom Error Helper Functions
def bad_request(message):
    """Build a JSON 400 response carrying *message*."""
    resp = jsonify({'error': message})
    resp.status_code = 400
    return resp
def not_found(message):
    """Build a JSON 404 response carrying *message*."""
    resp = jsonify({'error': message})
    resp.status_code = 404
    return resp
| {
"repo_name": "mgreenw/flask-restapi-example",
"path": "app.py",
"copies": "1",
"size": "2561",
"license": "mit",
"hash": -262296473858993820,
"line_mean": 32.2597402597,
"line_max": 184,
"alpha_frac": 0.65560328,
"autogenerated": false,
"ratio": 3.5868347338935576,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47424380138935573,
"avg_score": null,
"num_lines": null
} |
# app.py
from flask import Flask
from flask import render_template
from flask import Response
# Flask app serving CoreOS iPXE boot scripts and cloud-configs.
app = Flask(__name__)
app.config.from_pyfile('config.py')
@app.route('/coreos-ipxe/<userid>/<sshkeyname>/<instanceid>')
@app.route('/coreos-ipxe/<userid>/<sshkeyname>/<instanceid>/<channel>')
def coreos_ipxe(userid, instanceid, sshkeyname, channel='stable'):
    """Render the iPXE boot script for one CoreOS instance."""
    # Resolve the user's public key once and hand everything to the template.
    context = dict(userid=userid,
                   sshkeyname=sshkeyname,
                   sshkey=key(userid, sshkeyname),
                   instanceid=instanceid,
                   channel=channel)
    return render_template('coreos-ipxe', **context)
@app.route('/key/<userid>')
@app.route('/key/<userid>/<sshkeyname>')
def key(userid, sshkeyname='id_rsa.pub'):
    """Return the contents of a user's stored public key file.

    Fixed: the file handle was opened without ever being closed; a
    context manager now guarantees it is released.
    """
    with open('keys/{0}/{1}'.format(userid, sshkeyname)) as fh:
        return fh.read()
@app.route('/coreos-cloudconfig-template/<userid>/<instanceid>')
def instancetemplate(userid, instanceid):
    """Name of the cloud-config template for an instance (currently fixed)."""
    return 'coreos-cloudconfig'
@app.route('/coreos-cloudconfig/<userid>/<sshkeyname>/<instanceid>')
def coreos_cloudconfig(userid, sshkeyname, instanceid):
    """Render the instance's cloud-config document as YAML."""
    body = render_template(instancetemplate(userid, instanceid),
                           key=key(userid, sshkeyname))
    return Response(body, mimetype='text/x-yaml')
@app.route("/ping")
def ping():
    """Liveness probe endpoint."""
    return 'PONG'
if __name__ == '__main__':
    # Listen on all interfaces, port 80 (requires elevated privileges).
    app.run(host='0.0.0.0', port=80)
| {
"repo_name": "andrewrothstein/docker-coreos-ipxeserver",
"path": "app.py",
"copies": "1",
"size": "1214",
"license": "mit",
"hash": -1393081201703767000,
"line_mean": 25.9777777778,
"line_max": 71,
"alpha_frac": 0.7100494234,
"autogenerated": false,
"ratio": 3.0199004975124377,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9040534759231074,
"avg_score": 0.03788303233627286,
"num_lines": 45
} |
# app.py
import datetime
import functools
import os
import re
import urllib
from flask import (Flask, flash, Markup, redirect, render_template, request, Response, session, url_for)
from markdown import markdown
from markdown.extensions.codehilite import CodeHiliteExtension
from markdown.extensions.extra import ExtraExtension
from micawber import bootstrap_basic, parse_html
from micawber.cache import Cache as OEmbedCache
from peewee import *
from playhouse.flask_utils import FlaskDB, get_object_or_404, object_list
from playhouse.sqlite_ext import *
# --- Configuration ---------------------------------------------------------
# NOTE(review): password and secret key are hard-coded; move them to
# environment variables before deploying anywhere real.
ADMIN_PASSWORD = 'testpw'
APP_DIR = os.path.dirname(os.path.realpath(__file__))
DATABASE = 'sqliteext:///%s' % os.path.join(APP_DIR, 'blog.db')
DEBUG = False
SECRET_KEY = 'super, secret'
SITE_WIDTH = 800
app = Flask(__name__)
# Pull the UPPER_CASE names above into Flask's config.
app.config.from_object(__name__)
flask_db = FlaskDB(app)
database = flask_db.database
oembed_providers = bootstrap_basic(OEmbedCache())
class Entry(flask_db.Model):
    """A blog post stored via peewee and mirrored into the FTS index."""
    title = CharField()
    slug = CharField(unique=True) # Friendly post name
    content = TextField()
    published = BooleanField(index=True)
    timestamp = DateTimeField(default=datetime.datetime.now, index=True)
    @property
    def html_content(self):
        """Render markdown content to HTML with highlighting and oEmbeds."""
        hilite = CodeHiliteExtension(linenums=False)
        extras = ExtraExtension()
        markdown_content = markdown(self.content, extensions=[hilite, extras])
        oembed_content = parse_html(
            markdown_content,
            oembed_providers,
            urlize_all=True,
            maxwidth=app.config['SITE_WIDTH'])
        return Markup(oembed_content)
    def save(self, *args, **kwargs):
        """Persist the entry, deriving a slug and refreshing the FTS index.

        Fixed: the regex is now a raw string — ``'[^\\w]+'`` in a plain
        literal is an invalid escape sequence in Python 3.6+.
        """
        if not self.slug:
            self.slug = re.sub(r'[^\w]+', '-', self.title.lower())
        ret = super(Entry, self).save(*args, **kwargs)
        # store search content.
        self.update_search_index()
        return ret
    def update_search_index(self):
        """Insert or update this entry's row in the FTS shadow table."""
        try:
            fts_entry = FTSEntry.get(FTSEntry.entry_id == self.id)
        except FTSEntry.DoesNotExist:
            fts_entry = FTSEntry(entry_id=self.id)
            force_insert = True
        else:
            force_insert = False
        fts_entry.content = '\n'.join((self.title, self.content))
        fts_entry.save(force_insert=force_insert)
    @classmethod
    def public(cls):
        # peewee expressions need '== True', not 'is True'.
        return Entry.select().where(Entry.published == True)
    @classmethod
    def search(cls, query):
        """Full-text search over published entries, best matches first."""
        words = [word.strip() for word in query.split() if word.strip()]
        if not words:
            # return empty query
            return Entry.select().where(Entry.id == 0)
        else:
            search = ' '.join(words)
            return (FTSEntry
                    .select(
                        FTSEntry,
                        Entry,
                        FTSEntry.rank().alias('score'))
                    .join(Entry, on=(FTSEntry.entry_id == Entry.id).alias('entry'))
                    .where((Entry.published == True) & (FTSEntry.match(search)))
                    .order_by(SQL('score').desc()))
    @classmethod
    def drafts(cls):
        """Query of unpublished entries."""
        return Entry.select().where(Entry.published == False)
class FTSEntry(FTSModel):
    # Full-text-search shadow table: one row per Entry, keyed by entry_id.
    entry_id = IntegerField()
    content = TextField()
    class Meta:
        # Bind to the same database as the regular models.
        database = database
def login_required(fn):
    """Decorator requiring a logged-in session; redirects to the login view."""
    @functools.wraps(fn)
    def inner(*args, **kwargs):
        if not session.get('logged_in'):
            # Remember where the user wanted to go.
            return redirect(url_for('login', next=request.path))
        return fn(*args, **kwargs)
    return inner
@app.route('/login/', methods=['GET', 'POST'])
def login():
    """Password-only login; stores a flag in the session on success."""
    next_url = request.args.get('next') or request.form.get('next')
    if request.method == 'POST' and request.form.get('password'):
        candidate = request.form.get('password')
        if candidate == app.config['ADMIN_PASSWORD']:
            session['logged_in'] = True
            session.permanent = True  # Use cookie to store session.
            flash('You are now entering GH7.Tech', 'success')
            return redirect(next_url or url_for('index'))
        flash('Incorrect Password', 'danger')
    return render_template('login.html', next_url=next_url)
@app.route('/logout/', methods=['GET', 'POST'])
def logout():
    """POST clears the session; GET shows a confirmation page."""
    if request.method != 'POST':
        return render_template('logout.html')
    session.clear()
    return redirect(url_for('login'))
@app.route('/')
def index():
    """Home page: public entries, optionally filtered by full-text search."""
    term = request.args.get('q')
    if term:
        entries = Entry.search(term)
    else:
        entries = Entry.public().order_by(Entry.timestamp.desc())
    # check_bounds=False keeps out-of-range pages from 404ing.
    return object_list('index.html',
                       entries,
                       search=term,
                       check_bounds=False)
def _create_or_edit(entry, template):
    """Shared form handler backing the create and edit views.

    On POST: populate *entry* from the form, validate, save inside a
    transaction, and redirect on success. Otherwise (or on failure)
    re-render *template* with flash messages.
    """
    if request.method == 'POST':
        entry.title = request.form.get('title') or ''
        entry.content = request.form.get('content') or ''
        entry.published = request.form.get('published') or False
        if not (entry.title and entry.content):
            flash('Title and Content are required.', 'danger')
        else:
            try:
                # Transaction lets the save roll back cleanly on an
                # integrity error (duplicate slug).
                with database.atomic():
                    entry.save()
            except IntegrityError:
                flash('Error: this title is already in use.', 'danger')
            else:
                flash('Entry saved successfully.', 'success')
                target = 'detail' if entry.published else 'edit'
                return redirect(url_for(target, slug=entry.slug))
    return render_template(template, entry=entry)
@app.route('/create/', methods=['GET', 'POST'])
@login_required
def create():
    """Create a new entry via the shared form handler."""
    blank = Entry(title='', content='')
    return _create_or_edit(blank, 'create.html')
@app.route('/drafts/')
@login_required
def drafts():
    """List unpublished entries, newest first (login required)."""
    pending = Entry.drafts().order_by(Entry.timestamp.desc())
    return object_list('index.html', pending)
@app.route('/<slug>/')
def detail(slug):
    """Show one entry; drafts are visible only when logged in."""
    scope = Entry.select() if session.get('logged_in') else Entry.public()
    entry = get_object_or_404(scope, Entry.slug == slug)
    return render_template('detail.html', entry=entry)
@app.route('/<slug>/edit', methods=['GET', 'POST'])
@login_required
def edit(slug):
    """Edit an existing entry via the shared form handler."""
    target = get_object_or_404(Entry, Entry.slug == slug)
    return _create_or_edit(target, 'edit.html')
@app.template_filter('clean_querystring')
def clean_querystring(request_args, *keys_to_remove, **new_values):
    """Template filter: re-encode request args minus some keys, plus overrides.

    Fixed: ``urllib.urlencode`` exists only on Python 2; on Python 3 it
    lives in ``urllib.parse``. Resolve whichever is available.
    """
    querystring = dict((key, value) for key, value in request_args.items())
    for key in keys_to_remove:
        querystring.pop(key, None)
    querystring.update(new_values)
    try:
        from urllib import urlencode  # Python 2
    except ImportError:
        from urllib.parse import urlencode  # Python 3
    return urlencode(querystring)
@app.errorhandler(404)
def not_found(exc):
    """Minimal HTML body for 404 responses."""
    body = Response('<h3>Not Found :(</h3>')
    return body, 404
def main():
    """Create the tables if needed (safe=True) and run the dev server."""
    database.create_tables([Entry, FTSEntry], safe=True)
    app.run(debug=True)
if __name__ == '__main__':
    main()
| {
"repo_name": "ghosthand7/gh7.tech",
"path": "app.py",
"copies": "1",
"size": "7055",
"license": "mit",
"hash": -3277533033472818700,
"line_mean": 29.5411255411,
"line_max": 104,
"alpha_frac": 0.6103472714,
"autogenerated": false,
"ratio": 3.7566560170394037,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4867003288439404,
"avg_score": null,
"num_lines": null
} |
""" app.py """
from flask import Flask, render_template
import pybreaker
import requests
from listener import LogListener
app = Flask(__name__)
# Open the circuit after 3 consecutive failures; retry after 30 seconds.
time_breaker = pybreaker.CircuitBreaker(fail_max=3, reset_timeout=30)
time_breaker.add_listeners(LogListener(app))
@time_breaker
def _get_time():
    """Fetch the current time from the upstream service (circuit-protected)."""
    try:
        response = requests.get('http://localhost:3001/time', timeout=3.0)
    except (requests.exceptions.ConnectionError,
            requests.exceptions.Timeout):
        # Example-only shortcut: this masks the original exception.
        # In real code, capture/log the underlying error before
        # converting it into a breaker trip.
        raise pybreaker.CircuitBreakerError
    else:
        return response.json().get('datetime')
def get_time():
    """Return the upstream time, or 'Unavailable' while the circuit is open."""
    try:
        return _get_time()
    except pybreaker.CircuitBreakerError:
        return 'Unavailable'
def get_user():
    """Fetch the user's name from the user service (no breaker protection)."""
    return requests.get('http://localhost:3002/user').json().get('name')
@app.errorhandler(500)
def page_not_found(_):
    """Plain-text body for HTTP 500 responses.

    NOTE(review): the function name suggests 404, but it is registered
    for 500; renaming would change nothing functionally.
    """
    return 'Server error', 500
@app.route("/")
def hello():
    """Landing page combining the time and user services."""
    # Call order preserved: time service first, then the user service.
    current_time = get_time()
    username = get_user()
    return render_template('hello.html', name=username, time=current_time)
if __name__ == "__main__":
    # Dev server on port 3000; debug must be disabled in production.
    app.run(port=3000, debug=True)
| {
"repo_name": "danriti/short-circuit",
"path": "app.py",
"copies": "1",
"size": "1398",
"license": "mit",
"hash": 8355183602852748000,
"line_mean": 24.4181818182,
"line_max": 74,
"alpha_frac": 0.6766809728,
"autogenerated": false,
"ratio": 3.6984126984126986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48750936712126985,
"avg_score": null,
"num_lines": null
} |
# app.py
import datetime
import functools
import re
import os
import urllib
from flask import (Flask, abort, flash, Markup, redirect, render_template,
request, Response, session, url_for)
from markdown import markdown
from markdown.extensions.codehilite import CodeHiliteExtension
from markdown.extensions.extra import ExtraExtension
from micawber import bootstrap_basic, parse_html
from micawber.cache import Cache as OEmbedCache
from peewee import *
from playhouse.flask_utils import FlaskDB, get_object_or_404, object_list
from playhouse.sqlite_ext import *
# --- Configuration ---------------------------------------------------------
# NOTE(review): password and secret key are hard-coded; move them to
# environment variables before deploying anywhere real.
ADMIN_PASSWORD = 'secret'
APP_DIR = os.path.dirname(os.path.realpath(__file__))
DATABASE = 'sqliteext:///%s' % os.path.join(APP_DIR, 'blog.db')
DEBUG = False
SECRET_KEY = 'shhh, secret!' # Used by Flask to encrypt session cookie.
SITE_WIDTH = 800
app = Flask(__name__)
# Pull the UPPER_CASE names above into Flask's config.
app.config.from_object(__name__)
flask_db = FlaskDB(app)
database = flask_db.database
oembed_providers = bootstrap_basic(OEmbedCache())
# Data base modeling
class Entry(flask_db.Model):
    """A blog post stored via peewee and mirrored into the FTS index."""
    title = CharField()
    slug = CharField(unique=True)
    content = TextField()
    published = BooleanField(index=True)
    timestamp = DateTimeField(default=datetime.datetime.now, index=True)
    def save(self, *args, **kwargs):
        """Persist the entry, deriving a slug and refreshing the FTS index.

        Fixed: the regex is now a raw string — ``'[^\\w]+'`` in a plain
        literal is an invalid escape sequence in Python 3.6+.
        """
        if not self.slug:
            self.slug = re.sub(r'[^\w]+', '-', self.title.lower())
        ret = super(Entry, self).save(*args, **kwargs)
        # Store search content.
        self.update_search_index()
        return ret
    def update_search_index(self):
        """Insert or update this entry's row in the FTS shadow table."""
        try:
            fts_entry = FTSEntry.get(FTSEntry.entry_id == self.id)
        except FTSEntry.DoesNotExist:
            fts_entry = FTSEntry(entry_id=self.id)
            force_insert = True
        else:
            force_insert = False
        fts_entry.content = '\n'.join((self.title, self.content))
        fts_entry.save(force_insert=force_insert)
    @classmethod
    def public(cls):
        # peewee expressions need '== True', not 'is True'.
        return Entry.select().where(Entry.published == True)
    @classmethod
    def search(cls, query):
        """Full-text search over published entries, best matches first."""
        words = [word.strip() for word in query.split() if word.strip()]
        if not words:
            # Return empty query.
            return Entry.select().where(Entry.id == 0)
        else:
            search = ' '.join(words)
            return (FTSEntry
                    .select(
                        FTSEntry,
                        Entry,
                        FTSEntry.rank().alias('score'))
                    .join(Entry, on=(FTSEntry.entry_id == Entry.id).alias('entry'))
                    .where(
                        (Entry.published == True) &
                        (FTSEntry.match(search)))
                    .order_by(SQL('score').desc()))
    @classmethod
    def drafts(cls):
        """Query of unpublished entries."""
        return Entry.select().where(Entry.published == False)
    @property
    def html_content(self):
        """Render markdown content to HTML with highlighting and oEmbeds."""
        hilite = CodeHiliteExtension(linenums=False, css_class='highlight')
        extras = ExtraExtension()
        markdown_content = markdown(self.content, extensions=[hilite, extras])
        oembed_content = parse_html(
            markdown_content,
            oembed_providers,
            urlize_all=True,
            maxwidth=app.config['SITE_WIDTH'])
        return Markup(oembed_content)
class FTSEntry(FTSModel):
    # Full-text-search shadow table: one row per Entry, keyed by entry_id.
    entry_id = IntegerField()
    content = TextField()
    class Meta:
        # Bind to the same database as the regular models.
        database = database
# session handling
def login_required(fn):
    """Decorator requiring a logged-in session; redirects to the login view.

    Fixed: ``request.paths`` is not a Flask attribute and raised an
    AttributeError for anonymous users; it is now ``request.path``.
    """
    @functools.wraps(fn)
    def inner(*args, **kwargs):
        if session.get('logged_in'):
            return fn(*args, **kwargs)
        # Remember where the user wanted to go.
        return redirect(url_for('login', next=request.path))
    return inner
@app.route('/login/', methods=['GET', 'POST'])
def login():
    """Password-only login; stores a flag in the session on success."""
    next_url = request.args.get('next') or request.form.get('next')
    if request.method == 'POST' and request.form.get('password'):
        candidate = request.form.get('password')
        if candidate == app.config['ADMIN_PASSWORD']:
            session['logged_in'] = True
            session.permanent = True  # Use cookie to store session.
            flash('You are now logged in.', 'success')
            return redirect(next_url or url_for('index'))
        flash('Incorrect password.', 'danger')
    return render_template('login.html', next_url=next_url)
@app.route('/logout/', methods=['GET', 'POST'])
def logout():
    """POST clears the session; GET shows a confirmation page."""
    if request.method != 'POST':
        return render_template('logout.html')
    session.clear()
    return redirect(url_for('login'))
# index view
@app.route('/')
def index():
    """Home page: public entries, optionally filtered by full-text search."""
    term = request.args.get('q')
    if term:
        entries = Entry.search(term)
    else:
        entries = Entry.public().order_by(Entry.timestamp.desc())
    return object_list('index.html', entries, search=term)
# drafts view
@app.route('/drafts/')
@login_required
def drafts():
    """List unpublished entries, newest first (login required)."""
    pending = Entry.drafts().order_by(Entry.timestamp.desc())
    return object_list('index.html', pending)
# create view
@app.route('/create/', methods=['GET', 'POST'])
@login_required
def create():
    """Create a new entry from the submitted form.

    Fixed: the content column was being populated from the ``title``
    form field (``request.form['title']``); it now uses ``content``.
    """
    if request.method == 'POST':
        if request.form.get('title') and request.form.get('content'):
            entry = Entry.create(
                title=request.form['title'],
                content=request.form['content'],
                published=request.form.get('published') or False)
            flash('Entry created successfully.', 'success')
            if entry.published:
                return redirect(url_for('detail', slug=entry.slug))
            else:
                return redirect(url_for('edit', slug=entry.slug))
        else:
            flash('Title and Content are required.', 'danger')
    return render_template('create.html')
# entry view
@app.route('/<slug>/')
def detail(slug):
    """Show one entry; drafts are visible only when logged in."""
    scope = Entry.select() if session.get('logged_in') else Entry.public()
    entry = get_object_or_404(scope, Entry.slug == slug)
    return render_template('detail.html', entry=entry)
# edit view
@app.route('/<slug>/edit/', methods=['GET', 'POST'])
@login_required
def edit(slug):
    """Edit an existing entry; saves and redirects on a valid POST."""
    entry = get_object_or_404(Entry, Entry.slug == slug)
    if request.method == 'POST':
        new_title = request.form.get('title')
        new_content = request.form.get('content')
        if new_title and new_content:
            entry.title = new_title
            entry.content = new_content
            entry.published = request.form.get('published') or False
            entry.save()
            flash('Entry saved successfully.', 'success')
            target = 'detail' if entry.published else 'edit'
            return redirect(url_for(target, slug=entry.slug))
        flash('Title and Content are required.', 'danger')
    return render_template('edit.html', entry=entry)
# Template filters and error handling
@app.template_filter('clean_querystring')
def clean_querystring(request_args, *keys_to_remove, **new_values):
    """Template filter: re-encode request args minus some keys, plus overrides.

    Fixed: ``urllib.urlencode`` exists only on Python 2; on Python 3 it
    lives in ``urllib.parse``. Resolve whichever is available.
    """
    querystring = dict((key, value) for key, value in request_args.items())
    for key in keys_to_remove:
        querystring.pop(key, None)
    querystring.update(new_values)
    try:
        from urllib import urlencode  # Python 2
    except ImportError:
        from urllib.parse import urlencode  # Python 3
    return urlencode(querystring)
@app.errorhandler(404)
def not_found(exc):
    """Minimal HTML body for 404 responses."""
    body = Response('<h3>Not found</h3>')
    return body, 404
def main():
    """Create the tables if needed (safe=True) and run the dev server."""
    database.create_tables([Entry, FTSEntry], safe=True)
    app.run(debug=True)
if __name__ == '__main__':
main() | {
"repo_name": "HiroIshikawa/21playground",
"path": "payblog/blog/app/app.py",
"copies": "1",
"size": "7375",
"license": "mit",
"hash": -9175709813989772000,
"line_mean": 31.2096069869,
"line_max": 79,
"alpha_frac": 0.6122033898,
"autogenerated": false,
"ratio": 3.7859342915811087,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9867004608770724,
"avg_score": 0.006226614522077037,
"num_lines": 229
} |
""" app.py """
import json
import os
import sys
from flask import Flask, jsonify
from flask import request # for getting query string
# eg: request.args.get('user') will get '?user=some-value'
from flask_restful import Api
from flask_restful import Resource
from werkzeug.exceptions import NotFound
from lemmatize.lemmatize import Lemmatize
from corpus.arabic.utils.pyarabic.araby.strip_harakat import StripHarakat
# Flask application plus its flask-restful API wrapper.
app = Flask(__name__)
api = Api(app)
@app.route('/hello')
def hello():
    """Simple JSON greeting used as a smoke test."""
    return jsonify({'hello': 'world again'})
def get_cltk_text_dir(lang, corpus='perseus'):
    """Return the absolute directory holding JSON texts for *lang*.

    Layout: ~/cltk_data/<lang>/text/<lang>_text_<corpus>/json
    """
    folded = lang.casefold()
    cltk_home = os.path.expanduser('~/cltk_data')
    corpus_dir = '{0}_text_{1}'.format(folded, corpus)
    return os.path.join(cltk_home, folded, 'text', corpus_dir, 'json')
def open_json(fp):
    """Load and return the JSON document stored at path *fp*."""
    with open(fp) as handle:
        return json.load(handle)
# Simple example
class HelloWorld(Resource):
    """Demo resource returning a static greeting."""
    def get(self):
        return {'hello': 'world'}
# Simple example
class TodoSimple(Resource):
    """Demo resource echoing the URL token back to the caller."""
    def get(self, todo_id):
        return {'This is an example with token': todo_id}
# Simple examples
# Register the demo resource; the HelloWorld route is intentionally disabled.
api.add_resource(TodoSimple, '/todo/<string:todo_id>')
# api.add_resource(HelloWorld, '/hello')
# class CapitainsText(Resource):
# def get(self):
# fp_rel = '~/cltk_data/corpora/capitains_text_corpora/greekLit'
# fp = os.path.expanduser(fp_rel)
# files = os.listdir(fp)
# json_files = [file for file in files if file.endswith('.json')]
# for json_file in json_files:
# json_file_fp = os.path.join(fp, json_file)
# with open(json_file_fp) as file_open:
# x = json.load(file_open)
# return {'x': x}
# return {'files': json_files}
#
# # http://localhost:5000/lang
# api.add_resource(CapitainsText, '/text_example')
# class CapitainsText(Resource):
# def get(self):
# fp_rel = '~/cltk_data/corpora/capitains_text_corpora/greekLit'
# fp = os.path.expanduser(fp_rel)
# files = os.listdir(fp)
# json_files = [file for file in files if file.endswith('.json')]
# for json_file in json_files:
# json_file_fp = os.path.join(fp, json_file)
# with open(json_file_fp) as file_open:
# x = json.load(file_open)
# return {'x': x}
# return {'files': json_files}
#
# # http://localhost:5000/lang
# api.add_resource(CapitainsText, '/text_example')
# Available functionality, NLP or text serving
# @app.route('/')
# def hello_world():
# return {'funcionality': ['nlp', 'text']}
@app.route('/')
def available_functionality():
    """Top-level index of API capabilities."""
    capabilities = ['nlp', 'text']
    return jsonify({'cltk_api_functionality': capabilities})
@app.route('/nlp')
def nlp_functionality():
    """Placeholder endpoint for the NLP functionality."""
    return jsonify({'nlp_functionality': 'not yet implemented'})
# attempt at self-describing API; could be done better
@app.route('/text')
def text_functionality():
    """Self-describing hint: the next path segment is 'lang'."""
    return jsonify({'next_level': 'lang'})
# Available langs for texts
class AvailableLangs(Resource):
    """Languages for which texts can be served."""
    def get(self):
        return {'available_languages': ['greek', 'latin']}
# Expose the language listing at /text/lang.
api.add_resource(AvailableLangs, '/text/lang')
class AvailableText(Resource):
    """List the text names available for a language.

    GET /text/lang/<lang>[?translation=english]
    """
    def get(self, lang):
        repo = os.path.expanduser('~/cltk_data/corpora/capitains_text_corpora/')
        if lang == 'greek':
            subdir, suffix = 'greekLit', '__grc.json'
        elif lang == 'latin':
            subdir, suffix = 'latinLit', '__lat.json'
        else:
            # NOTE(review): returns the NotFound class rather than raising it,
            # preserved from the original behaviour.
            return NotFound
        lang_dir = os.path.join(repo, subdir)
        json_files = [f for f in os.listdir(lang_dir) if f.endswith('.json')]
        # '?translation=english' switches to the English-translation files.
        if request.args.get('translation') == 'english':
            suffix = '__eng.json'
        matching = [f for f in json_files if f.endswith(suffix)]
        names = [f[:-len(suffix)] for f in matching]
        return {'language': names}
# Expose per-language text listings.
api.add_resource(AvailableText, '/text/lang/<string:lang>')
class DisplayText(Resource):
    """Serve the JSON document for one text in the requested language.

    GET /text/lang/<lang>/<text_name>[?translation=english]

    Improvement: removed an ``os.listdir`` scan whose results
    (``lang_files_json``/``lang_files_filtered``) were computed but
    never used.
    """
    def get(self, lang, text_name):
        repo = os.path.expanduser('~/cltk_data/corpora/capitains_text_corpora/')
        if lang == 'greek':
            lang_dir = 'greekLit'
            file_ending = '__grc.json'
        elif lang == 'latin':
            lang_dir = 'latinLit'
            file_ending = '__lat.json'
        else:
            # NOTE(review): returns the NotFound class rather than raising it,
            # preserved from the original behaviour.
            return NotFound
        lang_dir = os.path.join(repo, lang_dir)
        # '?translation=english' selects the English translation file.
        if request.args.get('translation') == 'english':
            file_ending = '__eng.json'
        text_path = os.path.join(lang_dir, text_name + file_ending)
        with open(text_path) as file_open:
            file_dict = json.load(file_open)
        return file_dict
# curl http://0.0.0.0:5000/text/lang/latin/virgil__aeneid?translation=english
# curl http://0.0.0.0:5000/text/lang/latin/virgil__aeneid
# Remaining resource registrations: text serving plus NLP utilities.
api.add_resource(DisplayText, '/text/lang/<string:lang>/<string:text_name>')
api.add_resource(Lemmatize, '/nlp/lemmatize/latin/simple')
api.add_resource(StripHarakat, '/nlp/corpus/arabic/utils/pyarabic/araby/strip_harakat')
if __name__ == '__main__':
    # Bind to all interfaces; debug must be disabled in production.
    app.run(host='0.0.0.0', debug=True)
| {
"repo_name": "kylepjohnson/cltk_api_v2",
"path": "app/app.py",
"copies": "1",
"size": "5837",
"license": "mit",
"hash": 6949494212429706000,
"line_mean": 29.7210526316,
"line_max": 108,
"alpha_frac": 0.6200102793,
"autogenerated": false,
"ratio": 3.286599099099099,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4406609378399099,
"avg_score": null,
"num_lines": null
} |
"""app.py
revision: 0.1 24.4.2014 initial by David Levy
Tornado server for mongodb tornado angular tutorial
"""
import os
import sys
import tornado
import pymongo
from tornado.options import options
from tornado import ioloop, web
from handlers.blog_handler import BlogHandler
from handlers.entry_handler import EntryHandler
from handlers.index_handler import IndexHandler
#adding local directory to path
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
"""
Loading default setting files
"""
import settings
"""
searching for a local_setting.py file that overrides default configuration
"""
try:
    tornado.options.parse_config_file(
        os.path.join(os.path.dirname(os.path.realpath(__file__)),'local_settings.py'),
        False)
except Exception as e:
    #print ('local settings: {}'.format(str(e)))
    #TODO: handle different exceptions
    # Missing local_settings.py is an expected, non-fatal condition.
    print ('local_settings.py not defined, using default settings')
"""
Connecting to the mongodb database
"""
mongo_client = pymongo.MongoClient(options.mongodb_host)
db = mongo_client[options.mongodb_name]
def init_db(db):
    """Create the blog/user collections, unique indexes, and a default
    admin account.

    Safe to call repeatedly: existing collections and an existing admin
    user are silently skipped.
    """
    try:
        db.create_collection('blog')
    except:
        pass  # collection already exists
    blog = db['blog']
    blog.ensure_index('slug', unique=True)
    blog.ensure_index('_id', unique=True)
    try:
        db.create_collection('user')
    except:
        pass  # collection already exists
    try:
        # NOTE(review): default admin credentials are stored in plain text.
        db['user'].insert({'username':'admin','password':'admin','role':'admin'})
    except:
        pass  # admin user already present
    users = db['user']
    users.ensure_index('username', unique=True)
    users.ensure_index('_id', unique=True)
# Serve the mobile asset bundle when configured to do so.
if options.mobile_version:
    static_path = options.mobile_static_path
else:
    static_path = options.static_path
app = tornado.web.Application([
    (r'/', IndexHandler),
    #api prefix means that we load json data
    (r'/api/blog', BlogHandler, dict(db=db)),
    (r'/api/entry', EntryHandler, dict(db=db)),
    ],
    static_path=static_path,
    autoreload=True
)
if __name__ == '__main__':
    #read settings from commandline
    options.parse_command_line()
    if options.init_db:
        init_db(db)
    print ('server running on http://localhost:{}'.format(options.port))
    # xheaders=True honours X-Real-IP / X-Forwarded-For from a proxy.
    app.listen(options.port,xheaders=True)
    ioloop = tornado.ioloop.IOLoop.instance()
    ioloop.start()
| {
"repo_name": "niharathomas/mongodb-tornado-angular",
"path": "src/server/app.py",
"copies": "2",
"size": "2389",
"license": "mit",
"hash": -7174503011925264000,
"line_mean": 26.4597701149,
"line_max": 86,
"alpha_frac": 0.6395981582,
"autogenerated": false,
"ratio": 3.8532258064516127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007657154714426312,
"num_lines": 87
} |
# app.py
# runs connectfour program with Flask
# J. Hassler Thurston
# Personal website
# 12 January 2014
from datetime import timedelta
from flask import Flask, request, jsonify, abort, make_response, current_app
from functools import update_wrapper
import board, compute_moves
# Flask application serving the Connect Four move API.
app = Flask(__name__)
# from http://flask.pocoo.org/snippets/56/
def crossdomain(origin=None, methods=None, headers=None,
                max_age=21600, attach_to_all=True,
                automatic_options=True):
    """Decorator adding CORS (Access-Control-*) headers to a Flask view.

    Taken from http://flask.pocoo.org/snippets/56/.
    NOTE(review): uses ``basestring``, so this code targets Python 2;
    under Python 3 it would need ``str`` instead.
    """
    if methods is not None:
        methods = ', '.join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, basestring):
        headers = ', '.join(x.upper() for x in headers)
    if not isinstance(origin, basestring):
        origin = ', '.join(origin)
    if isinstance(max_age, timedelta):
        max_age = max_age.total_seconds()
    def get_methods():
        # Fall back to the methods Flask would advertise for OPTIONS.
        if methods is not None:
            return methods
        options_resp = current_app.make_default_options_response()
        return options_resp.headers['allow']
    def decorator(f):
        def wrapped_function(*args, **kwargs):
            if automatic_options and request.method == 'OPTIONS':
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            if not attach_to_all and request.method != 'OPTIONS':
                return resp
            h = resp.headers
            h['Access-Control-Allow-Origin'] = origin
            h['Access-Control-Allow-Methods'] = get_methods()
            h['Access-Control-Max-Age'] = str(max_age)
            if headers is not None:
                h['Access-Control-Allow-Headers'] = headers
            return resp
        # Flask must not auto-handle OPTIONS; the wrapper does it.
        f.provide_automatic_options = False
        return update_wrapper(wrapped_function, f)
    return decorator
# error handling
# responses modified from http://blog.miguelgrinberg.com/post/designing-a-restful-api-with-python-and-flask
@app.errorhandler(404)
def not_found(error):
    """JSON body for 404 responses."""
    payload = jsonify({'error': 'Not found'})
    return make_response(payload, 404)
@app.errorhandler(400)
def bad_request(error):
    """JSON body for 400 responses."""
    payload = jsonify({'error': 'Bad request'})
    return make_response(payload, 400)
@app.errorhandler(405)
def not_allowed(error):
    """JSON body for 405 responses."""
    payload = jsonify({'error': 'Method not allowed'})
    return make_response(payload, 405)
@app.errorhandler(500)
def internal_error(error):
    """JSON body for 500 responses."""
    payload = jsonify({'error': 'Internal server error'})
    return make_response(payload, 500)
# JSON requests should be in this form:
# {
# 'columns': number of columns,
# 'rows': number of rows,
# 'moves': array of all previous moves
# }
@app.route('/move', methods = ['POST'])
@crossdomain(origin='http://hasslerthurston.com')
def make_move():
    """Validate the request, replay the game, and reply with the bot's move.

    400 for malformed input; 500 when the board computation fails.
    """
    # Reject anything that isn't well-formed JSON in the expected shape.
    if not request.json:
        abort(400)
    if not checkJSON(request.json):
        abort(400)
    # Rebuild the board and replay the move history; any failure along
    # the way is reported as a server error.
    playing_board = board.Board(request.json['columns'], request.json['rows'])
    if not playing_board.successfulInit:
        abort(500)
    if not playing_board.move_sequence(request.json['moves']):
        abort(500)
    if not compute_moves.compute_move(playing_board):
        abort(500)
    return jsonify(toJSON(playing_board))
# converts the current state of the playing board to JSON format, to be returned to the client
def toJSON(playing_board):
    """Serialize the board's dimensions and move history for the client."""
    return {
        'columns': playing_board.columns,
        'rows': playing_board.rows,
        'moves': playing_board.move_history,
    }
# checks to see if JSON from client is in the correct format
def checkJSON(json):
    """Validate the client payload for the /move endpoint.

    Fixed: the function previously ignored its ``json`` parameter and
    read the Flask global ``request.json`` instead, making it unusable
    outside a request context; it now validates the argument it is
    given (callers pass ``request.json``, so behaviour is unchanged).
    """
    # rows, columns, and moves fields must exist
    if not 'columns' in json or not 'rows' in json or not 'moves' in json:
        return False
    # rows and columns must be integers
    if not isinstance(json['columns'], int) or not isinstance(json['rows'], int):
        return False
    # rows and columns must be > 4
    if json['columns'] < 4 or json['rows'] < 4:
        return False
    # to not overload the server, rows and columns must be < 15
    if json['columns'] > 15 or json['rows'] > 15:
        return False
    # moves must be a list
    if not isinstance(json['moves'], list):
        return False
    # every move must be an integer
    if not all(isinstance(move, int) for move in json['moves']):
        return False
    # if all this is satisfied, JSON is in the correct format
    return True
if __name__ == '__main__':
    # MAKE SURE TO NOT HAVE debug=True WHEN PUSHING TO PRODUCTION
    app.run()
| {
"repo_name": "jthurst3/connectfour",
"path": "connectfour_app.py",
"copies": "1",
"size": "4892",
"license": "mit",
"hash": -7022930809173088000,
"line_mean": 31.8322147651,
"line_max": 107,
"alpha_frac": 0.6788634505,
"autogenerated": false,
"ratio": 3.5708029197080293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9606300226559135,
"avg_score": 0.028673228729779086,
"num_lines": 149
} |
# app.py
# where the first point of contact for all user messages goes through and where the bot replies back
import os
import sys
import json
import subprocess
import requests
from flask import Flask, request
# custom made modules
import sheets
import ai
# Flask application object; the webhook route handlers below attach to it.
app = Flask(__name__)
@app.route('/', methods=['GET'])
def verify():
    """Webhook verification endpoint for the Messenger platform.

    When Facebook registers this endpoint as a webhook it sends a GET with
    'hub.mode', 'hub.challenge' and 'hub.verify_token'; we must echo back the
    challenge iff the verify token matches our VERIFY_TOKEN env var.
    """
    mode = request.args.get("hub.mode")
    challenge = request.args.get("hub.challenge")
    if mode == "subscribe" and challenge:
        if request.args.get("hub.verify_token") != os.environ["VERIFY_TOKEN"]:
            return "Verification token mismatch", 403
        return challenge, 200
    # plain browser visit (or anything that is not a subscribe handshake)
    return "Hello world", 200
# any user action comes to this method first
@app.route('/', methods=['POST'])
def webhook():
    """Process incoming Messenger events (messages, postbacks, receipts).

    Always returns ("ok", 200) so Facebook does not keep retrying delivery.
    """
    data = request.get_json()
    log(data)  # you may not want to log every incoming message in production, but it's good for testing
    if data["object"] == "page":
        for entry in data["entry"]:
            for messaging_event in entry["messaging"]:
                if messaging_event.get("message"):  # someone sent us a message
                    sender_id = messaging_event["sender"]["id"]  # the facebook ID of the sender
                    # NOTE(review): assumes a "text" key is always present;
                    # an attachment-only message would raise KeyError here - confirm.
                    message_text = messaging_event["message"]["text"]
                    # call wit.ai method in ai.py to parse the message
                    ai_response = ai.wit_response(message_text)
                    masjid, date = ai.extract_info(ai_response)  # masjid is currently unused
                    # Fix: the original if/else called construct_schedule(date)
                    # in one branch and construct_schedule(None) in the other,
                    # but date was None in the else branch - both arms were
                    # identical, so pass date straight through.
                    message = sheets.construct_schedule(date)
                    send_message(sender_id, message)
                if messaging_event.get("delivery"):  # delivery confirmation
                    pass
                if messaging_event.get("optin"):  # optin confirmation
                    pass
                if messaging_event.get("postback"):  # user clicked/tapped "postback" button in earlier message
                    sender_id = messaging_event["sender"]["id"]
                    payload_text = messaging_event["postback"]["payload"].lower()
                    # when the user first uses the bot and presses the "get started" button
                    if (payload_text == "get started"):
                        message = getStarted()
                        send_message(sender_id, message)
                    # when the menu option "Todays Prayer Times" is selected
                    if (payload_text == "todays prayer times"):
                        message = sheets.construct_schedule(date=None)
                        send_message(sender_id, message)
    return "ok", 200
# method specifically for when 'get started' button is pressed
# ...because the message might change over time and having a method makes this easier to change
def getStarted():
    """Build the welcome/capabilities message shown after the Get Started button."""
    sections = (
        # what the bot can do
        "At the moment the bot can:\n 1) Get today's prayer times \n 2) Get the prayer times for any date this calendar year.\n\n",
        # feature 1 - get today's prayer times
        "TODAY'S PRAYER TIMES:\n- Swipe up on the menu below and press \"Prayer times for today\". \n\n",
        # feature 2 - get prayer times for any date in this calendar year
        "PRAYER TIMES FOR ANY DATE:\n- Swipe up in the menu below and press \"Send message\" then type in a date, say '10 May' and the prayer times for 10 May will be shown.\n\n",
        # disclaimer explaining which source is used for the prayer times
        "NOTE: The prayer times in this bot are based on the 2017 East London Mosque prayer times.",
    )
    return "".join(sections)
# sends a TEXT message to the user
def send_message(recipient_id, message_text):
    """POST a plain-text message to the Graph API Send endpoint for one recipient."""
    # trace the outgoing message (visible in heroku logs)
    log("sending message to {recipient}: {text}".format(recipient=recipient_id, text=message_text))
    # query-string, headers and JSON body that make up the Send API request
    query_params = {
        "access_token": os.environ["PAGE_ACCESS_TOKEN"]
    }
    request_headers = {
        "Content-Type": "application/json"
    }
    payload = json.dumps({
        "recipient": {
            "id": recipient_id
        },
        "message": {
            "text": message_text
        }
    })
    # fire the request and log anything other than success
    response = requests.post("https://graph.facebook.com/v2.6/me/messages", params=query_params, headers=request_headers, data=payload)
    if response.status_code != 200:
        log(response.status_code)
        log(response.text)
# method for debugging
def log(message): # simple wrapper for logging to stdout on heroku
    # str() so non-string payloads (e.g. the webhook's dicts) print cleanly
    print str(message)
    # flush immediately so heroku's log capture shows the line right away
    sys.stdout.flush()
if __name__ == '__main__':
    # debug=True enables Flask's interactive debugger/reloader - development only,
    # do not ship this to production.
    app.run(debug=True)
| {
"repo_name": "tauseef-khan/london-prayer-times-bot",
"path": "app.py",
"copies": "1",
"size": "5473",
"license": "mit",
"hash": 4605186054947645400,
"line_mean": 35.7315436242,
"line_max": 184,
"alpha_frac": 0.6035081308,
"autogenerated": false,
"ratio": 4.245927075252133,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5349435206052133,
"avg_score": null,
"num_lines": null
} |
# app.py or app/__init__.py
from flask import Flask, render_template, jsonify
import os
from action_commands import *
# Flask app for the camera/robotic-arm control dashboard.
app = Flask(__name__)
app.config.from_object('config')  # load settings from the local config module
@app.route('/')
def home():
    # Root URL serves the same dashboard page as /dashboard.
    return render_template('dashboard.html')
@app.route('/dashboard')
def dashboard():
    # Main control dashboard page.
    return render_template('dashboard.html')
@app.route('/about')
def about():
    # check device status we can ping the robot and check if its "alive"
    # (TODO above is the original author's - the page is currently static)
    return render_template('about.html')
# Now we can access the configuration variables via app.config["VAR_NAME"].
@app.route('/commands/<type>/<action>', methods=['POST'])
def commands(type, action):
    """Dispatch a robot command: route <type> to its handler, return the result as JSON.

    The parameter names must match the URL placeholders, so ``type`` keeps its
    (builtin-shadowing) name.
    """
    print("Command Type %s" % type)
    print("Command Action %s" % action)
    # table-driven dispatch instead of an if/elif chain
    dispatch = {
        "camera": camera_action,
        "zoom": zoom_action,
        "move": move_action,
    }
    handler = dispatch.get(type)
    if handler is not None:
        res = handler(action)
    else:
        res = { 'type': type, 'action': action, 'error': 'unknown type' }
    return jsonify(**res)
if __name__ == "__main__":
    # Bind to all interfaces so the robot dashboard is reachable over the LAN.
    app.run(host='0.0.0.0', port=4000)
| {
"repo_name": "doron2402/CameraControlXYRoboticArm",
"path": "Server/app.py",
"copies": "1",
"size": "1123",
"license": "mit",
"hash": -3523815916798266000,
"line_mean": 23.4130434783,
"line_max": 77,
"alpha_frac": 0.6233303651,
"autogenerated": false,
"ratio": 3.476780185758514,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4600110550858514,
"avg_score": null,
"num_lines": null
} |
# app.py rebuilding to test (from http://charlesleifer.com/blog/how-to-make-a-flask-blog-in-one-hour-or-less/)
import datetime
import functools
import os
import re
import urllib
from flask import (Flask, abort, flash, Markup, redirect, render_template,
request, Response, session, url_for)
from markdown import markdown
from markdown.extensions.codehilite import CodeHiliteExtension
from markdown.extensions.extra import ExtraExtension
from micawber import bootstrap_basic, parse_html
from micawber.cache import Cache as OEmbedCache
from peewee import *
from playhouse.flask_utils import FlaskDB, get_object_or_404, object_list
from playhouse.sqlite_ext import *
ADMIN_PASSWORD = 'secret'  # NOTE(review): hard-coded credential - move to env/config before deploying
APP_DIR = os.path.dirname(os.path.realpath(__file__))
DATABASE = 'sqliteext:///%s' % os.path.join(APP_DIR, 'blog.db')  # peewee sqlite-ext URL
DEBUG = False
SECRET_KEY = 'shhh, secret!'  # Used by Flask to encrypt session cookie. NOTE(review): also hard-coded
SITE_WIDTH = 800
# app.config.from_object(__name__) pulls the UPPERCASE names above into app.config.
app = Flask(__name__)
app.config.from_object(__name__)
flask_db = FlaskDB(app)
database = flask_db.database
oembed_providers = bootstrap_basic(OEmbedCache())
class Entry(flask_db.Model):
    """A blog entry; saving also refreshes its full-text-search mirror row."""
    title = CharField()
    slug = CharField(unique=True)  # URL-safe identifier derived from the title
    content = TextField()
    published = BooleanField(index=True)
    timestamp = DateTimeField(default=datetime.datetime.now, index=True)

    def save(self, *args, **kwargs):
        """Persist the entry, generating a slug on first save, then sync the FTS index."""
        if not self.slug:
            # Fix: raw string for the regex - a non-raw '[^\w]+' is an invalid
            # escape sequence and raises a DeprecationWarning on modern Python.
            self.slug = re.sub(r'[^\w]+', '-', self.title.lower())
        ret = super(Entry, self).save(*args, **kwargs)
        # Store search content.
        self.update_search_index()
        return ret

    def update_search_index(self):
        """Create or update the FTSEntry row mirroring this entry's title + content."""
        try:
            fts_entry = FTSEntry.get(FTSEntry.entry_id == self.id)
        except FTSEntry.DoesNotExist:
            # no mirror row yet - force an INSERT rather than an UPDATE
            fts_entry = FTSEntry(entry_id=self.id)
            force_insert = True
        else:
            force_insert = False
        fts_entry.content = '\n'.join((self.title, self.content))
        fts_entry.save(force_insert=force_insert)
class FTSEntry(FTSModel):
    # Full-text-search mirror of Entry, maintained by Entry.update_search_index().
    entry_id = IntegerField()  # references Entry.id (plain int, not a ForeignKey)
    content = TextField()      # searchable text: title + content joined
    class Meta:
        database = database
@app.template_filter('clean_querystring')
def clean_querystring(request_args, *keys_to_remove, **new_values):
    """Template filter: rebuild a querystring with *keys_to_remove* dropped and *new_values* merged in."""
    params = {}
    for key, value in request_args.items():
        params[key] = value
    for key in keys_to_remove:
        params.pop(key, None)
    params.update(new_values)
    # urllib.urlencode is the Python 2 spelling (urllib.parse.urlencode on 3)
    return urllib.urlencode(params)
@app.errorhandler(404)
def not_found(exc):
    # Minimal hand-written 404 page; placeholder text kept verbatim.
    return Response('<h3> Still not working!! </h3>'), 404
def main():
    # Ensure the schema exists before serving (safe=True skips existing tables).
    database.create_tables([Entry, FTSEntry], safe=True)
    # debug=True enables the reloader/debugger - development use only.
    app.run(debug=True)
if __name__ == '__main__':
    main()
| {
"repo_name": "artopping/nyu-python",
"path": "course3/assignments/blog_virtual/app/app.py",
"copies": "1",
"size": "2688",
"license": "mit",
"hash": -8142415417861687000,
"line_mean": 29.8965517241,
"line_max": 110,
"alpha_frac": 0.677827381,
"autogenerated": false,
"ratio": 3.4198473282442747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9583519454196535,
"avg_score": 0.002831051009547968,
"num_lines": 87
} |
# App.py
# Application stuff.
# The application is responsible for managing the main frame window.
#
# We also grab the FileOpen command, to invoke our Python editor
" The PythonWin application code. Manages most aspects of MDI, etc "
import win32con
import win32api
import win32ui
import sys
import string
import os
from pywin.mfc import window, dialog, thread, afxres
import traceback
from pywin.framework import scriptutils
## NOTE: App and AppBuild should NOT be used - instead, you should contruct your
## APP class manually whenever you like (just ensure you leave these 2 params None!)
## Whoever wants the generic "Application" should get it via win32iu.GetApp()
# These are "legacy"
AppBuilder = None
App = None # default - if used, must end up a CApp derived class.
# Helpers that should one day be removed!
def AddIdleHandler(handler):
    # Deprecated module-level shim - forwards to the real application object.
    print "app.AddIdleHandler is deprecated - please use win32ui.GetApp().AddIdleHandler() instead."
    return win32ui.GetApp().AddIdleHandler(handler)
def DeleteIdleHandler(handler):
    # Deprecated module-level shim - forwards to the real application object.
    print "app.DeleteIdleHandler is deprecated - please use win32ui.GetApp().DeleteIdleHandler() instead."
    return win32ui.GetApp().DeleteIdleHandler(handler)
# Helper for writing a Window position by name, and later loading it.
def SaveWindowSize(section,rect,state=""):
    """ Writes a rectangle to an INI file
    Args: section = section name in the applications INI file
    rect = a (left, top, right, bottom) rectangle, as unpacked below
    (note: NOT the (cy, cx, y, x) CREATESTRUCT order - see
    RectToCreateStructRect for that conversion).
    state = optional prefix so several named states can share a section."""
    left, top, right, bottom = rect
    if state: state = state + " "
    win32ui.WriteProfileVal(section,state+"left",left)
    win32ui.WriteProfileVal(section,state+"top",top)
    win32ui.WriteProfileVal(section,state+"right",right)
    win32ui.WriteProfileVal(section,state+"bottom",bottom)
def LoadWindowSize(section, state=""):
    """ Loads a section from an INI file, and returns a rect in a tuple (see SaveWindowSize)
    Missing values default to 0, so an unsaved section yields (0, 0, 0, 0)."""
    if state: state = state + " "
    left = win32ui.GetProfileVal(section,state+"left",0)
    top = win32ui.GetProfileVal(section,state+"top",0)
    right = win32ui.GetProfileVal(section,state+"right",0)
    bottom = win32ui.GetProfileVal(section,state+"bottom",0)
    return (left, top, right, bottom)
def RectToCreateStructRect(rect):
    """Convert a (left, top, right, bottom) rect into the (cy, cx, y, x) tuple
    used by CREATESTRUCT position fields."""
    left, top, right, bottom = rect
    return (bottom - top, right - left, top, left)
# Define FrameWindow and Application objects
#
# The Main Frame of the application.
class MainFrame(window.MDIFrameWnd):
    """The MDI main frame: owns the status bar and persists its window position."""
    sectionPos = "Main Window"  # INI section used by Save/LoadWindowSize below
    statusBarIndicators = ( afxres.ID_SEPARATOR, #// status line indicator
                            afxres.ID_INDICATOR_CAPS,
                            afxres.ID_INDICATOR_NUM,
                            afxres.ID_INDICATOR_SCRL,
                            win32ui.ID_INDICATOR_LINENUM,
                            win32ui.ID_INDICATOR_COLNUM )
    def OnCreate(self, cs):
        # Build the status bar as soon as the frame window exists.
        self._CreateStatusBar()
        return 0
    def _CreateStatusBar(self):
        self.statusBar = win32ui.CreateStatusBar(self)
        self.statusBar.SetIndicators(self.statusBarIndicators)
        # keep the line/column panes refreshed from the active edit control
        self.HookCommandUpdate(self.OnUpdatePosIndicator, win32ui.ID_INDICATOR_LINENUM)
        self.HookCommandUpdate(self.OnUpdatePosIndicator, win32ui.ID_INDICATOR_COLNUM)
    def OnUpdatePosIndicator(self, cmdui):
        # Update one status-bar pane with the caret's 1-based line or column.
        editControl = scriptutils.GetActiveEditControl()
        value = " " * 5  # blank pane when no editor is active
        if editControl is not None:
            try:
                startChar, endChar = editControl.GetSel()
                lineNo = editControl.LineFromChar(startChar)
                colNo = endChar - editControl.LineIndex(lineNo)
                if cmdui.m_nID==win32ui.ID_INDICATOR_LINENUM:
                    value = "%0*d" % (5, lineNo + 1)
                else:
                    value = "%0*d" % (3, colNo + 1)
            except win32ui.error:
                pass
        cmdui.SetText(value)
        cmdui.Enable()
    def PreCreateWindow(self, cc):
        cc = self._obj_.PreCreateWindow(cc)
        # Restore the window rect saved by a previous session, if any.
        pos = LoadWindowSize(self.sectionPos)
        self.startRect = pos
        if pos[2] - pos[0]:  # non-zero width means a position was saved
            rect = RectToCreateStructRect(pos)
            cc = cc[0], cc[1], cc[2], cc[3], rect, cc[5], cc[6], cc[7], cc[8]
        return cc
    def OnDestroy(self, msg):
        # use GetWindowPlacement(), as it works even when min'd or max'd
        rectNow = self.GetWindowPlacement()[4]
        if rectNow != self.startRect:
            SaveWindowSize(self.sectionPos, rectNow)
        return 0
class CApp(thread.WinApp):
    " A class for the application - wires the main frame, idle handlers and standard commands. "
    def __init__(self):
        self.oldCallbackCaller = None
        thread.WinApp.__init__(self, win32ui.GetApp() )
        self.idleHandlers = []
    def InitInstance(self):
        " Called to crank up the app "
        # Honour the configured MRU list size before loading profile settings.
        numMRU = win32ui.GetProfileVal("Settings","Recent File List Size", 10)
        win32ui.LoadStdProfileSettings(numMRU)
        # self._obj_.InitMDIInstance()
        if win32api.GetVersionEx()[0]<4:
            # pre-Win95/NT4 look: old dialog background and 3D controls
            win32ui.SetDialogBkColor()
            win32ui.Enable3dControls()
        # install a "callback caller" - a manager for the callbacks
        # self.oldCallbackCaller = win32ui.InstallCallbackCaller(self.CallbackManager)
        self.LoadMainFrame()
        self.SetApplicationPaths()
    def ExitInstance(self):
        " Called as the app dies - too late to prevent it here! "
        win32ui.OutputDebug("Application shutdown\n")
        # Restore the callback manager, if any.
        try:
            win32ui.InstallCallbackCaller(self.oldCallbackCaller)
        except AttributeError:
            pass
        if self.oldCallbackCaller:
            del self.oldCallbackCaller
        self.frame=None # clean Python references to the now destroyed window object.
        self.idleHandlers = []
        # Attempt cleanup if not already done!
        if self._obj_: self._obj_.AttachObject(None)
        self._obj_ = None
        # Also clear the legacy module-level globals defined above.
        global App
        global AppBuilder
        App = None
        AppBuilder = None
        return 0
    def HaveIdleHandler(self, handler):
        # True if the handler is currently registered.
        return handler in self.idleHandlers
    def AddIdleHandler(self, handler):
        self.idleHandlers.append(handler)
    def DeleteIdleHandler(self, handler):
        # Raises ValueError if the handler is not registered.
        self.idleHandlers.remove(handler)
    def OnIdle(self, count):
        # Run every registered idle handler; a handler that raises is logged
        # and unregistered so it cannot fail repeatedly.
        try:
            ret = 0
            handlers = self.idleHandlers[:] # copy list, as may be modified during loop
            for handler in handlers:
                try:
                    thisRet = handler(handler, count)
                except:
                    print "Idle handler %s failed" % (`handler`)
                    traceback.print_exc()
                    print "Idle handler removed from list"
                    try:
                        self.DeleteIdleHandler(handler)
                    except ValueError: # Item not in list.
                        pass
                    thisRet = 0
                ret = ret or thisRet
            # any truthy handler result asks the framework for more idle time
            return ret
        except KeyboardInterrupt:
            pass
    def CreateMainFrame(self):
        # Factory hook - subclasses may return a different frame class.
        return MainFrame()
    def LoadMainFrame(self):
        " Create the main applications frame "
        self.frame = self.CreateMainFrame()
        self.SetMainFrame(self.frame)
        self.frame.LoadFrame(win32ui.IDR_MAINFRAME, win32con.WS_OVERLAPPEDWINDOW)
        self.frame.DragAcceptFiles() # we can accept these.
        self.frame.ShowWindow(win32ui.GetInitialStateRequest())
        self.frame.UpdateWindow()
        self.HookCommands()
    def OnHelp(self,id, code):
        # Open the registered help file matching the menu command id.
        try:
            import regutil
            if id==win32ui.ID_HELP_GUI_REF:
                helpFile = regutil.GetRegisteredHelpFile("Pythonwin Reference")
                helpCmd = win32con.HELP_CONTENTS
            else:
                helpFile = regutil.GetRegisteredHelpFile("Main Python Documentation")
                helpCmd = win32con.HELP_FINDER
            if helpFile is None:
                win32ui.MessageBox("The help file is not registered!")
            else:
                import help
                help.OpenHelpFile(helpFile, helpCmd)
        except:
            t, v, tb = sys.exc_info()
            win32ui.MessageBox("Internal error in help file processing\r\n%s: %s" % (t,v))
            tb = None # Prevent a cycle
    def DoLoadModules(self, modules):
        # XXX - this should go, but the debugger uses it :-(
        # dont do much checking!
        for module in modules:
            __import__(module)
    def HookCommands(self):
        # Wire the standard file/help commands and drag-drop to this object.
        self.frame.HookMessage(self.OnDropFiles,win32con.WM_DROPFILES)
        self.HookCommand(self.HandleOnFileOpen,win32ui.ID_FILE_OPEN)
        self.HookCommand(self.HandleOnFileNew,win32ui.ID_FILE_NEW)
        self.HookCommand(self.OnFileMRU,win32ui.ID_FILE_MRU_FILE1)
        self.HookCommand(self.OnHelpAbout,win32ui.ID_APP_ABOUT)
        self.HookCommand(self.OnHelp, win32ui.ID_HELP_PYTHON)
        self.HookCommand(self.OnHelp, win32ui.ID_HELP_GUI_REF)
        # Hook for the right-click menu.
        self.frame.GetWindow(win32con.GW_CHILD).HookMessage(self.OnRClick,win32con.WM_RBUTTONDOWN)
    def SetApplicationPaths(self):
        # Load the users/application paths
        # ('Application Path' plus numbered 'Application Path N' INI entries,
        # each a ';'-separated list) and prepend them to sys.path.
        new_path = []
        apppath=string.splitfields(win32ui.GetProfileVal('Python','Application Path',''),';')
        for path in apppath:
            if len(path)>0:
                new_path.append(win32ui.FullPath(path))
        for extra_num in range(1,11):
            apppath=string.splitfields(win32ui.GetProfileVal('Python','Application Path %d'%extra_num,''),';')
            if len(apppath) == 0:
                break
            for path in apppath:
                if len(path)>0:
                    new_path.append(win32ui.FullPath(path))
        sys.path = new_path + sys.path
    def OnRClick(self,params):
        " Handle right click message "
        # put up the entire FILE menu!
        menu = win32ui.LoadMenu(win32ui.IDR_TEXTTYPE).GetSubMenu(0)
        menu.TrackPopupMenu(params[5]) # track at mouse position.
        return 0
    def OnDropFiles(self,msg):
        " Handle a file being dropped from file manager "
        hDropInfo = msg[2]
        self.frame.SetActiveWindow() # active us
        nFiles = win32api.DragQueryFile(hDropInfo)
        try:
            for iFile in range(0,nFiles):
                fileName = win32api.DragQueryFile(hDropInfo, iFile)
                win32ui.GetApp().OpenDocumentFile( fileName )
        finally:
            win32api.DragFinish(hDropInfo);
        return 0
    # No longer used by Pythonwin, as the C++ code has this same basic functionality
    # but handles errors slightly better.
    # It all still works, tho, so if you need similar functionality, you can use it.
    # Therefore I havent deleted this code completely!
    # def CallbackManager( self, ob, args = () ):
    # """Manage win32 callbacks. Trap exceptions, report on them, then return 'All OK'
    # to the frame-work. """
    # import traceback
    # try:
    # ret = apply(ob, args)
    # return ret
    # except:
    # # take copies of the exception values, else other (handled) exceptions may get
    # # copied over by the other fns called.
    # win32ui.SetStatusText('An exception occured in a windows command handler.')
    # t, v, tb = sys.exc_info()
    # traceback.print_exception(t, v, tb.tb_next)
    # try:
    # sys.stdout.flush()
    # except (NameError, AttributeError):
    # pass
    # Command handlers.
    def OnFileMRU( self, id, code ):
        " Called when a File 1-n message is received "
        fileName = win32ui.GetRecentFileList()[id - win32ui.ID_FILE_MRU_FILE1]
        win32ui.GetApp().OpenDocumentFile(fileName)
    def HandleOnFileOpen( self, id, code ):
        " Called when FileOpen message is received "
        win32ui.GetApp().OnFileOpen()
    def HandleOnFileNew( self, id, code ):
        " Called when FileNew message is received "
        win32ui.GetApp().OnFileNew()
    def OnHelpAbout( self, id, code ):
        " Called when HelpAbout message is received. Displays the About dialog. "
        win32ui.InitRichEdit()
        dlg=AboutBox()
        dlg.DoModal()
def _GetRegistryValue(key, val, default = None):
    # val is registry value - None for default val.
    # Look the value up under HKEY_CURRENT_USER first, then fall back to
    # HKEY_LOCAL_MACHINE; return `default` when it exists in neither hive.
    try:
        hkey = win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, key)
        return win32api.RegQueryValueEx(hkey, val)[0]
    except win32api.error:
        try:
            hkey = win32api.RegOpenKey(win32con.HKEY_LOCAL_MACHINE, key)
            return win32api.RegQueryValueEx(hkey, val)[0]
        except win32api.error:
            return default
scintilla = "Scintilla is Copyright 1998-2008 Neil Hodgson (http://www.scintilla.org)"
idle = "This program uses IDLE extensions by Guido van Rossum, Tim Peters and others."
contributors = "Thanks to the following people for making significant contributions: Roger Upole, Sidnei da Silva, Sam Rushing, Curt Hagenlocher, Dave Brennan, Roger Burnham, Gordon McMillan, Neil Hodgson, Laramie Leavitt. (let me know if I have forgotten you!)"
# The About Box
class AboutBox(dialog.Dialog):
    """Pythonwin's About dialog: fills in version/credit text and a home-page button."""
    def __init__(self, idd=win32ui.IDD_ABOUTBOX):
        dialog.Dialog.__init__(self, idd)
    def OnInitDialog(self):
        # Compose the credits edit-control text from the module-level strings.
        text = "Pythonwin - Python IDE and GUI Framework for Windows.\n\n%s\n\nPython is %s\n\n%s\n\n%s\n\n%s" % (win32ui.copyright, sys.copyright, scintilla, idle, contributors)
        self.SetDlgItemText(win32ui.IDC_EDIT1, text)
        # Get the build number - written by installers.
        # For distutils build, read pywin32.version.txt
        import distutils.sysconfig
        site_packages = distutils.sysconfig.get_python_lib(plat_specific=1)
        try:
            build_no = open(os.path.join(site_packages, "pywin32.version.txt")).read().strip()
            ver = "pywin32 build %s" % build_no
        except EnvironmentError:
            ver = None
        if ver is None:
            # See if we are Part of Active Python
            ver = _GetRegistryValue("SOFTWARE\\ActiveState\\ActivePython", "CurrentVersion")
            if ver is not None:
                ver = "ActivePython build %s" % (ver,)
        if ver is None:
            ver = ""  # no build info available - leave the field empty
        self.SetDlgItemText(win32ui.IDC_ABOUT_VERSION, ver)
        self.HookCommand(self.OnButHomePage, win32ui.IDC_BUTTON1)
    def OnButHomePage(self, id, code):
        # Launch the project home page in the default browser on button click.
        if code == win32con.BN_CLICKED:
            win32api.ShellExecute(0, "open", "http://starship.python.net/crew/mhammond/win32", None, "", 1)
def Win32RawInput(prompt=None):
    "Provide raw_input() for gui apps"
    # flush stderr/out first.
    try:
        sys.stdout.flush()
        sys.stderr.flush()
    except:
        pass
    if prompt is None: prompt = ""
    ret=dialog.GetSimpleInput(prompt)
    if ret==None:
        # dialog was cancelled - mimic console Ctrl+C semantics (Python 2 raise syntax)
        raise KeyboardInterrupt, "operation cancelled"
    return ret
def Win32Input(prompt=None):
    "Provide input() for gui apps"
    # Python 2 input() semantics: evaluate whatever the user typed.
    return eval(raw_input(prompt))
# Route the console input builtins through the GUI dialogs above (Python 2 names).
sys.modules['__builtin__'].raw_input=Win32RawInput
sys.modules['__builtin__'].input=Win32Input
try:
    # LocatePythonFile used to be here.
    # THIS WILL BE DELETED SOON.
    from scriptutils import LocatePythonFile
except:
    pass
def HaveGoodGUI():
    """Returns true if we currently have a good gui available.

    Presence of the pywin startup module in sys.modules is the signal.
    Fix: ``in`` replaces the deprecated dict.has_key(), which was removed in
    Python 3; ``in`` behaves identically on Python 2 as well.
    """
    return "pywin.framework.startup" in sys.modules
def CreateDefaultGUI( appClass = None):
    """Creates a default GUI environment

    appClass: optional application class; defaults to the interactive
    Pythonwin app imported lazily below.
    """
    if appClass is None:
        import intpyapp # Bring in the default app - could be param'd later.
        appClass = intpyapp.InteractivePythonApp
    # Create and init the app.
    appClass().InitInstance()
def CheckCreateDefaultGUI():
    """Ensure a GUI environment exists, creating the default one when missing.

    Returns the pre-existing-GUI flag (true when nothing had to be created).
    """
    already_there = HaveGoodGUI()
    if not already_there:
        CreateDefaultGUI()
    return already_there
| {
"repo_name": "Southpaw-TACTIC/Team",
"path": "src/python/Lib/site-packages/pythonwin/pywin/framework/app.py",
"copies": "1",
"size": "14375",
"license": "epl-1.0",
"hash": 403607803519312260,
"line_mean": 33.5816831683,
"line_max": 262,
"alpha_frac": 0.7007304348,
"autogenerated": false,
"ratio": 3.1175450010843635,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43182754358843634,
"avg_score": null,
"num_lines": null
} |
#!/app/python3.5/bin/python3.5
import os
import sys
import shutil
projcet="ALL/"
user="xiaoyong/"
class all:
    """Clone-and-package helper: fetch the ALL repo, check out a version, tar it.

    NOTE(review): the class name shadows the builtin ``all``; it is used at
    module level below (``obj=all()``), so renaming is left for a wider change.
    """
    def cmd(self, version):
        """Run the clone/checkout/tar/move pipeline for *version*.

        Restructured from six levels of nested if/else into guard clauses;
        the sequence of shell commands and printed messages is unchanged.
        """
        if os.system("mkdir -p /code/" + user) != 0:
            print("代码目录创建失败")
            return
        print("代码目录创建成功")
        os.chdir("/code/" + user)
        if os.system("git clone https://github.com/xiaoyongaa/ALL.git") != 0:
            print("代码拉取失败")
            return
        print("代码拉取成功!!!")
        os.chdir(projcet)
        if os.system("git checkout" + " " + version) != 0:
            print("版本切换失败")
            return
        # NOTE(review): this success message was built but never printed in the
        # original code; the (unused) assignment is kept to preserve behaviour.
        msg = "{name}项目{v}版本代码切换成功!!!".format(name=projcet, v=version)
        if os.system("tar -zcvf ALL_" + version + ".tar.gz --exclude=.* *") != 0:
            print("代码压缩失败")
            return
        print("代码压缩成功")
        if os.system("mv *.tar.gz /code/" + user) != 0:
            print("代码移动失败!")
            return
        print("代码移动成功!")
        shutil.rmtree("/code/" + user + projcet)  # remove the checkout once archived
        # (push step not implemented in the original)
    def main(self):
        """Read the version from argv[1] and run cmd(); complain when missing."""
        try:
            version = sys.argv[1]
            print(version)
            self.cmd(version)
        except Exception as ex:
            print("脚本的第一个版本传承不能为空,请输入版本号")
# Script entry point: instantiate and run with the version from argv[1].
obj=all()
obj.main()
"repo_name": "xiaoyongaa/ALL",
"path": "启动停止脚本/git自动拉取代码脚本.py",
"copies": "1",
"size": "1767",
"license": "apache-2.0",
"hash": -3298676366177820000,
"line_mean": 29.3921568627,
"line_max": 86,
"alpha_frac": 0.4189799871,
"autogenerated": false,
"ratio": 3.0918163672654693,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4010796354365469,
"avg_score": null,
"num_lines": null
} |
# app.py - Upper-Polo To Do List App
# This file defines a function named 'main' that
# implements the app runtime loop.
from todoclass import ToDoList
def print_menu():
    """Print the numbered action menu for the to-do list."""
    menu_lines = (
        "\n\n1. Add Item",
        "2. Remove Item",
        "3. List Items",
        "4. Mark Done",
        "0. Exit\n\n",
    )
    for line in menu_lines:
        print(line)
def main():
    """Run the interactive to-do loop until the user picks Exit (0)."""
    todo = ToDoList()
    while True:
        # Show the list and the menu on every pass.
        todo.print_todo_list()
        print_menu()
        choice = input()
        if choice == "1":
            # Add a new item.
            print("Add Item")
            new_item = input("Please enter To-Do item\n")
            todo.add(new_item)
        elif choice == "2":
            # Remove an item (list shown again for reference).
            todo.print_todo_list()
            todo.item_remove()
        elif choice == "3":
            # Just list the items.
            todo.print_todo_list()
        elif choice == "4":
            # Mark an item as complete.
            todo.print_todo_list()
            todo.item_mark_complete()
        elif choice == "0":
            exit("0")
        else:
            print("invalid menu option")
main()
| {
"repo_name": "Upper-Polo/todo_list",
"path": "app.py",
"copies": "1",
"size": "1249",
"license": "mit",
"hash": 6257703254271599000,
"line_mean": 21.3035714286,
"line_max": 56,
"alpha_frac": 0.5052041633,
"autogenerated": false,
"ratio": 3.706231454005935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9659905005061037,
"avg_score": 0.010306122448979592,
"num_lines": 56
} |
#app_quobit models
from django.db import models
from django.contrib.auth.models import User
class QEvent(models.Model):
    """An event/discussion thread; QPost rows attach via QPost.qevent."""
    title = models.CharField(max_length=50)
    # Creator identity denormalised onto the event row.
    # Fix: max_length dropped from IntegerField - Django ignores it on integer
    # columns (it only applies to char fields), so the schema is unchanged.
    # NOTE(review): facebook ids can exceed 32 bits - BigIntegerField would be
    # safer, but that is a schema change and is only flagged here.
    created_by_user_id = models.IntegerField()
    created_by_fbid = models.IntegerField()
    created_by_username = models.CharField(max_length=30)
    created_on = models.DateTimeField(auto_now_add=True, null=True, blank=True)
class QBaseContent(models.Model):
    """Abstract base for user-authored content (QPost and QReply)."""
    # Author identity denormalised onto each row.
    # Fix: max_length dropped from IntegerField - Django ignores it on integer
    # columns, so the generated schema is unchanged.
    user_id = models.IntegerField()
    fbid = models.IntegerField()
    username = models.CharField(max_length=30)
    content = models.TextField(blank=True)
    published_on = models.DateTimeField(auto_now_add=True, null=True, blank=True)
    class Meta:
        abstract = True  # no table of its own; fields are copied into subclasses
class QPost(QBaseContent):
    # A top-level post inside an event; replies attach via QReply.qpost.
    qevent = models.ForeignKey(QEvent, related_name='posts')
    def __unicode__(self):
        return self.content
class QReply(QBaseContent):
    # A reply to one QPost.
    qpost = models.ForeignKey(QPost, related_name='replies')
    def __unicode__(self):
        return self.content
class User(models.Model):
    """App-local user record keyed by facebook id.

    NOTE(review): this class shadows django.contrib.auth.models.User imported
    at the top of the module - later references to ``User`` here resolve to
    this model. Renaming would touch other modules, so it is only flagged.
    """
    # Fix: max_length dropped from IntegerField - Django ignores it on integer
    # columns, so the schema is unchanged.
    fbid = models.IntegerField()
    username = models.CharField(max_length=30)
    email = models.CharField(max_length=50)
    created_on = models.DateTimeField(auto_now_add=True, null=True, blank=True)
# class QEvent(models.Model):
# title = models.CharField(max_length=50)
# created_by = models.CharField(max_length=30)
# created_on = models.DateTimeField(auto_now_add=True, null=True, blank=True)
# class QBaseContent(models.Model):
# author = models.CharField(default='Anonymous', max_length=30)
# content = models.TextField(blank=True)
# published_on = models.DateTimeField(auto_now_add=True, null=True, blank=True)
# class Meta:
# abstract = True
# class QPost(QBaseContent):
# qevent = models.ForeignKey(QEvent, related_name='posts')
# def __unicode__(self):
# return self.content
# class QReply(QBaseContent):
# qpost = models.ForeignKey(QPost, related_name='replies')
# def __unicode__(self):
# return self.content
# class User(models.Model):
# username = models.CharField(max_length=30)
# email = models.CharField(max_length=50)
# password = models.CharField(max_length=40)
# created_on = models.DateTimeField(auto_now_add=True, null=True, blank=True) | {
"repo_name": "asen6/amartyasenguptadotcom",
"path": "app_quobit/models.py",
"copies": "1",
"size": "2213",
"license": "bsd-3-clause",
"hash": 4814943283798117000,
"line_mean": 29.75,
"line_max": 80,
"alpha_frac": 0.7415273385,
"autogenerated": false,
"ratio": 2.9585561497326203,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9136955983172821,
"avg_score": 0.012625501011959799,
"num_lines": 72
} |
#app_quobit views
from django.core.cache import cache
from django.contrib.auth.forms import UserCreationForm
from django.views.generic.simple import direct_to_template
from django.http import HttpResponseRedirect
from django.views.generic.simple import direct_to_template
from django.shortcuts import render_to_response, get_object_or_404
from django.template import Context, loader
from django.http import HttpResponse
from django.utils import simplejson
# import json
# from django.core import serializers
# json_serializer = serializers.get_serializer("json")()
#MEMCACHE_GREETINGS = 'greetings'
import time
from app_quobit.models import QEvent, QPost, QReply, User
def events(request):
    """Render the event list page, newest events first."""
    latest_events_list = QEvent.objects.all().order_by('-created_on')
    return direct_to_template(request, 'app_quobit/events.html',
        {'latest_events_list': latest_events_list})
def create_event(request):
    """Create a QEvent from POST data, stamped with the session user's identity.

    Non-POST requests get an empty response.
    """
    if request.method == 'POST':
        new_title = request.POST.get('new_event_title')
        # the logged-in user is identified via the session (see set_user)
        new_user_id = int(request.session['user_id'])
        selected_user = get_object_or_404(User, id=new_user_id)
        user_id = selected_user.id
        fbid = selected_user.fbid
        username = selected_user.username
        # creator identity is denormalised onto the event row
        new_event = QEvent(title=new_title, created_by_user_id=user_id, created_by_fbid=fbid, created_by_username=username)
        new_event.save()
        return HttpResponseRedirect("/projects/quobit/")
    else:
        return HttpResponse()
def event(request, event_id):
    """Render a single event's page, 404ing on an unknown id."""
    qevent = get_object_or_404(QEvent, id=int(event_id))
    return direct_to_template(request, 'app_quobit/event.html',
        {'qevent': qevent})
def set_user(request):
    """Log a facebook user in, registering them on first sight.

    Return codes in the JSON payload: 0 = existing user, 1 = duplicate fbid
    rows (error), 2 = newly registered.
    NOTE(review): the nesting below is reconstructed from a whitespace-stripped
    dump; the session-setting tail is placed at the POST level (shared by the
    existing-user and just-registered paths) as that is the only reading that
    returns a response in every case - confirm against the original file.
    """
    if request.method == 'POST':
        new_fbid = request.POST.get('fbid')
        new_username = request.POST.get('username')
        new_email = request.POST.get('email')
        return_code = 0
        # check if user already exists. If not, register.
        fbid_matches_list = User.objects.filter(fbid=new_fbid)
        if len(fbid_matches_list) > 1:
            # data integrity problem: more than one row for this fbid
            return_code = 1
            items_to_return = {'return_code':return_code}
            return HttpResponse(simplejson.dumps(items_to_return))
        elif len(fbid_matches_list) == 0:
            # user not found. register and get user info
            return_code = 2
            new_user = User(fbid=new_fbid, username=new_username, email=new_email)
            new_user.save()
            fbid_matches_list = User.objects.filter(fbid=new_fbid)
        user_id = fbid_matches_list[0].id
        username = fbid_matches_list[0].username
        # set cookie
        request.session['user_id'] = user_id
        request.session['username'] = username
        # return user id and username
        items_to_return = {'return_code':return_code, 'user_id':user_id, 'username':username}
        return HttpResponse(simplejson.dumps(items_to_return))
    else:
        return HttpResponse()
def enter_qpost(request):
    """Create a QPost in an event on behalf of the session user; return its id as JSON."""
    if request.method == 'POST':
        event_id = int(request.POST.get('event_id'))
        # user_id = request.POST.get('user_id')
        user_id = int(request.session['user_id'])
        selected_event = get_object_or_404(QEvent, id=event_id)
        selected_user = get_object_or_404(User, id=user_id)
        text = request.POST.get('new_qpost_text')
        # author identity is denormalised onto the post row
        qpost = QPost(qevent=selected_event, user_id=selected_user.id, fbid=selected_user.fbid, username=selected_user.username, content=text)
        qpost.save()
        qpost_id = qpost.id
        items_to_return = {'qpost_id': qpost_id, 'username': selected_user.username}
        return HttpResponse(simplejson.dumps(items_to_return))
    else:
        return HttpResponse()
def enter_qreply(request):
    """Create a QReply on a post on behalf of the session user; return its id as JSON."""
    if request.method == 'POST':
        qpost_id = int(request.POST.get('qpost_id'))
        user_id = int(request.session['user_id'])
        text = request.POST['new_qreply_text']
        selected_qpost = get_object_or_404(QPost, id=qpost_id)
        selected_user = get_object_or_404(User, id=user_id)
        # author identity is denormalised onto the reply row
        qreply = QReply(qpost=selected_qpost, user_id=selected_user.id, fbid=selected_user.fbid, username=selected_user.username, content=text)
        qreply.save()
        qreply_id = qreply.id
        items_to_return = {'qreply_id': qreply_id, 'username': selected_user.username}
        return HttpResponse(simplejson.dumps(items_to_return))
    else:
        return HttpResponse()
def get_all_qposts_and_qreplies(request):
    """Return all posts of an event plus unseen replies of the current chat.

    GET parameters: 'event_id', 'current_chat_id' and 'last_qreply_id'.
    The JSON payload contains every QPost of the event (oldest first), the
    QReplies of the current chat newer than the client's last seen reply id,
    and the session user's public fields.
    """
    event_id = int(request.GET.get('event_id'))
    current_chat_id = int(request.GET.get('current_chat_id'))
    last_qreply_id = int(request.GET.get('last_qreply_id'))
    post_qs = QPost.objects.filter(qevent__id=event_id).order_by('published_on')
    reply_qs = QReply.objects.filter(qpost__id=current_chat_id).order_by('published_on')
    qposts = [{'qpost_id': post.id,
               'author': post.username,
               'content': post.content,
               'published_on': time.mktime(post.published_on.timetuple())}
              for post in post_qs]
    # Only ship replies the client has not seen yet.
    qreplies = [{'qpost_id': reply.qpost.id,
                 'qreply_id': reply.id,
                 'author': reply.username,
                 'content': reply.content,
                 'published_on': time.mktime(reply.published_on.timetuple())}
                for reply in reply_qs if reply.id > last_qreply_id]
    user_id = int(request.session['user_id'])
    user = get_object_or_404(User, id=user_id)
    user_dict = {'user_id': user.id, 'username': user.username, 'fbid': user.fbid}
    items_to_return = {'qposts': qposts, 'qreplies': qreplies, 'user': user_dict}
    return HttpResponse(simplejson.dumps(items_to_return))
def channel(request):
    """Render the app_quobit channel template."""
    return direct_to_template(request, 'app_quobit/channel.html')
| {
"repo_name": "asen6/amartyasenguptadotcom",
"path": "app_quobit/views.py",
"copies": "1",
"size": "5668",
"license": "bsd-3-clause",
"hash": 1160748265949137400,
"line_mean": 32.1445783133,
"line_max": 137,
"alpha_frac": 0.6931898377,
"autogenerated": false,
"ratio": 2.9474778991159645,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41406677368159645,
"avg_score": null,
"num_lines": null
} |
import datetime
import random
import os
from time import sleep
import photos
import quotes
import tweets
# import posts
def clear_screen():
    """Clear the terminal, picking the Windows or POSIX command."""
    command = "cls" if os.name == 'nt' else 'clear'
    os.system(command)
def bot_loop():
    """Run forever: pick an unseen random quote, share it, then sleep.

    Keeps a memory of the five most recently shared quotes so the same
    quote is not repeated back-to-back. Never returns.
    """
    pic_file = 'quote-image.jpg'
    # Most recently shared quotes, newest first; capped at 5 below.
    previous_quotes = list()
    print("*** Twitterbot running... ***")
    while True:
        # Draw quotes until we get one that was not recently shared.
        quote, author = quotes.get_random_quote()
        while quote in previous_quotes:
            quote, author = quotes.get_random_quote()
        # Only share when the quote source returned actual content.
        if quote:
            # Render an image for the quote (may return None).
            photo = photos.get_photo(quote, author, pic_file)
            # With a photo, tweet it and delete the temp image afterwards;
            # otherwise fall back to a text-only tweet.
            if photo:
                tweets.tweet_photo(quote, author, pic_file, photo["name"], photo["user"])
                # posts.share_photo(quote, author, pic_file, photo["name"], photo["user"])
                photos.delete_photo(pic_file)
            else:
                tweets.tweet(quote, author)
                # posts.share(quote, author)
            # Remember this quote at the front of the recency list.
            previous_quotes.insert(0, quote)
            print("*** Quote shared: {}***".format(quote))
            if len(previous_quotes) > 5:
                previous_quotes.pop()
            now = datetime.datetime.now().strftime('%I:%M %p')
            print("*** Last tweet time: {}***".format(now))
        # Sleep a random 3-5 hours plus 1-59 minutes until the next tweet.
        hours = random.randint(3, 5)
        minutes = random.randint(1, 59)
        print("*** Sleeping for {} hours and {} minutes ***".format(hours, minutes))
        sleep((minutes + (hours * 60)) * 60)
# Script entry point: clear the terminal and start the endless bot loop.
if __name__ == '__main__':
    clear_screen()
    print("*** Starting twitterbot... ***")
    bot_loop()
| {
"repo_name": "fpcorso/twitter-quotes-bot",
"path": "quote-bot.py",
"copies": "1",
"size": "1770",
"license": "mit",
"hash": 4954786426132562000,
"line_mean": 25.4179104478,
"line_max": 80,
"alpha_frac": 0.6655367232,
"autogenerated": false,
"ratio": 3.1607142857142856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4326251008914286,
"avg_score": null,
"num_lines": null
} |
"""App related models."""
from __future__ import unicode_literals
import datetime
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext as _
from . import constants
from . import lib
class DNSRecordManager(models.Manager):
    """Custom manager for DNSRecord."""

    def get_or_create_for_domain(self, domain, rtype, ttl=7200):
        """Get or create DNS record for given domain.

        DNS queries are not performed while `ttl` (in seconds) is still valid.
        Returns the fresh or newly created record, or None when the DNS
        lookup produced no value.
        """
        now = timezone.now()
        # A record whose `updated` timestamp lies in the future is still
        # fresh (check_syntax stores now + ttl on success): reuse it.
        record = self.get_queryset().filter(
            domain=domain, type=rtype, updated__gt=now).first()
        if record:
            return record
        # Stale rows are replaced rather than updated in place.
        self.get_queryset().filter(domain=domain, type=rtype).delete()
        record = DNSRecord(domain=domain, type=rtype)
        record.get_dns_record()
        # No DNS answer: store nothing and return None.
        if not record.value:
            return
        record.check_syntax(ttl)
        record.save()
        return record
@python_2_unicode_compatible
class DNSRecord(models.Model):
    """A model to store DNS records for Domain."""

    domain = models.ForeignKey("admin.Domain", on_delete=models.CASCADE)
    # Record type key; one of constants.DNS_RECORD_TYPES (e.g. "dkim").
    type = models.CharField(
        max_length=15, choices=constants.DNS_RECORD_TYPES)
    value = models.TextField(blank=True)
    is_valid = models.BooleanField(default=False)
    error = models.CharField(max_length=50, null=True, blank=True)
    # Freshness marker: set to now + ttl on successful syntax check.
    updated = models.DateTimeField(default=timezone.now)

    objects = DNSRecordManager()

    def __str__(self):
        return "{} ({}): {}".format(self.domain, self.type, self.value)

    def get_dns_record(self):
        """Retrieve corresponding DNS record and store it in self.value."""
        if self.type == "dkim":
            # DKIM lookups need the domain's key selector.
            self.value = lib.get_dkim_record(
                self.domain.name, self.domain.dkim_key_selector)
        else:
            # Every other type dispatches to lib.get_<type>_record.
            func = getattr(lib, "get_{}_record".format(self.type))
            self.value = func(self.domain.name)

    def check_syntax(self, ttl=7200):
        """Check record syntax.

        On success, mark the record valid and push `updated` ttl seconds
        into the future so the manager treats it as fresh.
        """
        try:
            func = getattr(lib, "check_{}_syntax".format(self.type))
            result = func(self.value)
        except lib.DNSSyntaxError as err:
            self.error = str(err)
            self.updated = timezone.now()
            return
        except AttributeError:
            # No checker exists for this record type: accept it as-is.
            pass
        if self.type == "dkim" and result != self.domain.dkim_public_key:
            self.error = _("Public key mismatchs")
            self.updated = timezone.now()
            return
        self.error = ""
        self.is_valid = True
        self.updated = timezone.now() + datetime.timedelta(seconds=ttl)
| {
"repo_name": "tonioo/modoboa",
"path": "modoboa/dnstools/models.py",
"copies": "1",
"size": "2728",
"license": "isc",
"hash": 6986418549691401000,
"line_mean": 31.0941176471,
"line_max": 78,
"alpha_frac": 0.6195014663,
"autogenerated": false,
"ratio": 4.0058737151248165,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5125375181424816,
"avg_score": null,
"num_lines": null
} |
"""App related models."""
import datetime
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext as _
from . import constants
from . import lib
class DNSRecordManager(models.Manager):
    """Custom manager for DNSRecord."""

    def get_or_create_for_domain(self, domain, rtype, ttl=7200):
        """Get or create DNS record for given domain.

        DNS queries are not performed while `ttl` (in seconds) is still valid.
        Returns an existing fresh record, a newly created one, or None when
        the DNS query returned no value.
        """
        now = timezone.now()
        # `updated` in the future means the record is still within its TTL
        # (check_syntax writes now + ttl on success).
        record = self.get_queryset().filter(
            domain=domain, type=rtype, updated__gt=now).first()
        if record:
            return record
        # Expired records are deleted and recreated from a fresh lookup.
        self.get_queryset().filter(domain=domain, type=rtype).delete()
        record = DNSRecord(domain=domain, type=rtype)
        record.get_dns_record()
        # Empty lookup result: nothing persisted, caller gets None.
        if not record.value:
            return
        record.check_syntax(ttl)
        record.save()
        return record
@python_2_unicode_compatible
class DNSRecord(models.Model):
    """A model to store DNS records for Domain."""

    domain = models.ForeignKey("admin.Domain", on_delete=models.CASCADE)
    # One of constants.DNS_RECORD_TYPES, used to dispatch into lib helpers.
    type = models.CharField(
        max_length=15, choices=constants.DNS_RECORD_TYPES)
    value = models.TextField(blank=True)
    is_valid = models.BooleanField(default=False)
    error = models.CharField(max_length=50, null=True, blank=True)
    # Doubles as a freshness marker: future value == still within TTL.
    updated = models.DateTimeField(default=timezone.now)

    objects = DNSRecordManager()

    def __str__(self):
        return "{} ({}): {}".format(self.domain, self.type, self.value)

    def get_dns_record(self):
        """Retrieve corresponding DNS record into self.value."""
        if self.type == "dkim":
            # DKIM records are looked up under the domain's key selector.
            self.value = lib.get_dkim_record(
                self.domain.name, self.domain.dkim_key_selector)
        else:
            # Generic path: lib.get_<type>_record(domain_name).
            func = getattr(lib, "get_{}_record".format(self.type))
            self.value = func(self.domain.name)

    def check_syntax(self, ttl=7200):
        """Check record syntax; on success mark valid and extend freshness.

        A syntax error stores the message and resets `updated` to now; a
        missing checker for the type is treated as acceptance.
        """
        try:
            func = getattr(lib, "check_{}_syntax".format(self.type))
            result = func(self.value)
        except lib.DNSSyntaxError as err:
            self.error = str(err)
            self.updated = timezone.now()
            return
        except AttributeError:
            # No check_<type>_syntax helper: fall through and accept.
            pass
        if self.type == "dkim" and result != self.domain.dkim_public_key:
            self.error = _("Public key mismatchs")
            self.updated = timezone.now()
            return
        self.error = ""
        self.is_valid = True
        self.updated = timezone.now() + datetime.timedelta(seconds=ttl)
| {
"repo_name": "modoboa/modoboa",
"path": "modoboa/dnstools/models.py",
"copies": "1",
"size": "2687",
"license": "isc",
"hash": -1916337480961860600,
"line_mean": 31.3734939759,
"line_max": 78,
"alpha_frac": 0.6174171939,
"autogenerated": false,
"ratio": 4.004470938897168,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5121888132797168,
"avg_score": null,
"num_lines": null
} |
"""App. related serializers."""
from django.utils.translation import ugettext as _
from rest_framework import serializers
from ... import models
# Aggregation periods accepted by StatisticsInputSerializer; "custom"
# additionally requires explicit start and end dates (see validate()).
PERIODS = [
    ("day", "Day"),
    ("week", "Week"),
    ("month", "Month"),
    ("year", "Year"),
    ("custom", "Custom")
]
class StatisticsInputSerializer(serializers.Serializer):
    """Serializer used to filter statistics."""

    # Graph set identifier to export.
    gset = serializers.CharField()
    period = serializers.ChoiceField(choices=PERIODS)
    graphic = serializers.CharField(required=False)
    searchquery = serializers.CharField(required=False)
    # Date bounds; only mandatory when period == "custom".
    start = serializers.DateField(required=False)
    end = serializers.DateField(required=False)

    def validate(self, data):
        """Reject a custom period that lacks explicit start/end dates."""
        condition = (
            data["period"] == "custom" and
            (not data.get("start") or not data.get("end"))
        )
        if condition:
            raise serializers.ValidationError(
                _("You must provide start and end dates when period is custom")
            )
        return data
class GraphPointSerializer(serializers.Serializer):
    """A serializer to represent a point in a curve."""

    x = serializers.FloatField()
    y = serializers.FloatField()
class GraphCurveSerializer(serializers.Serializer):
    """A serializer to represent a curve in a graph."""

    name = serializers.CharField()
    # camelCase kept intentionally: consumed directly by the JS charting side.
    backgroundColor = serializers.CharField()
    data = GraphPointSerializer(many=True)
class GraphSerializer(serializers.Serializer):
    """A serializer to represent a graph (a titled set of curves)."""

    title = serializers.CharField()
    series = GraphCurveSerializer(many=True)
class StatisticsSerializer(serializers.Serializer):
    """Serializer to return statistics (the list of exported graphs)."""

    graphs = GraphSerializer(many=True)
class MaillogSerializer(serializers.ModelSerializer):
    """Serializer for Maillog model."""

    class Meta:
        # Read-only projection of a message-log entry.
        fields = (
            "id", "queue_id", "date", "sender", "rcpt", "original_rcpt",
            "size", "status"
        )
        model = models.Maillog
| {
"repo_name": "modoboa/modoboa",
"path": "modoboa/maillog/api/v2/serializers.py",
"copies": "1",
"size": "2029",
"license": "isc",
"hash": 1689392112415872800,
"line_mean": 25.3506493506,
"line_max": 79,
"alpha_frac": 0.6550024643,
"autogenerated": false,
"ratio": 4.488938053097345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5643940517397346,
"avg_score": null,
"num_lines": null
} |
"""App related serializers."""
from rest_framework import serializers
from modoboa.admin import models as admin_models
from ... import models
class MXRecordSerializer(serializers.ModelSerializer):
    """Serializer for MXRecord."""

    class Meta:
        model = admin_models.MXRecord
        fields = ("name", "address", "updated")
class DNSBLResultSerializer(serializers.ModelSerializer):
    """Serializer for DNSBLResult."""

    # Nested MX record the blacklist check was run against.
    mx = MXRecordSerializer()

    class Meta:
        model = admin_models.DNSBLResult
        fields = ("provider", "mx", "status")
class DNSRecordSerializer(serializers.ModelSerializer):
    """Serializer for DNSRecord."""

    class Meta:
        model = models.DNSRecord
        fields = ("type", "value", "is_valid", "error", "updated")
class DNSDetailSerializer(serializers.ModelSerializer):
    """Aggregate DNS status of a Domain: MX, DNSBL and per-type records."""

    mx_records = MXRecordSerializer(many=True, source="mxrecord_set")
    # One nested DNSRecord per record type exposed by the API.
    autoconfig_record = DNSRecordSerializer()
    autodiscover_record = DNSRecordSerializer()
    spf_record = DNSRecordSerializer()
    dkim_record = DNSRecordSerializer()
    dmarc_record = DNSRecordSerializer()
    dnsbl_results = DNSBLResultSerializer(many=True, source="dnsblresult_set")

    class Meta:
        model = admin_models.Domain
        fields = (
            "mx_records", "dnsbl_results", "autoconfig_record",
            "autodiscover_record", "spf_record", "dkim_record", "dmarc_record"
        )
| {
"repo_name": "modoboa/modoboa",
"path": "modoboa/dnstools/api/v2/serializers.py",
"copies": "1",
"size": "1431",
"license": "isc",
"hash": -8862499637446831000,
"line_mean": 27.0588235294,
"line_max": 78,
"alpha_frac": 0.679245283,
"autogenerated": false,
"ratio": 4.053824362606233,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5233069645606232,
"avg_score": null,
"num_lines": null
} |
"""App related signal handlers."""
import redis
from django.conf import settings
from django.db.models import signals
from django.dispatch import receiver
from modoboa.admin import models as admin_models
from . import constants
def set_message_limit(instance, key):
    """Sync *instance*'s message limit into the Redis quota hash under *key*.

    No-op when the limit did not change since the instance was loaded;
    removes the hash field when the limit was cleared; otherwise applies
    the difference with HINCRBY.
    """
    new_limit = instance.message_limit
    previous_limit = instance._loaded_values.get("message_limit")
    if previous_limit == new_limit:
        # Nothing changed: avoid touching Redis at all.
        return
    client = redis.Redis(
        host=settings.REDIS_HOST,
        port=settings.REDIS_PORT,
        db=settings.REDIS_QUOTA_DB
    )
    if new_limit is None:
        # Limit removed: drop the field if it exists.
        if client.hexists(constants.REDIS_HASHNAME, key):
            client.hdel(constants.REDIS_HASHNAME, key)
        return
    # First assignment stores the full value; later updates store the delta.
    delta = new_limit if previous_limit is None else new_limit - previous_limit
    client.hincrby(constants.REDIS_HASHNAME, key, delta)
@receiver(signals.post_save, sender=admin_models.Domain)
def set_domain_message_limit(sender, instance, created, **kwargs):
    """Store domain message limit in Redis (keyed by the domain name)."""
    set_message_limit(instance, instance.name)
@receiver(signals.post_save, sender=admin_models.Mailbox)
def set_mailbox_message_limit(sender, instance, created, **kwargs):
    """Store mailbox message limit in Redis (keyed by the full address)."""
    set_message_limit(instance, instance.full_address)
| {
"repo_name": "modoboa/modoboa",
"path": "modoboa/policyd/handlers.py",
"copies": "1",
"size": "1435",
"license": "isc",
"hash": 1874189273512701000,
"line_mean": 30.8888888889,
"line_max": 68,
"alpha_frac": 0.7045296167,
"autogenerated": false,
"ratio": 3.727272727272727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49318023439727265,
"avg_score": null,
"num_lines": null
} |
"""App. related viewsets."""
import time
from django.db.models import Q
from drf_spectacular.utils import extend_schema
from rest_framework import filters, permissions, response, viewsets
from modoboa.admin import models as admin_models
from modoboa.lib import pagination
from ... import models
from ... import signals
from . import serializers
class StatisticsViewSet(viewsets.ViewSet):
    """A viewset to provide extra route related to mail statistics."""

    permission_classes = (permissions.IsAuthenticated, )

    @extend_schema(
        parameters=[serializers.StatisticsInputSerializer],
        responses={200: serializers.StatisticsSerializer}
    )
    def list(self, request, **kwargs):
        """Export the graphs of one graph set over the requested period."""
        serializer = serializers.StatisticsInputSerializer(data=request.GET)
        serializer.is_valid(raise_exception=True)
        # Collect every graph set advertised through the get_graph_sets
        # signal; each receiver returns a mapping in result[1].
        graph_sets = {}
        for result in signals.get_graph_sets.send(
                sender="index", user=request.user):
            graph_sets.update(result[1])
        gset = serializer.validated_data["gset"]
        fname = graph_sets[gset].get_file_name(
            request.user, serializer.validated_data.get("searchquery"))
        period = serializer.validated_data["period"]
        if period == "custom":
            # Custom period: both bounds guaranteed by serializer.validate().
            start = int(
                time.mktime(serializer.validated_data["start"].timetuple())
            )
            end = int(
                time.mktime(serializer.validated_data["end"].timetuple())
            )
        else:
            # Relative period: now back to a "-1<period>" offset string,
            # consumed by the graph set's export().
            end = int(time.mktime(time.localtime()))
            start = "-1{}".format(period)
        graphs = graph_sets[gset].export(
            fname, start, end, serializer.validated_data.get("graphic")
        )
        return response.Response({"graphs": graphs})
class MaillogViewSet(viewsets.ReadOnlyModelViewSet):
    """Simple viewset to access message log.

    Read-only; results are limited to domains administered by the
    requesting user and ordered by most recent date first.
    """

    filter_backends = [filters.OrderingFilter, filters.SearchFilter]
    ordering = ["-date"]
    ordering_fields = "__all__"
    pagination_class = pagination.CustomPageNumberPagination
    # BUG FIX: DRF reads `permission_classes`; the previous assignment to a
    # `permissions` attribute was silently ignored, so authentication was
    # never enforced here (compare StatisticsViewSet above).
    permission_classes = (permissions.IsAuthenticated, )
    search_fields = ["queue_id", "sender", "rcpt", "original_rcpt", "status"]
    serializer_class = serializers.MaillogSerializer

    def get_queryset(self):
        """Filter queryset based on current user.

        Keep entries whose sending or receiving domain is one the user
        administers.
        """
        domains = admin_models.Domain.objects.get_for_admin(self.request.user)
        return models.Maillog.objects.filter(
            Q(from_domain__in=domains) | Q(to_domain__in=domains)
        )
| {
"repo_name": "modoboa/modoboa",
"path": "modoboa/maillog/api/v2/viewsets.py",
"copies": "1",
"size": "2523",
"license": "isc",
"hash": 505269602472346430,
"line_mean": 35.0428571429,
"line_max": 78,
"alpha_frac": 0.6547760602,
"autogenerated": false,
"ratio": 4.212020033388981,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5366796093588981,
"avg_score": null,
"num_lines": null
} |
"""App related views."""
from django.utils.translation import ugettext as _
from django.views import generic
from django.contrib.auth import mixins as auth_mixins
from modoboa.admin import models as admin_models
from . import models
class DomainAccessRequiredMixin(auth_mixins.AccessMixin):
    """Check if user can access domain."""

    def dispatch(self, request, *args, **kwargs):
        # Reject the request (per AccessMixin's configured behavior) unless
        # the user is allowed to access the object this view targets.
        if not request.user.can_access(self.get_object()):
            return self.handle_no_permission()
        return super(DomainAccessRequiredMixin, self).dispatch(
            request, *args, **kwargs)
class DNSRecordDetailView(
        auth_mixins.LoginRequiredMixin,
        DomainAccessRequiredMixin,
        generic.DetailView):
    """View to display MX records."""

    model = models.DNSRecord
    template_name = "dnstools/dns_record_detail.html"

    def get_context_data(self, **kwargs):
        """Add extra variables (the translated page title)."""
        context = super(DNSRecordDetailView, self).get_context_data(**kwargs)
        context.update({
            "title": _("{} record of {}").format(
                self.object.type.upper(), self.object.domain)
        })
        return context
class AutoConfigRecordsStatusView(
        auth_mixins.LoginRequiredMixin,
        auth_mixins.PermissionRequiredMixin,
        generic.DetailView):
    """Autoconfig records status view."""

    model = admin_models.Domain
    permission_required = "admin.view_domain"
    template_name = "dnstools/autoconfig_records_status.html"

    def get_queryset(self):
        """Add some prefetching.

        Restrict to the user's administered domains and prefetch their
        DNS records to avoid per-record queries in the template.
        """
        return (
            admin_models.Domain.objects.get_for_admin(self.request.user)
            .prefetch_related("dnsrecord_set")
        )

    def get_context_data(self, **kwargs):
        """Add extra data to context (the translated page title)."""
        context = super(AutoConfigRecordsStatusView, self).get_context_data(
            **kwargs)
        context.update({
            "title": _("Auto configuration records for {}").format(
                self.object.name)
        })
        return context
class DomainDNSConfigurationView(
        auth_mixins.LoginRequiredMixin,
        DomainAccessRequiredMixin,
        generic.DetailView):
    """Page to display DNS configuration for a domain."""

    model = admin_models.Domain
    template_name = "dnstools/domain_dns_configuration.html"

    def get_context_data(self, **kwargs):
        """Add extra variables (the translated page title)."""
        context = super(DomainDNSConfigurationView, self).get_context_data(
            **kwargs)
        context.update({
            "title": _("DNS configuration for {}").format(self.object)
        })
        return context
| {
"repo_name": "modoboa/modoboa",
"path": "modoboa/dnstools/views.py",
"copies": "2",
"size": "2669",
"license": "isc",
"hash": -6050484345232529000,
"line_mean": 30.0348837209,
"line_max": 77,
"alpha_frac": 0.6399400525,
"autogenerated": false,
"ratio": 4.297906602254429,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5937846654754428,
"avg_score": null,
"num_lines": null
} |
"""apprelease URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from rest_framework import routers
from django.contrib import admin
# from release import views
from rest_framework.authtoken import views as restviews
# from django.contrib.auth.models import User
# from rest_framework.authtoken.models import Token
# for user in User.objects.all():
# Token.objects.get_or_create(user=user)
# Routers provide an easy way of automatically determining the URL conf.
router = routers.DefaultRouter()
# Release API endpoints are currently disabled; restore these registrations
# (with the release.views import above) to re-enable them.
# router.register(r'applications', views.ApplicationViewSet)
# router.register(r'releases', views.ReleaseViewSet)
# router.register(r'flavours', views.FlavourViewSet)
# router.register(r'platforms', views.PlatformViewSet)
# router.register(r'environments', views.EnvironmentViewSet)

# URL map: DRF router under /api/, Django admin, browsable-API login pages,
# and token retrieval for API clients.
urlpatterns = [
    url(r'^api/', include(router.urls)),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^api-token-auth/', restviews.obtain_auth_token),
    # Scaffolded template-view routes, kept for reference:
    # url(r'^applications', 'release.views.view_applications', name='view_applications'),
    # url(r'^environments/(?P<application>[0-9]+)/$', views.view_environments, name='view_environments'),
    # url(r'^releases/(?P<application>[0-9]+)/$', views.view_releases_by_application, name='view_releases_by_application'),
    # url(r'^release/(?P<release>[0-9]+)/$', views.view_release, name='view_release'),
]
| {
"repo_name": "gabrielferreira/apprelease",
"path": "apprelease/urls.py",
"copies": "1",
"size": "2039",
"license": "apache-2.0",
"hash": -8333883704245282000,
"line_mean": 44.3111111111,
"line_max": 123,
"alpha_frac": 0.721922511,
"autogenerated": false,
"ratio": 3.5215889464594126,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4743511457459412,
"avg_score": null,
"num_lines": null
} |
"""Apprise platform for notify component."""
import logging
import apprise
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import CONF_URL
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Configuration key for an Apprise configuration source.
CONF_FILE = "config"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        # Zero or more Apprise service URLs (a single string is listified).
        vol.Optional(CONF_URL): vol.All(cv.ensure_list, [str]),
        vol.Optional(CONF_FILE): cv.string,
    }
)
def get_service(hass, config, discovery_info=None):
    """Get the Apprise notification service.

    Builds an Apprise instance from an optional configuration source and
    an optional list of service URLs; returns None when either is invalid.
    """
    apprise_obj = apprise.Apprise()

    config_source = config.get(CONF_FILE)
    if config_source:
        # Targets sourced from an Apprise configuration file/URL.
        apprise_config = apprise.AppriseConfig()
        if not apprise_config.add(config_source):
            _LOGGER.error("Invalid Apprise config url provided")
            return None
        if not apprise_obj.add(apprise_config):
            _LOGGER.error("Invalid Apprise config url provided")
            return None

    # Targets given directly as an ordered list of URLs.
    urls = config.get(CONF_URL)
    if urls and not apprise_obj.add(urls):
        _LOGGER.error("Invalid Apprise URL(s) supplied")
        return None

    return AppriseNotificationService(apprise_obj)
class AppriseNotificationService(BaseNotificationService):
    """Implement the notification service for Apprise."""

    def __init__(self, a_obj):
        """Initialize the service with a configured Apprise instance."""
        self.apprise = a_obj

    def send_message(self, message="", **kwargs):
        """Send a message to a specified target.

        If no target/tags are specified, then services are notified as is
        However, if any tags are specified, then they will be applied
        to the notification causing filtering (if set up that way).
        """
        targets = kwargs.get(ATTR_TARGET)
        title = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
        self.apprise.notify(body=message, title=title, tag=targets)
| {
"repo_name": "kennedyshead/home-assistant",
"path": "homeassistant/components/apprise/notify.py",
"copies": "2",
"size": "2074",
"license": "apache-2.0",
"hash": -5892029862035679000,
"line_mean": 29.0579710145,
"line_max": 73,
"alpha_frac": 0.6663452266,
"autogenerated": false,
"ratio": 4.027184466019418,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5693529692619418,
"avg_score": null,
"num_lines": null
} |
# Approach 1. Use a tuple to store data and min in every stack element
class StackWithMin(object):
    """Stack with O(1) minimum; each slot stores (value, min_so_far)."""

    def __init__(self, storage=None):
        # BUG FIX: the original default `storage=[]` is evaluated once and
        # shared by every instance, so separate stacks mutated one list.
        # An optional caller-supplied list is still accepted.
        self.storage = [] if storage is None else storage

    def empty(self):
        """Return True when the stack holds no elements."""
        return len(self.storage) == 0

    def push(self, value):
        """Push value, pairing it with the running minimum."""
        if len(self.storage) == 0 or value < self.storage[-1][1]:
            self.storage.append((value, value))
        else:
            self.storage.append((value, self.storage[-1][1]))

    def pop(self):
        """Remove and return the top value (IndexError when empty)."""
        return self.storage.pop()[0]

    def get_top(self):
        """Return the top value without removing it, or None when empty."""
        if len(self.storage) == 0: return None
        return self.storage[-1][0]

    def get_min(self):
        """Return the current minimum, or None when empty."""
        if len(self.storage) == 0: return None
        return self.storage[-1][1]
# Approach 2. Use an additional python list to keep track of mins (Save space)
class StackWithMin2:
    """Min-tracking stack: a main list plus an auxiliary list of minimums."""

    def __init__(self):
        self.stack = []
        self.min = []

    def push(self, value):
        """Push value; mirror it onto the min list when it ties or beats it."""
        self.stack.append(value)
        # '<=' keeps duplicates of the minimum so pop() stays in sync.
        if not self.min or value <= self.min[-1]:
            self.min.append(value)

    def pop(self):
        """Remove and return the top value, or None when empty."""
        if not self.stack:
            return None
        top = self.stack.pop()
        if top == self.min[-1]:
            self.min.pop()
        return top

    def get_min(self):
        """Return the current minimum, or None when empty."""
        return self.min[-1] if self.min else None
# Testing
# NOTE: this driver is Python 2 only (xrange and print statements).
from random import randrange

S1 = StackWithMin()
S2 = StackWithMin2()
test_list = [randrange(100) for x in xrange(10)]
# Push the same random values onto both implementations and echo them.
for num in test_list:
    S1.push(num)
    S2.push(num)
    print num,
print ""
# Pop everything back; both stacks must agree on value and minimum.
for i in xrange(len(test_list)):
    print "new pop", S1.pop(), S2.pop()
    print "new min", S1.get_min(), S2.get_min()
| {
"repo_name": "aattaran/Machine-Learning-with-Python",
"path": "CTCI/Chapter 3/Question3_2/ChapQ3.2.py",
"copies": "1",
"size": "1567",
"license": "bsd-3-clause",
"hash": 5569047754290778000,
"line_mean": 24.2903225806,
"line_max": 78,
"alpha_frac": 0.6056158264,
"autogenerated": false,
"ratio": 3.05458089668616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.416019672308616,
"avg_score": null,
"num_lines": null
} |
"""Approaches for calculating haplotype phasing of variants.
"""
import os
from bcbio import broad
from bcbio.utils import file_exists
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import shared
from bcbio.variation import bamprep
def has_variants(vcf_file):
    """Return True when the VCF file holds at least one non-header line."""
    with open(vcf_file) as in_handle:
        # Header lines start with '#'; any other line is a variant record.
        return any(not line.startswith("#") for line in in_handle)
def read_backed_phasing(vcf_file, bam_files, genome_file, region, config):
    """Phase variants using GATK's read-backed phasing.

    http://www.broadinstitute.org/gatk/gatkdocs/
    org_broadinstitute_sting_gatk_walkers_phasing_ReadBackedPhasing.html

    Returns the phased VCF path, or the input VCF unchanged when it has
    no variant records to phase.
    """
    if has_variants(vcf_file):
        broad_runner = broad.runner_from_config(config)
        out_file = "%s-phased%s" % os.path.splitext(vcf_file)
        if not file_exists(out_file):
            # file_transaction writes to a temp path and moves it into place,
            # so a failed run never leaves a partial out_file behind.
            with file_transaction(config, out_file) as tx_out_file:
                params = ["-T", "ReadBackedPhasing",
                          "-R", genome_file,
                          "--variant", vcf_file,
                          "--out", tx_out_file,
                          "--downsample_to_coverage", "250",
                          "--downsampling_type", "BY_SAMPLE"]
                for bam_file in bam_files:
                    params += ["-I", bam_file]
                # Intersect the requested region with configured variant
                # regions, when any are defined.
                variant_regions = config["algorithm"].get("variant_regions", None)
                region = shared.subset_variant_regions(variant_regions, region, out_file)
                if region:
                    params += ["-L", bamprep.region_to_gatk(region),
                               "--interval_set_rule", "INTERSECTION"]
                broad_runner.run_gatk(params)
        return out_file
    else:
        # Nothing to phase: hand the input VCF back untouched.
        return vcf_file
| {
"repo_name": "Cyberbio-Lab/bcbio-nextgen",
"path": "bcbio/variation/phasing.py",
"copies": "12",
"size": "1817",
"license": "mit",
"hash": 7322323050238250000,
"line_mean": 40.2954545455,
"line_max": 89,
"alpha_frac": 0.5767749037,
"autogenerated": false,
"ratio": 3.6930894308943087,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016410664302230567,
"num_lines": 44
} |
# approach is way too slow
from collections import deque
def main():
    """Simulate tree growth and fire queries on a 100x100 grid.

    For each dataset (seed + step count from stdin): plant one tree per
    step at a pseudo-random free cell, maintain connected-component labels
    in `groups`, and count queries where two random trees share a
    component. Prints one count per 100 steps.
    """
    global seed

    def traverse_groups(x, y, group_no):
        # BFS flood-fill: relabel the tree component reachable from (x, y)
        # with group_no, used when a new tree merges several components.
        q = deque()
        q.append((x,y))
        count = 0
        while len(q) > 0:
            # if count > 1:
            #     exit()
            count +=1
            cx, cy = q.popleft()
            # groups[cx][cy] = group_no
            for dir in [[1,0],[0,1],[-1,0],[0,-1]]:
                if 0 <= cx + dir[0] <= 99 and 0 <= cy + dir[1] <= 99:
                    nx = cx + dir[0]
                    ny = cy + dir[1]
                    if trees[nx][ny]:
                        if groups[nx][ny] != group_no:
                            groups[nx][ny] = group_no
                            q.append((nx, ny))

    while True:
        try:
            seed, steps = [int(x) for x in input().split()]
        except EOFError:
            break
        # trees: occupancy grid; groups: component label per cell (-1 = none).
        trees = [[False for _ in range(100)] for _ in range(100)]
        trees_dict = dict()
        groups = [[-1 for _ in range(100)] for _ in range(100)]
        latest_group_no = 0
        # One counter per 100-step window of successful same-group queries.
        fire_queries = [0 for _ in range(int(steps/100))]
        for i in range(steps):
            # Draw random cells until a free one is found, then plant there.
            while True:
                new_tree = rand(seed) % 10000
                new_tree_x = new_tree // 100
                new_tree_y = new_tree % 100
                if not trees[new_tree_x][new_tree_y]:
                    trees[new_tree_x][new_tree_y] = True
                    trees_dict[i] = [new_tree_x, new_tree_y]
                    break
            # Labels of adjacent components the new tree touches.
            # NOTE(review): `is not -1` relies on CPython small-int caching;
            # `!= -1` is the safe spelling — confirm before porting.
            existing_groups = set([groups[x][y] for x, y in directions(new_tree_x, new_tree_y) if groups[x][y] is not -1])
            if len(existing_groups) == 0:
                # Isolated tree: start a brand new component.
                groups[new_tree_x][new_tree_y] = latest_group_no
                latest_group_no += 1
            else:
                # Adopt one neighbour's label; if several components meet
                # here, flood-fill to unify them all under that label.
                groups[new_tree_x][new_tree_y] = next(iter(existing_groups))
                if len(existing_groups) > 1:
                    traverse_groups(new_tree_x, new_tree_y, groups[new_tree_x][new_tree_y])
            # Query: pick two random planted trees by planting index.
            tree_a = trees_dict.get(rand(seed) % (i+1))
            tree_b = trees_dict.get(rand(seed) % (i+1))
            if tree_a and tree_b:
                if groups[tree_a[0]][tree_a[1]] == groups[tree_b[0]][tree_b[1]]:
                    if groups[tree_b[0]][tree_b[1]] != -1:
                        fire_queries[i//100] += 1
        print(" ".join(str(x) for x in fire_queries))
def rand(s):
    """Advance the global LCG state from *s* and return the new value.

    Linear congruential generator: x' = (5171 * x + 13297) mod 50021.
    """
    global seed
    next_value = (5171 * s + 13297) % 50021
    seed = next_value
    return next_value
def directions(x, y):
    """Yield the in-bounds 4-neighbours of (x, y) on the 100x100 grid."""
    for dx, dy in ((1, 0), (0, 1), (-1, 0), (0, -1)):
        nx, ny = x + dx, y + dy
        if 0 <= nx <= 99 and 0 <= ny <= 99:
            yield nx, ny
# Script entry point: read datasets from stdin until EOF.
if __name__ == '__main__':
    main()
| {
"repo_name": "rvrheenen/OpenKattis",
"path": "Python/forestfires/forestfires.py",
"copies": "1",
"size": "3703",
"license": "mit",
"hash": 3822340594949914600,
"line_mean": 38.3936170213,
"line_max": 183,
"alpha_frac": 0.4636780988,
"autogenerated": false,
"ratio": 3.236888111888112,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.916416826889369,
"avg_score": 0.007279588358884478,
"num_lines": 94
} |
# app/roles/views.py
# coding: utf-8
from flask import flash
from flask import redirect, render_template, url_for
from flask_login import current_user, login_required
from app.roles import roles
from app.roles.forms import RolForm
from app import db
from app.models import Rol
def check_edit_or_admin():
    """Return a redirect to the home hub when the user is below editor level.

    Users whose role value is below 1 (neither admin nor editor) get a
    redirect response; others get None.
    """
    # NOTE(review): callers invoke this as a bare statement and discard the
    # returned redirect, so the access check is effectively a no-op there;
    # consider flask.abort or a decorator instead — verify against callers.
    if not current_user.get_urole() >= 1:
        return redirect(url_for("home.hub"))
# SECCION: ***** Rol: PASTOR, ANCIANO; DIACONO, LIDER GRUPO CASERO *****
@roles.route('/roles/<string:flag>', methods=['GET'])
@login_required
def ver_roles(flag):
    """List every Rol of the requested type.

    The same table backs several role kinds (ministries, classes, ...);
    *flag* selects which kind is shown. An empty flag defaults to 'R'.
    """
    check_edit_or_admin()
    # Default to the plain "Roles" listing when no type was given.
    tipo = flag if flag != '' else 'R'
    listado = Rol.query.filter_by(tipo_rol=tipo)
    # flag_listar=True tells the template to render the listing view.
    return render_template(
        'roles/base_roles.html',
        roles=listado,
        flag_listar=True,
        flag_tiporol=tipo)
@roles.route('/roles/crear/<string:flag>',
             methods=['GET', 'POST'])
@login_required
def crear_rol(flag):
    """
    Add a role of type ``flag`` to the database.

    GET renders the creation form; POST validates it, inserts the new
    Rol row and redirects back to the listing for that type.
    """
    check_edit_or_admin()
    # Template switches: creation form, not the listing view.
    flag_crear = True
    flag_listar = False
    form = RolForm()
    if form.validate_on_submit():
        obj_rol = Rol(nombre_rol=form.nombre_rol.data,
                      descripcion_rol=form.descripcion_rol.data,
                      tipo_rol=flag)
        try:
            db.session.add(obj_rol)
            db.session.commit()
            flash('Has guardado los datos correctamente', 'success')
        except Exception as e:
            # BUG FIX: flash() takes (message, category); the previous call
            # flash('Error: ', e, ' danger') passed three positional args
            # and raised a TypeError instead of reporting the failure.
            # Now matches the style used by modif_rol/borrar_rol, and the
            # failed session is rolled back so later requests stay usable.
            db.session.rollback()
            flash('Error: ' + str(e), 'danger')
        return redirect(url_for('roles.ver_roles', flag=flag))
    return render_template(
        'roles/base_roles.html',
        add_roles=flag_crear, flag_listar=flag_listar,
        flag_tiporol=flag, form=form)
@roles.route('/roles/modificar/<int:id>/<string:flag>',
             methods=['GET', 'POST'])
@login_required
def modif_rol(id, flag):
    """
    Edit an existing role.

    GET shows the form pre-filled with the stored values; POST writes
    the submitted values back and redirects to the listing.
    """
    check_edit_or_admin()
    rol_actual = Rol.query.get_or_404(id)
    form = RolForm(obj=rol_actual)
    if form.validate_on_submit():
        rol_actual.nombre_rol = form.nombre_rol.data
        rol_actual.descripcion_rol = form.descripcion_rol.data
        rol_actual.tipo_rol = flag
        try:
            db.session.commit()
            flash('Has modificado los datos correctamente', 'success')
        except Exception as e:
            flash('Error: ' + str(e), 'danger')
        return redirect(url_for('roles.ver_roles', flag=flag))
    # GET (or invalid submit): repopulate the form from the stored row.
    form.nombre_rol.data = rol_actual.nombre_rol
    form.descripcion_rol.data = rol_actual.descripcion_rol
    return render_template(
        'roles/base_roles.html',
        add_roles=False, flag_listar=False,
        form=form, rol=rol_actual, flag_tiporol=flag)
@roles.route('/roles/borrar/<int:id>/<string:flag>',
             methods=['GET'])
@login_required
def borrar_rol(id, flag):
    """
    Delete a role by id and go back to the listing of its type.
    """
    check_edit_or_admin()
    rol_a_borrar = Rol.query.get_or_404(id)
    db.session.delete(rol_a_borrar)
    try:
        db.session.commit()
        flash('Has borrado los datos correctamente', 'success')
    except Exception as e:
        flash('Error: ' + str(e), 'danger')
    return redirect(url_for('roles.ver_roles', flag=flag))
| {
"repo_name": "originaltebas/chmembers",
"path": "app/roles/views.py",
"copies": "1",
"size": "3907",
"license": "mit",
"hash": -2047602267157651200,
"line_mean": 26.5109489051,
"line_max": 72,
"alpha_frac": 0.5752688172,
"autogenerated": false,
"ratio": 3.324255319148936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9381275961166453,
"avg_score": 0.0036496350364963502,
"num_lines": 137
} |
@app.route('/api/v1.0/<int:api_key>/scrape/rushhour/new',methods=['GET'])
########scrapes the Rush Hour index page
def get_rush_hour_index(api_key):
    # Scrape this ISO week's "new arrivals" page on rushhour.nl and upsert
    # each release found into store_mappings (matched by LIKE patterns on
    # label and title).
    # NOTE(review): legacy Python 2 code (print statements); kept byte-identical.
    if str(api_key)!=the_api_key:
        # NOTE(review): a bare int is not a valid Flask view return value;
        # this most likely errors out rather than answering 401 -- confirm.
        return 401
    #base_url = 'http://www.rushhour.nl/store_master.php?idxGroup=2&idxGenre=2&idxSubGenre=&app=250'
    # Build the index URL for the current ISO year/week.
    week = datetime.datetime.utcnow().isocalendar()[1]
    year = datetime.datetime.utcnow().isocalendar()[0]
    base_url = 'http://www.rushhour.nl/store_master.php?blNew=1&bIsOutOfStock=1&numYear=%s&numWeek=%s&app=250' % (year,week)
    #for selenium
    # Headless X display so Firefox can run on a server.
    display = Display(visible=0, size=(800, 600))
    display.start()
    geckodriver_log_location = os.path.join(app.root_path, 'logs', 'geckodriver.log')
    print(geckodriver_log_location)
    # return geckodriver_log_location
    ####now get the HTML
    # Cheap reachability probe before paying the Selenium start-up cost.
    try:
        r = requests.get(base_url,timeout=5)
    except Exception as e:
        return "Failed to request the Rush Hour URL " + base_url, 405
    #need to use selenium because of the popup
    browser = webdriver.Firefox(log_path=geckodriver_log_location)
    browser.get(base_url)
    # Dismiss the site's JS alert if one appears; otherwise carry on.
    try:
        alert = browser.switch_to_alert()
        alert.accept()
        print "alert accpted"
    except:
        print "no alert"
    html = browser.page_source
    browser.close()
    display.sendstop()
    soup = BeautifulSoup(html, "lxml")
    # One product card per "item_wrap1" div on the index page.
    for product in soup.find_all("div","item_wrap1"):
        details = str()
        label_html = str()
        label = str()
        label_url = str()
        artist_title = str()
        split_a_t = str()
        artist = str()
        title = str()
        release_url = str()
        the_release = product.find("div","item_content")
        all_details = the_release.find("h2","title")
        #print all_details
        # First anchor href carries the store's item id after '='.
        release_url = all_details.findAll("a")[0]['href']
        url_split = release_url.split('=')
        store_release_id = url_split[1]
        print store_release_id
        # Heading text is " - "-separated; [1] is title, [2] is label.
        all_details_reg = all_details.text.split(' - ')
        title = all_details_reg[1]
        label = all_details_reg[2]
        print title,label
        if len(store_release_id)<1:
            print('Didnt get the store id - skip')
            continue
        # Very short titles/labels make the LIKE match too fuzzy; skip them.
        if len(label) < 3 or len(title) < 3:
            print('skipping ' + title + ' or ' + label + ' as less than 3 characters')
            continue
        #sql = ('SELECT id FROM releases_all WHERE label_no_country LIKE %s AND title LIKE %s') % ('%' + label + '%','%' + title + '%')
        # Upsert the mapping; db_insert is parameterized, so the LIKE
        # patterns are passed as bound values, not string-formatted SQL.
        try:
            query = db_insert('INSERT INTO store_mappings (release_id,store,store_url,unique_key,store_release_id) SELECT id,%s,%s, md5(concat(id,%s)),%s FROM releases_all WHERE label_no_country LIKE %s AND title LIKE %s ON DUPLICATE KEY UPDATE store_url=values(store_url),store_release_id=values(store_release_id)', ('rushhour','/' + release_url,'rushhour',store_release_id,label + '%','%' + title + '%'))
            data = query.fetchall()
            print(query,data)
        except Exception as e:
            print(str(e))
            continue
    return base_url,201
@app.route('/api/v1.0/<int:api_key>/scrape/rushhour/release/<string:rushhour_id>',methods=['GET'])
def get_rushhour_release(api_key,rushhour_id):
    # Check stock for a single Rush Hour release: the detail page shows a
    # "cart_icon" image only when the item can be added to the cart.
    # NOTE(review): legacy Python 2 code (print statements); kept byte-identical.
    if str(api_key)!=the_api_key:
        # NOTE(review): a bare int is not a valid Flask view return value -- confirm.
        return 401
    base_url = 'http://www.rushhour.nl/store_detailed.php?item=' + rushhour_id
    #for selenium
    # Headless X display so Firefox can run on a server.
    display = Display(visible=0, size=(800, 600))
    display.start()
    geckodriver_log_location = os.path.join(app.root_path, 'logs', 'geckodriver.log')
    print(geckodriver_log_location)
    # return geckodriver_log_location
    ####now get the HTML
    # Cheap reachability probe before paying the Selenium start-up cost.
    try:
        r = requests.get(base_url,timeout=5)
    except Exception as e:
        return "Failed to request the Rush Hour URL " + base_url, 405
    #need to use selenium because of the popup
    browser = webdriver.Firefox(log_path=geckodriver_log_location)
    browser.get(base_url)
    # Dismiss the site's JS alert if one appears; otherwise carry on.
    try:
        alert = browser.switch_to_alert()
        alert.accept()
        print "alert accpted"
    except:
        print "no alert"
    html = browser.page_source
    browser.close()
    display.sendstop()
    soup = BeautifulSoup(html, "lxml")
    # Presence of any cart_icon image on the page == item is in stock.
    stock_details = soup.findAll("img",class_="cart_icon")
    print(stock_details)
    cart_url = 'http://www.rushhour.nl/store_detailed.php?action=add&item=' + rushhour_id
    if len(stock_details) > 0:
        return jsonify({'store':'rushhour','in_stock':'true','cart_url':cart_url})
    else:
return jsonify({'store':'rushhour','in_stock':'false','cart_url':cart_url}) | {
"repo_name": "siquick/ss_api",
"path": "api/retired_rushhour.py",
"copies": "2",
"size": "4591",
"license": "apache-2.0",
"hash": -6474510252118122000,
"line_mean": 32.7647058824,
"line_max": 406,
"alpha_frac": 0.6144630799,
"autogenerated": false,
"ratio": 3.324402606806662,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4938865686706662,
"avg_score": null,
"num_lines": null
} |
@APP.route("/api/v1/channel/<int:channel>/permit/<int:user>",
           methods=["GET", "PATCH", "DELETE"])
def chan_permit(channel, user):
    """
    Check, set, or remove a permit on a per-channel, per-user basis.

    Methods:
        - GET:
            Return the permit state for this user on this channel
            (204 with a null body when no permit exists).
        - PATCH:
            Create or refresh a permit.  Requires an ``expires`` value
            of at least 5 seconds (no infinite permits).
        - DELETE:
            Drop the permit (204 when there was nothing to drop).
    """
    redis_key = "{}${}".format(channel, user)
    if request.method == "GET":
        if not REDIS_CONN.exists(redis_key):
            # That individual permit does not exist; return a "success".
            return make_response(jsonify(None), 204)
        return make_response(
            jsonify(
                {
                    "permitted": True,
                    "remaining": REDIS_CONN.ttl(redis_key),
                    "userId": user
                }
            ), 200
        )
    elif request.method == "PATCH":
        expires_at = request.values.get("expires", None)
        if expires_at is None or int(expires_at) < 5:
            # The expires parameter is REQUIRED when PATCH-ing:
            # no infinite permits (should friend the user instead), and
            # the permit period must be longer than 5 seconds.
            # TODO: Return an error explaining should friend the user
            return make_response(jsonify(None), 406)
        # Create-or-refresh collapses to a single setex call: the previous
        # code had two identical branches for the exists / not-exists cases.
        # TODO: Add error catching on redis return code
        # NOTE(review): the (key, value, ttl) argument order matches the
        # legacy redis-py Redis.setex signature; StrictRedis expects
        # (key, ttl, value) -- confirm which client class REDIS_CONN is.
        REDIS_CONN.setex(redis_key, True, expires_at)
        return make_response(
            jsonify(
                {
                    "permitted": True,
                    "remaining": REDIS_CONN.ttl(redis_key),
                    "userId": user
                }
            ), 200
        )
    elif request.method == "DELETE":
        if REDIS_CONN.exists(redis_key):
            # TODO: Redis error catching
            REDIS_CONN.delete(redis_key)
            return make_response(jsonify(None), 200)
        # Key doesn't exist, can't delete it; return a successful no-op.
        return make_response(jsonify(None), 204)
| {
"repo_name": "CactusDev/CactusAPI",
"path": "permit_examples.py",
"copies": "1",
"size": "2771",
"license": "mit",
"hash": 5575702641372676000,
"line_mean": 34.0759493671,
"line_max": 78,
"alpha_frac": 0.4954889931,
"autogenerated": false,
"ratio": 4.602990033222591,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5598479026322591,
"avg_score": null,
"num_lines": null
} |
@app.route('/log')
def log():
    """Show the login page for an authenticated admin, else go to /login.

    Bug fixes versus the original backup code:
      * ``'user' is session`` was an identity test (always False for a
        string vs the session object) -- now a membership test.
      * ``user_session - escape(...)`` was a subtraction (NameError at
        runtime) -- now an assignment.
    """
    if 'user' in session:
        user_session = escape(session['user']).capitalize()
        return render_template('login.html', user_session=user_session)
    return redirect(url_for('login'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate an admin against the user_admin table (MD5 password).

    Bug fixes versus the original backup code:
      * ``'user' is session`` (identity test, always False) is now
        ``'user' in session``.
      * ``request.methods`` does not exist on Flask's request object;
        it is now ``request.method``.

    NOTE(review): when already logged in this redirects back to 'login'
    itself, which looks like it should target 'admin' -- left as-is to
    preserve behaviour; confirm the intent.
    """
    error = None
    if 'user' in session:
        return redirect(url_for('login'))
    if request.method == 'POST':
        user_form = request.form['user']
        password_form = request.form['password']
        cur.execute("SELECT COUNT(1) FROM user_admin WHERE user = %s;", [user_form])
        if cur.fetchone()[0]:
            cur.execute(" SELECT password FROM user_admin WHERE user = %s;", [user_form])
            for row in cur.fetchall():
                if md5(password_form).hexdigest() == row[0]:
                    session['user'] = request.form['user']
                    return redirect(url_for('admin'))
                else:
                    error = "Ulangi !!!"
        else:
            error = "Ulangi !!!"
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """Drop the admin session key and return to the index page."""
    destino = url_for('index')
    # pop() with a default is a no-op when nobody was logged in.
    session.pop('user', None)
    return redirect(destino)
@app.route('/login_app', methods=['POST'])
def login_app():
    """Authenticate an app user against the Cassandra user_app table.

    Bug fix versus the original backup code: ``'logged_in' is session``
    was an identity test that always evaluated False; it is now a
    membership test.
    """
    error = None
    if 'logged_in' in session:
        return redirect(url_for('login'))
    if request.method == 'POST':
        result = sesi.execute("SELECT COUNT(1) FROM user_app WHERE nip= %s AND jabatan= %s AND password = %s ALLOW FILTERING ;", ((int(request.form['nip'])), request.form['jabatan'], request.form['password'] ))
        # NOTE(review): Cassandra ResultSet objects expose .one()/indexing,
        # not .fetch() -- confirm this against the driver version in use.
        if result.fetch()[0]:
            session['logged_in'] = True
            return redirect(url_for('admin'))
        else:
            error = "Ulangi !!!"
        return render_template('login.html', error=error)
    # NOTE(review): with methods=['POST'] the code below never runs, and
    # `form`, `user`, `is_safe_url` are undefined here -- leftover scratch
    # from this backup file, kept as-is.
    form = LoginForm()
    if form.validate_on_submit():
        login_user(user)
        flask.flash('Logged in successfully.')
        next = flask.request.args.get('next')
        if not is_safe_url(next):
            return flask.abort(400)
        return flask.redirect(next or flask.url_for('admin'))
@app.route('/home')
def home():
    """Greet a logged-in user; show the login page to everyone else."""
    if session.get('logged_in'):
        return "hello Boos!"
    return render_template('login.html')
@app.route('/login', methods=['POST'])
def do_admin_login():
    # Hard-coded credential check; on success mark the session logged in.
    # NOTE(review): this is a backup/scratch file -- everything after the
    # first `return home()` below is unreachable and references undefined
    # names (`form`, `db`); kept byte-identical on purpose.
    if request.form['password'] == 'password' and request.form['username'] == 'admin':
        session['logged_in'] = True
    else:
        flash('wrong password!')
    return home()
    if not session.get('logged_in'):
        return render_template('login.html')
    else:
        return render_template('admin.html')
    error = None
    if 'username' in session:
        return redirect(url_for('admin'))
    if request.method == 'POST':
        username_form = request.form['username']
        password_form = request.form['password']
        user = db
        if form.validate() == False:
            return render_template('login_admin.html', form=form)
        else:
            session['username'] = form.username.data
            return redirect(url_for('admin'))
    elif request.method == 'GET':
        return render_template('login_admin.html', form=form)
# NOTE(review): scratch fragment from an upload handler -- these
# request.form reads only work inside a Flask request context and are
# not attached to any function in this backup file.
_nim = request.form['nim']
_nama_mhs = request.form['nama']
_angkatan = request.form['angkatan']
_tahun = request.form['Tlulus']
_prodi = request.form['jurusan']
_judul = request.form['judul']
_kata_kunci = request.form['kunci']
_intisari = request.form['Isari']
_pembimbing = request.form['PBimbing']
_password = request.form['pas']
_file_doc = request.form['file']
def __init__(self, nim, nama_mhs, angkatan, tahun, prodi, judul, kata_kunci, intisari, pembimbing, password, file_doc):
    """Store each thesis-document metadata field verbatim on the instance.

    NOTE(review): orphaned method in this backup file -- the enclosing
    class definition is not present here.
    """
    self.nim = nim
    self.nama_mhs = nama_mhs
    self.angkatan = angkatan
    self.tahun = tahun
    self.prodi = prodi
    self.judul = judul
    self.kata_kunci = kata_kunci
    self.intisari = intisari
    self.pembimbing = pembimbing
    self.password = password
    self.file_doc = file_doc
!! file index.html lama
{% for doc in data %}
<div class="col-md-8">
<h3>Judul : <a href="{{ url_for('detil_doc', nim=doc.nim, judul=doc.judul) }}"> {{ doc.judul }} </a></h3>
<p>Prodi : {{ doc.prodi }} | Tahun : {{ doc.tahun }} </p>
<hr>
</div>
{% endfor %}
!! footer, belum terpakai, blum bermanfaat
<div class="col-md-12">
<footer class="footer">
<div class="container">
<p class="text-muted">Copy right SISTEM Arsip Document STMIK AKAKOM</p>
</div>
</footer>
</div>
!! Pesan error
{% if error %}
<div class="alert alert-danger"> <strong>{{ error }}</strong></div>
{% endif %}
nim = nim,
intisari = intisari,
tahun = tahun,
nama_mhs = nama,
jurusan = jurusan,
pembimbing = dos_pembimbing,
kata_kunci = kunci_kata,
file1 = file_doc1,
file2 = file_doc2,
file3 = file_doc3,
file4 = file_doc4,
file5 = file_doc5
# Connect to the Cassandra node (Docker default-bridge address) and
# select the `project` keyspace for the module-level `sesi` session.
from cassandra.cluster import Cluster
cluster = Cluster(['172.17.0.2'])
sesi = cluster.connect()
sesi.execute("USE project")
Dokumen.objects( prodi=request.form['jurusan_mhs'],
nim=request.form['nim_mhs']).update(
nama_mhs=request.form['nama_mhs'],
judul=request.form['judul_doc']
)
kata_kunci=request.form['kunci_kata'],
intisari=request.form['intisari_doc'],
pembimbing=request.form['pembimbing_mhs']
delet = DELETE FROM dokumen WHERE prodi ="Teknik Informatika" AND nim=14231112;
@app.route('/respon_delet', methods=['GET','POST'])
def respon_delet():
    # NOTE(review): `delet` is not defined as valid Python anywhere in this
    # backup file (the nearby `delet = DELETE FROM ...` line is raw CQL),
    # so this handler would raise NameError if ever hit.
    return render_template("data_doc.html", delet=delet)
if 'logged_in' is session:
return redirect(url_for('login'))
if request.method == 'POST':
result = sesi.execute("SELECT COUNT(1) FROM user_app WHERE nip= %s AND jabatan= %s AND password = %s ALLOW FILTERING ;", ((int(request.form['nip'])), request.form['jabatan'], request.form['password'] ))
if result == 1:
session['logged_in'] = True
return redirect(url_for('admin'))
else:
error = "Ulangi !!!"
return render_template('login.html', error=error)
data_nip = sesi.execute(" SELECT nip FROM user_app WHERE jabatan = %s AND nip = %s ;", (request.form['jabatan'], (int(request.form['nip']))))
data = sesi.execute_async("SELECT COUNT(1) FROM user_app WHERE nip= %s AND jabatan= %s AND password = %s ALLOW FILTERING ;", ((int(request.form['nip'])), request.form['jabatan'], request.form['password'] ))
rows = data.result()
nip_form = request.form['nip']
jabatan_form = request.form['jabatan']
password_form = request.form['password'] | {
"repo_name": "Ahmad31/Web_Flask_Cassandra",
"path": "backup_code.py",
"copies": "1",
"size": "6284",
"license": "apache-2.0",
"hash": -8459540921603265000,
"line_mean": 26.9333333333,
"line_max": 207,
"alpha_frac": 0.6359007002,
"autogenerated": false,
"ratio": 2.865481076151391,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8948847350659792,
"avg_score": 0.010506885138319915,
"num_lines": 225
} |
# approved senders: https://support.google.com/a/answer/2368132
# NOTE(review): Python 2 script (raw_input / print statements).
password = raw_input('Enter the admin password: ')
import smtplib
import csv
import time
csvFile = 'userAccountsToEmail2.csv'
logFile = open('userAccountsThatHaveBeenEmailed.csv', 'a') #open a log file that we'll append to
# NOTE(review): this open() handle is never closed; the CSV is opened a
# second time below for the actual parse.
totalRows = sum(1 for row in open(csvFile, 'rb')) #count how many rows there are in the CSV file
print 'There are ', totalRows, ' entries in ', csvFile
countDown = totalRows #a variable we'll decrement as a count down to completion
currentRow = 0 #for keeping track of where we are in the CSV file
def buildEmail(recipient, firstName, admin_email=None):
    """Build the raw SMTP message warning that an account will be archived.

    Parameters:
        recipient   -- email address the warning is sent to (also the To:).
        firstName   -- used in the subject-line greeting.
        admin_email -- From: address.  Defaults to the module-level
                       ``adminEmail`` global for backward compatibility
                       (the original read that global implicitly, which
                       made the function untestable in isolation).

    Returns the headers plus HTML body joined into the single string that
    smtplib's sendmail() expects.
    """
    if admin_email is None:
        admin_email = adminEmail
    subject = firstName + ', your account will soon be archived'
    paragraph1 = 'In case you have not already heard at school, this account (' + recipient + ') will be archived (and no longer available) on October 31st.<p>'
    paragraph2 = 'For information on how to move your data to your new account, see <a href="http://is.eips.ca/about/school-news/post/transferring-data-to-a-new-google-account">http://is.eips.ca/about/school-news/post/transferring-data-to-a-new-google-account</a>.<p>'
    paragraph3 = 'Please start using your new account as soon as possible. New student accounts follow the pattern firstname lastinitial and two-digit grad year (e.g. davidh26).<p>'
    paragraph4 = 'For more information, contact your school Google Administrator.'
    body = paragraph1 + paragraph2 + paragraph3 + paragraph4
    headers = "\r\n".join(["from: " + admin_email, "subject: " + subject, "to: " + recipient, "mime-version: 1.0", "content-type: text/html"])
    content = headers + "\r\n\r\n" + body #join everything together in to a single variable
    return content
def sendEmail(adminEmail, password, recipient, content):
    """Send `content` to `recipient` via Gmail SMTP using STARTTLS."""
    smtp = smtplib.SMTP('smtp.gmail.com', 587)
    smtp.ehlo()
    smtp.starttls()
    # Authenticate as the sending admin account, then hand over the message.
    smtp.login(adminEmail, password)
    smtp.sendmail(adminEmail, recipient, content)
    return 'email sent to ' + recipient
# Main loop: read (email, domain, firstName) rows from the CSV, send each
# person a warning email and append a timestamped line to the log file.
# NOTE(review): Python 2 print statements; kept byte-identical.
importFile = open(csvFile, 'rb') #(re)open the CSV file that we want to parse (since totalRows already looped through it)
reader = csv.reader(importFile) #we'll read the CSV file with this
for row in reader: #the loop that reads through the CSV file we mentioned earlier
    recipient = row[0] #the first entry on this row is the email address
    domain = row[1]
    firstName = row[2]
    if domain != 'Domain': #meaning that we are not looking at the header row
        # buildEmail reads this global adminEmail, so it must be set first.
        adminEmail = 'is@' + domain
        content = buildEmail(recipient, firstName)
        print sendEmail(adminEmail, password, recipient, content)
        print countDown, '(row number ', currentRow, ' completed)'
        logFile.write(time.strftime('%Y-%m-%d_%H%M%S'))
        logFile.write(',')
        logFile.write(recipient)
        logFile.write('\n')
    currentRow += 1 #increment the currentRow variable
    countDown -= 1 #decrement the countDown variable
#close the file(s)
importFile.close() | {
"repo_name": "misterhay/GoogleAppsProvisioning",
"path": "sendEmails.py",
"copies": "1",
"size": "2980",
"license": "unlicense",
"hash": 5241556925070116000,
"line_mean": 53.2,
"line_max": 268,
"alpha_frac": 0.7177852349,
"autogenerated": false,
"ratio": 3.6033857315598548,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9742595586249048,
"avg_score": 0.015715076042161494,
"num_lines": 55
} |
# Approximate 2d posterior using PyMc3
# https://www.ritchievink.com/blog/2019/06/10/bayesian-inference-how-we-are-able-to-chase-the-posterior/
# We use the same data and model as in posteriorGrid2d.py
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import pymc3 as pm
figdir = "../figures"
import os
def save_fig(fname):
    """Write the current matplotlib figure into `figdir` (no-op if unset)."""
    if figdir:
        target = os.path.join(figdir, fname)
        plt.savefig(target)
# Two observations of a 1-D Gaussian with unknown mean and std-dev.
data = np.array([195, 182])
# lets create a grid of our two parameters
mu = np.linspace(150, 250)
# Reversed so the sigma axis runs top-to-bottom for imshow's extent below.
sigma = np.linspace(0, 15)[::-1]
mm, ss = np.meshgrid(mu, sigma) # just broadcasted parameters
# Likelihood = product of the two iid Normal densities at each grid point.
likelihood = stats.norm(mm, ss).pdf(data[0]) * stats.norm(mm, ss).pdf(data[1])
aspect = mm.max() / ss.max() / 3
extent = [mm.min(), mm.max(), ss.min(), ss.max()]
# extent = left right bottom top
# Independent priors: Normal(200, 15) on mu, Cauchy(0, 10) on sigma.
prior = stats.norm(200, 15).pdf(mm) * stats.cauchy(0, 10).pdf(ss)
# Posterior - grid
unnormalized_posterior = prior * likelihood
# sigma's grid includes 0, which produces NaNs; nan_to_num keeps the
# normalizing sum finite.
posterior = unnormalized_posterior / np.nan_to_num(unnormalized_posterior).sum()
plt.figure()
plt.imshow(posterior, cmap='Blues', aspect=aspect, extent=extent)
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
plt.title('Grid approximation')
save_fig('bayes_unigauss_2d_grid.pdf')
plt.show()
# Same model as the grid approximation above, fit by MCMC with PyMC3.
with pm.Model():
    # priors
    mu = pm.Normal('mu', mu=200, sd=15)
    sigma = pm.HalfCauchy('sigma', 10)
    # likelihood
    observed = pm.Normal('observed', mu=mu, sd=sigma, observed=data)
    # sample
    trace = pm.sample(draws=10000, chains=1)
pm.traceplot(trace);
# Scatter the posterior draws on the same axes as the grid plot so the
# two approximations can be compared visually.
plt.figure()
plt.scatter(trace['mu'], trace['sigma'], alpha=0.01)
plt.xlim([extent[0], extent[1]])
plt.ylim([extent[2], extent[3]])
plt.ylabel('$\sigma$')
plt.xlabel('$\mu$')
plt.title('MCMC samples')
save_fig('bayes_unigauss_2d_pymc3_post.pdf')
plt.show() | {
"repo_name": "probml/pyprobml",
"path": "scripts/bayes_unigauss_2d_pymc3.py",
"copies": "1",
"size": "1756",
"license": "mit",
"hash": 5949871983714751000,
"line_mean": 27.8032786885,
"line_max": 104,
"alpha_frac": 0.6884965831,
"autogenerated": false,
"ratio": 2.7961783439490446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39846749270490445,
"avg_score": null,
"num_lines": null
} |
# Approximate 2d posterior using pyro SVI
# https://www.ritchievink.com/blog/2019/06/10/bayesian-inference-how-we-are-able-to-chase-the-posterior/
# We use the same data and model as in posteriorGrid2d.py
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import pyro
import pyro.distributions as dist
import pyro.optim
from pyro.infer import SVI, Trace_ELBO
import torch
import torch.distributions.constraints as constraints
import numpy as np
figdir = "../figures"
import os
def save_fig(fname):
    """Save the active matplotlib figure under `figdir`; skip when unset."""
    if not figdir:
        return
    plt.savefig(os.path.join(figdir, fname))
np.random.seed(0)
# Two observations of a 1-D Gaussian with unknown mean and std-dev.
data = np.array([195, 182])
# lets create a grid of our two parameters
mu = np.linspace(150, 250)
# Reversed so the sigma axis runs top-to-bottom for imshow's extent below.
sigma = np.linspace(0, 15)[::-1]
mm, ss = np.meshgrid(mu, sigma) # just broadcasted parameters
# Likelihood = product of the two iid Normal densities at each grid point.
likelihood = stats.norm(mm, ss).pdf(data[0]) * stats.norm(mm, ss).pdf(data[1])
aspect = mm.max() / ss.max() / 3
extent = [mm.min(), mm.max(), ss.min(), ss.max()]
# extent = left right bottom top
# Independent priors: Normal(200, 15) on mu, Cauchy(0, 10) on sigma.
prior = stats.norm(200, 15).pdf(mm) * stats.cauchy(0, 10).pdf(ss)
# Posterior - grid
unnormalized_posterior = prior * likelihood
# sigma's grid includes 0, which produces NaNs; nan_to_num keeps the
# normalizing sum finite.
posterior = unnormalized_posterior / np.nan_to_num(unnormalized_posterior).sum()
plt.figure()
plt.imshow(posterior, cmap='Blues', aspect=aspect, extent=extent)
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
plt.title('Grid approximation')
plt.show()
def model():
    """Pyro generative model: Normal likelihood, Normal/HalfCauchy priors.

    Mirrors the grid approximation above, which conditions on
    ``data = [195, 182]``.
    """
    # priors
    mu = pyro.sample('mu', dist.Normal(loc=torch.tensor(200.),
                                       scale=torch.tensor(15.)))
    sigma = pyro.sample('sigma', dist.HalfCauchy(scale=torch.tensor(10.)))
    # likelihood
    with pyro.plate('plate', size=2):
        # BUG FIX: the observations previously read [195., 185.], while the
        # grid approximation in this same script (and its header comment
        # "we use the same data and model") conditions on data = [195, 182].
        # Use the same data so the two posteriors are comparable.
        pyro.sample(f'obs', dist.Normal(loc=mu, scale=sigma),
                    obs=torch.tensor([195., 182.]))
def guide():
    """Mean-field variational guide: factorized q(mu) q(sigma)."""
    # variational parameters
    var_mu = pyro.param('var_mu', torch.tensor(180.))
    # Positivity constraint keeps the Normal scale valid during optimization.
    var_mu_sig = pyro.param('var_mu_sig', torch.tensor(5.),
                            constraint=constraints.positive)
    var_sig = pyro.param('var_sig', torch.tensor(5.))
    # factorized distribution: Normal for mu, Chi2 (df = var_sig) for sigma
    pyro.sample('mu', dist.Normal(loc=var_mu, scale=var_mu_sig))
    pyro.sample('sigma', dist.Chi2(var_sig))
# Run stochastic variational inference: 1000 ClippedAdam steps on the ELBO.
pyro.clear_param_store()
pyro.enable_validation(True)
svi = SVI(model, guide,
          optim=pyro.optim.ClippedAdam({"lr":0.01}),
          loss=Trace_ELBO())
# do gradient steps
c = 0
for step in range(1000):
    c += 1
    loss = svi.step()
    if step % 100 == 0:
        print("[iteration {:>4}] loss: {:.4f}".format(c, loss))
# Draw 10k samples from the fitted factorized posterior q(sigma) q(mu).
sigma = dist.Chi2(pyro.param('var_sig')).sample((10000,)).numpy()
mu = dist.Normal(pyro.param('var_mu'), pyro.param('var_mu_sig')).sample((10000,)).numpy()
# Scatter the VI samples on the same axes as the grid plot for comparison.
plt.figure()
plt.scatter(mu, sigma, alpha=0.01)
plt.xlim([extent[0], extent[1]])
plt.ylim([extent[2], extent[3]])
plt.ylabel('$\sigma$')
plt.xlabel('$\mu$')
plt.title('VI samples')
save_fig('bayes_unigauss_2d_pyro_post.pdf')
plt.show() | {
"repo_name": "probml/pyprobml",
"path": "scripts/bayes_unigauss_2d_pyro.py",
"copies": "1",
"size": "2943",
"license": "mit",
"hash": 7117855916930375000,
"line_mean": 29.3505154639,
"line_max": 104,
"alpha_frac": 0.6530750934,
"autogenerated": false,
"ratio": 2.9817629179331306,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41348380113331307,
"avg_score": null,
"num_lines": null
} |
"""Approximate bilateral rank filter for local (custom kernel) mean.
The local histogram is computed using a sliding window similar to the method
described in [1]_.
The pixel neighborhood is defined by:
* the given structuring element
* an interval [g-s0, g+s1] in greylevel around g the processed pixel greylevel
The kernel is flat (i.e. each pixel belonging to the neighborhood contributes
equally).
Result image is 8-/16-bit or double with respect to the input image and the
rank filter operation.
References
----------
.. [1] Huang, T., Yang, G., Tang, G. "A fast two-dimensional
median filtering algorithm", IEEE Transactions on Acoustics, Speech and
Signal Processing, Feb 1979. Volume: 27, Issue: 1, Page(s): 13 - 18.
"""
import numpy as np
from ... import img_as_ubyte
from ..._shared.utils import assert_nD
from . import bilateral_cy
from .generic import _handle_input
__all__ = ['mean_bilateral', 'pop_bilateral', 'sum_bilateral']
def _apply(func, image, selem, out, mask, shift_x, shift_y, s0, s1,
           out_dtype=None):
    """Validate and normalise the inputs, run the Cython kernel ``func``
    over ``image``, and return the output collapsed to its leading two
    axes."""
    assert_nD(image, 2)
    prepared = _handle_input(image, selem, out, mask, out_dtype)
    image, selem, out, mask, max_bin = prepared
    func(image, selem,
         shift_x=shift_x, shift_y=shift_y,
         mask=mask, out=out, max_bin=max_bin,
         s0=s0, s1=s1)
    return out.reshape(out.shape[:2])
def mean_bilateral(image, selem, out=None, mask=None, shift_x=False,
                   shift_y=False, s0=10, s1=10):
    """Apply a flat kernel bilateral filter.

    This is an edge-preserving and noise reducing denoising filter. It averages
    pixels based on their spatial closeness and radiometric similarity.

    Spatial closeness is measured by considering only the local pixel
    neighborhood given by a structuring element.

    Radiometric similarity is defined by the greylevel interval [g-s0, g+s1]
    where g is the current pixel greylevel.

    Only pixels belonging to the structuring element and having a greylevel
    inside this interval are averaged.

    Parameters
    ----------
    image : 2-D array (uint8, uint16)
        Input image.
    selem : 2-D array
        The neighborhood expressed as a 2-D array of 1's and 0's.
    out : 2-D array (same dtype as input)
        If None, a new array is allocated.
    mask : ndarray
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y : int
        Offset added to the structuring element center point. Shift is bounded
        to the structuring element sizes (center must be inside the given
        structuring element).
    s0, s1 : int
        Define the [s0, s1] interval around the greyvalue of the center pixel
        to be considered for computing the value.

    Returns
    -------
    out : 2-D array (same dtype as input image)
        Output image.

    See also
    --------
    skimage.filters.denoise_bilateral for a Gaussian bilateral filter.

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.morphology import disk
    >>> from skimage.filters.rank import mean_bilateral
    >>> img = data.camera().astype(np.uint16)
    >>> bilat_img = mean_bilateral(img, disk(20), s0=10,s1=10)
    """
    # Delegate to the shared driver with the Cython mean kernel.
    filter_kwargs = dict(out=out, mask=mask,
                         shift_x=shift_x, shift_y=shift_y,
                         s0=s0, s1=s1)
    return _apply(bilateral_cy._mean, image, selem, **filter_kwargs)
def pop_bilateral(image, selem, out=None, mask=None, shift_x=False,
                  shift_y=False, s0=10, s1=10):
    """Return the local number (population) of pixels.

    The number of pixels is defined as the number of pixels which are included
    in the structuring element and the mask. Additionally the must have a
    greylevel inside the interval [g-s0, g+s1] where g is the greyvalue of the
    center pixel.

    Parameters
    ----------
    image : 2-D array (uint8, uint16)
        Input image.
    selem : 2-D array
        The neighborhood expressed as a 2-D array of 1's and 0's.
    out : 2-D array (same dtype as input)
        If None, a new array is allocated.
    mask : ndarray
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y : int
        Offset added to the structuring element center point. Shift is bounded
        to the structuring element sizes (center must be inside the given
        structuring element).
    s0, s1 : int
        Define the [s0, s1] interval around the greyvalue of the center pixel
        to be considered for computing the value.

    Returns
    -------
    out : 2-D array (same dtype as input image)
        Output image.

    Examples
    --------
    >>> from skimage.morphology import square
    >>> import skimage.filters.rank as rank
    >>> img = 255 * np.array([[0, 0, 0, 0, 0],
    ...                       [0, 1, 1, 1, 0],
    ...                       [0, 1, 1, 1, 0],
    ...                       [0, 1, 1, 1, 0],
    ...                       [0, 0, 0, 0, 0]], dtype=np.uint16)
    >>> rank.pop_bilateral(img, square(3), s0=10, s1=10)
    array([[3, 4, 3, 4, 3],
           [4, 4, 6, 4, 4],
           [3, 6, 9, 6, 3],
           [4, 4, 6, 4, 4],
           [3, 4, 3, 4, 3]], dtype=uint16)
    """
    # Delegate to the shared driver with the Cython population kernel.
    filter_kwargs = dict(out=out, mask=mask,
                         shift_x=shift_x, shift_y=shift_y,
                         s0=s0, s1=s1)
    return _apply(bilateral_cy._pop, image, selem, **filter_kwargs)
def sum_bilateral(image, selem, out=None, mask=None, shift_x=False,
                  shift_y=False, s0=10, s1=10):
    """Apply a flat kernel bilateral filter.

    This is an edge-preserving and noise reducing denoising filter. It averages
    pixels based on their spatial closeness and radiometric similarity.

    Spatial closeness is measured by considering only the local pixel
    neighborhood given by a structuring element (selem).

    Radiometric similarity is defined by the greylevel interval [g-s0, g+s1]
    where g is the current pixel greylevel.

    Only pixels belonging to the structuring element AND having a greylevel
    inside this interval are summed.

    Note that the sum may overflow depending on the data type of the input
    array.

    Parameters
    ----------
    image : 2-D array (uint8, uint16)
        Input image.
    selem : 2-D array
        The neighborhood expressed as a 2-D array of 1's and 0's.
    out : 2-D array (same dtype as input)
        If None, a new array is allocated.
    mask : ndarray
        Mask array that defines (>0) area of the image included in the local
        neighborhood. If None, the complete image is used (default).
    shift_x, shift_y : int
        Offset added to the structuring element center point. Shift is bounded
        to the structuring element sizes (center must be inside the given
        structuring element).
    s0, s1 : int
        Define the [s0, s1] interval around the greyvalue of the center pixel
        to be considered for computing the value.

    Returns
    -------
    out : 2-D array (same dtype as input image)
        Output image.

    See also
    --------
    skimage.filters.denoise_bilateral for a Gaussian bilateral filter.

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.morphology import disk
    >>> from skimage.filters.rank import sum_bilateral
    >>> img = data.camera().astype(np.uint16)
    >>> bilat_img = sum_bilateral(img, disk(10), s0=10, s1=10)
    """
    # Delegate to the shared driver with the Cython sum kernel.
    filter_kwargs = dict(out=out, mask=mask,
                         shift_x=shift_x, shift_y=shift_y,
                         s0=s0, s1=s1)
    return _apply(bilateral_cy._sum, image, selem, **filter_kwargs)
| {
"repo_name": "warmspringwinds/scikit-image",
"path": "skimage/filters/rank/bilateral.py",
"copies": "3",
"size": "7652",
"license": "bsd-3-clause",
"hash": 3386529946054214700,
"line_mean": 33.4684684685,
"line_max": 79,
"alpha_frac": 0.6317302666,
"autogenerated": false,
"ratio": 3.5393154486586496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 222
} |
# approximate fbp alg.
from pylab import *
import numpy
ion()
convolve = numpy.convolve
import tomo2D as tomo
import time
from phantoms_tomo2D import generate_shepp_logan
# Build the reference phantom and the reconstruction/reference images.
# NOTE(review): Python 2 script (print statements); kept byte-identical.
phshepp = generate_shepp_logan()
print '''This program is designed to run in an ipython shell with matplotlib.
From terminal command line run:
ipython --pylab
From within the ipython shell execute:
run fanbeam_fbp_test.py
'''
# Image grid: 512x512 pixels covering [-1, 1] x [-1, 1].
nx0 = 512
ny0 = 512
xlen0 = 2.
ylen0 = 2.
x00 = -1.
y00 = -1.
fbp_image = tomo.image2D(shape=(nx0,ny0),x0=x00,xlen=xlen0,y0=y00,ylen=ylen0)
image = tomo.image2D(shape=(nx0,ny0),x0=x00,xlen=xlen0,y0=y00,ylen=ylen0)
print "embedding phantom in image"
phshepp.collapse_to(image)
# Sinogram grid: 512 source angles over [0, 2*pi), 512 detector bins
# covering [-2, 2] on the detector line.
ns0 = 512
nu0 = 512
slen0 = 2.*pi
ulen0 = 4.
s00 = 0.
u00 = -2.
# Fan-beam geometry: source radius 5, source-to-detector distance 8.
source_rad = 5.
source_det = 8.
du = ulen0/nu0
# Detector spacing rescaled to the source radius (magnification factor).
dup = du*source_rad/source_det
# from kak-slaney
def ramp_kernel(np, du):
    """Spatial-domain ramp (Ram-Lak) filter kernel for filtered
    backprojection.

    Parameters:
        np -- number of filter taps (NOTE: parameter name shadows the
              common numpy alias; kept for call compatibility).
        du -- detector sample spacing.

    Returns a length-`np` array of filter samples, already divided by 2
    as in the original formulation.

    Fix: the odd-length branch previously computed slice indices with
    `/`, which yields floats under Python 3's true division and raises
    TypeError; integer arithmetic (`//`) is used instead and is
    identical under Python 2.
    """
    if mod(np, 2) == 0:
        # Even tap count: closed-form expression evaluated on a
        # symmetric half-open grid of sample positions.
        taps = arange(-np / 2., np / 2., 1.)
        kernel = ((-1.) ** taps) / (2. * du * du * pi * taps + du * du * pi) \
            - 1. / ((8. * du * du) * (pi * taps / 2. + pi / 4.) ** 2)
    else:
        # Odd tap count: classic Ram-Lak samples -- -1/(pi*du*k)^2 at odd
        # offsets, 0 at even offsets, 1/(4*du^2) at the center.
        half = (np - 1) // 2
        kernel = arange(-half, half + 1., 1.)
        kernel[half + 1::2] = -1. / (pi * du * kernel[half + 1::2]) ** 2
        kernel[half - 1::-2] = -1. / (pi * du * kernel[half - 1::-2]) ** 2
        kernel[half::2] = 0.
        kernel[half::-2] = 0.
        kernel[half] = 1. / (4. * du * du)
    return kernel / 2.
# Sinogram holding the forward projections of the phantom.
sino = tomo.sinogram2D(\
config_name='circular_fan',\
parms={"source_to_detector":source_det,\
"radius"	:source_rad},\
shape=(ns0,nu0),\
s0=s00,slen=slen0, u0 = u00, ulen = ulen0)
# Scratch sinogram receiving the filtered data before backprojection.
work_sino = tomo.sinogram2D(\
config_name='circular_fan',\
parms={"source_to_detector":source_det,\
"radius"	:source_rad},\
shape=(ns0,nu0),\
s0=s00,slen=slen0, u0 = u00, ulen = ulen0)
phshepp.project_to(sino)
# Ramp kernel sampled on the virtual-detector spacing; the extra *dup
# implements the discrete convolution's integration measure.
rk = ramp_kernel(2*nu0-1,dup) *dup
# Cosine-type fan-beam data weighting: source_rad / sqrt(source_rad^2 + u'^2)
# evaluated at the (bin-centered) virtual detector coordinates.
data_weight = arange( u00 + du/2., u00+ du/2. +ulen0, du)
data_weight *= source_rad/source_det
data_weight = source_rad/sqrt(source_rad**2 + data_weight**2)
print "filtering data..."
t0 = time.time()
# Row-by-row 1-D convolution of the weighted projections with the ramp
# kernel; mode 0 ('valid') keeps exactly nu0 output samples per view.
for i in range(ns0):
    work_sino.mat[i,:] = convolve(data_weight*sino.mat[i,:],rk,0)
print "filtering time: ",time.time()-t0
print"back projecting ..."
t0 = time.time()
work_sino.weighted_back_project_to(fbp_image,fov=1.)
print "backprojection time: ",time.time()-t0
print "fbp image available in fbp_image.mat, and reference discretized phantom is in image.mat"
print '''
example imshow command:
imshow(fbp_image.mat.transpose()[::-1],vmin = 1.0,vmax=1.05,cmap=cm.gray,interpolation='nearest')'''
| {
"repo_name": "jakobsj/how_little_data",
"path": "largescale_code/fanbeam_fbp_test.py",
"copies": "1",
"size": "2625",
"license": "mit",
"hash": 6901355039078949000,
"line_mean": 21.6293103448,
"line_max": 103,
"alpha_frac": 0.6308571429,
"autogenerated": false,
"ratio": 2.401646843549863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35325039864498625,
"avg_score": null,
"num_lines": null
} |
"""Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Joel Nothman <joel.nothman@gmail.com>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
# Big-endian unsigned 32-bit hashes: big-endian byte order makes numpy's
# numeric sort/searchsorted order coincide with lexicographic bit-prefix order.
HASH_DTYPE = '>u4'
# Number of bits in one hash word (4 bytes * 8 = 32).
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
                               left_masks, right_masks):
    """Find the longest prefix match in tree for each query in bin_X
    Most significant bits are considered as the prefix.
    """
    # Vectorized binary search over prefix *length*: for each query,
    # [lo, hi) brackets the still-plausible lengths and res holds the
    # longest length at which a match has been observed.
    hi = np.empty_like(bin_X, dtype=np.intp)
    hi.fill(hash_size)
    lo = np.zeros_like(bin_X, dtype=np.intp)
    res = np.empty_like(bin_X, dtype=np.intp)
    # First probe at the full hash length: exact matches finish at once.
    left_idx, right_idx = _find_matching_indices(tree, bin_X,
                                                 left_masks[hi],
                                                 right_masks[hi])
    found = right_idx > left_idx
    res[found] = lo[found] = hash_size
    r = np.arange(bin_X.shape[0])
    kept = r[lo < hi]  # indices remaining in bin_X mask
    # Each iteration halves the interval for every unresolved query.
    while kept.shape[0]:
        mid = (lo.take(kept) + hi.take(kept)) // 2
        left_idx, right_idx = _find_matching_indices(tree,
                                                     bin_X.take(kept),
                                                     left_masks[mid],
                                                     right_masks[mid])
        found = right_idx > left_idx
        mid_found = mid[found]
        # A match at length mid: an even longer prefix may match, raise lo.
        lo[kept[found]] = mid_found + 1
        res[kept[found]] = mid_found
        # No match at length mid: longest match is strictly shorter.
        hi[kept[~found]] = mid[~found]
        kept = r[lo < hi]
    return res
class ProjectionToHashMixin(object):
    """Convert the output of a real-valued projection into packed bit hashes."""

    @staticmethod
    def _to_hash(projected):
        if projected.shape[1] % 8 != 0:
            raise ValueError('Require reduced dimensionality to be a multiple '
                             'of 8 for hashing')
        # The sign of each projected coordinate becomes one bit; packbits
        # groups the bits into bytes, which are then reinterpreted as
        # big-endian 32-bit words (HASH_DTYPE).
        # XXX: perhaps non-copying operation better
        sign_bits = (projected > 0).astype(int)
        hashed = np.packbits(sign_bits).view(dtype=HASH_DTYPE)
        return hashed.reshape(projected.shape[0], -1)

    def fit_transform(self, X, y=None):
        self.fit(X)
        return self.transform(X)

    def transform(self, X):
        projected = super(ProjectionToHashMixin, self).transform(X)
        return self._to_hash(projected)
class GaussianRandomProjectionHash(ProjectionToHashMixin,
                                   GaussianRandomProjection):
    """Use GaussianRandomProjection to produce a cosine LSH fingerprint"""

    def __init__(self, n_components=32, random_state=None):
        # Forward both knobs to GaussianRandomProjection; the mixin turns
        # the projection output into packed hashes.
        super(GaussianRandomProjectionHash, self).__init__(
            n_components=n_components, random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
    """Performs approximate nearest neighbor search using LSH forest.
    LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
    method for vanilla approximate nearest neighbor search methods.
    LSH forest data structure has been implemented using sorted
    arrays and binary search and 32 bit fixed-length hashes.
    Random projection is used as the hash family which approximates
    cosine distance.
    The cosine distance is defined as ``1 - cosine_similarity``: the lowest
    value is 0 (identical point) but it is bounded above by 2 for the farthest
    points. Its value does not depend on the norm of the vector points but
    only on their relative angles.
    Parameters
    ----------
    n_estimators : int (default = 10)
        Number of trees in the LSH Forest.
    radius : float, optinal (default = 1.0)
        Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
    n_candidates : int (default = 50)
        Minimum number of candidates evaluated per estimator, assuming enough
        items meet the `min_hash_match` constraint.
    n_neighbors : int (default = 5)
        Number of neighbors to be returned from query function when
        it is not provided to the :meth:`kneighbors` method.
    min_hash_match : int (default = 4)
        lowest hash length to be searched when candidate selection is
        performed for nearest neighbors.
    radius_cutoff_ratio : float, optional (default = 0.9)
        A value ranges from 0 to 1. Radius neighbors will be searched until
        the ratio between total neighbors within the radius and the total
        candidates becomes less than this value unless it is terminated by
        hash length reaching `min_hash_match`.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Attributes
    ----------
    hash_functions_ : list of GaussianRandomProjectionHash objects
        Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
        stored in GaussianRandomProjectionHash object and can be obtained
        from ``components_`` attribute.
    trees_ : array, shape (n_estimators, n_samples)
        Each tree (corresponding to a hash function) contains an array of
        sorted hashed values. The array representation may change in future
        versions.
    original_indices_ : array, shape (n_estimators, n_samples)
        Original indices of sorted hashed values in the fitted index.
    References
    ----------
    .. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
           Indexes for Similarity Search", WWW '05 Proceedings of the
           14th international conference on World Wide Web,  651-660,
           2005.
    Examples
    --------
    >>> from sklearn.neighbors import LSHForest
    >>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
    >>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
    >>> lshf = LSHForest(random_state=42)
    >>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
    LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
              n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
              random_state=42)
    >>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
    >>> distances # doctest: +ELLIPSIS
    array([[0.069..., 0.149...],
           [0.229..., 0.481...],
           [0.004..., 0.014...]])
    >>> indices
    array([[1, 2],
           [2, 0],
           [4, 0]])
    """
    def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
                 n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
                 random_state=None):
        # Parameters are stored verbatim (sklearn convention: no validation
        # in __init__ so that get_params/set_params round-trip).
        self.n_estimators = n_estimators
        self.radius = radius
        self.random_state = random_state
        self.n_candidates = n_candidates
        self.n_neighbors = n_neighbors
        self.min_hash_match = min_hash_match
        self.radius_cutoff_ratio = radius_cutoff_ratio
        # Estimator deprecated since scikit-learn 0.19.
        warnings.warn("LSHForest has poor performance and has been deprecated "
                      "in 0.19. It will be removed in version 0.21.",
                      DeprecationWarning)
    def _compute_distances(self, query, candidates):
        """Computes the cosine distance.
        Distance is from the query to points in the candidates array.
        Returns argsort of distances in the candidates
        array and sorted distances.
        """
        if candidates.shape == (0,):
            # needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
            # NOTE(review): np.int is removed in NumPy >= 1.24; this line
            # would need np.intp/int on modern NumPy.
            return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
        if sparse.issparse(self._fit_X):
            candidate_X = self._fit_X[candidates]
        else:
            candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
        distances = pairwise_distances(query, candidate_X,
                                       metric='cosine')[0]
        distance_positions = np.argsort(distances)
        distances = distances.take(distance_positions, mode='clip', axis=0)
        return distance_positions, distances
    def _generate_masks(self):
        """Creates left and right masks for all hash lengths."""
        tri_size = MAX_HASH_SIZE + 1
        # Called once on fitting, output is independent of hashes
        # Row h of left_mask keeps the h most significant bits; right_mask
        # is its mirror and fills the remaining low bits.
        left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
        right_mask = left_mask[::-1, ::-1]
        self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
        self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
    def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
        """Performs the Synchronous ascending phase.
        Returns an array of candidates, their distance ranks and
        distances.
        """
        index_size = self._fit_X.shape[0]
        # Number of candidates considered including duplicates
        # XXX: not sure whether this is being calculated correctly wrt
        # duplicates from different iterations through a single tree
        n_candidates = 0
        candidate_set = set()
        min_candidates = self.n_candidates * self.n_estimators
        # Ascend: shorten the matched prefix (max_depth) one bit at a time,
        # gathering matching entries from every tree, until enough
        # candidates are collected or min_hash_match is reached.
        while (max_depth > self.min_hash_match and
               (n_candidates < min_candidates or
                len(candidate_set) < n_neighbors)):
            left_mask = self._left_mask[max_depth]
            right_mask = self._right_mask[max_depth]
            for i in range(self.n_estimators):
                start, stop = _find_matching_indices(self.trees_[i],
                                                     bin_queries[i],
                                                     left_mask, right_mask)
                n_candidates += stop - start
                candidate_set.update(
                    self.original_indices_[i][start:stop].tolist())
            max_depth -= 1
        candidates = np.fromiter(candidate_set, count=len(candidate_set),
                                 dtype=np.intp)
        # For insufficient candidates, candidates are filled.
        # Candidates are filled from unselected indices uniformly.
        if candidates.shape[0] < n_neighbors:
            warnings.warn(
                "Number of candidates is not sufficient to retrieve"
                " %i neighbors with"
                " min_hash_match = %i. Candidates are filled up"
                " uniformly from unselected"
                " indices." % (n_neighbors, self.min_hash_match))
            remaining = np.setdiff1d(np.arange(0, index_size), candidates)
            to_fill = n_neighbors - candidates.shape[0]
            candidates = np.concatenate((candidates, remaining[:to_fill]))
        ranks, distances = self._compute_distances(query,
                                                   candidates.astype(int))
        return (candidates[ranks[:n_neighbors]],
                distances[:n_neighbors])
    def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
        """Finds radius neighbors from the candidates obtained.
        Their distances from query are smaller than radius.
        Returns radius neighbors and distances.
        """
        ratio_within_radius = 1
        threshold = 1 - self.radius_cutoff_ratio
        total_candidates = np.array([], dtype=int)
        total_neighbors = np.array([], dtype=int)
        total_distances = np.array([], dtype=float)
        # Ascend as above, but stop when the fraction of new candidates
        # falling inside the radius drops below the cutoff ratio.
        while (max_depth > self.min_hash_match and
               ratio_within_radius > threshold):
            left_mask = self._left_mask[max_depth]
            right_mask = self._right_mask[max_depth]
            candidates = []
            for i in range(self.n_estimators):
                start, stop = _find_matching_indices(self.trees_[i],
                                                     bin_queries[i],
                                                     left_mask, right_mask)
                candidates.extend(
                    self.original_indices_[i][start:stop].tolist())
            candidates = np.setdiff1d(candidates, total_candidates)
            total_candidates = np.append(total_candidates, candidates)
            ranks, distances = self._compute_distances(query, candidates)
            m = np.searchsorted(distances, radius, side='right')
            # Merge the in-radius portion into the running, distance-sorted
            # accumulation via positional insert.
            positions = np.searchsorted(total_distances, distances[:m])
            total_neighbors = np.insert(total_neighbors, positions,
                                        candidates[ranks[:m]])
            total_distances = np.insert(total_distances, positions,
                                        distances[:m])
            ratio_within_radius = (total_neighbors.shape[0] /
                                   float(total_candidates.shape[0]))
            max_depth = max_depth - 1
        return total_neighbors, total_distances
    def fit(self, X, y=None):
        """Fit the LSH forest on the data.
        This creates binary hashes of input data points by getting the
        dot product of input points and hash_function then
        transforming the projection into a binary string array based
        on the sign (positive/negative) of the projection.
        A sorted array of binary hashes is created.
        Parameters
        ----------
        X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        self : object
        """
        self._fit_X = check_array(X, accept_sparse='csr')
        # Creates a g(p,x) for each tree
        self.hash_functions_ = []
        self.trees_ = []
        self.original_indices_ = []
        rng = check_random_state(self.random_state)
        int_max = np.iinfo(np.int32).max
        for i in range(self.n_estimators):
            # This is g(p,x) for a particular tree.
            # Builds a single tree. Hashing is done on an array of data points.
            # `GaussianRandomProjection` is used for hashing.
            # `n_components=hash size and n_features=n_dim.
            hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
                                                 rng.randint(0, int_max))
            hashes = hasher.fit_transform(self._fit_X)[:, 0]
            original_index = np.argsort(hashes)
            bin_hashes = hashes[original_index]
            self.original_indices_.append(original_index)
            self.trees_.append(bin_hashes)
            self.hash_functions_.append(hasher)
        self._generate_masks()
        return self
    def _query(self, X):
        """Performs descending phase to find maximum depth."""
        # Calculate hashes of shape (n_samples, n_estimators, [hash_size])
        bin_queries = np.asarray([hasher.transform(X)[:, 0]
                                  for hasher in self.hash_functions_])
        bin_queries = np.rollaxis(bin_queries, 1)
        # descend phase
        depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
                                             self._left_mask, self._right_mask)
                  for tree, tree_queries in zip(self.trees_,
                                                np.rollaxis(bin_queries, 1))]
        return bin_queries, np.max(depths, axis=0)
    def kneighbors(self, X, n_neighbors=None, return_distance=True):
        """Returns n_neighbors of approximate nearest neighbors.
        Parameters
        ----------
        X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single query.
        n_neighbors : int, optional (default = None)
            Number of neighbors required. If not provided, this will
            return the number specified at the initialization.
        return_distance : boolean, optional (default = True)
            Returns the distances of neighbors if set to True.
        Returns
        -------
        dist : array, shape (n_samples, n_neighbors)
            Array representing the cosine distances to each point,
            only present if return_distance=True.
        ind : array, shape (n_samples, n_neighbors)
            Indices of the approximate nearest points in the population
            matrix.
        """
        if not hasattr(self, 'hash_functions_'):
            raise ValueError("estimator should be fitted.")
        if n_neighbors is None:
            n_neighbors = self.n_neighbors
        X = check_array(X, accept_sparse='csr')
        neighbors, distances = [], []
        bin_queries, max_depth = self._query(X)
        # One ascend phase per query row; X[[i]] keeps a 2-D single-row view.
        for i in range(X.shape[0]):
            neighs, dists = self._get_candidates(X[[i]], max_depth[i],
                                                 bin_queries[i],
                                                 n_neighbors)
            neighbors.append(neighs)
            distances.append(dists)
        if return_distance:
            return np.array(distances), np.array(neighbors)
        else:
            return np.array(neighbors)
    def radius_neighbors(self, X, radius=None, return_distance=True):
        """Finds the neighbors within a given radius of a point or points.
        Return the indices and distances of some points from the dataset
        lying in a ball with size ``radius`` around the points of the query
        array. Points lying on the boundary are included in the results.
        The result points are *not* necessarily sorted by distance to their
        query point.
        LSH Forest being an approximate method, some true neighbors from the
        indexed dataset might be missing from the results.
        Parameters
        ----------
        X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single query.
        radius : float
            Limiting distance of neighbors to return.
            (default is the value passed to the constructor).
        return_distance : boolean, optional (default = False)
            Returns the distances of neighbors if set to True.
        Returns
        -------
        dist : array, shape (n_samples,) of arrays
            Each element is an array representing the cosine distances
            to some points found within ``radius`` of the respective query.
            Only present if ``return_distance=True``.
        ind : array, shape (n_samples,) of arrays
            Each element is an array of indices for neighbors within ``radius``
            of the respective query.
        """
        if not hasattr(self, 'hash_functions_'):
            raise ValueError("estimator should be fitted.")
        if radius is None:
            radius = self.radius
        X = check_array(X, accept_sparse='csr')
        neighbors, distances = [], []
        bin_queries, max_depth = self._query(X)
        for i in range(X.shape[0]):
            neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
                                                       bin_queries[i], radius)
            neighbors.append(neighs)
            distances.append(dists)
        if return_distance:
            # Object arrays: per-query result lengths differ.
            return _array_of_arrays(distances), _array_of_arrays(neighbors)
        else:
            return _array_of_arrays(neighbors)
    def partial_fit(self, X, y=None):
        """
        Inserts new data into the already fitted LSH Forest.
        Cost is proportional to new total size, so additions
        should be batched.
        Parameters
        ----------
        X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
            New data point to be inserted into the LSH Forest.
        """
        X = check_array(X, accept_sparse='csr')
        if not hasattr(self, 'hash_functions_'):
            return self.fit(X)
        if X.shape[1] != self._fit_X.shape[1]:
            raise ValueError("Number of features in X and"
                             " fitted array does not match.")
        n_samples = X.shape[0]
        n_indexed = self._fit_X.shape[0]
        for i in range(self.n_estimators):
            bin_X = self.hash_functions_[i].transform(X)[:, 0]
            # gets the position to be added in the tree.
            positions = self.trees_[i].searchsorted(bin_X)
            # adds the hashed value into the tree.
            self.trees_[i] = np.insert(self.trees_[i],
                                       positions, bin_X)
            # add the entry into the original_indices_.
            self.original_indices_[i] = np.insert(self.original_indices_[i],
                                                  positions,
                                                  np.arange(n_indexed,
                                                            n_indexed +
                                                            n_samples))
        # adds the entry into the input_array.
        if sparse.issparse(X) or sparse.issparse(self._fit_X):
            self._fit_X = sparse.vstack((self._fit_X, X))
        else:
            self._fit_X = np.row_stack((self._fit_X, X))
        return self
| {
"repo_name": "BiaDarkia/scikit-learn",
"path": "sklearn/neighbors/approximate.py",
"copies": "4",
"size": "22450",
"license": "bsd-3-clause",
"hash": 7759991538327436000,
"line_mean": 39.7441016334,
"line_max": 79,
"alpha_frac": 0.5792427617,
"autogenerated": false,
"ratio": 4.3173076923076925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 551
} |
"""Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Joel Nothman <joel.nothman@gmail.com>
import warnings
import numpy as np
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
from ..utils import check_random_state
from ..utils.validation import check_array
__all__ = ["LSHForest"]
# Big-endian unsigned 32-bit hashes: big-endian byte order makes numpy's
# numeric sort/searchsorted order coincide with lexicographic bit-prefix order.
HASH_DTYPE = '>u4'
# Number of bits in one hash word (4 bytes * 8 = 32).
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
                               left_masks, right_masks):
    """Find the longest prefix match in tree for each query in bin_X
    Most significant bits are considered as the prefix.
    """
    # Vectorized binary search over prefix *length*: for each query,
    # [lo, hi) brackets the still-plausible lengths and res holds the
    # longest length at which a match has been observed.
    hi = np.empty_like(bin_X, dtype=np.intp)
    hi.fill(hash_size)
    lo = np.zeros_like(bin_X, dtype=np.intp)
    res = np.empty_like(bin_X, dtype=np.intp)
    # First probe at the full hash length: exact matches finish at once.
    left_idx, right_idx = _find_matching_indices(tree, bin_X,
                                                 left_masks[hi],
                                                 right_masks[hi])
    found = right_idx > left_idx
    res[found] = lo[found] = hash_size
    r = np.arange(bin_X.shape[0])
    kept = r[lo < hi]  # indices remaining in bin_X mask
    # Each iteration halves the interval for every unresolved query.
    while kept.shape[0]:
        mid = (lo.take(kept) + hi.take(kept)) // 2
        left_idx, right_idx = _find_matching_indices(tree,
                                                     bin_X.take(kept),
                                                     left_masks[mid],
                                                     right_masks[mid])
        found = right_idx > left_idx
        mid_found = mid[found]
        # A match at length mid: an even longer prefix may match, raise lo.
        lo[kept[found]] = mid_found + 1
        res[kept[found]] = mid_found
        # No match at length mid: longest match is strictly shorter.
        hi[kept[~found]] = mid[~found]
        kept = r[lo < hi]
    return res
class ProjectionToHashMixin(object):
    """Convert the output of a real-valued projection into packed bit hashes."""

    @staticmethod
    def _to_hash(projected):
        if projected.shape[1] % 8 != 0:
            raise ValueError('Require reduced dimensionality to be a multiple '
                             'of 8 for hashing')
        # The sign of each projected coordinate becomes one bit; packbits
        # groups the bits into bytes, which are then reinterpreted as
        # big-endian 32-bit words (HASH_DTYPE).
        # XXX: perhaps non-copying operation better
        sign_bits = (projected > 0).astype(int)
        hashed = np.packbits(sign_bits).view(dtype=HASH_DTYPE)
        return hashed.reshape(projected.shape[0], -1)

    def fit_transform(self, X, y=None):
        self.fit(X)
        return self.transform(X)

    def transform(self, X, y=None):
        projected = super(ProjectionToHashMixin, self).transform(X)
        return self._to_hash(projected)
class GaussianRandomProjectionHash(ProjectionToHashMixin,
                                   GaussianRandomProjection):
    """Use GaussianRandomProjection to produce a cosine LSH fingerprint"""

    def __init__(self, n_components=8, random_state=None):
        # Forward both knobs to GaussianRandomProjection; the mixin turns
        # the projection output into packed hashes.
        super(GaussianRandomProjectionHash, self).__init__(
            n_components=n_components, random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
radius : float, optinal (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
space to use by default for the :meth`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranges from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest(random_state=42)
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=42)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components=hash size and n_features=n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
n_neighbors : int, opitonal (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/scikit-learn-master/sklearn/neighbors/approximate.py",
"copies": "1",
"size": "22392",
"license": "mit",
"hash": 98454337719433730,
"line_mean": 39.7868852459,
"line_max": 79,
"alpha_frac": 0.5789121115,
"autogenerated": false,
"ratio": 4.311958405545927,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00009478467063283185,
"num_lines": 549
} |
''' Approximate the strategy oddments for 2 person zero-sum games of perfect information.
Applies the iterative solution method described by J.D. Williams in his classic
book, The Compleat Strategyst, ISBN 0-486-25101-2. See chapter 5, page 180 for details. '''
from operator import add, neg
def solve(payoff_matrix, iterations=100):
    """Return the oddments (mixed strategy ratios) for a given payoff matrix.

    Implements Williams' iterative approximation (fictitious play): each
    round the row player plays the row with the greatest cumulative payoff
    and the column player answers with the column of least cumulative loss.
    The play counts approximate the optimal mixed-strategy oddments.

    Parameters:
        payoff_matrix: non-empty 2-D sequence of numbers, payoffs to the
            row player.
        iterations: number of fictitious-play rounds (default 100); more
            iterations give a better approximation.

    Returns:
        (rowcnt, colcnt, value_of_game) -- play counts per row, play counts
        per column, and the estimated value of the game.

    Raises:
        ValueError: if payoff_matrix is empty or has empty rows.

    NOTE: the original relied on Python 2 semantics (`xrange`, and `zip`/
    `map` returning lists); this version runs identically under both
    Python 2 and Python 3.
    """
    if not payoff_matrix or not payoff_matrix[0]:
        raise ValueError('payoff_matrix must be a non-empty 2-D matrix')
    transpose = list(zip(*payoff_matrix))
    numrows = len(payoff_matrix)
    numcols = len(transpose)
    row_cum_payoff = [0] * numrows
    col_cum_payoff = [0] * numcols
    colpos = list(range(numcols))
    # Row positions are negated so that ties in `max` below resolve to the
    # smallest row index, mirroring how `min` resolves column ties.
    rowpos = [-i for i in range(numrows)]
    colcnt = [0] * numcols
    rowcnt = [0] * numrows
    active = 0  # row whose pure strategy is played this round
    for _ in range(iterations):
        rowcnt[active] += 1
        col_cum_payoff = [p + c for p, c in
                          zip(payoff_matrix[active], col_cum_payoff)]
        # Column player answers with the column of least cumulative loss.
        active = min(zip(col_cum_payoff, colpos))[1]
        colcnt[active] += 1
        row_cum_payoff = [p + r for p, r in
                          zip(transpose[active], row_cum_payoff)]
        # Row player answers with the row of greatest cumulative payoff.
        active = -max(zip(row_cum_payoff, rowpos))[1]
    value_of_game = (max(row_cum_payoff) + min(col_cum_payoff)) / 2.0 / iterations
    return rowcnt, colcnt, value_of_game
###########################################
# Example solutions to two pay-off matrices.
# The call form print(...) is used because the Python 2 `print expr`
# statement is a SyntaxError under Python 3; with a single parenthesized
# argument the output is identical on both versions.
print(solve([[2, 3, 1, 4], [1, 2, 5, 4], [2, 3, 4, 1], [4, 2, 2, 2]]))  # Example on page 185
print(solve([[4, 0, 2], [6, 7, 1]]))  # Exercise 2 number 3
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/496825_Game_theory_payoff_matrix_solver/recipe-496825.py",
"copies": "1",
"size": "1456",
"license": "mit",
"hash": 55806182586348190,
"line_mean": 41.8235294118,
"line_max": 93,
"alpha_frac": 0.6153846154,
"autogenerated": false,
"ratio": 3.1515151515151514,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9195058473996978,
"avg_score": 0.014368258583634694,
"num_lines": 34
} |
"""Approximating spectral functions with tensor networks.
"""
import numpy as np
import random
import quimb as qu
from .tensor_gen import MPO_rand, MPO_zeros_like
def construct_lanczos_tridiag_MPO(A, K, v0=None, initial_bond_dim=None,
                                  beta_tol=1e-6, max_bond=None, seed=False,
                                  v0_opts=None, k_min=10):
    """Lanczos tridiagonalization of an operator given in MPO form.

    Generator that runs the Lanczos three-term recurrence with ``A`` acting
    on MPO 'vectors', compressing after every step, and yields successively
    longer estimates of the tridiagonal coefficients (e.g. for approximating
    spectral functions of ``A``).

    Parameters
    ----------
    A : MPO-like operator
        Must provide ``nsites``, ``phys_dim()``, ``dtype`` and ``apply()``.
    K : int
        Maximum number of Lanczos iterations.
    v0 : MPO, optional
        Starting vector; if None a random MPO is generated. It is
        normalized before use.
    initial_bond_dim : int, optional
        Bond dimension of the random starting MPO (defaults to 8).
    beta_tol : float, optional
        Terminate once the off-diagonal coefficient falls below this.
    max_bond : int, optional
        Maximum bond dimension kept when compressing (defaults to 8).
    seed : bool, optional
        If True, seed the RNG from the system entropy source first.
    v0_opts : optional
        Not used in this function -- presumably kept for signature
        compatibility with sibling ``construct_lanczos_tridiag_*``
        routines; TODO confirm.
    k_min : int, optional
        Minimum iteration count before intermediate results are yielded.

    Yields
    ------
    (alpha, beta, scaling) : (ndarray, ndarray, float)
        Diagonal coefficients ``alpha[1:j+1]``, off-diagonal coefficients
        ``beta[2:j+2]`` and the weight ``beta[1]**2 / bsz`` at step ``j``.
    """
    if initial_bond_dim is None:
        initial_bond_dim = 8
    if max_bond is None:
        max_bond = 8
    if v0 is None:
        if seed:
            # needs to be truly random so MPI processes don't overlap
            qu.seed_rand(random.SystemRandom().randint(0, 2**32 - 1))
        V = MPO_rand(A.nsites, initial_bond_dim,
                     phys_dim=A.phys_dim(), dtype=A.dtype)
    else:  # normalize the supplied starting vector
        V = v0 / (v0.H @ v0)**0.5
    Vm1 = MPO_zeros_like(V)
    # alpha/beta are 1-indexed to match the usual Lanczos recurrence notation.
    alpha = np.zeros(K + 1)
    beta = np.zeros(K + 2)
    # Total Hilbert-space size of the operator.
    bsz = A.phys_dim()**A.nsites
    beta[1] = bsz  # == sqrt(prod(A.shape))
    compress_kws = {'max_bond': max_bond, 'method': 'svd'}
    for j in range(1, K + 1):
        # Three-term recurrence: Vt = A V - beta_j V_{j-1} - alpha_j V_j,
        # compressing after each addition to cap the bond dimension.
        Vt = A.apply(V, compress=True, **compress_kws)
        Vt.add_MPO(-beta[j] * Vm1, inplace=True, compress=True, **compress_kws)
        alpha[j] = (V.H @ Vt).real
        Vt.add_MPO(-alpha[j] * V, inplace=True, compress=True, **compress_kws)
        beta[j + 1] = (Vt.H @ Vt)**0.5
        # check for convergence
        if abs(beta[j + 1]) < beta_tol:
            yield alpha[1:j + 1], beta[2:j + 2], beta[1]**2 / bsz
            break
        Vm1 = V.copy()
        V = Vt / beta[j + 1]
        if j >= k_min:
            # Yield copies so the caller may keep them while iteration continues.
            yield (np.copy(alpha[1:j + 1]),
                   np.copy(beta[2:j + 2]),
                   np.copy(beta[1])**2 / bsz)
| {
"repo_name": "jcmgray/quijy",
"path": "quimb/tensor/tensor_approx_spectral.py",
"copies": "1",
"size": "1751",
"license": "mit",
"hash": -2138345272080495600,
"line_mean": 28.1833333333,
"line_max": 79,
"alpha_frac": 0.5134209023,
"autogenerated": false,
"ratio": 2.9478114478114477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8961232350111448,
"avg_score": 0,
"num_lines": 60
} |
# Approximation algorithms for joint partition and prediction (JointPartPred)
import numpy as np
from copy import copy
from scipy.sparse import csr_matrix
from snpp.utils import nonzero_edges, predict_signs_using_partition
from snpp.utils.signed_graph import matrix2graph
from snpp.utils.status import Status
def iterative_approach(g, T, k,
                       graph_partition_f,
                       budget_allocation_f,
                       solve_maxbalance_f,
                       graph_partition_kwargs=None,
                       budget_allocation_kwargs=None,
                       solve_maxbalance_kwargs=None,
                       truth=None,
                       perform_last_partition=True):
    """Iteratively partition the graph and predict signs of target edges.

    Alternates between partitioning ``g`` into ``k`` groups and predicting
    the signs of a budgeted subset of the remaining target edges, feeding
    each round's predictions back into ``g``, until every target edge has
    been assigned a sign.

    Params:
    g: networkx.Graph (**mutable** -- predicted edges are added to it)
    T: target edge set (set of edges, (i, j))
       the i, j order doesn't matter because it's undirected
    k: partition number
    graph_partition_f: method for graph partitioning
    budget_allocation_f: budget allocation method
    solve_maxbalance_f: method for approximating the max balance problem
    truth: set of (i, j, s), the ground truth for targets
        for debugging purpose
    perform_last_partition: if True, partition once more after all targets
        are predicted, so the returned partition reflects every prediction

    Returns:
    C: partition, partition label array, 1xn
    predictions: list of (i, j, sign)
    status: (only when ``truth`` is given) a Status object with
        per-iteration predictions, accuracy and partitions
    """
    # BUG FIX: the previous version used mutable default arguments ({}) and
    # mutated solve_maxbalance_kwargs below, leaking 'edge2true_sign' across
    # calls. Normalize to fresh dicts here instead.
    if graph_partition_kwargs is None:
        graph_partition_kwargs = {}
    if budget_allocation_kwargs is None:
        budget_allocation_kwargs = {}
    # Copied because 'edge2true_sign' may be inserted below -- never mutate
    # the caller's dict (nor a shared default).
    solve_maxbalance_kwargs = dict(solve_maxbalance_kwargs or {})

    T = set(T)
    remaining_targets = set(T)

    if truth:
        edge2true_sign = {}
        for n1, n2, v in truth:
            assert v != 0  # ground truth must carry a definite sign
            edge2true_sign[(n1, n2)] = v
        solve_maxbalance_kwargs['edge2true_sign'] = edge2true_sign

    status = Status()
    iter_n = 0
    all_predictions = []
    while len(remaining_targets) > 0:
        iter_n += 1
        print('iteration={}, #remaining targets={}'.format(
            iter_n, len(remaining_targets)))

        print("graph partitioning...")
        C = graph_partition_f(g, k,
                              **graph_partition_kwargs)
        B = budget_allocation_f(C, g, iter_n, **budget_allocation_kwargs)

        print("solving max_balance")
        predictions = solve_maxbalance_f(g, C, B, T=remaining_targets,
                                         **solve_maxbalance_kwargs)
        all_predictions += predictions
        remaining_targets -= set((i, j) for i, j, _ in predictions)
        # Feed the new predictions back so the next partition can use them.
        g.add_edges_from((i, j, {'weight': 1, 'sign': s})
                         for i, j, s in predictions)

        if truth:
            acc = len(truth.intersection(set(all_predictions))) / len(all_predictions)
            print('Accuracy on {} predictions is {}'.format(
                len(all_predictions), acc
            ))
            status.update(predictions, acc, C)

    if perform_last_partition:
        C = graph_partition_f(g, k,
                              **graph_partition_kwargs)

    if truth:
        return C, all_predictions, status
    else:
        return C, all_predictions
def single_run_approach(g, T, k,
                        graph_partition_f,
                        graph_partition_kwargs={}):
    """Non-iterative baseline: partition the graph once and predict every
    target edge's sign directly from that single partition.
    """
    partition = graph_partition_f(g, k, **graph_partition_kwargs)
    predictions = predict_signs_using_partition(partition, targets=T)
    return partition, predictions
| {
"repo_name": "xiaohan2012/snpp",
"path": "snpp/cores/joint_part_pred.py",
"copies": "1",
"size": "3218",
"license": "mit",
"hash": 3355528034924686000,
"line_mean": 31.18,
"line_max": 86,
"alpha_frac": 0.5515848353,
"autogenerated": false,
"ratio": 4.1576227390180875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006925400045075502,
"num_lines": 100
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.