text string | size int64 | token_count int64 |
|---|---|---|
"""Add some sphinx-docutils directives related to lessons.
"""
# pylint: disable=E701
import os
from docutils import nodes
from docutils.parsers.rst.directives.admonitions \
import Admonition as AdmonitionDirective
from sphinx.util.docutils import SphinxDirective
from sphinx.util.logging import getLogger
from . import __version__
LOG = getLogger(__name__)
# Each directive below injects an explicit title heading, so the theme
# does not have to generate one via CSS.
class _BaseCRDirective(AdmonitionDirective, SphinxDirective):
    """Common machinery for the CodeRefinery lesson directives.

    Subclasses only need to exist (optionally setting ``title_text`` or
    ``extra_classes``); the directive and CSS class name are derived
    from the class name itself via :meth:`cssname`.
    """
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = True
    extra_classes = []

    @classmethod
    def cssname(cls):
        """Return the CSS class name and Sphinx directive name.

        Derived by stripping the 'Directive' suffix from the class name,
        lowercasing, and replacing '_' with '-'.
        """
        bare_name = cls.__name__.split('Directive')[0]
        return bare_name.lower().replace('_', '-')

    def run(self):
        """Run the stock Admonition directive, then attach our CSS classes.

        The Admonition base class requires a title.  When the directive
        was given no title argument, fall back to the class-level
        ``title_text`` (used by directives whose title used to be added
        at the CSS level) or a title-cased version of the directive name.
        """
        name = self.cssname()
        self.node_class = nodes.admonition
        if not self.arguments:
            self.arguments = [getattr(self, 'title_text', name.title())]
        # Run the upstream directive, then tag the produced node.
        result = super().run()
        css_classes = result[0].attributes['classes']
        css_classes.append(name)
        css_classes.extend(self.extra_classes)
        return result
# Concrete lesson directives.  Each class below becomes both a Sphinx
# directive and a CSS class named after it (see _BaseCRDirective.cssname);
# setup() registers every *Directive subclass automatically.
class CalloutDirective(_BaseCRDirective): pass
class ChallengeDirective(_BaseCRDirective): pass
class ChecklistDirective(_BaseCRDirective): pass
class DiscussionDirective(_BaseCRDirective): pass
class KeypointsDirective(_BaseCRDirective): pass
class ObjectivesDirective(_BaseCRDirective): pass
class PrereqDirective(_BaseCRDirective):
    # Explicit title (the automatic default would be "Prereq").
    title_text = "Prerequisites"
class SolutionDirective(_BaseCRDirective):
    extra_classes = ['dropdown']  # 'toggle-shown' = visible by default
class TestimonialDirective(_BaseCRDirective): pass
class OutputDirective(_BaseCRDirective):
    title_text = 'Output'
class QuestionsDirective(_BaseCRDirective): pass
class Instructor_NoteDirective(_BaseCRDirective):
    # The underscore becomes a hyphen in the directive name: "instructor-note".
    title_text = "Instructor note"
# The following (commented-out) translator-based approach also works; kept for reference.
# from sphinx.writers.html5 import HTML5Translator
# def visit_node(self, node):
# #import pdb ; pdb.set_trace()
# node.attributes['classes'] += [node.__class__.__name__]
# self.visit_admonition(node)
# Add our custom CSS to the headers.
def init_static_path(app):
    """Register this package's bundled ``_static`` directory with Sphinx.

    Connected to the 'builder-inited' event so the path is appended after
    the configuration has been loaded.
    """
    here = os.path.dirname(__file__)
    static_path = os.path.abspath(os.path.join(here, '_static'))
    app.config.html_static_path.append(static_path)
def setup(app):
    """Sphinx extension entry point: register directives, CSS and events."""
    app.setup_extension('myst_nb')
    # Auto-register every concrete *Directive subclass in this module.
    for attr_name, attr in globals().items():
        if not attr_name.endswith('Directive'):
            continue
        if not issubclass(attr, _BaseCRDirective):
            continue
        if attr_name.startswith('_'):
            continue
        app.add_directive(attr.cssname(), attr)
    # Make the bundled static directory findable, then pull in our
    # stylesheet.  (Technique borrowed from sphinx-copybutton.)
    app.connect('builder-inited', init_static_path)
    app.add_css_file("sphinx_lesson.css")
    return {
        'version': __version__,
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
| 3,968 | 1,197 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A simple TFIDF text matcher and function to run it."""
import pickle
import random
from typing import Optional

from absl import logging
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import tensorflow.compat.v2 as tf
import tqdm

from language.serene import fever_pb2
from language.serene import types
class TextMatcher:
  """A simple TFIDF text matcher.

  Wraps sklearn's TfidfVectorizer: train() fits the vocabulary, then
  score()/predict() rank candidate texts against a claim.
  """

  def __init__(
      self,
      ngram_range=(1, 2), min_df=2, max_df=.9):
    """Init parameters for text matcher.

    For details, refer to
    https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html
    since the parameters are used in calling that.

    Args:
      ngram_range: Tuple of n-grams to use (e.g., unigram and bigram)
      min_df: Min required document frequency before excluding from vocab
      max_df: Max allowed document frequency before excluding from vocab
    """
    # Forward reference so evaluating the annotation does not require the
    # sklearn class object at attribute-annotation time.
    self._tfidf: Optional['TfidfVectorizer'] = None
    self._ngram_range = ngram_range
    self._min_df = min_df
    self._max_df = max_df

  def train(self, sentences):
    """Fit the TFIDF vocabulary on an iterable of sentences."""
    self._tfidf = TfidfVectorizer(
        ngram_range=self._ngram_range,
        min_df=self._min_df, max_df=self._max_df)
    self._tfidf.fit(sentences)

  def _similarity_matrix(self, claim, candidates, text_key):
    """Return a (1, n_candidates) array of claim-vs-candidate scores."""
    # (1, vocab_size)
    claim_repr = self._tfidf.transform([claim])
    # (n_candidates, vocab_size)
    candidates_repr = self._tfidf.transform([c[text_key] for c in candidates])
    # (1, n_candidates)
    return candidates_repr.dot(claim_repr.T).T.toarray()

  def score(self,
            claim,
            candidates,
            text_key='text'):
    """Return the score for each candidate, order does not change.

    Args:
      claim: The claim to match
      candidates: The candidates to rank
      text_key: Key in the candidate json that contains the text to score

    Returns:
      The score for each candidate

    Raises:
      ValueError: If no model has been trained or loaded yet.
    """
    if self._tfidf is None:
      raise ValueError('You must train or load a model before predicting')
    if not candidates:
      return []
    product = self._similarity_matrix(claim, candidates, text_key)
    return product.reshape(-1).tolist()

  def predict(
      self,
      claim, candidates,
      text_key='text'):
    """Scores claim against candidates and returns ordered candidates.

    Args:
      claim: The claim to match
      candidates: The candidates to rank
      text_key: Key in the candidate json that contains the text to score

    Returns:
      (score, candidate) pairs sorted by decreasing score.

    Raises:
      ValueError: If no model has been trained or loaded yet.
    """
    if self._tfidf is None:
      raise ValueError('You must train or load a model before predicting')
    if not candidates:
      return []
    # Make candidates indexable via numpy-style indices.  BUGFIX: the
    # deprecated np.object alias was removed in NumPy 1.24; the builtin
    # `object` dtype is the supported spelling.
    candidates = np.array(candidates, dtype=object)
    product = self._similarity_matrix(claim, candidates, text_key)
    # Row 0 is the only row: the scores of every candidate vs. the claim.
    preds = (-product).argsort(axis=1)[0]
    scores = -np.sort(-product, axis=1)[0]
    return list(zip(scores, candidates[preds]))

  def save(self, data_dir):
    """Pickle the fitted vectorizer and its parameters to `data_dir`."""
    if self._tfidf is None:
      raise ValueError('Attempted to save nonexistent model')
    with tf.io.gfile.GFile(data_dir, 'wb') as f:
      pickle.dump({
          'tfidf': self._tfidf,
          'ngram_range': self._ngram_range,
          'min_df': self._min_df,
          'max_df': self._max_df,
      }, f)

  def load(self, data_dir):
    """Restore a model previously written by save()."""
    with tf.io.gfile.GFile(data_dir, 'rb') as f:
      params = pickle.load(f)
    self._tfidf = params['tfidf']
    self._ngram_range = params['ngram_range']
    self._min_df = params['min_df']
    self._max_df = params['max_df']
| 4,696 | 1,506 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
from setuptools import setup
CURRENT_DIR = Path(__file__).parent
def get_long_description():
    """Return the contents of README.md (UTF-8) for the long description."""
    readme_md = CURRENT_DIR / "README.md"
    return readme_md.read_text(encoding="utf8")
# Package metadata for the `eksi` command-line Ekşisözlük client.
setup(
    name="eksi",
    version="0.0.1",
    description="Komut satırında Ekşisözlük!",
    long_description=get_long_description(),
    long_description_content_type="text/markdown",
    keywords=["ekşisözlük", "ekşi", "sözlük"],
    author="Furkan Önder",
    author_email="furkanonder@protonmail.com",
    url="https://github.com/furkanonder/EksiGundem/",
    license="MIT",
    python_requires=">=3.0.0",
    py_modules=["eksi"],
    packages=[],
    zip_safe=False,
    include_package_data=True,
    # BUGFIX: dropped "bs4" -- it is only a dummy PyPI distribution that
    # re-installs beautifulsoup4, which is already listed here.
    install_requires=["beautifulsoup4", "colorama", "lxml"],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    entry_points={"console_scripts": ["eksi=eksi:eksi"]},
)
| 1,099 | 406 |
class bcolors:
    # ANSI SGR escape codes used to colour terminal output.
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # bright blue
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def happy_face():
    """Print a small green smiling face (used for success messages)."""
    print(' ')
    print(bcolors.OKGREEN+' ^ ^ '+bcolors.ENDC)
    print(bcolors.OKGREEN+' \__/ '+bcolors.ENDC)
    print(' ')
def confused_face():
    """Print a small yellow confused face (used for warnings)."""
    print(' ')
    print(bcolors.WARNING+' o O '+bcolors.ENDC)
    print(bcolors.WARNING+' ---- '+bcolors.ENDC)
    print(' ')
def sad_face():
    """Print a small red sad face (used for errors)."""
    print(' ')
    print(bcolors.FAIL+' > < '+bcolors.ENDC)
    print(bcolors.FAIL+' ---- '+bcolors.ENDC)
    print(' ')
def show_message(text):
    """Print `text` inside an ASCII speech bubble, then a matching face.

    The text is word-wrapped to 27-character bubble lines.  The face is
    chosen from the message prefix: 'WARNING:' / 'Guess later then' get
    a confused face, 'ERROR:' a sad face, anything else a happy face.
    """
    # split up string so it fits into lines (27 character per lines)
    words = text.split(' ')
    lines = []
    while len(words) > 0:
        line = ''
        for word in words:
            if len(line)+len(word) > 27:
                if line == '':
                    # BUGFIX: a single word longer than the bubble width
                    # previously made this loop spin forever, because no
                    # word was ever consumed; hard-split such words.
                    line = word[:27]
                    words[0] = word[27:]
                break
            else:
                line = line+word+' '
                words = words[1:]
        # Pad the line to the full bubble width.
        while len(line) < 27:
            line = line + ' '
        lines.append(line)
    print(' ')
    print(' ')
    print(' ')
    print(bcolors.HEADER+' /-------------------------------\ '+bcolors.ENDC)
    for line in lines:
        print(bcolors.HEADER+' | '+bcolors.ENDC +
              line+bcolors.HEADER+' | '+bcolors.ENDC)
    print(bcolors.HEADER+' \ /-----------------------/ '+bcolors.ENDC)
    print(bcolors.HEADER+' \ / '+bcolors.ENDC)
    print(bcolors.HEADER+' \---/ '+bcolors.ENDC)
    if text.startswith('WARNING:') or text.startswith('Guess later then'):
        confused_face()
    elif text.startswith('ERROR:'):
        sad_face()
    else:
        happy_face()
def show_messages(list_messages):
    """Show every message in order, pausing for Enter after each one."""
    prompt = bcolors.WARNING+"Press Enter to continue..."+bcolors.ENDC
    for message in list_messages:
        show_message(message)
        input(prompt)
def set_secret(json_secrets, later_then_message, message, str_level_0, str_level_1=None, str_level_2=None, str_level_3=None):
    """Prompt for one secret and store it at the given (nested) key path.

    The deepest provided str_level_N selects how far down json_secrets
    the value is written (e.g. level_0..level_2 given writes to
    json_secrets[l0][l1][l2]).  An empty answer stores None and shows
    `later_then_message`.  Returns the (mutated) json_secrets.
    """
    levels = [str_level_0, str_level_1, str_level_2, str_level_3]
    # Deepest truthy level wins, mirroring the original if/elif chain.
    depth = None
    for index, level in enumerate(levels):
        if level:
            depth = index
    if depth is None:
        # No key path given at all: nothing to prompt for.
        return json_secrets
    show_message(message)
    # Walk down to the container holding the leaf key.
    container = json_secrets
    for key in levels[:depth]:
        container = container[key]
    leaf = levels[depth]
    container[leaf] = input()
    if not container[leaf]:
        container[leaf] = None
        show_message(later_then_message)
    return json_secrets
def set_secrets(json_secrets, later_then_message, str_set_what):
    """Interactively fill the secrets under json_secrets[str_set_what.upper()].

    Entries that are None are prompted for directly; entries that already
    hold a value are treated as nested mappings and every sub-entry is
    prompted for.  An empty answer stores None, shows later_then_message,
    and stops prompting for that group (break).  Returns the mutated dict.
    """
    location = str_set_what.upper()
    for parameter in json_secrets[location]:
        if json_secrets[location][parameter] == None:
            show_message(
                'Please enter your '+parameter+' for '+str_set_what+' (or add it later and press Enter now)')
            json_secrets[location][parameter] = input()
            if not json_secrets[location][parameter]:
                json_secrets[location][parameter] = None
                show_message(later_then_message)
                break
        elif json_secrets[location][parameter] != None:
            # NOTE(review): this assumes a non-None value is a nested dict;
            # a plain string here would be iterated character by character —
            # confirm against the SECRETS.json schema.
            for sub_paramter in json_secrets[location][parameter]:
                show_message(
                    'Please enter your '+parameter+' '+sub_paramter+' for '+str_set_what+' (or add it later and press Enter now)')
                json_secrets[location][parameter][sub_paramter] = input()
                if not json_secrets[location][parameter][sub_paramter]:
                    json_secrets[location][parameter][sub_paramter] = None
                    show_message(later_then_message)
                    break
    return json_secrets
| 4,596 | 1,550 |
"""Module for reading the SETTINGS.json file.
"""
import json
import os
with open(os.path.dirname(os.path.realpath(__file__)) + '/SETTINGS.json') as data_file:
PATHS = json.load(data_file)
TRAIN_DATA_PATH = PATHS["TRAIN_DATA_PATH"]
TEST_DATA_PATH = PATHS["VALIDATE_DATA_PATH"]
PKL_TRAIN_DATA_PATH = PATHS["PKL_TRAIN_DATA_PATH"]
PKL_TEST_DATA_PATH = PATHS["PKL_VALIDATE_DATA_PATH"]
MODEL_PATH = PATHS["MODEL_PATH"]
SUBMISSION_PATH = PATHS["SUBMISSION_PATH"]
LOGS_PATH = PATHS["LOGS_PATH"]
INTERMEDIATE_PREDICTIONS_PATH = PATHS["INTERMEDIATE_PREDICTIONS_PATH"]
TEMP_FILES_PATH = PATHS["TEMP_FILES_PATH"]
TRAIN_PATIENT_IDS = PATHS["TRAIN_PATIENT_IDS"]
TEST_PATIENT_IDS = PATHS["TEST_PATIENT_IDS"]
SUBMISSION_NR = PATHS["SUBMISSION_NR"]
ENSEMBLE_WEIGHTS_PATH = PATHS["ENSEMBLE_WEIGHTS_PATH"]
| 795 | 344 |
# what not to do - use empty function
# race conditions may cause it not to finish right away
# but the timeout will catch it
# UPDATE: solved in d9c4fad
import sys
sys.path.append('..')  # make the local pykron package importable from this folder
from pykron.core import Pykron, PykronLogger
import time

app = Pykron()

# Asynchronous request with a deliberately small timeout (0.5 s); the
# timeout is what catches the call if it never completes.
@app.AsyncRequest(timeout=0.5)
def fun4():
    return 1

logger = PykronLogger.getInstance()
# Block until the async call finishes (or times out) and print the result.
result = fun4().wait_for_completed()
print('result',result)
app.close()
| 433 | 150 |
import sys, os, itertools, operator
import datetime
import shapely
from shapely.geometry import asShape as geoj2geom
from shapely.geometry import mapping as geom2geoj
import rtree
from . import loader
class Feature:
    """A single table row plus its geometry.

    NOTE(review): this module is Python-2 era; __getitem__/__setitem__
    reference the py2-only name `unicode` and will NameError on py3.
    """
    def __init__(self, table, row, geometry):
        "geometry must be a geojson dictionary or a shapely geometry instance"
        self._table = table
        self.row = list(row)
        # GeoJSON dicts are converted; shapely geometries are kept as-is.
        if isinstance(geometry, dict): geometry = geoj2geom(geometry)
        self.geometry = geometry  # maybe need to copy geometry?
        self._cached_bbox = None
    def __getitem__(self, i):
        # Accept either a positional index or a field name.
        if isinstance(i, (str,unicode)):
            i = self._table.fields.index(i)
        return self.row[i]
    def __setitem__(self, i, setvalue):
        if isinstance(i, (str,unicode)):
            i = self._table.fields.index(i)
        self.row[i] = setvalue
    @property
    def bbox(self):
        # Lazily computed from the geometry and cached.
        if not self._cached_bbox:
            self._cached_bbox = self.geometry.bounds
        return self._cached_bbox
    def copy(self):
        # BUGFIX: the old code passed a 4th `bbox` argument that __init__
        # does not accept, so copy() always raised TypeError.  The cached
        # bbox is carried over explicitly instead.
        new = Feature(self._table, self.row, self.geometry)
        new._cached_bbox = self._cached_bbox
        return new
class GeoTable:
    """In-memory table of Features with an rtree spatial index.

    NOTE(review): this class is Python-2 era code (itertools.izip,
    `unicode`) and several methods reference names that do not exist in
    this module (saver, stats, Table, MISSING, self.rows,
    self.create_spindex, feat.geom).  Those methods raise at call time
    and need auditing before use.
    """
    def __init__(self, filepath=None):
        # Load fields/rows/geometries from file, or start empty.
        if filepath:
            fields,rows,geometries = loader.from_file(filepath)
        else:
            fields,rows,geometries = [],[],[]
        self.fields = fields
        # NOTE(review): itertools.izip is Python 2 only (zip on Python 3).
        self.features = [Feature(self,row,geom) for row,geom in itertools.izip(rows,geometries)]
        self.create_spatial_index()
    def __len__(self):
        return len(self.features)
    def __iter__(self):
        for feat in self.features:
            yield feat
    def __getitem__(self, i):
        """
        Get one or more Features of data.
        """
        return self.features[i]
    @property
    def bbox(self):
        # Union of all feature bounding boxes.
        # NOTE(review): feat.bbox is (xmin, ymin, xmax, ymax); unpacking
        # into (xmins, xmaxs, ymins, ymaxs) mislabels the middle two
        # columns, so xmax/ymin below mix x and y values — verify.
        xmins, xmaxs, ymins, ymaxs = itertools.izip(*(feat.bbox for feat in self))
        xmin, xmax = min(xmins), max(xmaxs)
        ymin, ymax = min(ymins), max(ymaxs)
        bbox = (xmin, ymin, xmax, ymax)
        return bbox
    ###### SPATIAL INDEXING #######
    def create_spatial_index(self):
        # Index entries are feature positions in self.features.
        self.spindex = rtree.index.Index()
        i = 0
        for feat in self:
            self.spindex.insert(i, feat.bbox)#, obj=feat)
            i += 1
    def intersecting(self, bbox):
        # NOTE(review): rtree intersection() yields plain ints unless
        # objects=True was passed, so `item.id` likely fails — confirm.
        results = self.spindex.intersection(bbox)
        return (self[item.id] for item in results)
    def nearest(self, bbox):
        # NOTE(review): same `item.id` concern as intersecting().
        results = self.spindex.nearest(bbox)
        return (self[item.id] for item in results)
    ###### GENERAL #######
    def save(self, savepath, **kwargs):
        # NOTE(review): `saver` is not defined or imported in this module.
        fields = self.fields
        rowgeoms = ((feat.row,feat.geometry) for feat in self)
        rows, geometries = itertools.izip(*rowgeoms)
        saver.to_file(fields, rows, geometries, savepath, **kwargs)
    def copy(self):
        new = GeoTable()
        new.fields = [field for field in self.fields]
        # NOTE(review): Feature has no .geom attribute and its __init__
        # takes 3 args; .bbox is a read-only property; and the method
        # create_spindex() does not exist (create_spatial_index does).
        new.features = [Feature(new,feat.row,feat.geom,feat.bbox) for feat in self.features]
        new.bbox = self.bbox
        new.create_spindex()
        return new
    ###### FIELDS #######
    def addfield(self, field):
        # NOTE(review): MISSING is undefined and self.rows does not exist
        # (row data lives on self.features).
        self.fields.append(field)
        for row in self.rows:
            row.append(MISSING)
    def keepfields(self, *fields):
        # TODO: not implemented.
        pass
    def dropfields(self, *fields):
        # TODO: not implemented.
        pass
    ###### SELECT #######
    def iter_select(self, query):
        "return a generator of True False for each row's query result"
        # SECURITY(review): exec/eval of a caller-supplied query string —
        # never feed untrusted input to the select/exclude/compute methods.
        # MAYBE ALSO ADD SUPPORT FOR SENDING A TEST FUNCTION
        for row in self:
            # make fields into vars
            for field in self.fields:
                value = row[self.fields.index(field)]
                if isinstance(value, (unicode,str)):
                    value = '"""'+str(value).replace('"',"'")+'"""'
                elif isinstance(value, (int,float)):
                    value = str(value)
                code = "%s = %s"%(field,value)
                exec(code)
            # run and retrieve query value
            yield eval(query)
    def select(self, query):
        # NOTE(review): copy() takes no copyrows argument and GeoTable has
        # no append(); this method appears unfinished.
        outtable = self.copy(copyrows=False)
        for row,keep in zip(self,self.iter_select(query)):
            if keep:
                outtable.append(row)
        return outtable
    def exclude(self, query):
        # NOTE(review): `Table` is undefined in this module.
        outtable = Table()
        for row,drop in zip(self,self.iter_select(query)):
            if not drop:
                outtable.append(row)
        return outtable
    ###### GROUP #######
    def split(self, splitfields):
        """
        Sharp/distinct groupings.
        """
        # NOTE(review): relies on self.rows and copy(copyrows=...) which
        # do not exist on this class.
        fieldindexes = [self.fields.index(field) for field in splitfields]
        temprows = sorted(self.rows, key=operator.itemgetter(*fieldindexes))
        for combi,rows in itertools.groupby(temprows, key=operator.itemgetter(*fieldindexes) ):
            table = self.copy(copyrows=False)
            table.rows = list(rows)
            table.name = str(combi)
            yield table
    def aggregate(self, groupfields, fieldmapping=[]):
        """
        ...choose to aggregate into a summary value, OR into multiple fields (maybe not into multiple fields, for that use to_fields() afterwards...
        ...maybe make flexible, so aggregation can be on either unique fields, or on an expression or function that groups into membership categories (if so drop membership() method)...
        """
        # NOTE(review): `stats` is undefined; indexing `columns` assumes
        # zip() returns a list (Python 2); self.rows does not exist.
        if fieldmapping: aggfields,aggtypes = zip(*fieldmapping)
        aggfunctions = dict([("count",len),
                             ("sum",sum),
                             ("max",max),
                             ("min",min),
                             ("average",stats.average),
                             ("median",stats.median),
                             ("stdev",stats.stdev),
                             ("most common",stats.most_common),
                             ("least common",stats.least_common) ])
        outtable = self.copy(copyrows=False)
        fieldindexes = [self.fields.index(field) for field in groupfields]
        temprows = sorted(self.rows, key=operator.itemgetter(*fieldindexes))
        for combi,rows in itertools.groupby(temprows, key=operator.itemgetter(*fieldindexes) ):
            if not isinstance(combi, tuple):
                combi = tuple([combi])
            # first the groupby values
            newrow = list(combi)
            # then the aggregation values
            if fieldmapping:
                columns = zip(*rows)
                selectcolumns = [columns[self.fields.index(field)] for field in aggfields]
                for aggtype,values in zip(aggtypes,selectcolumns):
                    aggfunc = aggfunctions[aggtype]
                    aggvalue = aggfunc(values)
                    newrow.append(aggvalue)
            outtable.append(newrow)
        outtable.fields = groupfields
        if fieldmapping: outtable.fields.extend(aggfields)
        return outtable
    ###### CREATE #######
    def compute(self, fieldname, expression, query=None):
        # NOTE: queries and expressions currently do not validate
        # that value types are of the same kind, eg querying if a number
        # is bigger than a string, so may lead to weird results or errors.
        # NOTE(review): under Python 3, exec() cannot rebind function
        # locals, so `result` below would be undefined — py2-only code.
        if not fieldname in self.fields:
            self.addfield(fieldname)
        expression = "result = %s" % expression
        for row in self:
            # make fields into vars
            for field in self.fields:
                value = row[self.fields.index(field)]
                if isinstance(value, (unicode,str)):
                    value = '"""'+str(value).replace('"',"'")+'"""'
                elif isinstance(value, (int,float)):
                    value = str(value)
                code = "%s = %s"%(field,value)
                exec(code)
            # run and retrieve expression value
            if not query or (eval(query) == True):
                exec(expression)
                row[self.fields.index(fieldname)] = result
        return self
    ###### CONNECT #######
    def join(self, othertable, query):
        """
        ...
        """
        # TODO: not implemented.
        pass
    def relate(self, othertable, query):
        """maybe add a .relates attribute dict to each row,
        with each relate dict entry being the unique tablename of the other table,
        containing another dictionary with a "query" entry for that relate,
        and a "links" entry with a list of rows pointing to the matching rows in the other table.
        """
        # TODO: not implemented.
        pass
| 8,563 | 2,472 |
#!/usr/bin/env python3
"""
day10-2021.py - my solution to day 10 of advent of code 2021.
the link to the problem is:
https://adventofcode.com/2021/day/10
use by running `./aoc-day10-2021.py [input]`
this code was originally posted here:
https://gist.github.com/fivegrant/8e451be44b89ddcfe63e46532bf18821
"""
# Snag Data \ stole this from my day 9 code
import sys

# Input file path comes from the first CLI argument.
data_path = sys.argv[1]
with open(data_path) as f:
    raw_data = f.readlines()
# Strip trailing newlines from every line.
data = [x.strip("\n") for x in raw_data]
def inverted(mapping):
    """Return a new dict with keys and values swapped."""
    return dict((value, key) for key, value in mapping.items())
# Opening bracket -> its matching closing bracket.
pairs = {
    "{": "}",
    "[": "]",
    "(": ")",
    "<": ">"
}
# Syntax-error score per unexpected closing character (part I).
point_values = {
    "}": 1197,
    "]": 57,
    ")": 3,
    ">": 25137
}
# Autocomplete score per unmatched opener (part II).
auto_values = {
    "{": 3,
    "[": 2,
    "(": 1,
    "<": 4
}
class Stack:
    """Walks one chunk line, tracking unmatched openers in `pile`."""

    def __init__(self, string):
        self.pile = ""
        self.string = string
        self.position = 0

    def step(self):
        """Consume one character.

        Returns 0 on normal progress, -1 when the string is exhausted,
        or the syntax-error point value of an unexpected closer.
        """
        if self.position >= len(self.string):
            return -1
        current = self.string[self.position]
        if current in pairs:
            # Opening bracket: push it onto the pile.
            self.pile += current
            self.position += 1
            return 0
        if pairs[self.pile[-1]] == current:
            # Closer matches the most recent opener: pop it.
            self.pile = self.pile[:-1]
            self.position += 1
            return 0
        # Corrupted line: score the offending character.
        return point_values[current]

    def autocomplete(self):
        """Score the completion string for an incomplete line."""
        status = 0
        while status == 0:
            status = self.step()
        total = 0
        # Remaining openers, innermost first, base-5 accumulation.
        for opener in reversed(self.pile):
            total = total * 5 + auto_values[opener]
        return total
def score(line):
    """Return the corruption score of `line` (0 if merely incomplete)."""
    stack = Stack(line)
    status = 0
    while status == 0:
        status = stack.step()
    return 0 if status == -1 else status
# Score every line: nonzero => corrupted, 0 => merely incomplete.
corrupted = [score(x) for x in data]
incomplete = [data[i] for i in range(len(corrupted)) if corrupted[i] == 0]
# Part I
print(f'part i: {sum(corrupted)}')
# Part II
# The answer is the median autocomplete score of the incomplete lines.
completed = sorted([Stack(x).autocomplete() for x in incomplete])
print(f'part ii: {completed[len(completed)//2]}')
| 1,970 | 757 |
# Copyright(c) 2019-2020 Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# pylint: disable-all
import matplotlib.pyplot as plt
import math
import pickle
import numpy as np
def dot(l1=[], l2=[]):
    """Return the dot product of two equal-length vectors."""
    total = 0
    for idx in range(len(l1)):
        total += l1[idx] * l2[idx]
    return total
# Function to compute 2-norm of a list
def norm(l1=[]):
    """Return the Euclidean (2-)norm of the vector."""
    squared_sum = 0
    for value in l1:
        squared_sum += value ** 2
    return math.sqrt(squared_sum)
# Function to compute cosine similarity index of two lists
def cosine_similarity(l1=[], l2=[]):
    """Return the cosine similarity of two vectors, rounded to 4 places.

    Returns 0 when either vector has zero magnitude.
    """
    # Dot product and norms computed inline (formerly helper calls).
    dot_product = 0
    for idx in range(len(l1)):
        dot_product += l1[idx] * l2[idx]
    denom = math.sqrt(sum(v ** 2 for v in l1)) * math.sqrt(sum(v ** 2 for v in l2))
    if denom != 0:
        return round(float(dot_product) / denom, 4)
    return 0
def hammingSimilarity(l1=[], l2=[]):
    """Return 1 - (fraction of positions where the vectors differ), 2 dp."""
    nsensors = len(l1)
    mismatches = 0
    for idx in range(nsensors):
        if l1[idx] != l2[idx]:
            mismatches += 1
    # Normalise by the full sensor count (not only non-zero positions).
    return round(1 - float(mismatches) / nsensors, 2)
def jaccardSimilarity(l1=[], l2=[]):
    """Jaccard similarity over (position, value) pairs, rounded to 4 dp.

    Comparison length follows l1 (trailing l2 entries are ignored).
    """
    pairs1 = set()
    pairs2 = set()
    for idx in range(len(l1)):
        pairs1.add((idx, l1[idx]))
        pairs2.add((idx, l2[idx]))
    intersection_size = len(pairs1 & pairs2)
    union_size = len(pairs1 | pairs2)
    return round(intersection_size / float(union_size), 4)
def computeSimilarity(l1, l2):
    """Similarity metric used throughout this module: Jaccard.

    Kept as a single switch point so the metric can be swapped (e.g. for
    cosine_similarity) in one place.
    """
    return jaccardSimilarity(l1, l2)
def findPrediction(SImatrix_gamma, nACh=1, pThreshold=0.75):
    """ computes the correct classifications

    Each test presentation spans 10 gamma rows per ACh level; after
    10 * nACh rows a prediction is committed and the accumulators reset.
    Returns (pValues, AChCnt, pValuesNaive) where 'x' marks below-threshold
    presentations.
    """
    pValues = []
    pValuesNaive = []
    maxSI = 0
    maxSIindex = 'x'
    maxSInaive = 0
    maxSInaiveIndex = 'x'
    k = 0
    gammaIndex = 0
    AChCnt = [0] * nACh  # counts number of correct classifications at each ACh level
    for i in range(0, len(SImatrix_gamma)):
        for j in range(0, len(SImatrix_gamma[i])):
            if (SImatrix_gamma[i][j] > maxSI):
                maxSI = SImatrix_gamma[i][j]
                maxSIindex = j
                AChID = k // 10
                # NOTE(review): the naive best match is tracked during the
                # first gamma cycle of each presentation only.
                if (gammaIndex == 0):
                    maxSInaive = SImatrix_gamma[i][j]
                    maxSInaiveIndex = j
        k += 1
        if (k == 10 * nACh):
            # One full presentation consumed: commit predictions.
            if (maxSI >= pThreshold):
                pValues.append(maxSIindex)
                AChCnt[AChID] += 1
            else:
                pValues.append('x')
            # if(maxSInaive>=pThreshold):
            if (maxSInaive >= pThreshold):
                pValuesNaive.append(maxSInaiveIndex)
            else:
                pValuesNaive.append('x')
            maxSI = 0
            maxSIindex = 'x'
            maxSInaive = 0
            maxSInaiveIndex = 'x'
            k = 0
        gammaIndex += 1
        if (gammaIndex == 10):
            gammaIndex = 0
    return pValues, AChCnt, pValuesNaive
def computeClassification(pValues, nTestPerOdor, nodors):
    """Count predictions that match their expected odor id.

    pValues holds nTestPerOdor consecutive predictions per odor;
    `nodors` is unused but kept for interface compatibility.
    """
    correct = 0
    expected_odor = 0
    seen = 0
    for prediction in pValues:
        if prediction == expected_odor:
            correct += 1
        seen += 1
        if seen == nTestPerOdor:
            # Move on to the next odor's block of predictions.
            expected_odor += 1
            seen = 0
    return correct
def computeResults(nGammaPerTraining, trainingSetSize, testSetSize,
                   nsensors=72, verbose=False, gammaCode=None,
                   similarityThreshold=0.75):
    """evaluates the performance of the EPL network

    gammaCode: precedence codes per gamma cycle; if None they are loaded
    from a pickle file.  Returns the classification accuracy (percent).
    """
    nodors = trainingSetSize
    nNoiseLevels = 1
    # NOTE(review): true division — nTestPerOdor is a float here and is
    # only int()-converted at the slicing/printing sites below.
    nTestPerOdor = testSetSize/nodors
    nACh = 1
    precedenceCodeLearned = []
    # this stores results from 1st gamma cycle to measure performance with naive representation
    precedenceCodeNaive = []
    if gammaCode is None:
        # NOTE(review): '/.spikes.pi' points at the filesystem root —
        # presumably a typo for './spikes.pi'; confirm before relying on it.
        pickedfilename = '/.spikes.pi'
        rf = open(pickedfilename, 'rb')
        precedenceCodeGamma = pickle.load(rf)
        rf.close()
    else:
        precedenceCodeGamma = gammaCode
    # Find learned precedence codes
    for i in range(0, nodors):
        # labelGamma = 2 * i * 2 * 5 + 2 * 5 # labeling period
        labelGamma = nGammaPerTraining*nodors + 5*2*i
        precedenceCodeNaive.append(precedenceCodeGamma[labelGamma])
        labelGamma = labelGamma + 4  # last gamma cycle of label
        # -1 because first gamma missing in simulation
        precedenceCodeLearned.append(precedenceCodeGamma[labelGamma])
    # Compute similarity of test odors to learned odors at every gamma
    # -1 because first gamma in simulation is missing
    testThetaStart = nGammaPerTraining*nodors + 5*2*nodors
    SImatrix_gamma = []
    SImatrix_gammaNaive = []
    gammaIndex = 0
    # for i in range(testThetaStart, len(precedenceCodeGamma)-1):
    for i in range(testThetaStart, len(precedenceCodeGamma)):
        similarityIndices = []
        similarityIndicesNaive = []
        for k in range(0, nodors):
            # SI = cosine_similarity(precedenceCodeGamma[i], precedenceCodeLearned[k])
            if (gammaIndex < 5):
                SI = computeSimilarity(precedenceCodeGamma[i],
                                       precedenceCodeLearned[k])
                similarityIndices.append(SI)
                # SInaive = cosine_similarity(precedenceCodeGamma[i], precedenceCodeNaive[k])
                SInaive = computeSimilarity(precedenceCodeGamma[i],
                                            precedenceCodeNaive[k])
                similarityIndicesNaive.append(SInaive)
            else:
                # Later gamma cycles within each theta are ignored (score 0).
                similarityIndices.append(0)
                similarityIndicesNaive.append(0)
        gammaIndex += 1
        if (gammaIndex == 10):
            gammaIndex = 0
        SImatrix_gamma.append(similarityIndices)
        SImatrix_gammaNaive.append(similarityIndicesNaive)
    # Printing (debug hooks, intentionally no-ops)
    for i in precedenceCodeGamma:
        # print(i[0:10])
        pass
    if verbose:
        for i in SImatrix_gamma:
            print(i)
            pass
    # Find predictions and compute classification of EPL results
    pValues, AChCnt, pValuesNaive = findPrediction(SImatrix_gamma, nACh=nACh,
                                                   pThreshold=similarityThreshold)
    for i in range(0, len(pValues)):
        # print(pValues[i])
        pass
    percentCorrect = []
    percentCorrectNaive = []
    for i in range(0, nNoiseLevels):
        indexStart = int(nodors * nTestPerOdor * i)
        indexEnd = int(indexStart + nodors * nTestPerOdor)
        percentCorrect.append(
            computeClassification(pValues[indexStart:indexEnd], nTestPerOdor,
                                  nodors=nodors))
        percentCorrectNaive.append(
            computeClassification(pValuesNaive[indexStart:indexEnd],
                                  nTestPerOdor, nodors=nodors))
    # Convert raw correct counts into percentages.
    for i in range(0, len(percentCorrect)):
        percentCorrect[i] = 100 * round(
            percentCorrect[i] / float(nodors * nTestPerOdor), 2)
    # Printing info
    print("*****Execution Report*****")
    print("{} patterns presented. {} test samples for each pattern".format(
        nodors, int(nTestPerOdor)))
    print("""Classification performance = {}%; for similarity threshold = {}
""".format(percentCorrect[0], similarityThreshold))
    return percentCorrect[0]
| 9,264 | 3,071 |
import os
import uvicorn as uvicorn
import telegram_bot
import fastAPI_server.start_fast_api as fAPI
if __name__ == '__main__':
    # NOTE(review): this calls a module-level function literally named
    # __init__ in telegram_bot — confirm that is the intended entry point.
    telegram_bot.__init__()
    ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
    fAPI.init_elems(ROOT_DIR)
    # Serve the FastAPI app; lifespan="on" forces startup/shutdown events.
    uvicorn.run(fAPI.app, host='0.0.0.0', port=8000, lifespan="on")
| 315 | 132 |
import requests
import json
from DataBase import *
def getdaydata():
    """Fetch one year of daily quotes for stock 1000001 and store each row.

    BUGFIX: the URL used to be a triple-quoted string spanning two source
    lines, which embedded a literal newline plus indentation into the
    query string and corrupted the `fields` parameter.
    """
    durl = ('http://quotes.money.163.com/service/chddata.html'
            '?code=1000001&start=20200106&end=20210106'
            '&fields=TCLOSE;HIGH;LOW;TOPEN;LCLOSE;CHG;PCHG;TURNOVER;'
            'VOTURNOVER;VATURNOVER;TCAP;MCAP')
    r = requests.get(durl)
    data = r.text.splitlines()
    # Skip the CSV header row; insert the remaining rows oldest-first.
    for row in reversed(data[1:]):
        fields = row.split(',')  # split once instead of six times per row
        insertdayTB(fields[0], fields[2], fields[6],
                    fields[3], fields[5], fields[4])
# Fetch a stock's per-minute prices (every minute from 09:30 to 15:00) for one day and store them in the database.
# Only data from 09:30 up to the current time of day can be fetched; keep earlier data saved yourself.
def getmindata():
    """Fetch today's per-minute index prices and store them via insertminTB.

    Only data from 09:30 up to the current moment is available from the
    endpoint; persist earlier days yourself.
    """
    url = 'http://pdfm.eastmoney.com/EM_UBG_PDTI_Fast/api/js?rtntype=5&id=0000012&type=r&iscr=false'
    response = requests.get(url)
    # The endpoint returns JSONP: strip the wrapping parentheses first.
    payload = json.loads(response.text.strip('(').strip(')'))
    for entry in payload['data']:
        fields = entry.split(',')
        insertminTB(fields[0], fields[1])
if __name__=='__main__':
    # Only the per-minute crawl runs by default; call getdaydata() manually
    # to backfill the daily history.
    getmindata()
"""
Assign an annotation (and possibly a value) to a variable.
"""
from _ast import AnnAssign, Name
from typing import Optional
from src.compiler.Util import Util
from src.pyexpressions.abstract.PyExpression import PyExpression
from src.pyexpressions.concrete.PyName import PyName
from src.pyexpressions.highlevel.PyIdentifiable import PyIdentifiable
from src.scopes.objects.Type import Type
from src.scopes.objects.Variable import Variable
from src.structures.Errors import SyntaxSubsetError
from src.structures.TypeRenames import GENERIC_PYEXPR_TYPE
class PyAnnAssign(PyExpression, PyIdentifiable):
    """
    Expression for assigning a variable with an annotation
    (and possibly an initial value).
    """

    __type: PyName
    __value: Optional[PyExpression]

    def __init__(self, expression: AnnAssign, parent: GENERIC_PYEXPR_TYPE):
        super().__init__(expression, parent)
        # Target variable name.
        self.set_id(Util.get_attr(expression, "target.id"))
        # A type annotation is mandatory in our Python subset.
        annotation: Optional[Name] = Util.get_attr(expression, "annotation")
        if annotation is None:
            raise SyntaxSubsetError("missing type")
        self.__type = PyName(annotation, self)
        # An initial value is optional ("x: int" vs "x: int = 3").
        if expression.value:
            self.__value = self.from_ast(Util.get_attr(expression, "value"))
        else:
            self.__value = None
        # Register the variable in the nearest enclosing scope.
        signature = Variable(name=self.get_id(), type=Type(self.__type.get_id()))
        self.get_nearest_scope().declare_object(signature)

    def _transpile(self) -> str:
        """
        Transpile the operation to a string.
        """
        declaration = f"{self.__type.transpile()} {self.get_id()}"
        if self.__value:
            # Append the initializer only when one was provided.
            declaration += f" = {self.__value.transpile()}"
        return declaration
| 2,135 | 607 |
# -*- coding: utf-8 -*-
# Copyright 2021 Tampere University and VTT Technical Research Centre of Finland
# This software was developed as a part of the ProCemPlus project: https://www.senecc.fi/projects/procemplus
# This source code is licensed under the MIT license. See LICENSE in the repository root directory.
# Author(s): Otto Hylli <otto.hylli@tuni.fi> and Ville Heikkilä <ville.heikkila@tuni.fi>
'''
Helper functions used with tests.
'''
import csv
import io
import unittest
from typing import List, Dict
from falcon import testing
from LogReader.app import api
from LogReader.db.simulations import simIdAttr
class ApiTest(testing.TestCase):
    '''
    Base class for API tests: wires the falcon API instance into the test client.
    '''

    def setUp(self):
        # Let falcon's TestCase set itself up, then attach the app under test.
        super().setUp()
        self.app = api
def checkSimulations( test: unittest.TestCase, resultSims: List[dict], expectedSims: List[dict]):
    '''
    Verify that result and expected simulations match, compared by simulation id.

    test: Test case whose assert methods are used.
    resultSims: Simulations produced by the code under test.
    expectedSims: Simulations that should have been produced.
    '''
    checkItemsById(test, simIdAttr, resultSims, expectedSims)
def checkMessages( test: unittest.TestCase, resultMsgs: List[dict], expectedMsgs: List[dict] ):
    '''
    Verify that result and expected messages match, compared by their MessageId.
    '''
    checkItemsById(test, 'MessageId', resultMsgs, expectedMsgs)
def checkItemsById( test: unittest.TestCase, idAttr: str, result: List[dict], expected: List[dict] ):
    '''
    Compare result and expected items by their ids only, ignoring order.

    test: Test case whose assert methods are used.
    idAttr: Name of the attribute holding each item's id.
    result: Items produced by the code under test.
    expected: Items that should have been produced.
    '''
    resultIds = [item[idAttr] for item in result]
    expectedIds = [item[idAttr] for item in expected]
    # assertCountEqual ignores ordering but requires the same multiset of ids.
    test.assertCountEqual(resultIds, expectedIds, 'Did not get the expected items.')
def checkCsv( test: unittest.TestCase, result: str, expected: csv.DictReader, delimiter: str = ';' ):
    '''
    Check that result and expected csv contain the same data.
    test: Test case which uses this so we can use its assert methods.
    result: The resulting csv content as a string.
    expected: Reader for the expected csv content.
    delimiter: Delimiter used when parsing the result csv (default ';').
    '''
    # create a csv DictReader from result string.
    result = io.StringIO( result, newline = '' )
    # bug fix: honor the caller-supplied delimiter instead of the hard-coded ';'
    result = csv.DictReader( result, delimiter = delimiter )
    # check that both have the same column titles
    resultHeaders = set( result.fieldnames )
    expectedHeaders = set( expected.fieldnames )
    test.assertEqual( resultHeaders, expectedHeaders, 'Result and expected should have the same headers.' )
    # check the csvs line by line; line 1 is the header row
    line = 1
    for expectedRow in expected:
        line += 1
        try:
            resultRow = next( result )
        except StopIteration:
            test.fail( f'No more rows in result but was expecting a row containing: {expectedRow}.' )
        test.assertEqual( resultRow, expectedRow, f'Result and expected rows do not match on line {line}.' )
    # result should not have more rows
    with( test.assertRaises( StopIteration, msg = 'Result has more rows than expected.' )):
        next( result )
def getTestDataResultFileName( testName: str, scenarioName: str, actual: bool = False, fileType: str = 'json' ) -> str:
    '''
    Build the result-file name for a time series test scenario.
    actual=True names the actual results file, False the expected results file.
    fileType is the file extension without the dot.
    '''
    kind = 'actual_result' if actual else 'result'
    # Scenario names may contain spaces; file names should not.
    safeScenario = scenarioName.replace(' ', '_')
    return f'{testName}_{safeScenario}_{kind}.{fileType}'
# Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
"""Parse SQL statements."""
__version__ = '0.1.14'
# Setup namespace
from sqlparse import engine
from sqlparse import filters
from sqlparse import formatter
from sqlparse import lexer
from sqlparse import tokens as T
from sqlparse.engine import grouping
from sqlparse.parsers import SQLParser
# Deprecated in 0.1.5. Will be removed in 0.2.0
from sqlparse.exceptions import SQLParseError
def build_parsers():
    """Instantiate one parser per registered SQLParser subclass, keyed by dialect."""
    return {cls.dialect: cls() for cls in SQLParser.__subclasses__()}
_parsers = build_parsers()
def parse(sql, encoding=None, dialect=None):
    """Parse sql and return a tuple of statements.

    :param sql: A string containing one or more SQL statements.
    :param encoding: The encoding of the statement (optional).
    :param dialect: The sql engine dialect of the input sql statements.
      It only supports "mysql" right now. If dialect is not specified,
      the input sql will be parsed using the generic sql syntax. (optional)
    :returns: A tuple of :class:`~sqlparse.sql.Statement` instances.
    """
    return tuple(parsestream(sql, encoding, dialect))
def parsestream(stream, encoding=None, dialect=None):
    """Parse sql statements from a file-like object.

    :param stream: A file-like object.
    :param encoding: The encoding of the stream contents (optional).
    :param dialect: The sql engine dialect of the input sql statements.
      It only supports "mysql" right now. (optional)
    :returns: A generator of :class:`~sqlparse.sql.Statement` instances.
    """
    parser = _parsers.get(dialect)
    if parser is not None:
        return parser.parse(stream, encoding)
    raise Exception("Unable to find parser to parse dialect ({0})."
                    .format(dialect))
def format(sql, **options):
    """Format *sql* according to *options*.

    Available options are documented in :ref:`formatting`. The extra
    keyword "encoding" determines the encoding of the statement.
    :returns: The formatted SQL statement as string.
    """
    options = formatter.validate_options(options)
    encoding = options.pop('encoding', None)
    # Tokenize, run the case/truncation pre-processors, then the filter stack.
    token_stream = lexer.tokenize(sql, encoding)
    token_stream = _format_pre_process(token_stream, options)
    stack = formatter.build_filter_stack(engine.FilterStack(), options)
    stack.postprocess.append(filters.SerializerUnicode())
    return ''.join(stack.run(stmt) for stmt in split2(token_stream))
def _format_pre_process(stream, options):
    """Build the keyword/identifier case and truncation filters for *options*."""
    steps = []
    keyword_case = options.get('keyword_case')
    if keyword_case:
        steps.append(filters.KeywordCaseFilter(keyword_case))
    identifier_case = options.get('identifier_case')
    if identifier_case:
        steps.append(filters.IdentifierCaseFilter(identifier_case))
    if options.get('truncate_strings') is not None:
        steps.append(filters.TruncateStringFilter(
            width=options['truncate_strings'], char=options['truncate_char']))
    return _pre_process(stream, steps)
def _pre_process(stream, pre_processes):
if pre_processes:
for pre_process in pre_processes:
stream = pre_process.process(None, stream)
return stream
def split(sql, encoding=None):
    """Split *sql* into single statements.

    :param sql: A string containing one or more SQL statements.
    :param encoding: The encoding of the statement (optional).
    :returns: A list of strings.
    """
    stream = lexer.tokenize(sql, encoding)
    splitter = StatementFilter()
    stream = splitter.process(None, stream)
    # bug fix: `unicode` only exists on Python 2; fall back to `str` on
    # Python 3 so this module is importable and usable on both.
    try:
        to_text = unicode  # noqa: F821 -- Python 2 builtin
    except NameError:
        to_text = str
    return [to_text(stmt).strip() for stmt in stream]
from sqlparse.engine.filter import StatementFilter
def split2(stream):
    """Split an already-tokenized stream into a list of statement objects."""
    return list(StatementFilter().process(None, stream))
| 4,111 | 1,174 |
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# NOTE(review): `patterns()` with dotted-string view names is a pre-Django-1.10
# URLconf style; this module targets an old Django release.
urlpatterns = patterns('',
    # Login view
    url(r'^$', 'ghost.views.index_view'),
    url(r'^login/$', 'ghost.views.login_view'),
    # Logout view
    url(r'^logout/$', 'ghost.views.logout_view'),
    # New post view
    url(r'^newpost/$', 'ghost.views.newpost_view'),
    # Save a post ( Ajax view )
    url(r'^savepost/$', 'ghost.views.savepost'),
    # Options view
    url(r'^options/$', 'ghost.views.options_view'),
    # Save option values ( Ajax view )
    url(r'^saveoptions/$', 'ghost.views.saveoptions'),
    # Edit a post
    url(r'^editpost/(\d+)/$', 'ghost.views.editpost_view'),
    # Post list view
    url(r'^posts/$', 'ghost.views.posts_view'),
    # Post list view - with page number
    url(r'^posts/page/(\d+)/$', 'ghost.views.posts_view'),
    # Delete a post
    url(r'^deletepost/$', 'ghost.views.delete_post'),
    # Comment management view
    url(r'^comments/$', 'ghost.views.comments_view'),
    # Comment management view - with page number
    url(r'^comments/page/(\d+)/$', 'ghost.views.comments_view'),
    # Report spam ( Ajax view )
    url(r'^comments/reportspam/$', 'ghost.views.reportspam'),
    # Delete a comment
    url(r'^comments/delete/$', 'ghost.views.deletecomment'),
    # Qiniu file upload callback
    url(r'^upload/callback/$', 'ghost.views.uploadcallback'),
)
| 1,543 | 568 |
import os
import pandas as pd
from pandas import read_csv
import glob
from joblib import Parallel, delayed
import numpy as np
import argparse
def read_facescrub_img_list(folder, actor_label_txt, actress_label_txt, accept_pattern='*.jpg'):
    """Collect facescrub image paths and their bounding boxes.

    folder: root directory scanned with accept_pattern.
    actor_label_txt / actress_label_txt: tab-separated label files whose rows
        are keyed by sha256 (the image file's base name).
    Returns (full_names, bboxes): file paths and the matching bbox strings.
    """
    full_names = glob.glob(os.path.join(folder, accept_pattern))
    # bug fix: materialize as a list -- on Python 3 `map` is a one-shot lazy
    # iterator, which breaks np.asarray and repeated iteration downstream.
    only_names = [os.path.splitext(os.path.basename(f))[0] for f in full_names]
    pd_male = read_csv(actor_label_txt, sep='\t')
    del pd_male['url'], pd_male['image_id'], pd_male['face_id']
    pd_female = read_csv(actress_label_txt, sep='\t')
    del pd_female['url'], pd_female['image_id'], pd_female['face_id']
    pd_celeb = pd.concat([pd_male, pd_female], ignore_index=True)
    # keep='last' mirrors the original de-duplication policy on sha256.
    pd_celeb = pd_celeb.drop_duplicates(
        subset='sha256', keep='last').set_index('sha256')
    bboxes = [pd_celeb.bbox[name] for name in only_names]
    return full_names, bboxes
def perform_split(args):
    """Shuffle the facescrub file list and write train/val csv splits.

    args: argparse namespace with train_folder, actor_label_path,
        actress_label_path and train_split (fraction going to train).
    Writes tab-separated 'train.csv' and 'val.csv' in the working directory.
    """
    fnames, bboxes = read_facescrub_img_list(
        args.train_folder, args.actor_label_path, args.actress_label_path, accept_pattern='*/*.jpg')
    # list() guards against lazy map objects on Python 3 before np.asarray.
    np_fnames = np.asarray(list(fnames))
    np_bboxes = np.asarray(list(bboxes))
    train_split = int(round(args.train_split * len(np_fnames)))
    # Fixed seed keeps the shuffle (and thus the split) reproducible.
    rng = np.random.RandomState(seed=1234)
    idx = np.arange(0, len(np_fnames))
    rng.shuffle(idx)
    X = np_fnames[idx]
    y = np_bboxes[idx]
    df = pd.DataFrame({'name': X, 'bbox': y})
    # bug fix: DataFrame.ix was removed from pandas; `.ix[:n]` was label-based
    # and inclusive, so the equivalent positional slice is `.iloc[:n + 1]`.
    df.iloc[:train_split + 1].to_csv('train.csv', sep='\t', index=False)
    df.iloc[train_split + 1:].to_csv('val.csv', sep='\t', index=False)
if __name__ == '__main__':
    # CLI: three positional paths plus an optional train/val split ratio.
    parser = argparse.ArgumentParser()
    parser.add_argument('train_folder', help='Train folder root path')
    parser.add_argument('actor_label_path', help='Path to actors label list')
    parser.add_argument('actress_label_path',
                        help='Path to actresses label list')
    parser.add_argument('-s', '--train_split',
                        help='Train/Val split ratio', type=float)
    args = parser.parse_args()
    # Default to an 80/20 train/val split when not specified.
    if args.train_split is None:
        args.train_split = 0.8
    perform_split(args)
| 2,094 | 764 |
class Road:
    # Asphalt mass coefficient: kg of asphalt per m^2 of road per cm of thickness.
    m = 25

    def __init__(self, length: int, width: int):
        self._length = length  # road length in meters
        self._width = width    # road width in meters

    def calculatiion(self, s: int):
        """Print the road dimensions and return the asphalt-mass summary string.

        s: asphalt layer thickness in centimeters.
        NOTE(review): the misspelled method name is kept for backward
        compatibility; `calculation` below is the preferred alias.
        """
        print(f' road length: {self._length} m')
        print(f' road width: {self._width} m')
        # typo fix in output: 'thikness' -> 'thickness'
        print(f' road thickness: {s} cm')
        result = self._length * self._width * self.m * s
        # result is in kg; divide by 1000 to report tonnes.
        return f'The mass of asphalt required to cover the entire road: {result / 1000} t.'

    # Correctly spelled alias for new callers.
    calculation = calculatiion
# Demo: a 5000 m x 20 m road paved with a 5 cm asphalt layer.
example = Road(5000, 20)
print(example.calculatiion(5))
from sspipe import p, px
import numpy as np
def test_scalar_rhs():
    # Pipe a scalar through an increment stage, then a comparison stage.
    stepped = np.int32(1) | p(lambda v: v + 1)
    assert stepped | (px == 2)
def test_scalar_lhs():
    # px + 1 builds a pipeline; piping 2 in evaluates it (truthy 3).
    add_one = px + np.int32(1)
    assert 2 | add_one
def test_rhs():
    # Sum the array through a lambda stage, then compare the result.
    summed = np.array([1, 2]) | p(lambda arr: arr.sum())
    assert summed | (px == 3)
def test_rhs_px():
    # Build the whole check (sum == 3) as a single px pipeline.
    check = px.sum() == 3
    assert np.array([1, 2]) | check
def test_lhs():
    # log2(2) == 1, evaluated through two pipeline stages.
    logged = 2 | p(np.log2)
    assert logged | (px == 1)
def test_lhs_px():
    # np.power(px, px + 1) with input 2 yields 2 ** 3 == 8.
    powered = 2 | np.power(px, px + 1)
    assert powered | (px == 8)
| 455 | 222 |
# Generated by Django 4.0.3 on 2022-03-18 15:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the fin_storage app (auto-generated by Django 4.0.3):
    creates CachedItem, DownloadedInterval, Exporter, SourceApiActuality and
    InstrumentValue, then adds their foreign keys and uniqueness constraints.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        # HTTP-response cache, uniquely keyed by (url, parameters, headers) below.
        migrations.CreateModel(
            name='CachedItem',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('url', models.CharField(max_length=255)),
                ('parameters', models.CharField(blank=True, max_length=255)),
                ('headers', models.CharField(blank=True, max_length=255)),
                ('result', models.TextField(blank=True)),
                ('revive_moment', models.DateTimeField()),
                ('expiry_moment', models.DateTimeField()),
            ],
        ),
        # Date range already downloaded for an exporter (FK added further down).
        migrations.CreateModel(
            name='DownloadedInterval',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_from', models.DateField()),
                ('date_to', models.DateField()),
            ],
        ),
        migrations.CreateModel(
            name='Exporter',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('unique_code', models.CharField(max_length=128)),
                ('description', models.TextField(max_length=256)),
                ('is_active', models.BooleanField(default=True)),
                ('exporter_type', models.TextField(max_length=2048)),
                ('download_info_parameters', models.TextField(max_length=2048)),
                ('download_history_parameters', models.TextField(max_length=2048)),
            ],
        ),
        # Health-check bookkeeping per exporter type.
        migrations.CreateModel(
            name='SourceApiActuality',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('exporter_type', models.CharField(max_length=255, unique=True)),
                ('check_error_message', models.TextField(blank=True, null=True)),
                ('last_check_moment', models.DateTimeField()),
            ],
        ),
        # Time series of instrument values, one row per (exporter, moment).
        migrations.CreateModel(
            name='InstrumentValue',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('moment', models.DateTimeField()),
                ('value', models.DecimalField(decimal_places=6, max_digits=50)),
                ('exporter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='history_data', to='fin_storage.exporter')),
            ],
        ),
        migrations.AddConstraint(
            model_name='exporter',
            constraint=models.UniqueConstraint(fields=('unique_code',), name='unique_code'),
        ),
        migrations.AddField(
            model_name='downloadedinterval',
            name='exporter',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='downloaded_intervals', to='fin_storage.exporter'),
        ),
        migrations.AddConstraint(
            model_name='cacheditem',
            constraint=models.UniqueConstraint(fields=('url', 'parameters', 'headers'), name='UQ_url_param_headers'),
        ),
        migrations.AddConstraint(
            model_name='instrumentvalue',
            constraint=models.UniqueConstraint(fields=('exporter', 'moment'), name='unique_exporter_moment'),
        ),
        migrations.AddConstraint(
            model_name='downloadedinterval',
            constraint=models.UniqueConstraint(fields=('exporter', 'date_from'), name='unique_exporter_date_from'),
        ),
    ]
| 3,866 | 1,053 |
import datetime
import enum
class LogLevel(enum.Enum):
    """Severity levels; higher values mean more verbose output."""
    ERROR = 0
    INFO = 1
    DEBUG = 2
class LogManager:
    """Console logger that filters messages by a configured verbosity level."""
    def __init__(self, domain_name, log_level=LogLevel.INFO, output_type='text'):
        # Domain name is prefixed to every message to identify the subsystem.
        self.__domain_name = domain_name
        # NOTE(review): output_type is stored but not used in the visible code.
        self.__output_type = output_type
        self.__log_level = self.__get_log_level_type(log_level)
    def log(self, message: str = None, log_level=LogLevel.INFO):
        """Print message when its level is within the configured verbosity;
        an empty/None message prints a blank line instead."""
        if self.__log_level.value >= self.__get_log_level_type(log_level).value:
            if message:
                print(f'[{datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")}][{log_level.name}][{self.__domain_name}] {message}')
            else:
                # New line
                print()
    @staticmethod
    def __get_log_level_type(log_level):
        # Accept either a LogLevel member or its integer value.
        # NOTE(review): other input types fall through (returning None implicitly
        # unless the method continues beyond this excerpt) -- confirm upstream.
        if isinstance(log_level, LogLevel):
            return log_level
        elif isinstance(log_level, int):
            return LogLevel(log_level)
# find position of `target` in subarray nums[left…right]; -1 when absent
def binary_search(list, left, right, target):
    # Iterative halving of the search window (equivalent to the recursive form).
    while left <= right:
        mid = (left + right) // 2
        if list[mid] == target:
            return mid
        if list[mid] > target:
            # target is in the lower half: shrink the window from the right
            right = mid - 1
        else:
            # target is in the upper half: shrink the window from the left
            left = mid + 1
    return -1
# Demo: search the full range of a sorted sample list.
my_list = [1, 3, 5, 7, 9]
print(binary_search(my_list, 0, len(my_list) - 1, 3))   # found at index 1
print(binary_search(my_list, 0, len(my_list) - 1, -1))  # absent -> -1
import logging
import random
from pathlib import Path
from datetime import datetime, timezone
import click
import datasets as nlp
import torch
import numpy as np
import pandas as pd
from simpletransformers.classification import ClassificationModel
from sklearn.metrics import f1_score, accuracy_score, recall_score, precision_score
from sklearn.metrics import precision_recall_fscore_support, classification_report
from transformers import DistilBertTokenizerFast
from transformers import Trainer, TrainingArguments
from transformers import DistilBertForTokenClassification
from transformers import RobertaTokenizer
def new_call(self, *args, **kwargs):
    """Delegate to the parent class __call__ with is_split_into_words forced on.

    NOTE(review): super(type(self), self) resolves against the *runtime* type,
    so this is only safe because it is assigned directly onto RobertaTokenizer
    below -- calling it on a subclass instance would recurse infinitely.
    """
    return super(type(self), self).__call__(
        *args, **kwargs, is_split_into_words=True
    )
# Monkey-patch: every RobertaTokenizer call now treats its input as
# already split into words.
RobertaTokenizer.__call__ = new_call
def f1_micro(labels, preds):
    """Micro-averaged F1 score."""
    return f1_score(y_true=labels, y_pred=preds, average="micro")
def f1_macro(labels, preds):
    """Macro-averaged F1 score."""
    return f1_score(y_true=labels, y_pred=preds, average="macro")
def recall_macro(labels, preds):
    """Macro-averaged recall."""
    return recall_score(y_true=labels, y_pred=preds, average="macro")
def recall_micro(labels, preds):
    """Micro-averaged recall."""
    return recall_score(y_true=labels, y_pred=preds, average="micro")
def precision_macro(labels, preds):
    """Macro-averaged precision."""
    return precision_score(y_true=labels, y_pred=preds, average="macro")
def precision_micro(labels, preds):
    """Micro-averaged precision."""
    return precision_score(y_true=labels, y_pred=preds, average="micro")
def read_data(dataset, split):
    """Extract whitespace-joined sentences and their emotion labels from a split."""
    texts, labels = [], []
    for record in dataset[split]:
        texts.append(" ".join(record["sentence"]))
        labels.append(record["emotion"])
    return texts, labels
@click.command()
@click.option("--dataset", "-d", required=True)
@click.option("--mask-type", "-m", required=True)
@click.option("--role", "-r")
def cli(dataset, mask_type, role):
    """Fine-tune and evaluate a RoBERTa classifier on a unified emotion dataset.

    dataset: base dataset name; mask_type: masking variant; role: required
    for mask types other than 'all'/'inbandall'.
    """
    if mask_type in ("all", "inbandall"):
        name = f"unified_{dataset}_{mask_type}"
    else:
        if not role:
            raise click.BadParameter("Role is missing")
        name = f"unified_{dataset}_{mask_type}_{role}"
    dataset = nlp.load_dataset("scripts/unified-loader.py", name=name)
    train_texts, train_labels = read_data(dataset, "train")
    test_texts, test_labels = read_data(dataset, "test")
    val_texts, val_labels = read_data(dataset, "validation")
    # Map string labels to contiguous integer ids and back.
    unique_labels = set(train_labels)
    label2id = {label: id for id, label in enumerate(unique_labels)}
    id2label = {id: label for label, id in label2id.items()}
    train_data = []
    for train_text, train_label in zip(train_texts, train_labels):
        train_data.append([train_text, label2id[train_label]])
    train_df = pd.DataFrame(train_data)
    train_df.columns = ["text", "label"]
    eval_data = []
    for test_text, test_label in zip(test_texts, test_labels):
        eval_data.append([test_text, label2id[test_label]])
    eval_df = pd.DataFrame(eval_data)
    eval_df.columns = ["text", "label"]
    logging.basicConfig(level=logging.INFO)
    transformers_logger = logging.getLogger("transformers")
    transformers_logger.setLevel(logging.WARNING)
    # Create a ClassificationModel
    model = ClassificationModel(
        "roberta",
        "roberta-base",
        num_labels=len(unique_labels),
        args={
            "reprocess_input_data": True,
            "save_eval_checkpoints": False,
            "save_model_every_epoch": False,
            "overwrite_output_dir": True,
            "num_train_epochs": 5,
            "n_gpu": 3,
            "learning_rate": 5e-5,
            "use_early_stopping": True,
            "early_stopping_patience": 3,
            "manual_seed": 4,
            "no_cache": True,
        },
    )
    # Train the model
    model.train_model(train_df)
    # Evaluate the model
    result, model_outputs, wrong_predictions = model.eval_model(
        eval_df, acc=accuracy_score, f1_micro=f1_micro, f1_macro=f1_macro, recall_macro=recall_macro, recall_micro=recall_micro, precision_macro=precision_macro, precision_micro=precision_micro)
    # bug fix: `predictions` was never defined here (NameError). Derive the
    # predicted class ids from the raw model outputs instead.
    predictions = np.argmax(model_outputs, axis=1)
    print(id2label[int(predictions[0])])
    # Archive the eval results file under a unique, timestamped name.
    now = int(datetime.now().astimezone(timezone.utc).timestamp())
    source = Path("outputs/eval_results.txt")
    target = Path(f"results/eval-{name}-{now}.txt")
    source.rename(target)
# Script entry point: delegate to the click command.
if __name__ == "__main__":
    cli()
| 4,206 | 1,343 |
"Command-Line Arguments"
'''
The sys module is also where Python makes available the words typed on the command
that is used to start a Python script. These words are usually referred to as commandline
arguments and show up in sys.argv, a built-in list of strings.
'''
import sys
print(sys.argv)
'''
C:\...\PP4E\System> python testargv.py
['testargv.py']
C:\...\PP4E\System> python testargv.py spam eggs cheese
['testargv.py', 'spam', 'eggs', 'cheese']
C:\...\PP4E\System> python testargv.py -i data.txt -o results.txt
['testargv.py', '-i', 'data.txt', '-o', 'results.txt']
The last command here illustrates a common convention. Much like function arguments,
command-line options are sometimes passed by position and sometimes by
name using a “-name value” word pair.
'''
"""
Command-line arguments play the same role in programs that function arguments do
in functions: they are simply a way to pass information to a program that can vary per
program run. Because they don’t have to be hardcoded, they allow scripts to be more
generally useful.
"""
| 1,090 | 315 |
from math import ceil, floor, isqrt, log2, sqrt

from libraries import Digits
class Numbers:
    """Number-theory and basic statistics helpers."""

    @staticmethod
    def is_perfect_square(n: int) -> bool:
        '''Returns True if "n" is a perfect square.

        Cheap digit-based filters reject most non-squares before the exact
        integer-root check. Explanations:
        https://www.quora.com/What-is-the-quickest-way-to-determine-if-a-number-is-a-perfect-square

        Args:
            n (int): The number to check.

        Returns:
            bool: True if the number is a perfect square.
        '''
        # bug fix: handle the edge cases the digit filters get wrong.
        if n < 0:
            return False  # negative numbers are never perfect squares
        if n == 0:
            return True  # 0 == 0**2, but the digit-sum filter would reject it
        # A perfect square can only end in 0, 1, 4, 5, 6 or 9.
        perfect_ends = [0, 1, 4, 5, 6, 9]
        if n % 10 in perfect_ends:
            # The digital root of a perfect square is 1, 4, 7 or 9.
            # NOTE(review): two rounds of sum_digits may not reach a single
            # digit for astronomically large n -- the filter is then skipped
            # wrongly only if the double sum happens to miss these values.
            perfect_sums = [1, 4, 7, 9]
            digits_sum = Digits.sum_digits(Digits.sum_digits(n))
            if digits_sum in perfect_sums:
                # bug fix: float sqrt + floor/ceil is inexact for large ints;
                # isqrt is exact for arbitrary precision.
                root = isqrt(n)
                return root * root == n
        return False

    @staticmethod
    def average(k: list[int]) -> float:
        '''Returns the average value of an integers list.

        Args:
            k (list[int]): List of integers (must be non-empty).

        Returns:
            float: The average.

        Raises:
            ZeroDivisionError: If the list is empty.
        '''
        # Original multiply-by-reciprocal form kept for bit-exact compatibility.
        return sum(k) * (1 / len(k))

    @staticmethod
    def variance(k: list[int]) -> float:
        '''Returns the sum of squared deviations of an integers list.

        NOTE(review): despite its name this is NOT divided by len(k), so it is
        the sum of squared deviations rather than the statistical variance;
        is_k_number (PB-791) depends on this exact definition.

        Args:
            k (list[int]): List of integers.

        Returns:
            float: The sum of squared deviations from the mean.
        '''
        avr = Numbers.average(k)
        res = 0
        for elem in k:
            res += (elem - avr)**2
        return res

    @staticmethod
    def is_k_number(quadruple: list[int]) -> bool:
        '''Returns True if the quadruple is a k-number (PB-791)
        which is a quadruple with these properties:
        - 1 <= a <= b <= c <= d <= n
        - Average * 2 == sum of squared deviations

        Args:
            quadruple (list[int]): A list of 4 integers.

        Returns:
            bool: True if the quadruple is a k-number.
        '''
        avr = Numbers.average(quadruple)
        # Reuse variance() -- it computes the same sum of squared deviations.
        return avr * 2 == Numbers.variance(quadruple)

    @staticmethod
    def highest_power_of_two(n: int) -> int:
        '''Given a number n > 0, returns the highest power of 2 that divides n.

        n & (~(n - 1)) isolates the lowest set bit (equals n & -n).

        Args:
            n (int): The number n (must be positive).

        Returns:
            int: The exponent p such that 2**p is the largest power of 2 dividing n.

        Raises:
            ValueError: If n <= 0 (log2 domain error).
        '''
        return int(log2(n & (~(n - 1))))
| 2,552 | 804 |
# Copyright 2017 Semaphore Solutions, Inc.
# ---------------------------------------------------------------------------
from collections import defaultdict
def get_parent_artifacts(lims, artifacts):
    """
    Helper method to get the parent artifacts keyed to the supplied artifacts
    :param LIMS lims:
    :param list[Artifact] artifacts: The artifacts to get parent artifacts for
    :rtype: dict[Artifact, list[Artifact]]
    """
    artifact_to_parent_artifacts = defaultdict(list)
    artifacts_to_batch_fetch = []
    for artifact in artifacts:
        if artifact.parent_step:
            # Ugly list comprehension that covers pooled inputs and replicates
            artifact_to_parent_artifacts[artifact] = [input_artifact for iomap in artifact.parent_step.details.iomaps
                                                      for input_artifact in iomap.inputs
                                                      if any(output.limsid == artifact.limsid for output in iomap.outputs)]
            artifacts_to_batch_fetch += artifact_to_parent_artifacts[artifact]
        else:
            # Without a parent_step, we've reached the end of the artifact history
            artifact_to_parent_artifacts[artifact] = []
    # bug fix: guard on the fetch list itself -- the old check on the (always
    # populated) result dict triggered a useless batch_fetch(set()) call when
    # no artifact had any parents.
    if artifacts_to_batch_fetch:
        # One batched fetch instead of an API round-trip per parent artifact.
        lims.artifacts.batch_fetch(set(artifacts_to_batch_fetch))
    return artifact_to_parent_artifacts
def get_udfs_from_artifacts_or_ancestors(lims, artifacts_to_get_udf_from, required_udfs=None, optional_udfs=None):
    """
    Resolve UDF values for each artifact, falling back to the closest ancestor
    that defines them. The genealogy walk stops at any pooling step.

    :param LIMS lims:
    :param list[Artifact] artifacts_to_get_udf_from: artifacts whose genealogy is searched for the UDFs.
    :param list[str] required_udfs: UDFs that must be resolved for every artifact; an exception is raised otherwise.
    :param list[str] optional_udfs: UDFs that are resolved when present but may be missing.
    :rtype: dict[s4.clarity.Artifact, dict[str, str]]
    :raises UserMessageException: if values can not be retrieved for all required_udfs for all of the provided artifacts
    """
    if not required_udfs and not optional_udfs:
        raise Exception("The get_udfs_from_artifacts_or_ancestors method must be called with at least one "
                        "of the required_udfs or optional_udfs parameters.")
    required_udfs = required_udfs or []
    optional_udfs = optional_udfs or []
    # Seed the search state: each artifact starts as its own 'ancestor',
    # carrying whatever UDF values it already has.
    ancestor_to_originals = {}
    originals_to_udfs = {}
    for artifact in artifacts_to_get_udf_from:
        ancestor_to_originals[artifact] = [artifact]
        originals_to_udfs[artifact] = {
            name: artifact.get(name, None) for name in required_udfs + optional_udfs
        }
    resolved = _get_udfs_from_ancestors_internal(lims, ancestor_to_originals, originals_to_udfs)
    if required_udfs:
        _validate_required_ancestor_udfs(resolved, required_udfs)
    return resolved
def _validate_required_ancestor_udfs(artifacts_to_udfs, required_udfs):
"""
Validates that all items in the artifacts_to_udfs dict have values for the required_udfs
:type artifacts_to_udfs: dict[s4.clarity.Artifact, dict[str, str]]
:type required_udfs: list[str]
:raises UserMessageException: if any artifact is missing any of the required_udfs
"""
artifacts_missing_udfs = set()
missing_udfs = set()
for artifact, udf_name_to_value in artifacts_to_udfs.items():
for required_udf in required_udfs:
if udf_name_to_value.get(required_udf) in ["", None]:
artifacts_missing_udfs.add(artifact.name)
missing_udfs.add(required_udf)
if artifacts_missing_udfs:
raise Exception("Could not get required values for udf(s) '%s' from ancestors of artifact(s) '%s'." %
("', '".join(missing_udfs), "', '".join(artifacts_missing_udfs)))
def _get_udfs_from_ancestors_internal(lims, current_artifacts_to_original_artifacts, original_artifacts_to_udfs):
    """
    Recursive method that gets parent artifacts, and searches them for any udfs that have not yet been filled in
    :type lims: s4.clarity.LIMS
    :type current_artifacts_to_original_artifacts: dict[s4.clarity.Artifact: list[s4.clarity.Artifact]]
    :param current_artifacts_to_original_artifacts: dict of the currently inspected artifact to the original artifact.
    :type original_artifacts_to_udfs: dict[s4.clarity.Artifact, dict[str, str]]
    :param original_artifacts_to_udfs: dict of the original artifacts to their ancestors' UDF values, which will
    get filled in over the recursive calls of this method.
    :rtype: dict[s4.clarity.Artifact, dict[str, Any]]
    """
    current_artifacts = current_artifacts_to_original_artifacts.keys()
    current_artifacts_to_parent_artifacts = get_parent_artifacts(lims, current_artifacts_to_original_artifacts.keys())
    # Initialize the 'next to search' dict
    next_search_artifacts_to_original_artifacts = defaultdict(list)
    for current_artifact in current_artifacts:
        if not current_artifacts_to_parent_artifacts[current_artifact]:
            # The end of the genealogy has been reached for this artifact
            continue
        if current_artifact.parent_step.pooling is not None:
            # Stop looking when we reach a step with pooled inputs, as ancestor artifacts would likely contain multiple
            # values for the UDFs in question
            continue
        # Can now get a single parent artifact with confidence, as validated it
        current_artifact_parent = current_artifacts_to_parent_artifacts[current_artifact][0]
        for original_artifact in current_artifacts_to_original_artifacts[current_artifact]:
            continue_searching = False
            for udf_name, udf_value in original_artifacts_to_udfs[original_artifact].items():
                # Don't overwrite values that have already been found
                if udf_value is not None:
                    continue
                found_value = current_artifact_parent.get(udf_name, None)
                if found_value is None:
                    # Still unresolved on this ancestor: keep walking up.
                    continue_searching = True
                    continue
                original_artifacts_to_udfs[original_artifact][udf_name] = found_value
            if continue_searching:
                next_search_artifacts_to_original_artifacts[current_artifact_parent].append(original_artifact)
    if next_search_artifacts_to_original_artifacts:
        # Recurse one generation further up for artifacts that still have gaps.
        return _get_udfs_from_ancestors_internal(lims, next_search_artifacts_to_original_artifacts, original_artifacts_to_udfs)
    return original_artifacts_to_udfs
| 7,131 | 2,076 |
import hou
from wizard.prefs.main import prefs
from wizard.vars import defaults
from wizard.tools import log
from wizard.asset import main as asset_core
from softwares.houdini_wizard.tools import *
from wizard.tools import utility as utils
import os
import traceback
import shutil
from wizard.project import wall
from wizard.signal import send_signal
import sys
logger = log.pipe_log()  # shared pipeline logger for this module
prefs = prefs()  # NOTE(review): rebinding `prefs` to an instance shadows the imported class
def save():
    """Version up the current asset, save the hip file, and broadcast the save."""
    asset = asset_core.string_to_asset(os.environ[defaults._asset_var_])
    asset.version = prefs.asset(asset).software.get_new_version()
    hou.hipFile.save(file_name=asset.file)
    # Persist the updated asset back into the environment, then notify wizard.
    serialized = asset_core.asset_to_string(asset)
    os.environ[defaults._asset_var_] = serialized
    send_signal.save_request_signal(asset.file, serialized)
def set_f_range(preroll=0):
    """Apply the asset's frame range to the playbar, padded with pre/postroll when requested."""
    asset = asset_core.string_to_asset(os.environ[defaults._asset_var_])
    f_range = prefs.asset(asset).name.range
    if preroll:
        # Widen the range in place by the configured pre/postroll padding.
        f_range[0] -= prefs.asset(asset).name.preroll
        f_range[1] += prefs.asset(asset).name.postroll
    hou.playbar.setFrameRange(f_range[0], f_range[1])
def export(batch=None, frange=None):
    """Dispatch the export to the handler matching the asset's file extension."""
    asset = asset_core.string_to_asset(os.environ[defaults._asset_var_])
    extension = asset.extension
    if extension == "hipnc":
        export_hipfile()
    elif extension == "abc":
        export_abc(batch=batch, frange=frange)
    elif extension == "vdb":
        export_vdb(batch=batch, frange=frange)
def prepare_export():
    """Build the export network for abc/vdb assets without executing it."""
    asset = asset_core.string_to_asset(os.environ[defaults._asset_var_])
    if asset.extension == "abc":
        export_abc(prepare=1)
    elif asset.extension == "vdb":
        export_vdb(prepare=1)
def export_hipfile():
    """Save the current hip file and copy it to the asset's export location."""
    asset = asset_core.string_to_asset(os.environ[defaults._asset_var_])
    target = asset.export("{}_{}".format(asset.name, asset.variant), from_asset=asset)
    # Save first so the copied file reflects the latest scene state.
    hou.hipFile.save()
    shutil.copyfile(hou.hipFile.path(), target)
    wall.wall().publish_event(asset)
def export_abc(batch=None, prepare=None, frange=None):
    """Export the current asset as Alembic (.abc), tagging geometry with GuerillaTags.

    batch: reuse an existing export null and add per-frame progress scripts.
    prepare: only build the export network, do not execute it.
    frange: optional (start, end) frame range override.
    """
    asset = asset_core.string_to_asset(os.environ[defaults._asset_var_])
    if not batch:
        abc_export_null = create_export_null_on_last_node('abc')
    else:
        abc_export_null = look_for_export_null('abc')
    if abc_export_null:
        wizard_exports_node = get_wizard_export_node()
        abc_object_merge_node = create_node_without_duplicate('object_merge', 'abc_exports_object_merge', wizard_exports_node)
        abc_object_merge_node.parm('objpath1').set(abc_export_null.path())
        # Attach a detail attribute carrying category/name/variant/stage metadata.
        gtags_node = create_node_without_duplicate('attribcreate', 'GuerillaTags', wizard_exports_node)
        gtags_node.parm('name1').set('GuerillaTags')
        gtags_node.parm('class1').set('detail')
        gtags_node.parm('type1').set('index')
        gtags_node.parm('string1').set('{}, {}, {}, {}, {}-{}-{}-{}'.format(asset.category,
                                                                           asset.name,
                                                                           asset.variant,
                                                                           asset.stage,
                                                                           asset.category,
                                                                           asset.name,
                                                                           asset.variant,
                                                                           asset.stage))
        gtags_node.setInput(0, abc_object_merge_node)
        rop_alembic_node = create_node_without_duplicate('rop_alembic', 'exports_alembic', wizard_exports_node)
        rop_alembic_node.setInput(0, gtags_node)
        wizard_exports_node.layoutChildren()
        rop_alembic_node.parm("trange").set('normal')
        if frange:
            hou.playbar.setFrameRange(frange[0], frange[1])
        rop_alembic_node.parm("f1").setExpression('$FSTART')
        rop_alembic_node.parm("f2").setExpression('$FEND')
        # Fixed shutter of +/-0.2 frames for motion blur samples.
        rop_alembic_node.parm("motionBlur").set(1)
        rop_alembic_node.parm("shutter1").set(-0.2)
        rop_alembic_node.parm("shutter2").set(0.2)
        if batch:
            rop_alembic_node.parm('lpostframe').set("python")
            rop_alembic_node.parm('postframe').set(by_frame_script_to_file(80))
        if not prepare:
            export_file = asset.export("{}_{}".format(asset.name, asset.variant), from_asset=asset)
            rop_alembic_node.parm("filename").set(export_file)
            rop_alembic_node.parm("execute").pressButton()
            wall.wall().publish_event(asset)
    else:
        logger.warning("No abc out node")
def export_vdb(batch=None, prepare=None, frange=None):
    """Build the VDB export network for the current asset and run it.

    Frames are written to a temp directory, then copied to the published
    locations returned by asset.export_multiple().

    batch: when truthy, reuse an existing 'vdb' export null and install a
        per-frame python post-frame script on the ROP.
    prepare: when truthy, only build the network without exporting.
    frange: optional (start, end) pair used to set the playbar range.
    """
    asset = asset_core.string_to_asset(os.environ[defaults._asset_var_])
    if not batch:
        vdb_export_null = create_export_null_on_last_node('vdb')
    else:
        vdb_export_null = look_for_export_null('vdb')
    if vdb_export_null:
        wizard_exports_node = get_wizard_export_node()
        vdb_object_merge_node = create_node_without_duplicate('object_merge', 'vdb_exports_object_merge', wizard_exports_node)
        vdb_object_merge_node.parm('objpath1').set(vdb_export_null.path())
        rop_geometry_node = create_node_without_duplicate('rop_geometry', 'exports_vdb', wizard_exports_node)
        rop_geometry_node.setInput(0, vdb_object_merge_node)
        wizard_exports_node.layoutChildren()
        temp_dir = utils.temp_dir()
        export_path = os.path.join(temp_dir, "file.$F4.vdb")
        if batch:
            rop_geometry_node.parm('lpostframe').set("python")
            rop_geometry_node.parm('postframe').set(by_frame_script_to_file(80))
        rop_geometry_node.parm('sopoutput').set(export_path)
        rop_geometry_node.parm("trange").set('normal')
        if frange:
            hou.playbar.setFrameRange(frange[0], frange[1])
        rop_geometry_node.parm("f1").setExpression('$FSTART')
        rop_geometry_node.parm("f2").setExpression('$FEND')
        if not prepare:
            rop_geometry_node.parm("execute").pressButton()
            files_list = [os.path.join(temp_dir, file_name) for file_name in os.listdir(temp_dir)]
            publish_files_name = asset.export_multiple('{}_{}'.format(asset.name, asset.variant), files_list)
            if batch:
                print("current_task:copying output files")
                sys.stdout.flush()
            # Pair each temp file with its publish target directly; the previous
            # files_list.index() lookup was O(n^2) and misbehaves on duplicates.
            for src_file, dst_file in zip(files_list, publish_files_name):
                shutil.copyfile(src_file, dst_file)
            wall.wall().publish_event(asset)
    else:
        logger.warning("No vdb out node")
| 6,829 | 2,214 |
import discord
from discord.ext import commands
#Helper Libraries
from datetime import datetime
import feedparser
import json
#Utils
import strings
#IRI: changed 'Server' to 'timers'
class timers(commands.Cog):
    """Cog exposing server-related commands: server time and GW2 news feed.

    NOTE(review): uses self.bot.say(), the discord.py 0.x API -- confirm the
    installed discord.py version still supports it (removed in 1.0+).
    """
    def __init__(self, bot):
        # Keep a reference to the running bot so commands can send replies.
        self.bot = bot
    @commands.group(name="server", pass_context=True)
    async def server(self, ctx):
        # Group root: replies with an error string when no subcommand given.
        if ctx.invoked_subcommand is None:
            await self.bot.say('```' + strings.noCommandFound + '```')
    @server.command(name="time", pass_context=True, help=strings.serverTimeDescription)
    async def serverTime(self, ctx):
        # Server time is reported in UTC, formatted as 12-hour clock.
        await self.bot.say('```Server time: ' + datetime.utcnow().strftime("%I:%M%p") + '```')
    @server.command(name="news", pass_context=True, help=strings.releaseNotesDescription)
    async def releaseNotes(self, ctx):
        # Posts the link of the newest entry from the GW2 RSS feed.
        newestPost = feedparser.parse('https://www.guildwars2.com/en/feed')
        await self.bot.say(newestPost.entries[0]['link'])
def setup(bot):
    # Entry point used by discord.py's load_extension to register the cog.
    # Fix: the cog class was renamed from 'Server' to 'timers' (see the
    # comment above the class), but this call still referenced the old name,
    # raising NameError when the extension loads.
    bot.add_cog(timers(bot))
| 999 | 320 |
import sys
import os.path
if __name__ == '__main__' and __package__ is None:
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from table import Table
from loader import load
from utils import groupby
from report_writer import RestWriter
from prettyprint import *
class Report(object):
    """Three-level grouped timing report built from raw measurement data.

    Grouping: separator distribution -> distribution name -> numeric
    distribution parameters, with a formatted timing table at each leaf.
    NOTE(review): relies on dict.iteritems() -- Python 2 only.
    """
    def __init__(self, path):
        # path: file parsed by loader.load() into measurement items.
        with open(path, 'rt') as f:
            self.raw_data = load(f)
        # group by separators distribution
        bysep = lambda item: item.sep_distribution
        self.report = []
        for sep, collection in groupby(self.raw_data, bysep).iteritems():
            ret = self.split_by_distribution(collection)
            self.report.append((
                get_separator_title(sep),
                ret
            ))
    def get(self):
        """Return the built report: a list of (section title, subsections)."""
        return self.report
    def split_by_distribution(self, collection):
        """Group *collection* by distribution name; return (title, subsections) pairs."""
        result = []
        bynum = lambda item: (item.distribution_name)
        tmp = groupby(collection, bynum)
        for distribution_name, collection in tmp.iteritems():
            res = self.split_by_parameters(distribution_name, collection)
            result.append((
                get_distribution_title(distribution_name),
                res
            ))
        return result
    def split_by_parameters(self, distribution_name, collection):
        """Group by numeric-distribution parameters; subsections ordered by weight."""
        byparam = lambda item: item.num_distribution
        result = []
        for key, collection in groupby(collection, byparam).iteritems():
            table = self.prepare_table(collection)
            ret = get_num_distribution_parameters(distribution_name, key)
            result.append((
                ret.title,
                table,
                ret.weight
            ))
        # sort subsections by weight, then drop the weight column
        result.sort(key=lambda row: row[-1])
        return [item[:2] for item in result]
    def prepare_table(self, procedures):
        """Build the timing table (scalar vs SSE vs SSE-block) for one leaf group."""
        keyfun = lambda item: (item.size, item.loops)
        tmp = groupby(procedures, keyfun)
        data = []
        for (size, loops), items in tmp.iteritems():
            def get_time(procedure):
                # Find the measurement for the named procedure in this group.
                for item in items:
                    if item.procedure == procedure:
                        return item.time
                raise KeyError("Procedure '%s' not found" % procedure)
            data.append((
                size,
                loops,
                get_time("scalar"),
                get_time("sse"),
                get_time("sse-block"),
            ))
        data.sort(key=lambda t: t[0]) # sort by size
        t = Table()
        t.add_header([("input", 2), "scalar", ("SSE", 2), ("SSE block", 2)])
        t.add_header(["size [B]", "loops", "time [us]", "time [us]", "speed-up", "time [us]", "speed-up"])
        for item in data:
            # item layout: (size, loops, scalar time, sse time, sse-block time)
            t0 = item[2]
            t1 = item[3]
            t2 = item[4]
            if t0 < 10 and t1 < 10 and t2 < 10:
                # don't fool people when all measurements are single-digit numbers
                speedup_sse = '---'
                speedup_sse_block = '---'
            else:
                speedup_sse = '%0.2f' % (float(t0)/t1)
                speedup_sse_block = '%0.2f' % (float(t0)/t2)
            t.add_row([
                '{:,}'.format(item[0]),
                '%d' % item[1],
                '%d' % item[2],
                '%d' % item[3],
                speedup_sse,
                '%d' % item[4],
                speedup_sse_block,
            ])
        return t
def main():
    """Load the report named on the command line and emit it as reST.

    argv[1] is the data file; optional argv[2] overrides the section
    underline characters (default "-~#").
    """
    report = Report(sys.argv[1])
    writer = RestWriter(sys.stdout, report.get())
    if len(sys.argv) > 2:
        restsection = sys.argv[2]
    else:
        restsection = "-~#"
    writer.write(restsection)
if __name__ == '__main__':
main()
| 3,749 | 1,103 |
from .bind_annotation import blueprint
from .bind_annotation import delete_route
from .bind_annotation import get_route
from .bind_annotation import head_route
from .bind_annotation import patch_route
from .bind_annotation import post_route
from .bind_annotation import put_route
from .bind_annotation import route
from .context import WebApplicationContext
from .cors import CorsFilter
from .errors import RequestValidationError
from .filter_annotation import after_request
from .filter_annotation import app_after_request
from .filter_annotation import app_before_request
from .filter_annotation import app_error_handler
from .filter_annotation import before_request
from .filter_annotation import error_handler
from .request_annotation import CookieValue
from .request_annotation import FilePart
from .request_annotation import FormValue
from .request_annotation import PathVariable
from .request_annotation import RequestBody
from .request_annotation import RequestHeader
from .request_annotation import RequestParam
from .request_filter import RequestFilter
from .request_filter import RequestFilterChain
| 1,110 | 251 |
#coding: utf-8
import os
import time
def get_version_string(dev=False, safe=True):
    """Build a version identifier from CI environment variables.

    Parts: "SDT-" prefix, GITLAB_TAG with dots removed (fallback "400"),
    first 8 chars of CI_COMMIT_SHA (fallback "M"), first 8 chars of
    CI_JOB_ID (fallback: last 8 digits of the current unix time), and an
    optional "DEV" suffix.

    dev: append a "DEV" marker.
    safe: join parts with "-" (filesystem-safe); otherwise with ".".

    Fix: the bare ``except:`` clauses were narrowed to ``except KeyError``,
    the only exception os.environ lookups raise here; bare excepts would
    also have hidden unrelated bugs (and swallowed KeyboardInterrupt).
    """
    version = ["SDT-"]
    try:
        version.append(os.environ['GITLAB_TAG'].replace(".", ""))
    except KeyError:
        version.append("400")
    try:
        version.append(os.environ['CI_COMMIT_SHA'][:8])
    except KeyError:
        version.append("M")
    try:
        version.append(os.environ['CI_JOB_ID'][:8])
    except KeyError:
        # No job id available: use a timestamp so the string stays unique.
        version.append(str(int(time.time()))[-8:])
    if dev:
        version.append("DEV")
    separator = "-" if safe else "."
    return separator.join(version)
import base64
def get_result(result):
    """Decode a base64-encoded payload and return it as text.

    result: the base64 data as bytes (original behavior) or, as a
        backward-compatible generalization, as a str -- base64 text is
        always ASCII, so a str input is encoded before decoding.

    Returns the decoded payload as a str.
    """
    if isinstance(result, str):
        result = result.encode('ascii')
    stream = base64.decodebytes(result)
    return stream.decode()
| 110 | 36 |
"""Integration with Astoria."""
import asyncio
import logging
from json import JSONDecodeError, loads
from pathlib import Path
from typing import Match, NamedTuple, Optional
from astoria.common.broadcast_event import StartButtonBroadcastEvent
from astoria.common.consumer import StateConsumer
from astoria.common.messages.astmetad import Metadata, MetadataManagerMessage
from astoria.common.messages.astprocd import ProcessManagerMessage
from astoria.common.mqtt.broadcast_helper import BroadcastHelper
LOGGER = logging.getLogger(__name__)
loop = asyncio.get_event_loop()
class GetMetadataResult(NamedTuple):
    """Result returned from fetching metadata from astoria."""

    # Robot metadata from astmetad; falls back to Metadata.init defaults
    # when astmetad could not be reached (see GetMetadataConsumer.get_metadata).
    metadata: Metadata
    # Mount path of the user code drive from astprocd's disk info;
    # Path("/dev/null") when no process info is available.
    usb_path: Path
class GetMetadataConsumer(StateConsumer):
    """Astoria consumer to fetch metadata.

    Subscribes to the astmetad and astprocd status topics and halts once a
    message from each has been received.

    Fixes: ``LOGGER.warn`` (deprecated alias) replaced with
    ``LOGGER.warning``; the astprocd decode-error message wrongly said
    "metadata" instead of "process info".
    """

    name = "sr-robot3-metadata"

    def _setup_logging(self, verbose: bool, *, welcome_message: bool = True) -> None:
        """Use the logging from sr-robot3."""
        # Suppress INFO messages from gmqtt
        logging.getLogger("gmqtt").setLevel(logging.WARNING)

    def _init(self) -> None:
        """Initialise consumer."""
        # Latest status messages from the two astoria managers; both must
        # arrive before the consumer halts (see the handlers below).
        self._metadata_message: Optional[MetadataManagerMessage] = None
        self._proc_message: Optional[ProcessManagerMessage] = None
        self._state_lock = asyncio.Lock()
        self._mqtt.subscribe("astmetad", self._handle_astmetad_message)
        self._mqtt.subscribe("astprocd", self._handle_astprocd_message)

    async def _handle_astmetad_message(
        self,
        match: Match[str],
        payload: str,
    ) -> None:
        """Handle astmetad status messages."""
        async with self._state_lock:
            try:
                message = MetadataManagerMessage(**loads(payload))
                if message.status == MetadataManagerMessage.Status.RUNNING:
                    LOGGER.debug("Received metadata")
                    self._metadata_message = message
                else:
                    LOGGER.warning("Cannot get metadata, astmetad is not running")
            except JSONDecodeError:
                LOGGER.error("Could not decode JSON metadata.")
            # Stop waiting once both managers have reported in.
            if self._metadata_message is not None and self._proc_message is not None:
                self.halt(silent=True)

    async def _handle_astprocd_message(
        self,
        match: Match[str],
        payload: str,
    ) -> None:
        """Handle astprocd status messages."""
        async with self._state_lock:
            try:
                message = ProcessManagerMessage(**loads(payload))
                if message.status == ProcessManagerMessage.Status.RUNNING:
                    LOGGER.debug("Received process info")
                    self._proc_message = message
                else:
                    LOGGER.warning("Cannot get process info, astprocd is not running")
            except JSONDecodeError:
                LOGGER.error("Could not decode JSON process info.")
            # Stop waiting once both managers have reported in.
            if self._metadata_message is not None and self._proc_message is not None:
                self.halt(silent=True)

    async def main(self) -> None:
        """Main method of the command."""
        await self.wait_loop()

    @classmethod
    def get_metadata(cls) -> GetMetadataResult:
        """Get metadata.

        Runs the consumer briefly (0.1 s timeout); on any failure the
        default Metadata and Path("/dev/null") are returned.
        """
        gmc = cls(False, None)
        metadata = Metadata.init(gmc.config)
        path = Path("/dev/null")
        try:
            loop.run_until_complete(asyncio.wait_for(gmc.run(), timeout=0.1))
            if gmc._metadata_message is not None:
                metadata = gmc._metadata_message.metadata
            if gmc._proc_message is not None and gmc._proc_message.disk_info is not None:
                path = gmc._proc_message.disk_info.mount_path
        except ConnectionRefusedError:
            LOGGER.warning("Unable to connect to MQTT broker")
        except asyncio.TimeoutError:
            LOGGER.warning("Astoria took too long to respond, giving up.")
        return GetMetadataResult(metadata, path)
class WaitForStartButtonBroadcastConsumer(StateConsumer):
    """Wait for a start button broadcast."""

    name = "sr-robot3-wait-start"

    def __init__(
        self,
        verbose: bool,
        config_file: Optional[str],
        start_event: asyncio.Event,
    ) -> None:
        # start_event: set once a start button broadcast is received.
        super().__init__(verbose, config_file)
        self._start_event = start_event

    def _setup_logging(self, verbose: bool, *, welcome_message: bool = True) -> None:
        """Use the logging from sr-robot3."""
        # Suppress INFO messages from gmqtt
        logging.getLogger("gmqtt").setLevel(logging.WARNING)

    def _init(self) -> None:
        """
        Initialisation of the data component.

        Called in the constructor of the parent class.
        """
        self._trigger_event = BroadcastHelper.get_helper(
            self._mqtt,
            StartButtonBroadcastEvent,
        )

    async def main(self) -> None:
        """Wait for a trigger event."""
        while not self._start_event.is_set():
            # wait_broadcast waits forever until a broadcast, so we will use a short
            # timeout to ensure that the loop condition is checked.
            try:
                await asyncio.wait_for(self._trigger_event.wait_broadcast(), timeout=0.1)
                self._start_event.set()
            except asyncio.TimeoutError:
                pass
| 5,376 | 1,467 |
#! /usr/bin/env python
# encoding: utf-8
from aoc2017.day_16 import permutations_1
def test_permutations_1_1():
    """Known example: the sample move sequence on 5 programs gives 'baedc'."""
    dance_moves = "s1,x3/4,pe/b"
    assert permutations_1(dance_moves, num_programs=5) == "baedc"
| 224 | 97 |
#!/usr/bin/env python
# Created by Adam Melton (.dok) referencing https://bitmessage.org/wiki/API_Reference for API documentation
# Distributed under the MIT/X11 software license. See the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import time
import sys
import os
import json
import getopt
import xmlrpclib
import hashlib
from time import strftime,gmtime
import sqlite3 as lite
database = 'bmModerator.db'
def validAddress(address):
    """Return True when *address* decodes as a valid Bitmessage address.

    The address is normalised (no 'BM-' prefix) and checked through the
    API's decodeAddress call.
    """
    address = format_address(address)
    address_information = api.decodeAddress(address)
    # SECURITY NOTE: eval() on data returned by the API is dangerous if the
    # endpoint is ever untrusted; json.loads would be safer if the API
    # returns JSON -- verify the response format before changing this.
    address_information = eval(address_information)
    return 'success' in str(address_information.get('status')).lower()
def create_db():
    """(Re)create the moderator database from scratch.

    Deletes any existing database file, then creates the full schema
    (api_config, bm_addresses_config, users_config, command_history,
    stats, filter) and seeds api_config with default connection values.
    """
    try:
        # Best-effort removal of the old database file; ignored when missing.
        os.remove(database)
        time.sleep(1)
    except Exception as e:
        pass
    try:
        con = lite.connect(database)
        cur = con.cursor()
        cur.execute("CREATE TABLE api_config(id INTEGER PRIMARY KEY, api_port TEXT,api_address TEXT,api_username TEXT, api_password TEXT, global_admin_bm_address TEXT)")
        # Default API credentials; the global admin address starts blank.
        cur.execute("INSERT INTO api_config VALUES (?,?,?,?,?,?)",('0','8442','127.0.0.1','apiUser','apiPass',' '))
        cur.execute("CREATE TABLE bm_addresses_config(id INTEGER PRIMARY KEY, bm_address TEXT, label TEXT, enabled TEXT, motd TEXT,whitelisted TEXT, max_msg_length INT, echo_address TEXT)")
        cur.execute("CREATE TABLE users_config(id INTEGER PRIMARY KEY, ident_bm_address TEXT, usr_bm_address TEXT, nickname TEXT, admin_moderator TEXT, whitelisted_blacklisted TEXT)")
        cur.execute("CREATE TABLE command_history(id INTEGER PRIMARY KEY, ident_bm_address TEXT, usr_bm_address TEXT, date_time TEXT, command TEXT, message_snippet TEXT)")
        cur.execute("CREATE TABLE stats(id INTEGER PRIMARY KEY, date_day TEXT, bm_address TEXT, num_sent_broadcasts INT, num_sent_messages INT)")
        cur.execute("CREATE TABLE filter(id INTEGER PRIMARY KEY, banned_text TEXT)")
        con.commit()
        cur.close()
    except Exception as e:
        print 'Failed creating database (%s):%s' % (database,e)
'''
def create_add_address_table(con,ident_address):
try:
cur = con.cursor()
#Select from table, if fail the create
cur.execute("CREATE TABLE IF NOT EXISTS ?(id INTEGER PRIMARY KEY, bm_address TEXT, nickname TEXT, admin_moderator TEXT, whitelisted_blacklisted TEXT)",(ident_address,))
cur.execute("INSERT INTO bm_addresses_config VALUES(?,?,?,?,?,?,?,?)",(None,bm_address,label,enabled,motd,whitelisted,max_msg_length,echo_address))
con.commit()
return True
except Exception as e:
print 'Failed',e
return False
'''
def api_data(con):
    # Returns API url string
    """Assemble the XML-RPC API URL from the stored api_config row.

    Returns the URL string, or None (implicit) after printing an error
    when the api_config table is empty.
    """
    cur = con.cursor()
    cur.execute("SELECT api_port,api_address,api_username,api_password FROM api_config")
    temp = cur.fetchone()
    cur.close()
    if temp == None or temp == '':
        print 'Data Error with API Table. Blank.'
        return
    else:
        api_port,api_address,api_username,api_password = temp
        return "http://" + str(api_username) + ":" + str(api_password) + "@" + str(api_address)+ ":" + str(api_port) + "/"
def is_int(s):
    """Return True when *s* parses as an integer, False otherwise."""
    try:
        int(s)
    except ValueError:
        return False
    return True
def format_address(address_to_format):
    """Strip the optional 'BM-' prefix (any case) from a Bitmessage address."""
    prefix = address_to_format[:3]
    if prefix.lower() == 'bm-':
        return address_to_format[3:]
    return address_to_format
def is_global_admin(con,usr_bm_address):
    """Return True when *usr_bm_address* matches the single global admin
    address stored in api_config (row id 0)."""
    usr_bm_address = format_address(usr_bm_address)
    cur = con.cursor()
    cur.execute("SELECT global_admin_bm_address FROM api_config WHERE id=?",('0',))
    temp = cur.fetchone()
    cur.close()
    if temp == None or temp == '':
        print 'Data Error with API Table. Blank.'
        return False
    else:
        # Normalise both sides before comparing.
        global_admin_bm_address = str(temp[0])
        global_admin_bm_address = format_address(global_admin_bm_address)
        if usr_bm_address == global_admin_bm_address:
            return True
        else:
            return False
def is_adminPlus(con,ident_address,usr_bm_address):
    """Return True when the user is an 'admin+' for this identity address
    (and not blacklisted)."""
    usr_bm_address = format_address(usr_bm_address)
    ident_address = format_address(ident_address)
    cur = con.cursor()
    cur.execute("SELECT id,ident_bm_address,admin_moderator,whitelisted_blacklisted FROM users_config WHERE usr_bm_address=?",[usr_bm_address,])
    for id_num, ident_bm_address, admin_moderator, whitelisted_blacklisted in cur.fetchall():
        if ident_bm_address != ident_address:
            continue
        if whitelisted_blacklisted == 'blacklisted':
            return False
        return admin_moderator == 'admin+'
    return False
def is_admin(con,ident_address,usr_bm_address):
    """Return True when the user is an 'admin' for this identity address
    (and not blacklisted)."""
    usr_bm_address = format_address(usr_bm_address)
    ident_address = format_address(ident_address)
    cur = con.cursor()
    cur.execute("SELECT id,ident_bm_address,admin_moderator,whitelisted_blacklisted FROM users_config WHERE usr_bm_address=?",[usr_bm_address,])
    for id_num, ident_bm_address, admin_moderator, whitelisted_blacklisted in cur.fetchall():
        if ident_bm_address != ident_address:
            continue
        if whitelisted_blacklisted == 'blacklisted':
            return False
        return admin_moderator == 'admin'
    return False
def is_moderator(con,ident_address,usr_bm_address):
    """Return True when the user is a 'moderator' for this identity address
    (and not blacklisted)."""
    usr_bm_address = format_address(usr_bm_address)
    ident_address = format_address(ident_address)
    cur = con.cursor()
    cur.execute("SELECT id,ident_bm_address,admin_moderator,whitelisted_blacklisted FROM users_config WHERE usr_bm_address=?",[usr_bm_address,])
    for id_num, ident_bm_address, admin_moderator, whitelisted_blacklisted in cur.fetchall():
        if ident_bm_address != ident_address:
            continue
        if whitelisted_blacklisted == 'blacklisted':
            return False
        return admin_moderator == 'moderator'
    return False
def is_whitelisted(con,ident_address,usr_bm_address):
    """Return True when the user is whitelisted for this identity address."""
    usr_bm_address = format_address(usr_bm_address)
    ident_address = format_address(ident_address)
    cur = con.cursor()
    cur.execute("SELECT id,ident_bm_address,whitelisted_blacklisted FROM users_config WHERE usr_bm_address=?",[usr_bm_address,])
    for id_num, ident_bm_address, whitelisted_blacklisted in cur.fetchall():
        if ident_bm_address != ident_address:
            continue
        return whitelisted_blacklisted == 'whitelisted'
    return False
def is_blacklisted(con,ident_address,usr_bm_address):
    """Return True when the user is blacklisted for this identity address."""
    usr_bm_address = format_address(usr_bm_address)
    ident_address = format_address(ident_address)
    cur = con.cursor()
    cur.execute("SELECT id,ident_bm_address,whitelisted_blacklisted FROM users_config WHERE usr_bm_address=?",[usr_bm_address,])
    for id_num, ident_bm_address, whitelisted_blacklisted in cur.fetchall():
        if ident_bm_address != ident_address:
            continue
        return whitelisted_blacklisted == 'blacklisted'
    return False
def help_file():
    """Return the multi-line help text sent to users in reply to --Help."""
    message = """Help File
----------------------
User Commands
--setNick Sets your nickname. Max 32 characters
Moderator Commands
--Help This help file is sent to you as a message
--clearNick Send nickname to remove or send address to remove nickname from.
--addWhitelist Adds a user to the whitelist for this address
--remWhitelist Removes a user from the whitelist for this address
--addBlacklist Adds a user to the blacklist for this address
--remBlacklist Removes a user from the blacklist for this address
--inviteUser Sends an invitation to whitelist users for this address
--addFilter Adds the message body to filter list. Essentially a spam list. Be careful with this
--listFilters Lists all filters and their ID numbers. Use this ID to remove individual filters
--clearFilter Send filter ID in message body. If empty body, all filters are cleared
Admin Commands
--setLabel Sets the label for the mailing list
--setMOTD Sets the message of the day
--addModerator Adds a moderator to this address
--remModerator Removes a moderator from this address
--sendBroadcast Sends whatever message you type out as a broadcast from this addres
--listModerators Returns a list of Moderators and their information
--listUsers Returns a list of all non-Admin/Moderator users and their information
--enable Enables a disabled address
--disable Disable address. Prevents users from using it. Mods and Admins still have access
--setMaxLength Messages exceeding the max length are truncated. Set 0 for no max length
--getStats UTC times. Set period in message "Day"/"Month"/"Year" or "All" for all stats
--getCommandHistory Returns a command history list including who, what, and when
Admin+ Commands
--addAdmin Adds an admin for this address
--remAdmin Removed an admin from this address
--listAdmins Returns a list of Admins and their information
--setBlacklist Anyone can use this address except for Blacklisted users.
--setWhitelist Only Whitelisted users (or Moderators/Admins) can use this address
--setMailingList Makes this address send a broadcast of all messages it receives
--setEcho Makes this address reply to all messages it receives
Owner Commands
--addAdmin+ Adds an admin for this address
--remAdmin+ Removed an admin from this address
--listAdmin+ Returns a list of Admins and their information
--generateNewAddress Returns a new address that can be used. Defaults to Mailing List. Message is Label
--getInfo Lists all information about every address on this server
Send all commands as the subject and all relevant data as the message (such as an address to blacklist)
Example
----------------------
Subject = "--addModerator"
Message = "BM-2DAV89w336ovy6BUJnfVRD5B9qipFbRgmr"
----------------------
Other Information:
* Do note that all commands are logged so do not abuse your privileges."""
    return message
def is_command(text_string):
    """Return True when *text_string* contains a known command token.

    Matching is substring-based and case-insensitive.
    Fix: 'enable'/'disable' were listed without their '--' prefix, so ANY
    message merely containing the word 'enable' (including every
    '--disable' command) was treated as a command.
    """
    command_list = ['--setNick','--setNickname','--Help','--clearNick','--addWhitelist','--remWhitelist','--addBlacklist','--remBlacklist','--inviteUser','--addFilter',
                    '--listFilters','--clearFilter','--setLabel','--setMOTD','--addModerator','--remModerator','--addAdmin','--remAdmin','--listAdmins',
                    '--sendBroadcast','--listModerators','--listUsers','--setMailingList','--setEcho','--setBlacklist','--setWhitelist','--setMaxLength',
                    '--enable','--disable','--generateNewAddress','--getStats','--getCommandHistory','--getInfo','--addAdmin+','--remAdmin+','--listAdmin+']
    # Possible Future Commands
    # Use API to verify address
    # Set address difficulty on creation or after creation
    # Ability to batch whitelist/blacklist/etc addresses? Reason not to, mass confirmation messages
    # Set max difficulty to send message, probably should be hard coded at least
    # TODO, don't allow moderators to perform actions on admins/admin+'s, etc
    for command in command_list:
        if command.lower() in text_string.lower():
            return True
    return False
def getInfo(con):
try:
cur = con.cursor()
date_time = strftime("%Y-%m-%d:%H",gmtime())
message = '%s Server Information' % date_time
message += ln_brk() + 50*"-" + ln_brk(2)
cur.execute("SELECT id,bm_address FROM bm_addresses_config")
addressList = []
while True:
temp = cur.fetchone()
if temp == None or temp == '':
break
else:
addressList.append(temp[1])
for address in addressList:
label,enabled,motd,whitelisted,max_msg_length,echo_address = get_bm_ident_info(con,address)
if enabled == 'enabled':
enabled_result = 'True'
else:
enabled_result = 'False'
if echo_address == 'false':
echo_address_result = 'Mailing List'
else:
echo_address_result = 'Echo Address'
if whitelisted == 'false':
whitelisted_result = 'False'
else:
whitelisted_result = 'True'
if max_msg_length == '0':
max_msg_length_result = 'No Maximum'
else:
max_msg_length_result = str(max_msg_length)
message += 'Address: %s' % str(address) + ln_brk()
message += 'Label: %s' % label + ln_brk()
message += 'Enabled: %s' % enabled_result + ln_brk()
message += 'Type: %s' % echo_address_result + ln_brk()
message += 'Whitelisted: %s' % whitelisted_result + ln_brk()
message += 'Max Length: %s' % max_msg_length + ln_brk()
message += 'MOTD: %s' % motd + ln_brk()
message += ln_brk()
message += listAdminPlus(con,address) + ln_brk(2)
message += listAdmins(con,address) + ln_brk(2)
message += listModerators(con,address) + ln_brk(2)
message += listUsers(con,address)
message += ln_brk(2)
message += getStats(con,address,'')
message += ln_brk(2)
message += getCommandHistory(con,address)
message += ln_brk(2) + 50*"#" + ln_brk(2)
message += '----- Global -----' + ln_brk()
message += listFilters(con)
return message
except Exception as e:
print 'getInfo ERROR: ',e
return ''
def get_bm_ident_info(con,ident_address):
    """Return (label,enabled,motd,whitelisted,max_msg_length,echo_address)
    for a bm address (an identity), inserting a default row for unknown
    addresses.

    Fix: ``cur.close`` was referenced without parentheses, so the cursor
    was never actually closed.
    """
    ident_address = format_address(ident_address)
    cur = con.cursor()
    cur.execute("SELECT label,enabled,motd,whitelisted,max_msg_length,echo_address FROM bm_addresses_config WHERE bm_address=?",[ident_address,])
    temp = cur.fetchone()
    if temp == None or temp == '':
        # First time this address is seen: insert defaults and return them.
        cur.execute("INSERT INTO bm_addresses_config VALUES(?,?,?,?,?,?,?,?)",(None,ident_address,'no label','enabled','','false','0','false'))
        con.commit()
        label,enabled,motd,whitelisted,max_msg_length,echo_address = ('no label','enabled','','false','0','false')
    else:
        label,enabled,motd,whitelisted,max_msg_length,echo_address = temp
    cur.close()
    return label,enabled,motd,whitelisted,max_msg_length,echo_address
def banned_text(con,string_text):
    """Return True when *string_text* contains any banned filter text.

    Fixes: the original fetched only one row BEFORE the loop, so a
    non-matching first filter row spun forever (fetchone was never called
    again); it also compared against str(row-tuple) -- e.g. "(u'spam',)" --
    instead of the column value itself, so filters effectively never matched.
    """
    cur = con.cursor()
    cur.execute("SELECT banned_text FROM filter")
    while True:
        temp = cur.fetchone()
        if temp == None or temp == '':
            break
        filtered_text = str(temp[0])
        if filtered_text in string_text:
            cur.close()
            return True
    cur.close()
    return False
def check_if_new_msg(con):
    """Return True when the Bitmessage inbox holds at least one message."""
    apiurl = api_data(con)
    if not apiurl:
        return
    proxy = xmlrpclib.ServerProxy(apiurl)
    inbox = json.loads(proxy.getAllInboxMessages())
    return len(inbox['inboxMessages']) != 0
def process_new_message(con):
try:
apiurl = api_data(con)
if not apiurl: return
api = xmlrpclib.ServerProxy(apiurl)
inboxMessages = json.loads(api.getAllInboxMessages())
oldesMessage = 0
fromAddress = str(inboxMessages['inboxMessages'][oldesMessage]['fromAddress'])
toAddress = str(inboxMessages['inboxMessages'][oldesMessage]['toAddress'])
message = str(inboxMessages['inboxMessages'][oldesMessage]['message'].decode('base64'))
subject = str(inboxMessages['inboxMessages'][oldesMessage]['subject'].decode('base64'))
# Delete messages
msgId = inboxMessages['inboxMessages'][oldesMessage]['msgid']
api.trashMessage(msgId)
#sys.exit() # Temporary, used for dev
if banned_text(con,subject + " " + message):
print 'subject/message contains banned text'
return None
else:
toAddress = format_address(toAddress)
fromAddress = format_address(fromAddress)
return toAddress,fromAddress,message,subject
except Exception as e:
print 'process_new_message ERROR: ',e
return None
def is_address(bm_address):
    # Thin alias for validAddress(); kept for readability at call sites.
    return validAddress(bm_address)
# TODO: consolidate the near-duplicate role/flag lookup helpers above into one parameterised function.
def nick_taken(con,ident_address,nickname):
    """Return True when *nickname* is already used by some user of the
    given identity address."""
    cur = con.cursor()
    cur.execute("SELECT id,ident_bm_address FROM users_config WHERE nickname=?",[str(nickname),])
    for id_num, ident_bm_address in cur.fetchall():
        if ident_bm_address == ident_address:
            return True
    return False
def generateAddress(con,label=None):
    """Create a brand new random Bitmessage address via the API.

    Returns the address without its 'BM-' prefix, or None on failure.
    """
    apiurl = api_data(con)
    if not apiurl: return
    api = xmlrpclib.ServerProxy(apiurl)
    if label is None: label = 'bmModerator'
    # The API expects the label base64-encoded (Python 2 str codec).
    label = label.encode('base64')
    try:
        generatedAddress = api.createRandomAddress(label)
        generatedAddress = format_address(generatedAddress)
        return generatedAddress
    except Exception as e:
        print 'generateAddress ERROR: ',e
        return None
def initalize_user(con,ident_bm_address,usr_bm_address):
    """Insert a blank users_config row for (identity, user) and return its id.

    NOTE(review): the function name is misspelled ('initalize') but is kept
    as-is because call sites throughout the file use this spelling.
    """
    usr_bm_address = format_address(usr_bm_address)
    ident_bm_address = format_address(ident_bm_address)
    cur = con.cursor()
    cur.execute("INSERT INTO users_config VALUES(?,?,?,?,?,?)",(None,ident_bm_address,usr_bm_address,'','',''))
    con.commit()
    # Re-select to find the id of the row just inserted for this identity.
    cur.execute("SELECT id,ident_bm_address FROM users_config WHERE usr_bm_address=?",[usr_bm_address])
    while True:
        temp = cur.fetchone()
        if temp == None or temp == '':
            print 'initalize_user ERROR'
            break
        else:
            id_num,ident_address = temp
            if ident_address == ident_bm_address:
                return id_num
def setNick(con,ident_address,usr_bm_address,nickname):
    """Set (or change) a user's nickname for the given identity address.

    Returns a human-readable status message to send back to the user.
    Fix: ``con.commit`` was referenced without parentheses, so nickname
    changes were never actually committed to the database.
    """
    usr_bm_address = format_address(usr_bm_address)
    ident_address = format_address(ident_address)
    max_nick_length = 32
    cur = con.cursor()
    if (len(nickname) <= max_nick_length):
        if not nick_taken(con,ident_address,nickname):
            cur.execute("SELECT id,ident_bm_address FROM users_config WHERE usr_bm_address=?",[usr_bm_address])
            while True:
                temp = cur.fetchone()
                if temp == None or temp == '':
                    # Unknown user: create the row first, then set the nickname.
                    id_num = initalize_user(con,ident_address,usr_bm_address)
                    cur.execute("UPDATE users_config SET nickname=? WHERE id=?",[nickname,id_num])
                    con.commit()
                    break
                else:
                    id_num,ident = temp
                    if ident == ident_address:
                        cur.execute("UPDATE users_config SET nickname=? WHERE id=?",[nickname,id_num])
                        con.commit()
                        break
            new_message = 'Nickname successfully changed to (%s).' % str(nickname)
        else:
            new_message = 'Nickname (%s) already taken.' % str(nickname)
    else:
        new_message = 'Nickname too long. Maximum Nickname Size: %s Characters' % str(max_nick_length)
    return new_message
def clearNick(con,ident_address,nick_or_address):
    """Remove a nickname, given either the nickname itself or the user's address.

    Returns a human-readable status message.
    Fix: ``con.commit`` was referenced without parentheses, so removals
    were never actually persisted to the database.
    """
    ident_address = format_address(ident_address)
    cur = con.cursor()
    if is_address(nick_or_address):
        # Caller sent an address: clear the nickname stored for that user.
        nick_or_address = format_address(nick_or_address)
        cur.execute("SELECT id,ident_bm_address,nickname FROM users_config WHERE usr_bm_address=?",[nick_or_address])
        while True:
            temp = cur.fetchone()
            if temp == None or temp == '':
                new_message = 'No nickname found for user (%s).' % nick_or_address
                break
            else:
                id_num,ident_bm_address,nickname = temp
                if ident_bm_address == ident_address:
                    cur.execute("UPDATE users_config SET nickname=? WHERE id=?",['',id_num])
                    con.commit()
                    new_message = 'Nickname (%s) successfully removed for user (%s).' % (nickname,nick_or_address)
                    break
    else:
        # Caller sent a nickname: find the matching user and clear it.
        cur.execute("SELECT id,ident_bm_address,usr_bm_address FROM users_config WHERE nickname=?",[nick_or_address])
        while True:
            temp = cur.fetchone()
            if temp == None or temp == '':
                new_message = 'No users found with nickname (%s).' % nick_or_address
                break
            else:
                id_num,ident_bm_address,usr_bm_address = temp
                if ident_bm_address == ident_address:
                    cur.execute("UPDATE users_config SET nickname=? WHERE id=?",['',id_num])
                    con.commit()
                    new_message = 'Nickname (%s) successfully removed for user (%s).' % (nick_or_address,usr_bm_address)
                    break
    return new_message
def addWhiteList(con,ident_address,usr_bm_address,new_subject):
    """Whitelist a user for this identity address and notify them by message.

    Returns a human-readable status message.
    Fix: ``con.commit`` was referenced without parentheses, so the
    whitelist change was never actually persisted to the database.
    """
    usr_bm_address = format_address(usr_bm_address)
    ident_address = format_address(ident_address)
    cur = con.cursor()
    if is_address(usr_bm_address):
        cur.execute("SELECT id,ident_bm_address,whitelisted_blacklisted FROM users_config WHERE usr_bm_address=?",[usr_bm_address])
        while True:
            temp = cur.fetchone()
            if temp == None or temp == '':
                # Unknown user: create the row first, then mark whitelisted.
                id_num = initalize_user(con,ident_address,usr_bm_address)
                cur.execute("UPDATE users_config SET whitelisted_blacklisted=? WHERE id=?",['whitelisted',id_num])
                con.commit()
                break
            else:
                id_num,ident_bm_address,whitelisted_blacklisted = temp
                if ident_bm_address == ident_address:
                    cur.execute("UPDATE users_config SET whitelisted_blacklisted=? WHERE id=?",['whitelisted',id_num])
                    con.commit()
                    break
        new_message = 'BM-%s successfully whitelisted. An automatic message was sent to alert them.' % usr_bm_address
        tmp_msg = 'This address has been whitelisted for: %s' % ident_address
        send_message(con,usr_bm_address,ident_address,new_subject,tmp_msg)
    else:
        new_message = 'Invalid Bitmessage address: BM-%s' % usr_bm_address
    return new_message
def remWhiteList(con,ident_address,usr_bm_address):
    """Remove a user from this identity address's whitelist.

    Returns a human-readable status message.
    Fix: ``con.commit`` was referenced without parentheses, so the change
    was never actually persisted to the database.
    """
    usr_bm_address = format_address(usr_bm_address)
    ident_address = format_address(ident_address)
    cur = con.cursor()
    if is_address(usr_bm_address):
        cur.execute("SELECT id,ident_bm_address,whitelisted_blacklisted FROM users_config WHERE usr_bm_address=?",[usr_bm_address])
        while True:
            temp = cur.fetchone()
            if temp == None or temp == '':
                # Unknown user: create the row, leaving the flag cleared.
                id_num = initalize_user(con,ident_address,usr_bm_address)
                cur.execute("UPDATE users_config SET whitelisted_blacklisted=? WHERE id=?",['',id_num])
                con.commit()
                break
            else:
                id_num,ident_bm_address,whitelisted_blacklisted = temp
                if ident_bm_address == ident_address:
                    cur.execute("UPDATE users_config SET whitelisted_blacklisted=? WHERE id=?",['',id_num])
                    con.commit()
                    break
        new_message = 'BM-%s successfully removed from whitelist.' % usr_bm_address
    else:
        new_message = 'Invalid Bitmessage address: BM-%s' % usr_bm_address
    return new_message
def addBlackList(con,ident_address,usr_bm_address):
    """Blacklist usr_bm_address for the identity ident_address.

    Returns a human-readable status string for the caller to relay.
    """
    usr_bm_address = format_address(usr_bm_address)
    ident_address = format_address(ident_address)
    cur = con.cursor()
    if is_address(usr_bm_address):
        cur.execute("SELECT id,ident_bm_address,whitelisted_blacklisted FROM users_config WHERE usr_bm_address=?",[usr_bm_address])
        while True:
            temp = cur.fetchone()
            if temp == None or temp == '':
                # Unknown user: create the row, then flag it blacklisted.
                id_num = initalize_user(con,ident_address,usr_bm_address)
                cur.execute("UPDATE users_config SET whitelisted_blacklisted=? WHERE id=?",['blacklisted',id_num])
                con.commit()  # Fix: was `con.commit` (attribute access) -- change was never committed
                break
            else:
                id_num,ident_bm_address,whitelisted_blacklisted = temp
                if ident_bm_address == ident_address:
                    cur.execute("UPDATE users_config SET whitelisted_blacklisted=? WHERE id=?",['blacklisted',id_num])
                    con.commit()  # Fix: missing call parentheses
                    break
        new_message = 'BM-%s successfully blacklisted.' % usr_bm_address
    else:
        new_message = 'Invalid Bitmessage address: BM-%s' % usr_bm_address
    return new_message
def remBlackList(con,ident_address,usr_bm_address):
    """Clear the blacklist flag for usr_bm_address under ident_address.

    Returns a human-readable status string for the caller to relay.
    """
    usr_bm_address = format_address(usr_bm_address)
    ident_address = format_address(ident_address)
    cur = con.cursor()
    if is_address(usr_bm_address):
        cur.execute("SELECT id,ident_bm_address,whitelisted_blacklisted FROM users_config WHERE usr_bm_address=?",[usr_bm_address])
        while True:
            temp = cur.fetchone()
            if temp == None or temp == '':
                # Unknown user: create the row, then leave the flag cleared.
                id_num = initalize_user(con,ident_address,usr_bm_address)
                cur.execute("UPDATE users_config SET whitelisted_blacklisted=? WHERE id=?",['',id_num])
                con.commit()  # Fix: was `con.commit` (attribute access) -- change was never committed
                break
            else:
                id_num,ident_bm_address,whitelisted_blacklisted = temp
                if ident_bm_address == ident_address:
                    cur.execute("UPDATE users_config SET whitelisted_blacklisted=? WHERE id=?",['',id_num])
                    con.commit()  # Fix: missing call parentheses
                    break
        new_message = 'BM-%s successfully removed from blacklist.' % usr_bm_address
    else:
        new_message = 'Invalid Bitmessage address: BM-%s' % usr_bm_address
    return new_message
def inviteUser(con,ident_address,invitee_bm_address,usr_bm_address,new_subject):
    """Mark usr_bm_address as 'invited' to ident_address and send an invitation.

    invitee_bm_address is the address of the user issuing the invitation
    (named in the invitation text). Returns a human-readable status string.
    """
    usr_bm_address = format_address(usr_bm_address)
    ident_address = format_address(ident_address)
    cur = con.cursor()
    if is_address(usr_bm_address):
        cur.execute("SELECT id,ident_bm_address,whitelisted_blacklisted FROM users_config WHERE usr_bm_address=?",[usr_bm_address])
        while True:
            temp = cur.fetchone()
            if temp == None or temp == '':
                # Unknown user: create the row, then flag it invited.
                id_num = initalize_user(con,ident_address,usr_bm_address)
                cur.execute("UPDATE users_config SET whitelisted_blacklisted=? WHERE id=?",['invited',id_num])
                con.commit()  # Fix: was `con.commit` (attribute access) -- change was never committed
                break
            else:
                id_num,ident_bm_address,whitelisted_blacklisted = temp
                if ident_bm_address == ident_address:
                    cur.execute("UPDATE users_config SET whitelisted_blacklisted=? WHERE id=?",['invited',id_num])
                    con.commit()  # Fix: missing call parentheses
                    break
        new_message = 'BM-%s successfully invited to join this address.' % usr_bm_address
        tmp_msg = 'This address has been invited by BM-%s to join: BM-%s. Respond with "Accept" as the subject to accept this invitation. If you have not already, be sure to subscribe to this address.' % (invitee_bm_address,ident_address)
        send_message(con,usr_bm_address,ident_address,new_subject,tmp_msg)
    else:
        new_message = 'Invalid Bitmessage address: BM-%s' % usr_bm_address
    return new_message
def addModerator(con,ident_address,invitee_bm_address,usr_bm_address,new_subject):
    """Grant usr_bm_address the 'moderator' role for ident_address and notify them.

    invitee_bm_address is the address of the user granting the role.
    Returns a human-readable status string.
    """
    usr_bm_address = format_address(usr_bm_address)
    ident_address = format_address(ident_address)
    cur = con.cursor()
    if is_address(usr_bm_address):
        cur.execute("SELECT id,ident_bm_address,admin_moderator FROM users_config WHERE usr_bm_address=?",[usr_bm_address])
        while True:
            temp = cur.fetchone()
            if temp == None or temp == '':
                # Unknown user: create the row, then grant the role.
                id_num = initalize_user(con,ident_address,usr_bm_address)
                cur.execute("UPDATE users_config SET admin_moderator=? WHERE id=?",['moderator',id_num])
                con.commit()  # Fix: was `con.commit` (attribute access) -- change was never committed
                break
            else:
                id_num,ident_bm_address,admin_moderator = temp
                if ident_bm_address == ident_address:
                    cur.execute("UPDATE users_config SET admin_moderator=? WHERE id=?",['moderator',id_num])
                    con.commit()  # Fix: missing call parentheses
                    break
        new_message = 'BM-%s successfully added to moderators. A notice was automatically sent to notify them.' % usr_bm_address
        tmp_msg = 'This address has been added to the Moderator group by BM-%s for: BM-%s. Reply with the subject "--Help" for a list of commands.' % (invitee_bm_address,ident_address)
        send_message(con,usr_bm_address,ident_address,new_subject,tmp_msg)
    else:
        new_message = 'Invalid Bitmessage address: BM-%s' % usr_bm_address
    return new_message
def addAdmin(con,ident_address,invitee_bm_address,usr_bm_address,new_subject):
    """Grant usr_bm_address the 'admin' role for ident_address and notify them.

    invitee_bm_address is the address of the user granting the role.
    Returns a human-readable status string.
    """
    usr_bm_address = format_address(usr_bm_address)
    ident_address = format_address(ident_address)
    cur = con.cursor()
    if is_address(usr_bm_address):
        cur.execute("SELECT id,ident_bm_address,admin_moderator FROM users_config WHERE usr_bm_address=?",[usr_bm_address])
        while True:
            temp = cur.fetchone()
            if temp == None or temp == '':
                # Unknown user: create the row, then grant the role.
                id_num = initalize_user(con,ident_address,usr_bm_address)
                cur.execute("UPDATE users_config SET admin_moderator=? WHERE id=?",['admin',id_num])
                con.commit()  # Fix: was `con.commit` (attribute access) -- change was never committed
                break
            else:
                id_num,ident_bm_address,admin_moderator = temp
                if ident_bm_address == ident_address:
                    cur.execute("UPDATE users_config SET admin_moderator=? WHERE id=?",['admin',id_num])
                    con.commit()  # Fix: missing call parentheses
                    break
        new_message = 'BM-%s successfully added to admins. A notice was automatically sent to notify them.' % usr_bm_address
        tmp_msg = 'This address has been added to the Admin group by BM-%s for: BM-%s. Reply with the subject "--Help" for a list of commands.' % (invitee_bm_address,ident_address)
        send_message(con,usr_bm_address,ident_address,new_subject,tmp_msg)
    else:
        # Fix: original referenced undefined name `bm_address` (NameError)
        new_message = 'Invalid Bitmessage address: BM-%s' % usr_bm_address
    return new_message
def addAdminPlus(con,ident_address,invitee_bm_address,usr_bm_address,new_subject):
    """Grant usr_bm_address the 'admin+' role for ident_address and notify them.

    invitee_bm_address is the address of the user granting the role.
    Returns a human-readable status string.
    """
    usr_bm_address = format_address(usr_bm_address)
    ident_address = format_address(ident_address)
    cur = con.cursor()
    if is_address(usr_bm_address):
        cur.execute("SELECT id,ident_bm_address,admin_moderator FROM users_config WHERE usr_bm_address=?",[usr_bm_address])
        while True:
            temp = cur.fetchone()
            if temp == None or temp == '':
                # Unknown user: create the row, then grant the role.
                id_num = initalize_user(con,ident_address,usr_bm_address)
                cur.execute("UPDATE users_config SET admin_moderator=? WHERE id=?",['admin+',id_num])
                con.commit()  # Fix: was `con.commit` (attribute access) -- change was never committed
                break
            else:
                id_num,ident_bm_address,admin_moderator = temp
                if ident_bm_address == ident_address:
                    cur.execute("UPDATE users_config SET admin_moderator=? WHERE id=?",['admin+',id_num])
                    con.commit()  # Fix: missing call parentheses
                    break
        new_message = 'BM-%s successfully added to Admin+. A notice was automatically sent to notify them.' % usr_bm_address
        tmp_msg = 'This address has been added to the Admin+ group by BM-%s for: BM-%s. Reply with the subject "--Help" for a list of commands.' % (invitee_bm_address,ident_address)
        send_message(con,usr_bm_address,ident_address,new_subject,tmp_msg)
    else:
        # Fix: original referenced undefined name `bm_address` (NameError)
        new_message = 'Invalid Bitmessage address: BM-%s' % usr_bm_address
    return new_message
# Used to remove privileges for moderators/admins/admin+s
# Used to remove privileges for moderators/admins/admin+s
def remPrivilege(con,ident_address,usr_bm_address):
    """Strip any moderator/admin/admin+ role from usr_bm_address under ident_address.

    Returns a human-readable status string.
    """
    usr_bm_address = format_address(usr_bm_address)
    ident_address = format_address(ident_address)
    cur = con.cursor()
    if is_address(usr_bm_address):
        cur.execute("SELECT id,ident_bm_address,admin_moderator FROM users_config WHERE usr_bm_address=?",[usr_bm_address])
        while True:
            temp = cur.fetchone()
            if temp == None or temp == '':
                # Unknown user: create the row with no role.
                id_num = initalize_user(con,ident_address,usr_bm_address)
                cur.execute("UPDATE users_config SET admin_moderator=? WHERE id=?",['',id_num])
                con.commit()  # Fix: was `con.commit` (attribute access) -- change was never committed
                break
            else:
                id_num,ident_bm_address,admin_moderator = temp
                if ident_bm_address == ident_address:
                    cur.execute("UPDATE users_config SET admin_moderator=? WHERE id=?",['',id_num])
                    con.commit()  # Fix: missing call parentheses
                    break
        # Fix: original referenced undefined name `bm_address` (NameError) in both messages
        new_message = 'Successfully removed privileges from address: BM-%s' % usr_bm_address
    else:
        new_message = 'Invalid Bitmessage address: BM-%s' % usr_bm_address
    return new_message
def ln_brk(how_many=1):
    """Return *how_many* newline characters (argument is coerced via int())."""
    return int(how_many) * "\n"
def listAdminPlus(con,ident_address):
try:
ident_address = format_address(ident_address)
new_message = '----- List of Administrators -----'
cur = con.cursor()
cur.execute("SELECT usr_bm_address,nickname,admin_moderator,whitelisted_blacklisted FROM users_config WHERE ident_bm_address=?",[ident_address,])
while True:
temp = cur.fetchone()
if temp == None or temp == '':
new_message += ln_brk() + '----- End -----'
break
else:
usr_bm_address,nickname,admin_moderator,whitelisted_blacklisted = temp
if admin_moderator == 'admin+':
whitelisted = 'False'
blacklisted = 'False'
if whitelisted_blacklisted == 'blacklisted':
blacklisted = 'True'
elif whitelisted_blacklisted == 'whitelisted':
whitelisted = 'True'
new_message += ln_brk() + 'BM-%s Whitelisted:%s Blacklisted:%s Nickname:%s' % (usr_bm_address,whitelisted,blacklisted,nickname)
return new_message
except Exception as e:
print 'listAdmin+ ERROR: ',e
return ''
def listAdmins(con,ident_address):
try:
ident_address = format_address(ident_address)
new_message = '----- List of Administrators -----'
cur = con.cursor()
cur.execute("SELECT usr_bm_address,nickname,admin_moderator,whitelisted_blacklisted FROM users_config WHERE ident_bm_address=?",[ident_address,])
while True:
temp = cur.fetchone()
if temp == None or temp == '':
new_message += ln_brk() + '----- End -----'
break
else:
usr_bm_address,nickname,admin_moderator,whitelisted_blacklisted = temp
if admin_moderator == 'admin':
whitelisted = 'False'
blacklisted = 'False'
if whitelisted_blacklisted == 'blacklisted':
blacklisted = 'True'
elif whitelisted_blacklisted == 'whitelisted':
whitelisted = 'True'
new_message += ln_brk() + 'BM-%s Whitelisted:%s Blacklisted:%s Nickname:%s' % (usr_bm_address,whitelisted,blacklisted,nickname)
return new_message
except Exception as e:
print 'listAdmins ERROR: ',e
return ''
def listModerators(con,ident_address):
try:
ident_address = format_address(ident_address)
new_message = '----- List of Moderators -----'
cur = con.cursor()
cur.execute("SELECT usr_bm_address,nickname,admin_moderator,whitelisted_blacklisted FROM users_config WHERE ident_bm_address=?",[ident_address,])
while True:
temp = cur.fetchone()
if temp == None or temp == '':
new_message += ln_brk() + '----- End -----'
break
else:
usr_bm_address,nickname,admin_moderator,whitelisted_blacklisted = temp
if admin_moderator == 'moderator':
whitelisted = 'False'
blacklisted = 'False'
if whitelisted_blacklisted == 'blacklisted':
blacklisted = 'True'
elif whitelisted_blacklisted == 'whitelisted':
whitelisted = 'True'
new_message += ln_brk() + 'BM-%s Whitelisted:%s Blacklisted:%s Nickname:%s' % (usr_bm_address,whitelisted,blacklisted,nickname)
return new_message
except Exception as e:
print 'listModerators ERROR: ',e
return ''
def listUsers(con,ident_address):
try:
ident_address = format_address(ident_address)
new_message = '----- List of Users -----'
cur = con.cursor()
cur.execute("SELECT usr_bm_address,nickname,admin_moderator,whitelisted_blacklisted FROM users_config WHERE ident_bm_address=?",[ident_address,])
while True:
temp = cur.fetchone()
if temp == None or temp == '':
new_message += ln_brk() + '----- End -----'
break
else:
usr_bm_address,nickname,admin_moderator,whitelisted_blacklisted = temp
# List everything except admins or moderators, global user (owner) is in a different table.
if admin_moderator != 'admin' and admin_moderator != 'moderator':
whitelisted = 'False'
blacklisted = 'False'
if whitelisted_blacklisted == 'blacklisted':
blacklisted = 'True'
elif whitelisted_blacklisted == 'whitelisted':
whitelisted = 'True'
new_message += ln_brk() + 'BM-%s Whitelisted:%s Blacklisted:%s Nickname:%s' % (usr_bm_address,whitelisted,blacklisted,nickname)
return new_message
except Exception as e:
print 'listUsers ERROR: ',e
return ''
def listFilters(con):
    """Return a formatted report of every banned-text filter in the database.

    Only a 64-character snippet of each filter is shown, so the report
    cannot itself trip the filters it describes.
    """
    cur = con.cursor()
    report = '----- Filter List -----' + ln_brk()
    cur.execute('SELECT id,banned_text FROM filter')
    while True:
        row = cur.fetchone()
        if row == None or row == '':
            report += '----- End -----'
            break
        filter_id,banned_text = row
        report += 'Filter ID: %s' % filter_id + ln_brk()
        report += 'Filter Length: %s characters' % str(len(banned_text)) + ln_brk()
        report += 'Filter Snippet [%s...]' % str(banned_text)[:64] + ln_brk()
        report += ln_brk(2)
    return report
def getStats(con,ident_address,time_period):
    """Summarize sent message/broadcast counts for ident_address.

    time_period is 'day', 'month' or 'year' (matched as a date prefix of
    stats.date_day); anything else means 'forever' (no filtering).
    Returns the formatted report string.
    """
    cur = con.cursor()
    time_period = time_period.strip()
    time_period = time_period.lower()
    if time_period == 'day':
        date_time = strftime("%Y-%m-%d",gmtime())
    elif time_period == 'month':
        date_time = strftime("%Y-%m",gmtime())
    elif time_period == 'year':
        date_time = strftime("%Y",gmtime())
    else:
        date_time = 'forever'
    total_messages = 0
    total_broadcasts = 0
    new_message = '----- Message and Broadcast Statistics for %s UTC/GMT -----' % date_time
    cur.execute('SELECT date_day,num_sent_broadcasts,num_sent_messages FROM stats WHERE bm_address=?' ,[ident_address,])
    while True:
        temp = cur.fetchone()
        if temp == None or temp == '':
            new_message += ln_brk() + '----- Total Messages:%s | Total Broadcasts:%s -----' % (str(total_messages),str(total_broadcasts))
            break
        else:
            date_day,num_sent_broadcasts,num_sent_messages = temp
            # Prefix match implements day/month/year filtering; 'forever' takes all rows.
            if (str(date_day[:len(date_time)]) == date_time) or (date_time == 'forever'):
                total_messages += num_sent_messages
                total_broadcasts += num_sent_broadcasts
                # Fix: the per-day line had the two counts swapped (broadcasts were
                # shown under 'Messages' and vice versa, disagreeing with the totals).
                new_message += ln_brk() + '%s | Messages:%s | Broadcasts:%s' % (str(date_day),str(num_sent_messages),str(num_sent_broadcasts))
    return new_message
def getCommandHistory(con,ident_address):
    """Return a report of every logged command issued to ident_address."""
    cur = con.cursor()
    report = '----- Command History -----'
    cur.execute('SELECT usr_bm_address,date_time,command,message_snippet FROM command_history WHERE ident_bm_address=?' ,[ident_address,])
    while True:
        row = cur.fetchone()
        if row == None or row == '':
            report += ln_brk() + '----- End -----'
            break
        usr_bm_address,date_time,command,message_snippet = row
        report += ln_brk() + 'BM-%s | %s | Command:%s | Message Snippet:%s' % (str(usr_bm_address),str(date_time),str(command),str(message_snippet))
    return report
def perform_command(con,ident_address,usr_bm_address,message,subject):
usr_bm_address = format_address(usr_bm_address)
ident_address = format_address(ident_address)
try:
cur = con.cursor()
command = (subject.lower()).strip()
message = str(message)
subject = str(subject)
if is_global_admin(con,usr_bm_address):
usr_access_level = 4
elif is_adminPlus(con,ident_address,usr_bm_address):
usr_access_level = 3
elif is_admin(con,ident_address,usr_bm_address):
usr_access_level = 2
elif is_moderator(con,ident_address,usr_bm_address):
usr_access_level = 1
else:
# Everyone has level 0 access
usr_access_level = 0
# Log command
date_time = strftime("%Y-%m-%d:%H",gmtime())
message_snippet = message.strip()
message_snippet = message_snippet[:64] #only take the first X number of characters
cur.execute('INSERT INTO command_history VALUES(?,?,?,?,?,?)', [None,ident_address,usr_bm_address,date_time,command,message_snippet])
con.commit()
new_subject = '[BM-MODERATOR] ' + str(subject)[:16]
new_message = ''
cmd_failed = False
if (command == '--setnick' or command == '--setnickname') and usr_access_level >= 0: #Anyone can do this
new_message = setNick(con,ident_address,usr_bm_address,message)
elif command == '--help' and usr_access_level > 0:
if is_global_admin(con,usr_bm_address):
new_message = 'Your Access Level is: Owner'
new_message += ln_brk()
elif is_adminPlus(con,ident_address,usr_bm_address):
new_message = 'Your Access Level is: Admin+'
elif is_admin(con,ident_address,usr_bm_address):
new_message = 'Your Access Level is: Admin'
elif is_moderator(con,ident_address,usr_bm_address):
new_message = 'Your Access Level is: Moderator'
else:
new_message = 'Your Access Level is: User'
new_message += ln_brk() + help_file()
elif command == '--clearnick' and usr_access_level > 0:
new_message = clearNick(con,ident_address,message)
elif command == '--addwhitelist' and usr_access_level > 0:
new_message = addWhiteList(con,ident_address,message,new_subject)
elif command == '--remwhitelist' and usr_access_level > 0:
new_message = remWhiteList(con,ident_address,message)
elif command == '--addblacklist' and usr_access_level > 0:
new_message = addBlackList(con,ident_address,message)
elif command == '--remblacklist' and usr_access_level > 0:
new_message = remBlackList(con,ident_address,message)
elif command == '--inviteuser' and usr_access_level > 0:
new_message = inviteUser(con,ident_address,message,new_subject)
elif command == '--addfilter' and usr_access_level > 0:
cur.execute('INSERT INTO filter VALUES(?,?)', [None,str(message)])
new_message = 'Message added to filter list. Any message, to any address on this server, with this text in it will be deleted and no other actions taken.'
elif command == '--listfilters' and usr_access_level > 0:
new_message = listFilters(con)
elif command == '--clearfilter' and usr_access_level > 0:
tmp_msg = str(message).lower()
tmp_msg = tmp_msg.strip()
if is_int(tmp_msg):
try:
cur.execute("DELETE FROM filter WHERE id=?",[tmp_msg])
except:
new_message = 'Removing filter (%s) failed. Are you sure you chose the correct filter number?'
else:
new_message = 'Filter (%s) successfully removed.'
elif tmp_msg == 'all':
cur.execute("DROP TABLE IF EXISTS filter")
cur.execute("CREATE TABLE filter(id INTEGER PRIMARY KEY, banned_text TEXT)")
else:
new_message = 'Invalid filter ID: %s' % tmp_msg
elif command == '--setlabel' and usr_access_level > 1:
tmp_label = str(message)
cur.execute("UPDATE bm_addresses_config SET label=? WHERE bm_address=?",[tmp_label,ident_address])
new_message = 'Address Label successfully set to [%s].' % tmp_label
elif command == '--setmotd' and usr_access_level > 1:
tmp_motd = str(message)
cur.execute("UPDATE bm_addresses_config SET motd=? WHERE bm_address=?",[tmp_motd,ident_address])
new_message = 'Message Of The Day successfully set to (%s).' % tmp_motd
elif command == '--addmoderator' and usr_access_level > 1:
new_message = addModerator(con,ident_address,usr_bm_address,message,new_subject)
elif command == '--remmoderator' and usr_access_level > 1:
new_message = remPrivilege(con,ident_address,message)
elif command == '--listmoderators' and usr_access_level > 1:
new_message = listModerators(con,ident_address)
elif command == '--addadmin' and usr_access_level > 2:
new_message = addAdmin(con,ident_address,usr_bm_address,message,new_subject)
elif command == '--remadmin' and usr_access_level > 2:
new_message = remPrivilege(con,ident_address,message)
elif command == '--listadmins' and usr_access_level > 2:
new_message = listAdmins(con,ident_address)
elif command == '--addadmin+' and usr_access_level > 3:
new_message = addAdminPlus(con,ident_address,usr_bm_address,message,new_subject)
elif command == '--remadmin+' and usr_access_level > 3:
new_message = remPrivilege(con,ident_address,message)
elif command == '--listadmin+' and usr_access_level > 3:
new_message = listAdminPlus(con,ident_address)
elif command == '--sendbroadcast' and usr_access_level > 1:
send_broadcast(con,ident_address,new_subject,message)
elif command == '--listusers' and usr_access_level > 1:
new_message = listUsers(con,ident_address)
elif command == '--setmailinglist' and usr_access_level > 2:
cur.execute("UPDATE bm_addresses_config SET echo_address=? WHERE bm_address=?",['false',ident_address])
new_message = 'Address set as a Mailing List. It will now broadcast messages that are sent to it.'
elif command == '--setecho' and usr_access_level > 2:
cur.execute("UPDATE bm_addresses_config SET echo_address=? WHERE bm_address=?",['true',ident_address])
new_message = 'Address set as an Echo Address. It will now reply to all messages sent to it.'
elif command == '--setblacklist' and usr_access_level > 2:
cur.execute("UPDATE bm_addresses_config SET whitelisted=? WHERE bm_address=?",['false',ident_address])
new_message = 'Address Blacklisted. Anyone that has not been Blacklisted can use this address.'
elif command == '--setwhitelist' and usr_access_level > 2:
cur.execute("UPDATE bm_addresses_config SET whitelisted=? WHERE bm_address=?",['true',ident_address])
new_message = 'Address Whitelisted. Only Whitelisted users will be able to use this address. Use "--inviteUser" to invite new users.'
elif command == '--setmaxlength' and usr_access_level > 1:
message = message.lower()
message = message.strip()
if is_int(message):
cur.execute("UPDATE bm_addresses_config SET max_msg_length=? WHERE bm_address=?",[int(message),ident_address])
new_message = 'Maximum message length successfully changed to %s characters. Messages longer than this will be truncated.' % message
else:
cmd_failed = True
elif command == '--enable' and usr_access_level > 1:
cur.execute("UPDATE bm_addresses_config SET enabled=? WHERE bm_address=?",['true',ident_address])
new_message = 'Address enabled.'
elif command == '--disable' and usr_access_level > 1:
cur.execute("UPDATE bm_addresses_config SET enabled=? WHERE bm_address=?",['false',ident_address])
new_message = 'Address disabled. NOTE: Admins and Moderators will still be able to perform commands.'
elif command == '--generatenewaddress' and usr_access_level > 2:
if message != '':
if len(message) <= 32:
tmp_label = str(message)
new_address = generateAddress(con,tmp_label)
new_address = new_address[3:]
new_message = 'Address (BM-%s) successfully generated with Label (%s)' % (new_address,tmp_label)
else:
new_address = generateAddress(con)
new_address = new_address[3:]
tmp_label = 'no label'
new_message = 'Label too long (Max 32). Address (BM-%s) successfully generated with default Label (%s)' % (new_address,tmp_label)
else:
new_address = generateAddress(con)
new_address = new_address[3:]
tmp_label = 'no label'
new_message = 'No Label specified. Address (BM-%s) successfully generated with default Label (%s)' % (new_address,tmp_label)
# Initalize address in database
throwAway = get_bm_ident_info(con,ident_address)
cur.execute("UPDATE bm_addresses_config SET label=? WHERE bm_address=?",[tmp_label,ident_address])
con.commit()
throwAway = addAdmin(con,ident_address,usr_bm_address,message,new_subject)
elif command == '--getstats' and usr_access_level > 1:
new_message = getStats(con,ident_address,message)
elif command == '--getcommandhistory' and usr_access_level > 1:
new_message = getCommandHistory(con,ident_address)
elif command == '--getinfo' and usr_access_level > 2:
new_message = getInfo(con)
elif usr_access_level > 0:
new_message = 'Unknown command or insufficent privileges: %s' % str(subject)
# Note: user with access level 0 will not get a reply. This prevents a DOS attack vector.
con.commit()
if cmd_failed:
new_message = 'Command failed. (%s) (%s)' %(subject,message)
if new_message != '':
send_message(con,usr_bm_address,ident_address,new_subject,new_message)
except Exception as e:
print 'perform_command ERROR: ',e
def echo(con,myAddress,replyAddress,message,subject):
    """Reply to *replyAddress*, echoing *message* back with a receipt header.

    Reads the receiving identity's config (motd, max_msg_length), truncates
    oversize subjects/messages, and sends the echo via send_message().
    """
    try:
        apiurl = api_data(con)
        if not apiurl: return
        # NOTE(review): this proxy is created but never used here -- send_message()
        # builds its own connection. Confirm before removing.
        api = xmlrpclib.ServerProxy(apiurl)
        temp = get_bm_ident_info(con, myAddress)
        if temp == None:
            print 'echo error, no data'
            return None
        label,enabled,motd,whitelisted,max_msg_length,echo_address = temp
        subject = subject.lstrip() # Removes prefix white spaces
        if (len(subject) > 32): # Truncates the subject if it is too long
            subject = (subject[:32] + '... Truncated')
        #if (str(subject[:len(label)+1]) != '%s:' % label):
        #    subject = '%s: %s'% (label,subject) #only adds prefix if not already there
        # A max_msg_length of 0 disables truncation entirely.
        if (len(message) > int(max_msg_length)) and (str(max_msg_length) != '0'): # Truncates the message if it is too long
            message = (message[:int(max_msg_length)] + '... Truncated to %s characters.\n' % max_msg_length)
        echoMessage = ('Message successfully received at ' + strftime("%Y-%m-%d %H:%M:%S",gmtime()) + ' UTC/GMT.\n' + '-------------------------------------------------------------------------------\n' + message + '\n\n\n' + str(motd))
        send_message(con,replyAddress,myAddress,subject,echoMessage)
    except Exception as e:
        print 'echo ERROR: ',e
def mailing_list(con,myAddress,replyAddress,message,subject):
    """Re-broadcast a received message to this identity's subscribers.

    Normalizes the subject ('Re:' stripping, existing [label] prefix removal,
    truncation, then a fresh '[label] ' prefix), truncates oversize messages,
    resolves the sender's nickname, and sends the result as a broadcast
    from myAddress.
    """
    try:
        cur = con.cursor()
        temp = get_bm_ident_info(con, myAddress) # Get info about the address it was sent to(our address)
        if temp == None:
            print 'mailing_list error, no data'
            return None
        label,enabled,motd,whitelisted,max_msg_length,echo_address = temp
        # Only label,motd,and max_msg_length used here
        max_msg_length = int(max_msg_length)
        subject = subject.lstrip() # Removes left spaces
        if (len(subject) > 64): # Truncates the subject if it is too long
            subject = (subject[:64] + '...')
        if (str((subject[:3]).lower()) == 're:'):
            subject = subject[3:] # Removes re: or RE: from subject
            subject = subject.lstrip() # Removes left spaces
        if (str(subject[:len(label)+2]) == '[%s]'% label):
            subject = subject[len(label)+2:] # Removes label
            subject = subject.lstrip() # Removes left spaces
        subject = '[%s] %s'% (label,subject)
        # A max_msg_length of 0 disables truncation entirely.
        if (len(message) > max_msg_length) and (str(max_msg_length) != '0'): # Truncates the message if it is too long
            message = (message[:max_msg_length] + '... Truncated to %s characters.\n' % max_msg_length)
        # Get nickname
        cur.execute("SELECT ident_bm_address,nickname FROM users_config WHERE usr_bm_address=?",[replyAddress])
        while True:
            temp = cur.fetchone()
            if temp == None or temp == '':
                # Sender has no row for this identity: fall back to 'anonymous'.
                nickname = 'anonymous'
                break
            else:
                ident_address,nickname = temp
                if ident_address == myAddress:
                    if nickname == None or nickname == '':
                        nickname = 'anonymous'
                    else:
                        nickname = str(nickname)
                    break
        # NOTE(review): the motd is interpolated *between* the header and the body
        # (args are replyAddress, nickname, motd, message) -- confirm this ordering
        # is intended rather than motd-at-the-end as in echo().
        message = strftime("%a, %Y-%m-%d %H:%M:%S UTC",gmtime()) + ' Message ostensibly from BM-%s (%s):\n%s\n\n%s' % (replyAddress,nickname,motd,message)
        send_broadcast(con,myAddress,subject,message) # Build the message and send it
    except Exception as e:
        print 'mailing_list ERROR: ',e
def send_message(con,to_address,from_address,subject,message):
    """Send one Bitmessage through the API and bump the daily message stats.

    Subject and message are base64-encoded as the API expects. Errors are
    printed and swallowed (best-effort delivery).
    """
    try:
        apiurl = api_data(con)
        if not apiurl: return
        api = xmlrpclib.ServerProxy(apiurl) # Connect to BitMessage
        subject = subject.encode('base64') # Encode the subject
        message = message.encode('base64') # Encode the message.
        api.sendMessage(to_address,from_address,subject,message) # Build the message and send it
        # Add to daily stats
        date_day = strftime("%Y-%m-%d",gmtime())
        cur = con.cursor()
        cur.execute('SELECT id,bm_address,num_sent_messages FROM stats WHERE date_day=?' ,[date_day,])
        temp = cur.fetchone()
        if temp == None or temp == '':
            # Inserting new day
            cur.execute('INSERT INTO stats VALUES(?,?,?,?,?)', [None,date_day,from_address,0,1])
        else:
            # NOTE(review): the stats row is looked up by date only; if today's
            # first row belongs to a different address, this send is not counted
            # anywhere -- confirm whether per-address daily rows were intended.
            id_num,bm_address,num_sent_messages = temp
            if bm_address == from_address:
                num_sent_messages += 1
                cur.execute("UPDATE stats SET num_sent_messages=? WHERE id=?", (num_sent_messages,id_num))
        con.commit()
        print 'Message sent'
    except Exception as e:
        print 'send_message ERROR: ',e
def send_broadcast(con,broadcast_address,subject,message):
    """Send one Bitmessage broadcast through the API and bump the daily stats.

    Subject and message are base64-encoded as the API expects. Errors are
    printed and swallowed (best-effort delivery).
    """
    try:
        apiurl = api_data(con)
        if not apiurl: return
        api = xmlrpclib.ServerProxy(apiurl) # Connect to BitMessage
        subject = subject.encode('base64') # Encode the subject
        message = message.encode('base64') # Encode the message.
        api.sendBroadcast(broadcast_address,subject,message) # Build the broadcast and send it
        # Add to daily stats
        date_day = strftime("%Y-%m-%d",gmtime())
        cur = con.cursor()
        cur.execute('SELECT id,bm_address,num_sent_broadcasts FROM stats WHERE date_day=?' ,[date_day,])
        temp = cur.fetchone()
        if temp == None or temp == '':
            # Inserting new day
            cur.execute('INSERT INTO stats VALUES(?,?,?,?,?)', [None,date_day,broadcast_address,1,0])
        else:
            # NOTE(review): same date-only lookup as send_message -- a row for a
            # different address today means this broadcast is not counted.
            id_num,bm_address,num_sent_broadcasts = temp
            if bm_address == broadcast_address:
                num_sent_broadcasts += 1
                cur.execute("UPDATE stats SET num_sent_broadcasts=? WHERE id=?", (num_sent_broadcasts,id_num))
        con.commit()
        print 'Broadcast sent'
    except Exception as e:
        print 'send_broadcast ERROR: ',e
def accept_invite(con,ident_address,usr_bm_address,subject):
    """Whitelist a previously-invited user when their subject contains 'accept'.

    Returns True when an 'invited' row for this identity is found and
    whitelisted, False when the user has no invitation here (or the address
    is malformed); a subject without 'accept' falls through (returns None).
    """
    usr_bm_address = format_address(usr_bm_address)
    ident_address = format_address(ident_address)
    normalized = str(subject).lower().lstrip()
    if 'accept' in normalized:
        cur = con.cursor()
        if is_address(usr_bm_address):
            cur.execute("SELECT id,ident_bm_address,whitelisted_blacklisted FROM users_config WHERE usr_bm_address=?",[usr_bm_address])
            while True:
                row = cur.fetchone()
                if row == None or row == '':
                    return False
                id_num,ident_bm_address,whitelisted_blacklisted = row
                if ident_bm_address == ident_address and whitelisted_blacklisted == 'invited':
                    addWhiteList(con,ident_address,usr_bm_address,'[BM-Moderator]')
                    return True
        else:
            return False
def main_loop():
print 'bmModerator - Starting up main loop in 10 seconds.'
time.sleep(10) # Sleep to allow bitmessage to start up
con = lite.connect(database) #Only connects to database when needed
while True:
try:
# Check if messages in inbox
if check_if_new_msg(con):
print 'Message found. Processing'
temp = process_new_message(con)
if temp == None:
print 'No actions'
pass # Perform no actions
else:
toAddress,fromAddress,message,subject = temp
if is_blacklisted(con,toAddress,fromAddress): # Check if address is blacklisted
print 'Blacklisted User'
pass # Perform no actions
elif is_command(subject): # Check if a command is being attempted
print 'Command discovered: ',str(subject)
throwAway = get_bm_ident_info(con,toAddress)
# Initalize address if not already done. throwAway variable is not used
perform_command(con,toAddress,fromAddress,message,subject) # Performs command actions and sends necessary broadcast/message
else:
print 'Other discovered'
temp2 = get_bm_ident_info(con,toAddress) # Get info about the address it was sent to(our address)
if temp2 != None:
label,enabled,motd,whitelisted,max_msg_length,echo_address = temp2
if accept_invite(con,toAddress,fromAddress,subject):
print 'Accepting invite'
tmp_msg = 'Congratulations, you have been added to this address. You can set your nickname by replying with Subject:"--setNick" and Message:"Your Nickname"' % (usr_bm_address,ident_address)
send_message(con,fromAddress,toAddress,'[BM-MODERATOR]',tmp_msg)
elif (str(enabled).lower() != 'false'):
# Determine permissions of ident address and user address
if (str(whitelisted).lower() != 'true'):
performAction = True
elif (str(whitelisted).lower() == 'true' and is_whitelisted(con,toAddress,fromAddress)):
performAction = True
elif is_global_admin(con,fromAddress):
performAction = True
elif is_adminPlus(con,toAddress,fromAddress):
performAction = True
elif is_admin(con,toAddress,fromAddress):
performAction = True
elif is_moderator(con,toAddress,fromAddress):
performAction = True
else:
performAction = False
if performAction:
if str(echo_address).lower() == 'true':
print 'Echo'
echo(con,toAddress,fromAddress,message,subject)
else:
print 'Mailing List'
mailing_list(con,toAddress,fromAddress,message,subject)
else:
print 'Insufficient Privileges'
print 'Finished with Message.'
# Check again, this time to determine sleep time and whether or not to close the database connection
if check_if_new_msg(con):
print 'sleep 1'
time.sleep(1) # How often to loop when there are messages
else:
time.sleep(15) # How often to run the loop on no msg
except Exception as e:
print 'main_loop ERROR: ',e
print 'sleep 30'
time.sleep(30)
con.close()
def initConfig():
    """Interactive command-line setup for the moderator (Python 2).

    Offers four actions: (I) recreate the databases from scratch,
    (A) store Bitmessage API connection info, (S) set the global
    administrator address, (G) generate a new random identity and
    notify the global admin.

    Relies on module-level names defined elsewhere in this file:
    ``database`` (SQLite path), ``lite`` (sqlite3 module), and the
    helpers ``create_db``, ``is_address``, ``format_address``,
    ``generateAddress``, ``validAddress`` and ``addAdmin``.
    """
    # NOTE(review): 'Initalizing'/'(I)nitalize' below are typos for
    # "Initializing"/"Initialize" in user-facing text; left untouched here.
    print '-Initalizing Moderator-\n'
    print 'Would you like to (I)nitalize the application, update the (A)PI info,'
    print '(S)et the global administrator, or (G)enerate a new random identity?(I/A/S/G)'
    uInput = raw_input('> ')
    if uInput.lower() == 'i':
        # Full (re)initialisation: destroys and recreates all databases.
        print 'Any existing databases will be deleted.'
        print 'Are you sure that you want to continue? (Y/N)'
        uInput = raw_input('> ')
        if uInput.lower() == 'y':
            create_db()
            print 'Databases Created'
        else:
            print '-Aborted-\n'
            return ''
    elif uInput.lower() == 'a':
        # Store API connection settings in the single row (id=0) of api_config.
        con = lite.connect(database)
        cur = con.cursor()
        print "Please enter the following API Information\n"
        api_port = raw_input('API Port> ')
        api_address = raw_input('API Address> ')
        api_username = raw_input('API username> ')
        api_password = raw_input('API Password> ')
        api_port = str(api_port)
        api_address = str(api_address)
        api_username = str(api_username)
        api_password = str(api_password)
        cur.execute("UPDATE api_config SET api_port=? WHERE id=?",(api_port,0))
        cur.execute("UPDATE api_config SET api_address=? WHERE id=?",(api_address,0))
        cur.execute("UPDATE api_config SET api_username=? WHERE id=?",(api_username,0))
        cur.execute("UPDATE api_config SET api_password=? WHERE id=?",(api_password,0))
        print '\nSuccessfully updated API information.'
        print 'Please setup the apinotifypath through Bitmessage if you have not already.'
        print 'Please setup a Global Administrator if you have not already.\n'
        con.commit()
        cur.close()
        con.close()
    elif uInput.lower() == 's':
        # Loop until a syntactically valid Bitmessage address is entered.
        while True:
            # NOTE(review): 'Gloabl' is a typo for "Global" in user-facing text.
            print "Please enter the Gloabl Administrator's Bitmessage Address"
            uInput = raw_input('> ')
            if is_address(uInput):
                bm_address = uInput
                bm_address = format_address(bm_address)
                con = lite.connect(database)
                cur = con.cursor()
                cur.execute("UPDATE api_config SET global_admin_bm_address=? WHERE id=?",(bm_address,0))
                print 'Global Admin successfully changed. This address can perform "Owner" commands.'
                con.commit()
                cur.close()
                con.close()
                break
            else:
                print 'Invalid address. Try again'
    elif uInput.lower() == 'g':
        con = lite.connect(database)
        cur = con.cursor()
        the_address = generateAddress(con)
        if validAddress(the_address):
            # Let's alert the global admin. Find address if it exists
            cur.execute("SELECT global_admin_bm_address FROM api_config WHERE id=?",('0',))
            temp = cur.fetchone()
            if temp == None or temp == '':
                # No admin configured: only report the new address locally.
                print '\nAddress Generated (BM-%s)\n' % the_address
                print 'Global Admin not set. Auto-Notification not sent.'
                pass
            else:
                global_admin_bm_address = str(temp[0])
                addAdmin(con,the_address,'Command_Line_User',global_admin_bm_address,'[BM-Moderator]')
                print '\nAddress Generated (BM-%s) and Global Admin (BM-%s) notified.' % (the_address,global_admin_bm_address)
        else:
            print 'ERROR generating address: ', the_address
        # NOTE(review): this branch closes the cursor but never calls
        # con.close() -- looks like a connection leak; confirm intent.
        cur.close()
    print 'Finished\n'
def main():
    """Dispatch on the first command-line argument (Python 2).

    Intended to be invoked by Bitmessage's apinotifypath hook with one
    of: ``startingUp`` (run the processing loop), ``newMessage`` /
    ``newBroadcast`` (currently no-ops), ``initalize`` (interactive
    setup) or ``apiTest`` (not implemented yet).
    """
    try:
        arg = sys.argv[1]  # IndexError here when no argument was given
        if arg == "startingUp":
            main_loop()
            sys.exit()
        elif arg == "newMessage":
            # TODO, check if process already running, if not, start
            # - This could be used in the event of this process stopping for an unknown reason
            sys.exit()
        elif arg == "newBroadcast":
            sys.exit()
        elif arg == "initalize":
            # NOTE(review): 'initalize' (sic) is the expected CLI argument;
            # renaming it would break existing callers.
            initConfig()
            sys.exit()
        elif arg == "apiTest":
            pass
            # TODO, add apiTest function
        else:
            print 'unknown command (%s)' % arg
            sys.exit() # Not a relevant argument, exit
    except Exception as e:
        # Broad catch keeps the notifier hook from crashing; errors are
        # only printed, never logged or re-raised.
        print e
# Script entry point: run main(), let IOError/SystemExit propagate
# unchanged, and shut down quietly on Ctrl+C.
if __name__ == '__main__':
    try:
        main()
    except (IOError, SystemExit):
        raise
    except KeyboardInterrupt:
        # NOTE(review): 'Crtl+C' is a typo for "Ctrl+C" in this
        # user-facing message; left as-is here.
        print ("Crtl+C Pressed. Shutting down.")
# Generated by Django 2.2.24 on 2021-09-21 10:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean ``title_optional`` flag to ``FeatureType``.

    Defaults to False ('Titre optionnel' stays disabled), so existing
    rows keep the mandatory-title behaviour.
    """

    # Must be applied after the importtask migration of the same app.
    dependencies = [
        ('geocontrib', '0013_importtask'),
    ]

    operations = [
        migrations.AddField(
            model_name='featuretype',
            name='title_optional',
            field=models.BooleanField(default=False, verbose_name='Titre optionnel'),
        ),
    ]
| 428 | 140 |
"""
@author: Federica Gugole
__license__= "LGPL"
"""
import numpy as np
import easyvvuq as uq
import os
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter, NullFormatter
# Global matplotlib styling for all figures produced by this script.
plt.rcParams.update({'font.size': 20})
plt.rcParams['figure.figsize'] = 8,6
"""
*****************
* VVUQ ANALYSES *
*****************
"""
# home directory of this file
HOME = os.path.abspath(os.path.dirname(__file__))
# Reload the campaign
# NOTE(review): hard-coded machine-specific path; the saved campaign
# state file must exist under this directory for the script to run.
workdir = '/export/scratch1/federica/VirsimCampaigns'
campaign = uq.Campaign(state_file = "campaign_state_PO_MC2k.json", work_dir = workdir)
print('========================================================')
print('Reloaded campaign', campaign.campaign_dir.split('/')[-1])
print('========================================================')
# get sampler from my_campaign object
# (private EasyVVUQ attributes are used here; presumably no public
# accessor existed in the EasyVVUQ version this targets -- confirm)
sampler = campaign._active_sampler
# collate output
campaign.collate()
# get full dataset of data
data = campaign.get_collation_result()
#print(data.columns)
# get analysis object
output_columns = campaign._active_app_decoder.output_columns
qmc_analysis = uq.analysis.QMCAnalysis(sampler=sampler, qoi_cols=output_columns)
###
# Problem constants: IC_capacity is the intensive-care capacity threshold
# used in all plots below; the campaign holds n_runs samples over
# n_params uncertain input parameters.
IC_capacity = 109
n_runs = 2000
n_params = 4
# Only the first of the three values returned by _separate_output_values
# is kept; tmp1/tmp2 are discarded each time.
# NOTE(review): the first variable is named pl_intervention_effect_hi but
# is filled from the 'lockdown_effect' column -- presumably the same
# quantity under two names; confirm against the campaign definition.
pl_intervention_effect_hi, tmp1, tmp2 = qmc_analysis._separate_output_values(data['lockdown_effect',0], n_params, n_runs)
phase_interval, tmp1, tmp2 = qmc_analysis._separate_output_values(data['phase_interval',0], n_params, n_runs)
uptake, tmp1, tmp2 = qmc_analysis._separate_output_values(data['uptake',0], n_params, n_runs)
IC_prev_avg_max, tmp1, tmp2 = qmc_analysis._separate_output_values(data['IC_prev_avg_max',0], n_params, n_runs)
IC_ex_max, tmp1, tmp2 = qmc_analysis._separate_output_values(data['IC_ex_max',0], n_params, n_runs)
###
# Quartile boundaries of phase_interval: [min, Q1, median, Q3, max].
q_phase_interval = np.quantile(phase_interval,[0, 0.25, 0.5, 0.75, 1])
# Take slabs of data corresponding to the quartiles of phase_interval
# Column k of each *_q array holds the samples whose phase_interval falls
# in quartile k; unused trailing entries stay zero.
# NOTE(review): this assumes at most n_runs/4 samples per quartile; with
# ties at a boundary a quartile could exceed that and raise an
# IndexError -- confirm the sampled values are distinct enough.
pl_intervention_effect_hi_q = np.zeros((int(n_runs/4),4),dtype='float')
uptake_q = np.zeros((int(n_runs/4),4),dtype='float')
IC_prev_avg_max_q = np.zeros((int(n_runs/4),4),dtype='float')
IC_ex_max_q = np.zeros((int(n_runs/4),4),dtype='float')
# Per-quartile fill counters.
cnt0 = 0; cnt1 = 0; cnt2 = 0; cnt3=0
for i in range(n_runs):
    # Half-open bins [q_k, q_{k+1}) except the last, which is closed.
    if (phase_interval[i] >= q_phase_interval[0]) & (phase_interval[i] < q_phase_interval[1]):
        # first quartile
        pl_intervention_effect_hi_q[cnt0,0] = pl_intervention_effect_hi[i]
        uptake_q[cnt0,0] = uptake[i]
        IC_prev_avg_max_q[cnt0,0] = IC_prev_avg_max[i]
        IC_ex_max_q[cnt0,0] = IC_ex_max[i]
        cnt0 += 1
    if (phase_interval[i] >= q_phase_interval[1]) & (phase_interval[i] < q_phase_interval[2]):
        # second quartile
        pl_intervention_effect_hi_q[cnt1,1] = pl_intervention_effect_hi[i]
        uptake_q[cnt1,1] = uptake[i]
        IC_prev_avg_max_q[cnt1,1] = IC_prev_avg_max[i]
        IC_ex_max_q[cnt1,1] = IC_ex_max[i]
        cnt1 += 1
    if (phase_interval[i] >= q_phase_interval[2]) & (phase_interval[i] < q_phase_interval[3]):
        # third quartile
        pl_intervention_effect_hi_q[cnt2,2] = pl_intervention_effect_hi[i]
        uptake_q[cnt2,2] = uptake[i]
        IC_prev_avg_max_q[cnt2,2] = IC_prev_avg_max[i]
        IC_ex_max_q[cnt2,2] = IC_ex_max[i]
        cnt2 += 1
    if (phase_interval[i] >= q_phase_interval[3]) & (phase_interval[i] <= q_phase_interval[4]):
        # fourth quartile
        pl_intervention_effect_hi_q[cnt3,3] = pl_intervention_effect_hi[i]
        uptake_q[cnt3,3] = uptake[i]
        IC_prev_avg_max_q[cnt3,3] = IC_prev_avg_max[i]
        IC_ex_max_q[cnt3,3] = IC_ex_max[i]
        cnt3 += 1
"""
* Heatmap for IC_prev_avg_max
"""
f = plt.figure('heatmap_IC_prev',figsize=[12,12])
ax_0 = f.add_subplot(221, ylabel='Uptake by the population')
im_0 = ax_0.scatter(x=pl_intervention_effect_hi_q[np.where(IC_prev_avg_max_q[:,0] <= IC_capacity),0], \
y=uptake_q[np.where(IC_prev_avg_max_q[:,0] <= IC_capacity),0], c='black')
im_0 = ax_0.scatter(x=pl_intervention_effect_hi_q[np.where(IC_prev_avg_max_q[:,0] > IC_capacity),0], \
y=uptake_q[np.where(IC_prev_avg_max_q[:,0] > IC_capacity),0], c=IC_prev_avg_max_q[np.where(IC_prev_avg_max_q[:,0] > IC_capacity),0], \
cmap='plasma', vmin=np.min(IC_prev_avg_max[np.where(IC_prev_avg_max > IC_capacity)]), vmax=np.max(IC_prev_avg_max))
cbar_0 = f.colorbar(im_0, ax=ax_0)
cbar_0.set_ticks([200, 350, 500, 650])
cbar_0.set_ticklabels(['200', '350', '500', '650'])
ax_0.set_xticks([0.1, 0.2, 0.3, 0.4, 0.5])
ax_0.set_yticks([0.6, 0.8, 1])
ax_1 = f.add_subplot(222)
im_1 = ax_1.scatter(x=pl_intervention_effect_hi_q[np.where(IC_prev_avg_max_q[:,1] <= IC_capacity),1], \
y=uptake_q[np.where(IC_prev_avg_max_q[:,1] <= IC_capacity),1], c='black')
im_1 = ax_1.scatter(x=pl_intervention_effect_hi_q[np.where(IC_prev_avg_max_q[:,1] > IC_capacity),1], \
y=uptake_q[np.where(IC_prev_avg_max_q[:,1] > IC_capacity),1], c=IC_prev_avg_max_q[np.where(IC_prev_avg_max_q[:,1] > IC_capacity),1], \
cmap='plasma', vmin=np.min(IC_prev_avg_max[np.where(IC_prev_avg_max > IC_capacity)]), vmax=np.max(IC_prev_avg_max))
cbar_1 = f.colorbar(im_1, ax=ax_1)
cbar_1.set_ticks([200, 350, 500, 650])
cbar_1.set_ticklabels(['200', '350', '500', '650'])
ax_1.set_xticks([0.1, 0.2, 0.3, 0.4, 0.5])
ax_1.set_yticks([0.6, 0.8, 1])
ax_2 = f.add_subplot(223, xlabel='Relative level of transmission \n where still in lockdown', ylabel='Uptake by the population')
im_2 = ax_2.scatter(x=pl_intervention_effect_hi_q[np.where(IC_prev_avg_max_q[:,2] <= IC_capacity),2], \
y=uptake_q[np.where(IC_prev_avg_max_q[:,2] <= IC_capacity),2], c='black')
im_2 = ax_2.scatter(x=pl_intervention_effect_hi_q[np.where(IC_prev_avg_max_q[:,2] > IC_capacity),2], \
y=uptake_q[np.where(IC_prev_avg_max_q[:,2] > IC_capacity),2], c=IC_prev_avg_max_q[np.where(IC_prev_avg_max_q[:,2] > IC_capacity),2], \
cmap='plasma', vmin=np.min(IC_prev_avg_max[np.where(IC_prev_avg_max > IC_capacity)]), vmax=np.max(IC_prev_avg_max))
cbar_2 = f.colorbar(im_2, ax=ax_2)
cbar_2.set_ticks([200, 350, 500, 650])
cbar_2.set_ticklabels(['200', '350', '500', '650'])
ax_2.set_xticks([0.1, 0.2, 0.3, 0.4, 0.5])
ax_2.set_yticks([0.6, 0.8, 1])
ax_3 = f.add_subplot(224, xlabel='Relative level of transmission \n where still in lockdown')
im_3 = ax_3.scatter(x=pl_intervention_effect_hi_q[np.where(IC_prev_avg_max_q[:,3] <= IC_capacity),3], \
y=uptake_q[np.where(IC_prev_avg_max_q[:,3] <= IC_capacity),3], c='black')
im_3 = ax_3.scatter(x=pl_intervention_effect_hi_q[np.where(IC_prev_avg_max_q[:,3] > IC_capacity),3], \
y=uptake_q[np.where(IC_prev_avg_max_q[:,3] > IC_capacity),3], c=IC_prev_avg_max_q[np.where(IC_prev_avg_max_q[:,3] > IC_capacity),3], \
cmap='plasma', vmin=np.min(IC_prev_avg_max[np.where(IC_prev_avg_max > IC_capacity)]), vmax=np.max(IC_prev_avg_max))
cbar_3 = f.colorbar(im_3, ax=ax_3)
cbar_3.set_ticks([200, 350, 500, 650])
cbar_3.set_ticklabels(['200', '350', '500', '650'])
ax_3.set_xticks([0.1, 0.2, 0.3, 0.4, 0.5])
ax_3.set_yticks([0.6, 0.8, 1])
plt.tight_layout()
f.savefig('figures/Fig7_heatmap_PO_IC_prev.eps')
"""
* Heatmap for IC_ex_max
"""
f = plt.figure('heatmap_IC_ex',figsize=[12,12])
ax_0 = f.add_subplot(221, ylabel='Uptake by the population')
im_0 = ax_0.scatter(x=pl_intervention_effect_hi_q[np.where(IC_ex_max_q[:,0] == 0),0], \
y=uptake_q[np.where(IC_ex_max_q[:,0] == 0),0], c='black')
im_0 = ax_0.scatter(x=pl_intervention_effect_hi_q[np.where(IC_ex_max_q[:,0] > 0),0], \
y=uptake_q[np.where(IC_ex_max_q[:,0] > 0),0], c=IC_ex_max_q[np.where(IC_ex_max_q[:,0] > 0),0], cmap='plasma', \
vmin=np.min(IC_ex_max[np.where(IC_ex_max > 0)]), vmax=np.max(IC_ex_max))
cbar_0 = f.colorbar(im_0, ax=ax_0)
cbar_0.set_ticks([1e4, 2e4, 3e4, 4e4])
cbar_0.set_ticklabels(['10000', '20000', '30000', '40000'])
ax_0.set_xticks([0.2, 0.3, 0.4])
ax_0.set_yticks([0.6, 0.8, 1])
ax_1 = f.add_subplot(222)
im_1 = ax_1.scatter(x=pl_intervention_effect_hi_q[np.where(IC_ex_max_q[:,1] == 0),1], \
y=uptake_q[np.where(IC_ex_max_q[:,1] == 0),1], c='black')
im_1 = ax_1.scatter(x=pl_intervention_effect_hi_q[np.where(IC_ex_max_q[:,1] > 0),1], \
y=uptake_q[np.where(IC_ex_max_q[:,1] > 0),1], c=IC_ex_max_q[np.where(IC_ex_max_q[:,1] > 0),1], cmap='plasma', \
vmin=np.min(IC_ex_max[np.where(IC_ex_max > 0)]), vmax=np.max(IC_ex_max))
cbar_1 = f.colorbar(im_1, ax=ax_1)
cbar_1.set_ticks([1e4, 2e4, 3e4, 4e4])
cbar_1.set_ticklabels(['10000', '20000', '30000', '40000'])
ax_1.set_xticks([0.2, 0.3, 0.4])
ax_1.set_yticks([0.6, 0.8, 1])
ax_2 = f.add_subplot(223, xlabel='Effect of intervention \n where not yet lifted', ylabel='Uptake by the population')
im_2 = ax_2.scatter(x=pl_intervention_effect_hi_q[np.where(IC_ex_max_q[:,2] == 0),2], \
y=uptake_q[np.where(IC_ex_max_q[:,2] == 0),2], c='black')
im_2 = ax_2.scatter(x=pl_intervention_effect_hi_q[np.where(IC_ex_max_q[:,2] > 0),2], \
y=uptake_q[np.where(IC_ex_max_q[:,2] > 0),2], c=IC_ex_max_q[np.where(IC_ex_max_q[:,2] > 0),2], cmap='plasma', \
vmin=np.min(IC_ex_max[np.where(IC_ex_max > 0)]), vmax=np.max(IC_ex_max))
cbar_2 = f.colorbar(im_2, ax=ax_2)
cbar_2.set_ticks([1e4, 2e4, 3e4, 4e4])
cbar_2.set_ticklabels(['10000', '20000', '30000', '40000'])
ax_2.set_xticks([0.2, 0.3, 0.4])
ax_2.set_yticks([0.6, 0.8, 1])
ax_3 = f.add_subplot(224, xlabel='Effect of intervention \n where not yet lifted')
im_3 = ax_3.scatter(x=pl_intervention_effect_hi_q[np.where(IC_ex_max_q[:,3] == 0),3], \
y=uptake_q[np.where(IC_ex_max_q[:,3] == 0),3], c='black')
im_3 = ax_3.scatter(x=pl_intervention_effect_hi_q[np.where(IC_ex_max_q[:,3] > 0),3], \
y=uptake_q[np.where(IC_ex_max_q[:,3] > 0),3], c=IC_ex_max_q[np.where(IC_ex_max_q[:,3] > 0),3], cmap='plasma', \
vmin=np.min(IC_ex_max[np.where(IC_ex_max > 0)]), vmax=np.max(IC_ex_max))
cbar_3 = f.colorbar(im_3, ax=ax_3)
cbar_3.set_ticks([1e4, 2e4, 3e4, 4e4])
cbar_3.set_ticklabels(['10000', '20000', '30000', '40000'])
ax_3.set_xticks([0.2, 0.3, 0.4])
ax_3.set_yticks([0.6, 0.8, 1])
plt.tight_layout()
#f.savefig('figures/heatmap_PO_IC_ex.eps')
"""
* 3D plots *
"""
f = plt.figure('heatmap',figsize=[16,6])
ax_p = f.add_subplot(121, xlabel='pl_intervention_effect_hi', ylabel='phase_interval', zlabel='uptake', projection='3d')
im_p = ax_p.scatter(xs=pl_intervention_effect_hi[np.where(IC_prev_avg_max <= IC_capacity)], \
ys=phase_interval[np.where(IC_prev_avg_max <= IC_capacity)], zs=uptake[np.where(IC_prev_avg_max <= IC_capacity)], \
c='black')
im_p = ax_p.scatter(xs=pl_intervention_effect_hi[np.where(IC_prev_avg_max > IC_capacity)], \
ys=phase_interval[np.where(IC_prev_avg_max > IC_capacity)], zs=uptake[np.where(IC_prev_avg_max > IC_capacity)], \
c=IC_prev_avg_max[np.where(IC_prev_avg_max > IC_capacity)], cmap='plasma')
cbar_p = f.colorbar(im_p, ax=ax_p)
cbar_p.set_ticks([200, 350, 500, 650])
cbar_p.set_ticklabels(['200', '350', '500', '650'])
ax_p.set_xticks([0.2, 0.4])
ax_p.set_yticks([30, 60, 90])
ax_p.set_zticks([0.5, 0.75, 1])
ax_p.xaxis.labelpad = 10
ax_p.yaxis.labelpad = 10
ax_p.zaxis.labelpad = 15
ax_p.view_init(azim=60)
ax_e = f.add_subplot(122, xlabel='pl_intervention_effect_hi', ylabel='phase_interval', zlabel='uptake', projection='3d')
im_e = ax_e.scatter(xs=pl_intervention_effect_hi[np.where(IC_ex_max == 0)], ys=phase_interval[np.where(IC_ex_max == 0)], \
zs=uptake[np.where(IC_ex_max == 0)], c='black')
im_e = ax_e.scatter(xs=pl_intervention_effect_hi[np.where(IC_ex_max > 0)], ys=phase_interval[np.where(IC_ex_max > 0)], \
zs=uptake[np.where(IC_ex_max > 0)], c=IC_ex_max[np.where(IC_ex_max > 0)], cmap='plasma')
cbar_e = f.colorbar(im_e, ax=ax_e)
cbar_e.set_ticks([1e4, 2e4, 3e4, 4e4])
cbar_e.set_ticklabels(['10000', '20000', '30000', '40000'])
ax_e.set_xticks([0.2, 0.4])
ax_e.set_yticks([30, 60, 90])
ax_e.set_zticks([0.5, 0.75, 1])
ax_e.xaxis.labelpad = 10
ax_e.yaxis.labelpad = 10
ax_e.zaxis.labelpad = 15
ax_e.view_init(azim=60)
plt.tight_layout()
#f.savefig('figures/heatmap_PO.eps')
plt.show()
### END OF CODE ###
| 11,998 | 6,054 |
import torch.nn as nn
from IPython import embed
class FeatureExtractor(nn.Module):
    """Wrap a model and collect intermediate activations from its backbone.

    Iterates over ``submodule``'s direct children; within the child named
    "base" it runs the input through each CNN block in registration order
    and records the output of every block whose name appears in
    ``extracted_layers``.
    """

    def __init__(self, submodule, extracted_layers):
        super(FeatureExtractor, self).__init__()
        self.submodule = submodule                 # model to probe
        self.extracted_layers = extracted_layers   # names of blocks to capture

    def forward(self, x):
        """Return the list of activations captured from the requested blocks."""
        outputs = []
        for name, module in self.submodule._modules.items():
            # Fixed: the original compared strings with `is`, which tests
            # object identity, only worked by accident of interning, and
            # raises a SyntaxWarning on CPython >= 3.8. Use equality.
            # NOTE(review): "classfier" (sic) must match the layer name
            # used by the wrapped model -- confirm against its definition.
            if name == "classfier":
                # Flatten to (batch, features) before a fully-connected head.
                x = x.view(x.size(0), -1)
            if name == "base":
                for block_name, cnn_block in module._modules.items():
                    x = cnn_block(x)
                    if block_name in self.extracted_layers:
                        outputs.append(x)
        return outputs
# -*- coding: utf-8 -*-
"""
75. Sort Colors
Given an array with n objects colored red, white or blue,
sort them so that objects of the same color are adjacent,
with the colors in the order red, white and blue.
Here, we will use the integers 0, 1, and 2
to represent the color red, white, and blue respectively.
Note:
You are not supposed to use the library's sort function for this problem.
click to show follow up.
Follow up:
A rather straight forward solution is a two-pass algorithm using counting sort.
First, iterate the array counting number of 0's, 1's, and 2's,
then overwrite array with total number of 0's, then 1's and followed by 2's.
Could you come up with an one-pass algorithm using only constant space?
"""
class Solution(object):
    """Sort an array of 0/1/2 values (the Dutch national flag problem)."""

    # Two-pass counting sort: tally each color, then rewrite the array.
    # NOTE: shadowed by the second sortColors definition below; kept as an
    # alternative implementation, as in the original file.
    def sortColors(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        tally = {0: 0, 1: 0, 2: 0}
        for color in nums:
            tally[color] += 1
        rebuilt = []
        for color in (0, 1, 2):
            rebuilt.extend([color] * tally[color])
        # overwrite original array in place
        nums[:] = rebuilt

    def sortColors(self, nums):
        """One-pass, constant-space three-pointer partition.

        ``low`` marks where the next 0 belongs, ``high`` where the next 2
        belongs, and ``cur`` scans the region between them. A 2 swapped to
        the back does not advance ``cur`` because the swapped-in value is
        still unexamined; a 0 swapped to the front is safe to advance
        past, since everything left of ``cur`` is already classified.
        Similar to the partition step of quicksort.
        """
        if not nums:
            return
        low, cur, high = 0, 0, len(nums) - 1
        while cur <= high:
            value = nums[cur]
            if value == 0:
                nums[cur], nums[low] = nums[low], nums[cur]
                low += 1
                cur += 1
            elif value == 2:
                nums[cur], nums[high] = nums[high], nums[cur]
                high -= 1
            else:  # value == 1: already in the middle region
                cur += 1
        return nums
# Ad-hoc smoke test (Python 2 print syntax): shows the result of the
# second (one-pass) sortColors implementation, which returns the list.
if __name__ == '__main__':
    print Solution().sortColors([0, 1, 0, 0, 1, 2, 1, 0])
    print Solution().sortColors([1, 0, 0])
| 2,758 | 1,134 |
#!/usr/bin/env python3
"""
LINCS REST API client
New (2019) iLINCS:
http://www.ilincs.org/ilincs/APIinfo
http://www.ilincs.org/ilincs/APIdocumentation
(http://lincsportal.ccs.miami.edu/dcic/api/ DEPRECATED?)
"""
###
import sys,os,argparse,re,time,json,logging
#
from .. import lincs
#
#############################################################################
# CLI entry point: parse arguments, gather input IDs, then dispatch the
# requested operation to the matching lincs.Utils call.
if __name__=='__main__':
    epilog="""\
Examples:
NCBI Gene IDs: 207;
PerturbagenIDs: BRD-A00100033 (get_compound);
LINCS PertIDs: LSM-2121;
Perturbagen-Compound IDs: LSM-2421;
Signature IDs: LINCSCP_10260,LINCSCP_10261,LINCSCP_10262;
Dataset IDs: EDS-1013,EDS-1014;
Search Terms: cancer, vorinostat, MCF7.
"""
    parser = argparse.ArgumentParser(description=f'LINCS REST API client ({lincs.Utils.API_HOST})', epilog=epilog)
    # NOTE(review): 'list_genes' and 'list_datasets' are accepted choices
    # but have no handler below, so they currently fall through to
    # parser.error -- confirm whether handlers are missing.
    ops = [
        'get_gene', 'get_compound', 'get_dataset',
        'list_genes', 'list_compounds', 'list_datasets',
        'search_dataset', 'search_signature',
        'get_signature'
    ]
    parser.add_argument("op", choices=ops, help='OPERATION')
    parser.add_argument("--i", dest="ifile", help="input file, IDs")
    parser.add_argument("--o", dest="ofile", help="output (TSV)")
    parser.add_argument("--ids", help="input IDs, comma-separated")
    parser.add_argument("--searchTerm", dest="searchTerm", help="Entity searchTerm e.g. Rock1)")
    parser.add_argument("--lincs_only", action="store_true", help="LINCS datasets only")
    parser.add_argument("--ngene", type=int, default=50, help="top genes per signature")
    parser.add_argument("--nmax", type=int, help="max results")
    parser.add_argument("--skip", type=int, help="skip results")
    parser.add_argument("--api_host", default=lincs.Utils.API_HOST)
    parser.add_argument("--api_base_path", default=lincs.Utils.API_BASE_PATH)
    parser.add_argument("-v", "--verbose", action="count", default=0)
    args = parser.parse_args()
    logging.basicConfig(format='%(levelname)s:%(message)s', level=(logging.DEBUG if args.verbose>1 else logging.INFO))
    base_url = 'https://'+args.api_host+args.api_base_path
    # Output goes to the named file, or stdout when --o is absent.
    fout = open(args.ofile, "w+") if args.ofile else sys.stdout
    # Collect input IDs: one per line from --i, or comma/space-separated
    # from --ids.
    ids=[];
    if args.ifile:
        # Fixed: iterate the file under a context manager instead of a
        # manual readline loop that leaked the file handle.
        with open(args.ifile) as fin:
            ids = [line.strip() for line in fin]
    elif args.ids:
        # Fixed: raw string -- '\s' in a plain literal is an invalid escape.
        ids = re.split(r'[,\s]+', args.ids.strip())
    if args.op == 'get_gene':
        lincs.Utils.GetGene(ids, base_url, fout)
    elif args.op == 'get_compound':
        lincs.Utils.GetCompound(ids, base_url, fout)
    elif args.op == 'list_compounds':
        lincs.Utils.ListCompounds(base_url, fout)
    elif args.op == 'get_dataset':
        lincs.Utils.GetDataset(ids, base_url, fout)
    elif args.op == 'search_dataset':
        lincs.Utils.SearchDataset(args.searchTerm, args.lincs_only, base_url, fout)
    elif args.op == 'search_signature':
        lincs.Utils.SearchSignature(ids, args.lincs_only, base_url, fout)
    elif args.op == 'get_signature':
        lincs.Utils.GetSignature(ids, args.ngene, base_url, fout)
    else:
        parser.error(f'Invalid operation: {args.op}')
    # Close the output file when we opened one (leave sys.stdout alone).
    if args.ofile:
        fout.close()
| 3,004 | 1,156 |
import os
def get_snyk_token():
    """Return the Snyk API token from the environment, or None if unset."""
    return os.getenv('SECRET_SNYK_API_TOKEN')
| 85 | 39 |
from __future__ import absolute_import, unicode_literals
def generate_song(i):
    """Build a dummy track URI for index *i* (e.g. 'dummy:track:song1')."""
    return 'dummy:track:song{0}'.format(i)
| 117 | 41 |
"""Data transformation utils."""
import datetime
import logging
import HTMLParser
from decimal import Decimal
import dateutil.parser
from gi.repository import Gtk
from datagrid_gtk3.utils import imageutils
from datagrid_gtk3.utils import dateutils
from datagrid_gtk3.utils import stringutils
# Module-wide logger.
logger = logging.getLogger(__name__)
# Registry mapping transformer names to callables; mutated by
# register_transformer()/unregister_transformer() below.
_transformers = {}
# NOTE(review): __all__ omits transformer() and unregister_transformer(),
# which look public too -- confirm this is intentional.
__all__ = ('get_transformer', 'register_transformer')
def get_transformer(transformer_name):
    """Look up a registered transformer.

    :param str transformer_name: the name the transformer was registered under
    :return: the registered transformer, or None when the name is unknown
    :rtype: callable
    """
    return _transformers.get(transformer_name)
def register_transformer(transformer_name, transformer):
    """Register *transformer* under *transformer_name*.

    :param str transformer_name: the name to register the transformer under
    :param callable transformer: the transformer to be registered
    """
    assert callable(transformer)
    _transformers.update({transformer_name: transformer})
def unregister_transformer(transformer_name):
    """Remove the transformer registered under *transformer_name*.

    :param str transformer_name: the registration name to remove
    :raise KeyError: if no transformer is registered under the given name
    """
    _transformers.pop(transformer_name)
def transformer(transformer_name):
    """Decorator that registers the wrapped callable as a transformer.

    Use this like::

        @transformer('transformer_name')
        def transformer_func(value):
            return do_something_with_value()

    :param str transformer_name: the name to register the transformer under
    """
    def _register(func):
        register_transformer(transformer_name, func)
        return func
    return _register
###
# Default transformers
###
@transformer('string')
def string_transform(value, max_length=None, oneline=True,
                     decode_fallback=None):
    """String transformation (Python 2 str/unicode semantics).

    :param object value: the value that will be converted to
        a string
    :param int max_length: if not `None`, will be used to
        ellipsize the string if greater than that.
    :param bool oneline: if we should join all the lines together
        in one line
    :param callable decode_fallback: a callable to use
        to decode value in case it cannot be converted to unicode directly
    :return: the string representation of the value
    :rtype: str
    """
    if value is None:
        return '<NULL>'
    if isinstance(value, str):
        # Bytes in: decode as UTF-8, substituting U+FFFD for bad bytes.
        value = unicode(value, 'utf-8', 'replace')
    else:
        try:
            value = unicode(value)
        except UnicodeDecodeError:
            if decode_fallback is None:
                raise
            value = decode_fallback(value)
    # Replace non-printable characters on the string so the user will
    # know that there's something there even though it is not printable.
    value = stringutils.replace_non_printable(value)
    if oneline:
        # Collapse to a single line, dropping blank lines entirely.
        value = u' '.join(v.strip() for v in value.splitlines() if v.strip())
    # Don't show more than max_length chars in treeview. Helps with performance
    if max_length is not None and len(value) > max_length:
        value = u'%s [...]' % (value[:max_length], )
    # At the end, if value is unicode, it needs to be converted to
    # an utf-8 encoded str or it won't be rendered in the treeview.
    return value.encode('utf-8')
@transformer('html')
def html_transform(value, max_length=None, oneline=True,
                   decode_fallback=None):
    """HTML transformation: unescape entities, then format as a string.

    :param object value: the escaped html that will be unescaped
    :param int max_length: if not `None`, will be used to
        ellipsize the string if greater than that.
    :param bool oneline: if we should join all the lines together
        in one line
    :param callable decode_fallback: a callable to use
        to decode value in case it cannot be converted to unicode directly
    :return: the html string unescaped
    :rtype: str
    """
    if value is None:
        return '<NULL>'
    # Python 2 HTMLParser provides unescape() as an instance method.
    html_parser = HTMLParser.HTMLParser()
    unescaped = html_parser.unescape(value)
    # Delegate the rest (ellipsizing, one-lining, encoding) to
    # string_transform above.
    return string_transform(
        unescaped, max_length=max_length, oneline=oneline,
        decode_fallback=decode_fallback)
@transformer('boolean')
def boolean_transform(value):
    """Transform boolean values to a gtk stock image.

    :param bool value: the value to transform
    :return: a pixbuf representing the value's bool value
    :rtype: :class:`GdkPixbuf.Pixbuf`
    """
    # A throwaway Gtk.Image is used purely to render the stock icon.
    img = Gtk.Image()
    # NOTE: should be STOCK_NO instead of STOCK_CANCEL but it looks
    # crappy in Lubuntu
    return img.render_icon(
        Gtk.STOCK_YES if value else Gtk.STOCK_CANCEL, Gtk.IconSize.MENU)
@transformer('bytes')
def bytes_transform(value):
    """Render a byte count as a human-readable string (one decimal place).

    :param int value: bytes to be humanized
    :returns: the humanized bytes; empty string when value is None
    :rtype: str
    :raise ValueError: for values matching no unit (i.e. negatives)
    """
    if value is None:
        return ''
    units = (
        ('PB', 1 << 50),
        ('TB', 1 << 40),
        ('GB', 1 << 30),
        ('MB', 1 << 20),
        ('kB', 1 << 10),
        ('B', 0),
    )
    for unit, threshold in units:
        if value >= threshold:
            # max(threshold, 1) avoids dividing by zero on the 'B' row.
            return '%.1f %s' % (float(value) / max(threshold, 1), unit)
    raise ValueError('Unexpected value: %s' % (value, ))
@transformer('datetime')
def datetime_transform(value):
    """Transform datetime to ISO 8601 date format.

    Accepts datetime objects, parseable date strings, or (as a legacy
    fallback) integer timestamps.

    :param value: the datatime object
    :type value: datetime.datetime
    :return: the datetime represented in ISO 8601 format
    :rtype: str
    """
    if value is None:
        return ''
    if isinstance(value, basestring):
        try:
            # Try to parse string as a date
            value = dateutil.parser.parse(value)
        except (OverflowError, TypeError, ValueError):
            # Leave value unchanged; the timestamp fallback below may
            # still apply.
            pass
    # FIXME: Fix all places using 'datetime' for timestamp
    # (either as an int/long or as a convertable str)
    try:
        long_value = long(value)
    except (TypeError, ValueError):
        pass
    else:
        # Value was numeric: treat it as a Unix timestamp instead.
        return timestamp_transform(long_value)
    if not isinstance(value, datetime.datetime):
        # Convert value to string even if it cannot be parsed as a datetime
        logger.warning('Not a datetime: %s', value)
        return str(value)
    return value.isoformat(' ')
@transformer('timestamp')
@transformer('timestamp_unix')
def timestamp_transform(value, date_only=False):
    """Format a Unix timestamp (seconds, UTC) as an ISO 8601 string.

    :param int value: Unix timestamp
    :param bool date_only: if we should format only the date part,
        ignoring the time
    :return: the datetime represented in ISO 8601 format
    :rtype: str
    """
    if value is None:
        return ''
    try:
        moment = datetime.datetime.utcfromtimestamp(value)
    except (TypeError, ValueError):
        # Not interpretable as a timestamp: warn and fall back to str().
        logger.warning('Not a timestamp: %s', value)
        return str(value)
    if date_only:
        return moment.date().isoformat()
    return moment.isoformat(' ')
@transformer('timestamp_ms')
@transformer('timestamp_unix_ms')
def timestamp_ms_transform(value):
    """Format a Unix timestamp given in milliseconds as ISO 8601.

    :param int value: Unix timestamp in milliseconds
    :return: the datetime represented in ISO 8601 format
    :rtype: str
    """
    if value is None:
        return ''
    normalized = dateutils.normalize_timestamp(value, 'timestamp_unix_ms')
    return timestamp_transform(normalized)
@transformer('timestamp_Ms')
@transformer('timestamp_unix_Ms')
def timestamp_Ms_transform(value):
    """Format a Unix timestamp given in microseconds as ISO 8601.

    :param int value: Unix timestamp in microseconds
    :return: the datetime represented in ISO 8601 format
    :rtype: str
    """
    if value is None:
        return ''
    normalized = dateutils.normalize_timestamp(value, 'timestamp_unix_Ms')
    return timestamp_transform(normalized)
@transformer('timestamp_ios')
@transformer('timestamp_apple')
def timestamp_apple_transform(value):
    """Format an Apple timestamp as ISO 8601.

    Apple timestamps (e.g. those used on iOS) count from 2001-01-01
    rather than the Unix epoch.

    :param int value: apple timestamp
    :return: the datetime represented in ISO 8601 format
    :rtype: str
    """
    if value is None:
        return ''
    normalized = dateutils.normalize_timestamp(value, 'timestamp_apple')
    return timestamp_transform(normalized)
@transformer('timestamp_webkit')
def timestamp_webkit_transform(value):
    """Format a WebKit timestamp as ISO 8601.

    WebKit timestamps are expressed in microseconds and count from
    1601-01-01.

    :param int value: WebKit timestamp
    :return: the datetime represented in ISO 8601 format
    :rtype: str
    """
    if value is None:
        return ''
    normalized = dateutils.normalize_timestamp(value, 'timestamp_webkit')
    return timestamp_transform(normalized)
@transformer('timestamp_julian')
def timestamp_julian_transform(value, date_only=False):
    """Format a Julian-day timestamp as ISO 8601.

    Julian timestamps count the days passed since noon Universal Time on
    January 1, 4713 BCE.

    :param int value: Julian timestamp in days
    :param bool date_only: if we should format only the date part,
        ignoring the time
    :return: the datetime represented in ISO 8601 format
    :rtype: str
    """
    if value is None:
        return ''
    normalized = dateutils.normalize_timestamp(value, 'timestamp_julian')
    return timestamp_transform(normalized, date_only=date_only)
@transformer('timestamp_julian_date')
def timestamp_julian_date_transform(value):
    """Format a Julian-day timestamp as an ISO 8601 date (no time part).

    :param int value: Julian timestamp
    :return: the date represented in ISO 8601 format
    :rtype: str
    """
    return '' if value is None else timestamp_julian_transform(value, date_only=True)
@transformer('timestamp_midnight')
def timestamp_midnight_transform(value):
    """Format seconds-since-midnight as an ISO 8601 time string.

    :param int value: midnight timestamp in seconds
    :return: the time represented in ISO 8601 format
    :rtype: str
    """
    if value is None:
        return ''
    moment = datetime.datetime.min + datetime.timedelta(seconds=value)
    return moment.time().isoformat()
@transformer('timestamp_midnight_ms')
def timestamp_midnight_ms_transform(value):
    """Format milliseconds-since-midnight as an ISO 8601 time string.

    :param int value: midnight timestamp in milliseconds
    :return: the time represented in ISO 8601 format
    :rtype: str
    """
    if value is None:
        return ''
    return timestamp_midnight_transform(value / 1000)
@transformer('timestamp_midnight_Ms')
def timestamp_midnight_Ms_transform(value):
    """Format microseconds-since-midnight as an ISO 8601 time string.

    :param int value: midnight timestamp in microseconds
    :return: the time represented in ISO 8601 format
    :rtype: str
    """
    if value is None:
        return ''
    return timestamp_midnight_transform(value / 1000000)
@transformer('image')
def image_transform(path, size=24, fill_image=True, draw_border=False,
                    draft=False, load_on_thread=False):
    """Render *path* into a pixbuf via the shared image cache.

    :param str path: the image path or `None` to use a fallback image
    :param int size: bounding square (size, size) the image is resized
        to fit into
    :param bool fill_image: pad smaller images with a transparent
        background so the result is at least (size, size), with the real
        image centered
    :param bool draw_border: if we should add a border on the image
    :param bool draft: load the image as a draft, trading a little
        quality for much higher performance
    :param bool load_on_thread: load the image on another thread; a
        placeholder is returned the first time this is called
    :returns: the resized pixbuf
    :rtype: :class:`GdkPixbuf.Pixbuf`
    """
    manager = imageutils.ImageCacheManager.get_default()
    return manager.get_image(
        path, size, fill_image, draw_border, draft, load_on_thread)
@transformer('degree_decimal_str')
def degree_decimal_str_transform(value, length=8):
    """Transform degree decimal string to a numeric value.

    The string is expected to have <length> digits; if fewer digits are
    found, it will be prefixed with zeroes as needed. The first two digits
    become the integer part, the remainder the fractional part.

    :param value: Degrees encoded as a string with digits
    :type value: str
    :param length: Maximum expected string length
    :type length: int
    :return: the degrees as a decimal string without non-significant zeroes
    :rtype: str
    """
    # NOTE(review): `basestring` exists only in Python 2; under Python 3
    # this raises NameError. Confirm the target interpreter before porting.
    assert isinstance(value, basestring), 'String value expected'
    assert value.isdigit(), 'All characters expected to be digits'
    assert len(value) <= length, \
        'String length expected to be {} or less'.format(length)
    # Left-pad with zeroes so the decimal point lands at a fixed position.
    value = value.zfill(length)
    # Add decimal point at the expected location
    value = '{}.{}'.format(value[:2], value[2:])
    # Remove non-significant leading zeroes
    value = Decimal(value)
    return str(value)
| 13,693 | 3,967 |
import argparse
import logging
import pandas as pd
import numpy as np
from tslearn.clustering import TimeSeriesKMeans
from tqdm import tqdm
logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)

# Seeds of the trained model runs whose metrics can be loaded.
SEEDS = [27, 28, 29]
# Dataset/model identifiers used in the per-example metric file names.
MODELS = ['dbert', 'dbidaf', 'droberta', 'squad']
# Number of dev examples per dataset; used in sanity-check assertions.
DEV_DATA_SIZES = {
    'dbert': 1000,
    'dbidaf': 1000,
    'droberta': 1000,
    'squad': 10570
}
# Number of checkpoints per run, i.e. the length of each learning curve.
NUM_CHECKPOINTS = 120
def load_per_example_metrics_df(seed: int) -> pd.DataFrame:
    """Load and stack the per-example metric CSVs for every model in MODELS.

    :param seed: training seed whose metric files should be loaded
    :return: one DataFrame with all models' per-example metrics concatenated
    """
    logging.info('Loading per example metrics')
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # collect the frames and concatenate them once instead.
    frames = [
        pd.read_csv(f'data/processed/per_example_metrics-squadv1-adversarialall-dataset={model}-seed={seed}.csv')
        for model in MODELS
    ]
    example_metric_df = pd.concat(frames)
    # Sanity check: every checkpoint of every dev example is present.
    assert example_metric_df.shape[0] == NUM_CHECKPOINTS * np.sum(list(DEV_DATA_SIZES.values()))
    logging.info(example_metric_df.shape)
    return example_metric_df
def _prepare_data(per_example_metrics_df: pd.DataFrame, value: str = 'f1') -> tuple:
    """
    Prepare input array for k-means. Input is of dim (n_ts, sz, d) where n_ts=number of time series; sz=length of
    time series; d=dimensionality of time series

    :param per_example_metrics_df: per-example metrics with 'id', 'checkpoint'
        and the metric column named by *value*
    :param value: name of the metric column to extract (default 'f1')
    :return: (X, idx_to_id_dict) where X has shape (n_ts, NUM_CHECKPOINTS, 1)
        and idx_to_id_dict maps each row index of X back to its example id
    """
    logging.info('Preparing input for k-means')
    per_example_metrics_df = per_example_metrics_df.copy()
    # Sort so each example's checkpoints appear in chronological order.
    per_example_metrics_df.sort_values(['id', 'checkpoint'], inplace=True)
    n_ts = per_example_metrics_df['id'].nunique()
    assert n_ts == np.sum(list(DEV_DATA_SIZES.values()))
    sz = NUM_CHECKPOINTS
    d = 1
    X = np.zeros((n_ts, sz, d))
    # Store mapping for index position to corresponding ID
    idx_to_id_dict = {}
    for idx, _id in tqdm(
            enumerate(per_example_metrics_df['id'].unique()),
            total=per_example_metrics_df['id'].nunique()
    ):
        idx_to_id_dict[idx] = _id
        # One boolean filter per id (quadratic overall) -- acceptable at this
        # data size, but a groupby would scale better.
        X[idx, :, :] = per_example_metrics_df[per_example_metrics_df['id'] == _id][value].values.reshape(-1, 1)
    logging.info(X.shape)
    return X, idx_to_id_dict
def get_kmeans_clusters(
        per_example_metrics_df: pd.DataFrame,
        n_clusters: int,
        model_seed: int,
        km_seed: int,
        max_iter: int = 300,
        value: str = 'f1'
) -> pd.DataFrame:
    """Cluster per-example learning curves with DTW k-means.

    :param per_example_metrics_df: per-example metrics (see _prepare_data)
    :param n_clusters: number of clusters to fit
    :param model_seed: seed of the model run (recorded in the output)
    :param km_seed: random state for k-means (recorded in the output)
    :param max_iter: maximum k-means iterations
    :param value: metric column to cluster on
    :return: DataFrame with columns id, KM_label, km_seed, model_seed
    """
    series, idx_to_id = _prepare_data(per_example_metrics_df, value=value)

    # Fit DTW k-means on the (n_ts, sz, 1) learning-curve array.
    logging.info('Fitting k-means')
    clusterer = TimeSeriesKMeans(
        n_clusters=n_clusters,
        metric="dtw",
        max_iter=max_iter,
        random_state=km_seed,
        verbose=0,
        n_jobs=-1
    )
    cluster_labels = clusterer.fit_predict(series)
    logging.info('Finished k-means')

    # Map each row index back to its example id.
    logging.info('Processing labels')
    labelled_ids = [(example_id, cluster_labels[idx])
                    for idx, example_id in idx_to_id.items()]
    id_km_labels_df = pd.DataFrame(labelled_ids, columns=['id', 'KM_label'])
    assert id_km_labels_df.shape[0] == np.sum(list(DEV_DATA_SIZES.values()))
    id_km_labels_df['km_seed'] = km_seed
    id_km_labels_df['model_seed'] = model_seed
    logging.info(id_km_labels_df.shape)
    logging.info(id_km_labels_df.head())
    return id_km_labels_df
def main(seed, km_seed, n_clusters, savepath, max_iter):
    """Run the pipeline: load metrics, cluster them, write labels to CSV."""
    metrics = load_per_example_metrics_df(seed=seed)
    labels = get_kmeans_clusters(
        per_example_metrics_df=metrics,
        n_clusters=n_clusters,
        model_seed=seed,
        km_seed=km_seed,
        max_iter=max_iter
    )
    labels.to_csv(savepath, index=False)
if __name__ == '__main__':
    # CLI entry point; flags are optional and default to None if omitted.
    parser = argparse.ArgumentParser()
    parser.add_argument("--seed", type=int)  # model training seed
    parser.add_argument("--km_seed", type=int)  # k-means random_state
    parser.add_argument("--n_clusters", type=int)
    parser.add_argument("--savepath", type=str)  # output CSV path
    parser.add_argument("--max_iter", type=int)
    args = parser.parse_args()
    main(args.seed, args.km_seed, args.n_clusters, args.savepath, args.max_iter)
| 3,974 | 1,531 |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 22 18:21:32 2020
@author: Siro Moreno
"""
import numpy as np
l = 0.2096
L = 0.2096
L_2 = (L + l) / (2 ** 0.5)
l_2 = 0.0
r = 0.0667
m = 15.75
I_w = 0.00266
I_z = 0.461
phi_dot_max = 2 * np.pi * 7000 / (49 * 60)
psi_dot_max = phi_dot_max * 0.0667 / (2 ** 0.5 * 0.29642)
def max_speed_axes_2(psi_dot):
    """Maximum attainable speed along axes 2 for a given yaw rate.

    Subtracts the speed consumed by rotation from the straight-line speed
    limit imposed by the maximum wheel rate.

    :param psi_dot: yaw rate
    :return: remaining maximum linear speed along axes 2
    """
    lever_arm = 0.29642
    wheel_radius = 0.0667
    max_wheel_rate = 2 * np.pi * 7000 / (49 * 60)
    rotation_penalty = np.abs(lever_arm * psi_dot)
    return max_wheel_rate * wheel_radius / (2 ** 0.5) - rotation_penalty
| 496 | 324 |
import os
import re
import logging
import pandas as pd
from bs4 import BeautifulSoup
from os.path import join, exists, dirname
from prettytable import PrettyTable
logger = logging.getLogger(__name__)
ALL_TA2S = {'NYU-TA2', 'CMU-TA2', 'UCB-TA2', 'Uncharted-TA2', 'SRI-TA2', 'Texas A&M-TA2', 'D3M ENSEMBLE-TA2', 'NEW-NYU-TA2'}
SKIP_DATASETS = {'LL1_FB15k_237', 'LL1_FB15k_237_V2'} # These datasets use unsupported metrics, so skip them
def get_task_name(task_keywords):
    """Map a list of task keywords to a canonical task name.

    Rules are evaluated in priority order; the first rule whose keywords
    are all present in *task_keywords* wins.

    :param task_keywords: list of keyword strings describing the task
    :return: canonical task name, or None when nothing matches
    """
    rules = [
        (('clustering',), 'CLUSTERING'),
        (('semi-supervised',), 'SEMISUPERVISED_CLASSIFICATION'),
        (('collaborative',), 'COLLABORATIVE_FILTERING'),
        (('forecasting',), 'TIME_SERIES_FORECASTING'),
        (('lupi',), 'LUPI'),
        (('community',), 'COMMUNITY_DETECTION'),
        (('link',), 'LINK_PREDICTION'),
        (('object',), 'OBJECT_DETECTION'),
        (('matching',), 'GRAPH_MATCHING'),
        (('series',), 'TIME_SERIES_CLASSIFICATION'),
        (('vertex',), 'VERTEX_CLASSIFICATION'),
        # There are no primitives for multi-instance classification, so it
        # is treated as plain tabular classification.
        (('multipleInstanceLearning',), 'TABULAR_CLASSIFICATION'),
        (('text',), 'TEXT_CLASSIFICATION'),
        (('image', 'classification'), 'IMAGE_CLASSIFICATION'),
        (('image', 'regression'), 'IMAGE_REGRESSION'),
        (('audio',), 'AUDIO_CLASSIFICATION'),
        (('video',), 'VIDEO_CLASSIFICATION'),
        (('classification',), 'TABULAR_CLASSIFICATION'),
        (('regression',), 'TABULAR_REGRESSION'),
    ]
    for required, name in rules:
        if all(keyword in task_keywords for keyword in required):
            return name
    return None
def get_leaderboard(leaderboard_path):
    """Parse the saved leaderboard HTML into {dataset: {'ranking', 'task'}}.

    :param leaderboard_path: path to the leaderboard HTML dump
    :return: dict mapping dataset name to its TA2 ranking dict and task name
    """
    leaderboard = {}
    with open(leaderboard_path) as fin:
        html_doc = fin.read()
    soup = BeautifulSoup(html_doc, 'html.parser')
    # The second dropdown menu lists datasets as "name (keyword ...)".
    items = soup.find_all('div', {'class': 'dropdown-menu'})[1].find_all('li')
    logger.info('Found %d datasets', len(items))
    datasets = []
    task_types = {}
    for item in items:
        dataset_description = item.get_text().replace('\n', ' ')
        match = re.search('(.+) \((.+)\)', dataset_description)
        dataset_name, task_keywords = match.group(1), match.group(2)
        dataset_name = dataset_name.rstrip()
        task_keywords = re.split('\s+', task_keywords.strip())
        datasets.append(dataset_name)
        task_types[dataset_name] = get_task_name(task_keywords)
    tables = soup.find_all('table', {'class': 'dataTable no-footer'})
    # The first matching table is dropped; the remaining tables are assumed
    # to line up one-to-one with the dataset list above.
    tables = tables[1:]
    logger.info('Found %d tables', len(tables))
    for index, table in enumerate(tables):
        dataset_name = datasets[index]
        if dataset_name in SKIP_DATASETS:
            continue
        rows = table.find('tbody').find_all('tr')
        ranking = []
        for index_row, row in enumerate(rows):
            cells = row.find_all('td')
            # Fixed column positions: 1=team, 6=score, 7=baseline, 9=metric.
            # NOTE(review): tied to this dump's layout -- verify on new dumps.
            team = cells[1].get_text()
            score = cells[6].get_text()
            baseline = cells[7].get_text()
            metric = cells[9].get_text()
            if team == 'NYU-TA2' and task_types[dataset_name] not in {'TABULAR_CLASSIFICATION', 'TABULAR_REGRESSION'}:
                team = None  # We consider NYU-TA2 as the system that supports only classification and regression
            if team in ALL_TA2S:  # Remove TA1 performances
                ranking.append((team, round(float(score), 3)))
        new_ranking = add_rank(ranking)
        leaderboard[dataset_name] = {'ranking': new_ranking, 'task': task_types[dataset_name]}
    return leaderboard
def add_rank(ranking, worst_rank=len(ALL_TA2S)):
    """Convert an ordered (team, score) list into a dense-ranked dict.

    Consecutive equal scores share a rank; the rank increases by one each
    time the score changes (dense ranking, starting at 1). Teams from
    ALL_TA2S that are absent from *ranking* get *worst_rank* and no score.

    :param ranking: list of (team, score) tuples, already sorted best-first
    :param worst_rank: rank assigned to teams without an entry
    :return: dict mapping team -> {'rank': int, 'score': float or None}
    """
    new_ranking = {}
    # Guard against an empty ranking: previously ranking[0][1] raised
    # IndexError; now all teams simply fall through to worst_rank.
    previous_score = ranking[0][1] if ranking else None
    rank = 1
    for team, score in ranking:
        if score != previous_score:
            rank += 1
        new_ranking[team] = {'rank': rank, 'score': score}
        previous_score = score
    for team in ALL_TA2S:
        if team not in new_ranking:
            new_ranking[team] = {'rank': worst_rank, 'score': None}  # Add the worse rank
    return new_ranking
def collect_new_scores(folder_path):
    """Read each dataset's statistics CSV and collect its score and metric.

    :param folder_path: directory containing one sub-directory per dataset
    :return: dict mapping dataset name to {'score': float or None, 'metric': str}
    """
    new_scores = {}
    datasets = sorted([x for x in os.listdir(folder_path) if os.path.isdir(join(folder_path, x))])
    for dataset in datasets:
        csv_path = join(folder_path, dataset, 'output/temp/statistics_datasets.csv')
        if exists(csv_path):
            data = pd.read_csv(csv_path, header=None, sep='\t')
            # The file stores the literal string 'None' for missing scores.
            data = data.replace({'None': None})
            # Positional columns (no header): 4 = score, 5 = metric.
            score = data.iloc[0][4]
            metric = data.iloc[0][5]
            if score is not None:
                score = round(float(score), 3)
            new_scores[dataset] = {'score': score, 'metric': metric}
    return new_scores
def update_leaderboard(leaderboard, new_scores, new_team):
    """Insert *new_team*'s scores into each dataset ranking and re-rank.

    :param leaderboard: output of get_leaderboard()
    :param new_scores: output of collect_new_scores()
    :param new_team: team name under which the new scores are ranked
    :return: the leaderboard dict, updated in place and returned
    """
    for dataset, ranking_info in leaderboard.items():
        ranking = ranking_info['ranking']
        # Drop placeholder entries (teams with no score) before re-ranking;
        # remaining entries keep their insertion order.
        ranking = [(t, s['score']) for t, s in ranking.items() if s['score'] is not None]
        if dataset in new_scores:
            new_score = new_scores[dataset]['score']
            metric = new_scores[dataset]['metric']
            ranking.append((new_team, new_score))
            # Higher is better unless the metric name contains 'ERROR'.
            is_reverse = 'ERROR' not in metric
            ranking = sorted(ranking, key=lambda x: x[1], reverse=is_reverse)
        else:
            logger.warning('No new score found for dataset %s', dataset)
        new_ranking = add_rank(ranking)
        leaderboard[dataset] = {'ranking': new_ranking, 'task': ranking_info['task']}
    return leaderboard
def calculate_statistics(leaderboard):
    """Print summary tables for a leaderboard.

    Prints two PrettyTable summaries: wins and average rank per team, and
    the number of datasets each team scored on, grouped by task type.
    """
    team_statistics = {x: {'winner_pipelines': 0, 'avg_rank': 0} for x in ALL_TA2S}
    for dataset in leaderboard:
        dataset_ranking = leaderboard[dataset]['ranking']
        for team in ALL_TA2S:
            team_rank = dataset_ranking[team]['rank']
            if team_rank == 1:
                team_statistics[team]['winner_pipelines'] += 1
            # Accumulate the rank sum here; divided by the dataset count below.
            team_statistics[team]['avg_rank'] += team_rank
    total_datasets = float(len(leaderboard))
    for team in team_statistics:
        team_statistics[team]['avg_rank'] = round(team_statistics[team]['avg_rank'] / total_datasets, 3)
    # Sort teams by number of wins for display.
    team_statistics = sorted(team_statistics.items(), key=lambda x: x[1]['winner_pipelines'], reverse=True)
    table_team = PrettyTable()
    table_team.field_names = ['Team', 'Winner Pipelines', 'Avg. Rank']
    for team, statistics in team_statistics:
        table_team.add_row([team, statistics['winner_pipelines'], statistics['avg_rank']])
    print(table_team)
    task_statistics = {}
    for dataset in leaderboard:
        task = leaderboard[dataset]['task']
        if task not in task_statistics:
            task_statistics[task] = {'teams': {}, 'total': 0}
        task_statistics[task]['total'] += 1
        for team in ALL_TA2S:
            team_score = leaderboard[dataset]['ranking'][team]['score']
            if team not in task_statistics[task]['teams']:
                task_statistics[task]['teams'][team] = 0
            # Count only datasets where the team actually produced a score.
            if team_score is not None:
                task_statistics[task]['teams'][team] += 1
    ta2s = sorted(ALL_TA2S)
    table_task = PrettyTable()
    table_task.field_names = ['Tasks', 'Total'] + ta2s
    for task in task_statistics:
        table_task.add_row([task, task_statistics[task]['total']] + [task_statistics[task]['teams'][x] for x in ta2s])
    print(table_task)
# NOTE(review): this runs at import time; consider wrapping it in a
# `if __name__ == '__main__':` guard.
logger.info('Top 1 pipeline')
# Paths are resolved relative to this file's location in the repository.
leaderboard_path = join(dirname(__file__), '../../evaluations/leaderboard_december_2020_rank1.html')
leaderboard = get_leaderboard(leaderboard_path)
new_results_path = join(dirname(__file__), '../../evaluations/new_results')
new_scores = collect_new_scores(new_results_path)
leaderboard = update_leaderboard(leaderboard, new_scores, 'NEW-NYU-TA2')
calculate_statistics(leaderboard)
| 8,301 | 2,746 |
def proceso(num, suma=0):
    """Return (num, total) where total is the Armstrong sum of *num*:
    each digit raised to the number of digits, summed.

    Fixes the original implementation, which accumulated the powers in a
    list, checked an always-true length condition on the last iteration,
    and had an unreachable `numero.clear()` after the return.

    :param num: non-negative integer to evaluate
    :param suma: unused; kept for backward compatibility with callers
    :return: tuple (num, digit-power sum)
    """
    digits = str(num)
    total = sum(int(digit) ** len(digits) for digit in digits)
    return num, total
# Read the number of test cases, then one candidate number per line.
entrada = input()
datos = []
for i in range(int(entrada)):
    entrada2 = input()
    datos.append(entrada2)
# A number is an Armstrong number when it equals the sum of its digits
# each raised to the number of digits.
for n in datos:
    resul1, resul2 = proceso(int(n))
    if resul1 == resul2:
        print("Armstrong")
    elif resul1 != resul2:
        print("Not Armstrong")
| 522 | 216 |
from django.conf.urls import url
from django.views.generic import TemplateView
from core import views
from core.feeds import rss_feed, atom_feed
app_name = 'core'
urlpatterns = [
    # The site root serves the blog listing as well.
    url(r'^$', views.blog, name='home'),
    url(r'^blog/$', views.blog, name='blog'),
    url(r'^blog/(?P<slug>.*)/$', views.entry, name='entry'),
    url(r'^subscribe/$', views.subscribe, name='subscribe'),
    url(r'^unsubscribe/$', views.unsubscribe, name='unsubscribe'),
    # Static template page; no URL name assigned.
    url(r'^contact/$', TemplateView.as_view(template_name="core/contact.html")),
    url(r'^alertthepress/$', views.alert_the_press, name='alert-the-press'),
    # rss_feed/atom_feed are called here -- presumably Feed classes that
    # must be instantiated; confirm in core.feeds.
    url(r'^rss/$', rss_feed()),
    url(r'^atom/$', atom_feed()),
]
import command
import sys
# Build newline-stripped copies of the environment variable names and
# values exposed by the project's `command` module (parallel lists).
count = 0
newEnvVarName = []
newEnvVarValue = []
for I in command.env_var_name:
    newEnvVarName.append(I.replace("\n",""))
for I in command.env_var_value:
    newEnvVarValue.append(I.replace("\n",""))
endPrint = False
# First pass: print NAME=VALUE pairs. With -0/--null everything goes on
# one line separated by spaces; otherwise one pair per line, unless an
# argument that is not a known variable name suppresses plain listing.
for I in command.env_var_name:
    if(command.new_arg_parser("-0") or command.new_arg_parser("--null")):
        print(str(I).replace("\n","")+"="+str(command.env_var_value[count]).replace("\n",""),end=" ")
        endPrint = True
    else:
        count3=0
        for D in sys.argv:
            if(len(sys.argv)!=1):
                # NOTE(review): only non-name arguments past argv[0] trigger
                # printing here -- presumably filtering behaviour; confirm.
                if(D not in newEnvVarName and count3!=0):
                    print(str(I).replace("\n","")+"="+str(command.env_var_value[count]).replace("\n",""))
                    break
            else:
                print(str(I).replace("\n","")+"="+str(command.env_var_value[count]).replace("\n",""))
                break
            count3+=1
    count+=1
# Terminate the single-line (-0/--null) output with a newline.
if(endPrint == True):
    print()
# Second pass: for every argument that names a variable, print its value.
if(len(sys.argv)!=1):
    for I in sys.argv:
        if(I in newEnvVarName):
            newCount = 0
            for O in newEnvVarName:
                if(I == O):
                    print(newEnvVarValue[newCount])
                newCount+=1
| 1,110 | 394 |
import datetime
from scripts.ilapfuncs import timeline, open_sqlite_db_readonly
from scripts.plugin_base import ArtefactPlugin
from scripts.ilapfuncs import logfunc, tsv
from scripts import artifact_report
class TextNowCallLogsPlugin(ArtefactPlugin):
    """Extract call-log records from the TextNow application database."""

    def __init__(self):
        super().__init__()

        self.author = 'Unknown'
        self.author_email = ''
        self.author_url = ''

        self.category = 'Text Now'
        self.name = 'Call Logs'
        self.description = ''

        self.artefact_reference = ''  # Description on what the artefact is.
        self.path_filters = ['**/com.enflick.android.TextNow/databases/textnow_data.db*']  # Collection of regex search filters to locate an artefact.
        self.icon = 'phone'  # feathricon for report.

    def _processor(self) -> bool:
        """Parse call records out of textnow_data.db and emit HTML/TSV/timeline
        reports. Returns True once processing completes, even when no call
        records were found.
        """
        source_file_msg = ''
        for file_found in self.files_found:
            file_name = str(file_found)
            if file_name.endswith('textnow_data.db'):
                textnow_db = str(file_found)
                source_file_msg = file_found.replace(self.seeker.directory, '')

        # NOTE(review): if no collected file ends with 'textnow_data.db',
        # `textnow_db` is unbound and the next line raises NameError -- the
        # path filter presumably guarantees at least one match; confirm.
        db = open_sqlite_db_readonly(textnow_db)
        cursor = db.cursor()
        try:
            # For call rows (message_type 100/102) message_text holds the call
            # duration, so date/1000 + message_text is the call end time even
            # though the column is aliased 'duration'.
            cursor.execute('''
            SELECT contact_value AS num,
            case message_direction when 2 then "Outgoing" else "Incoming" end AS direction,
            date/1000 + message_text AS duration,
            date/1000 AS datetime
            FROM messages AS M
            WHERE message_type IN ( 100, 102 )
            ''')

            all_rows = cursor.fetchall()
            usageentries = len(all_rows)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; any query failure means "no entries".
            usageentries = 0

        if usageentries > 0:
            data_headers = ('Start Time', 'End Time', 'From ID', 'To ID', 'Call Direction')
            data_list = []
            for row in all_rows:
                # Only one endpoint is known: the remote contact, placed on
                # the To/From side according to the call direction.
                phone_number_from = None
                phone_number_to = None
                if row[1] == "Outgoing":
                    phone_number_to = row[0]
                else:
                    phone_number_from = row[0]

                # Timestamps are rendered as naive local time.
                starttime = datetime.datetime.fromtimestamp(int(row[3])).strftime('%Y-%m-%d %H:%M:%S')
                endtime = datetime.datetime.fromtimestamp(int(row[2])).strftime('%Y-%m-%d %H:%M:%S')

                data_list.append((starttime, endtime, phone_number_from, phone_number_to, row[1]))

            artifact_report.GenerateHtmlReport(self, file_found, data_headers, data_list)
            tsv(self.report_folder, data_headers, data_list, self.full_name(), source_file_msg)
            timeline(self.report_folder, self.full_name(), data_list, data_headers)
        else:
            logfunc('No Text Now Call Logs found')

        db.close()
        return True
| 3,050 | 851 |
from django.apps import apps
from django import forms
from django.contrib.auth import get_user_model
from .models import Follow
# Follow = apps.get_model('Users', 'Follow')
class FollowForm(forms.ModelForm):
    """ModelForm exposing every field of the Follow model."""

    class Meta:
        model = Follow
        # Empty exclusion set: all model fields are included.
        exclude = set()
| 277 | 80 |
import launch
from launch.substitutions import Command, LaunchConfiguration
from launch_ros.substitutions import FindPackageShare
from launch_ros.actions import Node
import os
def generate_launch_description():
    """Build the launch description for visualising the omnibot URDF.

    Declares the `gui`, `model`, `rviz_config` and `use_sim_time` launch
    arguments and starts robot_state_publisher, exactly one of the two
    joint-state publishers (GUI or headless, switched by `gui`) and RViz.
    """
    pkg_share = FindPackageShare(package='omnibot_description').find('omnibot_description')
    default_model_path = os.path.join(pkg_share, 'urdf/omnibot.urdf.xacro')
    default_rviz_config_path = os.path.join(pkg_share, 'rviz/urdf_config.rviz')

    # Publishes TF frames from the xacro-expanded robot description.
    robot_state_publisher = Node(
        package='robot_state_publisher',
        executable='robot_state_publisher',
        parameters=[{'robot_description': Command(['xacro ', LaunchConfiguration('model')])}]
    )
    # Headless joint-state publisher, active only when gui:=False.
    joint_state_publisher = Node(
        package='joint_state_publisher',
        executable='joint_state_publisher',
        name='joint_state_publisher',
        condition=launch.conditions.UnlessCondition(LaunchConfiguration('gui'))
    )
    # Slider-based GUI publisher, active only when gui:=True.
    joint_state_publisher_gui = Node(
        package='joint_state_publisher_gui',
        executable='joint_state_publisher_gui',
        name='joint_state_publisher_gui',
        condition=launch.conditions.IfCondition(LaunchConfiguration('gui'))
    )
    rviz_node = Node(
        package='rviz2',
        executable='rviz2',
        name='rviz2',
        output='screen',
        arguments=['-d', LaunchConfiguration('rviz_config')],
    )
    # NOTE(review): use_sim_time is declared but not passed to any node here.
    return launch.LaunchDescription([
        launch.actions.DeclareLaunchArgument(name='gui', default_value='True',
                                             description='Flag to enable joint_state_publisher_gui'),
        launch.actions.DeclareLaunchArgument(name='model', default_value=default_model_path,
                                             description='Absolute path to robot urdf file'),
        launch.actions.DeclareLaunchArgument(name='rviz_config', default_value=default_rviz_config_path,
                                             description='Absolute path to rviz config file'),
        launch.actions.DeclareLaunchArgument(name='use_sim_time', default_value='true',
                                             description='Use simulation (Gazebo) clock if true'),
        joint_state_publisher_gui,
        joint_state_publisher,
        robot_state_publisher,
        rviz_node
    ])
| 2,145 | 627 |
from .gl import GL20, GL21, GL30, GL31, GL32, GL33, GL40, GL41, GL42, GL43, GL44, GL45, GL_ANY
from .enum import *
from .constant import * | 138 | 69 |
from .object import *
from .ns import *
from .io import *
from .time import *
| 78 | 25 |
import pandas as pd
import hashlib
import json
import traceback
from pymysql import IntegrityError
from datetime import datetime, timedelta
from db_adapter.logger import logger
from db_adapter.exceptions import DatabaseAdapterError, DuplicateEntryError
from db_adapter.curw_obs.station import StationEnum
from db_adapter.constants import COMMON_DATE_TIME_FORMAT
class Timeseries:
    """Data-access helper for observation timeseries stored in the `run`
    (metadata) and `data` (time/value) tables.
    """

    def __init__(self, pool):
        """
        :param pool: database connection pool providing `.connection()`
        """
        self.pool = pool

    @staticmethod
    def generate_timeseries_id(meta_data):
        """
        Generate the event id for given metadata.

        Only 'latitude', 'longitude', 'station_type', 'variable', 'unit',
        'unit_type' are used to generate the id (i.e. hash value); any other
        keys in *meta_data* are ignored.

        :param meta_data: Dict with 'latitude', 'longitude', 'station_type',
            'variable', 'unit', 'unit_type' keys
        :return: str: sha256 hash value in hex format (length of 64 characters)
        """
        hash_data = {key: meta_data[key]
                     for key in ('latitude', 'longitude', 'station_type',
                                 'variable', 'unit', 'unit_type')}
        sha256 = hashlib.sha256()
        # sort_keys makes the serialisation -- and hence the id -- deterministic.
        sha256.update(json.dumps(hash_data, sort_keys=True).encode("ascii"))
        return sha256.hexdigest()

    def get_timeseries_id_if_exists(self, meta_data):
        """
        Check whether a timeseries id exists in the database for a given set
        of metadata.

        :param meta_data: Dict with 'latitude', 'longitude', 'station_type',
            'variable', 'unit', 'unit_type' keys
        :return: the timeseries id if it exists, None otherwise
        :raises Exception: when the database lookup fails
        """
        event_id = self.generate_timeseries_id(meta_data)

        connection = self.pool.connection()
        try:
            with connection.cursor() as cursor:
                sql_statement = "SELECT 1 FROM `run` WHERE `id`=%s"
                is_exist = cursor.execute(sql_statement, event_id)
            return event_id if is_exist > 0 else None
        except Exception as exception:
            error_message = "Retrieving timeseries id for metadata={} failed.".format(meta_data)
            logger.error(error_message)
            traceback.print_exc()
            raise exception
        finally:
            if connection is not None:
                connection.close()

    def is_id_exists(self, id_):
        """
        Check whether a given timeseries id exists in the database.

        :param id_: timeseries id
        :return: True if the id is in the database, False otherwise
        :raises Exception: when the database lookup fails
        """
        connection = self.pool.connection()
        try:
            with connection.cursor() as cursor:
                sql_statement = "SELECT 1 FROM `run` WHERE `id`=%s"
                is_exist = cursor.execute(sql_statement, id_)
            return is_exist > 0
        except Exception as exception:
            error_message = "Check operation to find timeseries id {} in the run table failed.".format(id_)
            logger.error(error_message)
            traceback.print_exc()
            # Fixed: this used to be `raise False`, which itself raises
            # TypeError (exceptions must derive from BaseException).
            raise exception
        finally:
            if connection is not None:
                connection.close()

    def insert_data(self, timeseries, upsert=False):
        """
        Insert a timeseries into the Data table in the database.

        :param timeseries: list of [tms_id, time, value] lists
        :param boolean upsert: If True, upsert existing values ON DUPLICATE KEY.
            Default is False.
            Ref: 1). https://stackoverflow.com/a/14383794/1461060
                 2). https://chartio.com/resources/tutorials/how-to-insert-if-row-does-not-exist-upsert-in-mysql/
        :return: affected row count if the insertion was successful
        :raises Exception: when the insertion fails (transaction rolled back)
        """
        row_count = 0
        connection = self.pool.connection()
        try:
            with connection.cursor() as cursor:
                if upsert:
                    sql_statement = "INSERT INTO `data` (`id`, `time`, `value`) VALUES (%s, %s, %s) " \
                                    "ON DUPLICATE KEY UPDATE `value`=VALUES(`value`)"
                else:
                    sql_statement = "INSERT INTO `data` (`id`, `time`, `value`) VALUES (%s, %s, %s)"
                row_count = cursor.executemany(sql_statement, timeseries)
            connection.commit()
            return row_count
        except Exception as exception:
            connection.rollback()
            # NOTE: assumes a non-empty timeseries; an empty list would make
            # this error message itself raise IndexError.
            error_message = "Data insertion to data table for tms id {}, upsert={} failed.".format(
                timeseries[0][0], upsert)
            logger.error(error_message)
            traceback.print_exc()
            raise exception
        finally:
            if connection is not None:
                connection.close()

    def insert_run(self, run_meta):
        """
        Insert a new entry into the `run` table.

        :param run_meta: dictionary like
            {
                'tms_id'     : '',
                'station_id' : '',
                'unit_id'    : '',
                'variable_id': ''
            }
        :return: the timeseries id if the insertion was successful
        :raises Exception: when the insertion fails (transaction rolled back)
        """
        connection = self.pool.connection()
        try:
            with connection.cursor() as cursor:
                sql_statement = "INSERT INTO `run` (`id`, `station`, `variable`, `unit`) " \
                                "VALUES ( %s, %s, %s, %s)"
                cursor.execute(sql_statement, (run_meta.get('tms_id'), run_meta.get('station_id'),
                                               run_meta.get('variable_id'), run_meta.get('unit_id')))
            connection.commit()
            return run_meta.get('tms_id')
        except Exception as exception:
            connection.rollback()
            error_message = "Insertion failed for run entry with tms_id={}, station_id={}, " \
                            " variable_id={}, unit_id={}" \
                .format(run_meta.get('tms_id'), run_meta.get('station_id'),
                        run_meta.get('variable_id'), run_meta.get('unit_id'))
            logger.error(error_message)
            traceback.print_exc()
            raise exception
        finally:
            if connection is not None:
                connection.close()

    def get_end_date(self, id_):
        """
        Retrieve the end date of a timeseries.

        :param id_: timeseries id
        :return: end_date, or None when the id does not exist
        :raises Exception: when the lookup fails
        """
        connection = self.pool.connection()
        try:
            with connection.cursor() as cursor:
                sql_statement = "SELECT `end_date` FROM `run` WHERE `id`=%s"
                row_count = cursor.execute(sql_statement, id_)
                if row_count > 0:
                    # NOTE(review): assumes a dict-style cursor (rows keyed by
                    # column name) -- confirm the pool's cursor class.
                    return cursor.fetchone()['end_date']
                return None
        except Exception as exception:
            error_message = "Retrieving end_date for id={} failed.".format(id_)
            logger.error(error_message)
            traceback.print_exc()
            raise exception
        finally:
            if connection is not None:
                connection.close()

    def update_end_date(self, id_, end_date):
        """
        Update end_date for an inserted timeseries, only if the given date is
        later than the existing one.

        :param id_: timeseries id
        :param end_date: datetime, or string in COMMON_DATE_TIME_FORMAT
        :return: end_date if the update was applied, None otherwise
        :raises Exception: when the update fails (transaction rolled back)
        """
        connection = self.pool.connection()
        if isinstance(end_date, str):
            end_date = datetime.strptime(end_date, COMMON_DATE_TIME_FORMAT)
        existing_end_date = None
        try:
            with connection.cursor() as cursor:
                sql_statement = "SELECT `end_date` FROM `run` WHERE `id`=%s"
                row_count = cursor.execute(sql_statement, id_)
                if row_count > 0:
                    existing_end_date = cursor.fetchone()['end_date']
            # Only ever move the end date forward in time.
            if existing_end_date is None or existing_end_date < end_date:
                with connection.cursor() as cursor:
                    sql_statement = "UPDATE `run` SET `end_date`=%s WHERE `id`=%s"
                    cursor.execute(sql_statement, (end_date, id_))
                    connection.commit()
                    return end_date
        except Exception as exception:
            connection.rollback()
            error_message = "Updating end_date for id={} failed.".format(id_)
            logger.error(error_message)
            traceback.print_exc()
            raise exception
        finally:
            if connection is not None:
                connection.close()

    def update_start_date(self, id_, start_date):
        """
        Update start_date (the very first obs date) for an inserted
        timeseries, only if the given date is earlier than the existing one.

        :param id_: timeseries id
        :param start_date: datetime, or string in COMMON_DATE_TIME_FORMAT
        :return: start_date if the update was applied, None otherwise
        :raises Exception: when the update fails (transaction rolled back)
        """
        connection = self.pool.connection()
        if isinstance(start_date, str):
            start_date = datetime.strptime(start_date, COMMON_DATE_TIME_FORMAT)
        existing_start_date = None
        try:
            with connection.cursor() as cursor:
                sql_statement = "SELECT `start_date` FROM `run` WHERE `id`=%s"
                row_count = cursor.execute(sql_statement, id_)
                if row_count > 0:
                    existing_start_date = cursor.fetchone()['start_date']
            # Only ever move the start date backward in time.
            if existing_start_date is None or existing_start_date > start_date:
                with connection.cursor() as cursor:
                    sql_statement = "UPDATE `run` SET `start_date`=%s WHERE `id`=%s"
                    cursor.execute(sql_statement, (start_date, id_))
                    connection.commit()
                    return start_date
        except Exception as exception:
            connection.rollback()
            error_message = "Updating start_date for id={} failed.".format(id_)
            logger.error(error_message)
            traceback.print_exc()
            raise exception
        finally:
            if connection is not None:
                connection.close()
| 13,307 | 3,692 |
"""Sales app URLs."""
# Django
from django.urls import path, include
# Views
from .views import customers as customers_views
from .views import sales as sales_views
urlpatterns = [
    # Customer CRUD routes.
    # NOTE(review): 'CustomerFilterListViev' looks like a typo for
    # '...View', but it must match the class name in views/customers.py.
    path('customers/', customers_views.CustomerFilterListViev.as_view(), name='customers'),
    path('customers/create/', customers_views.CustomerCreateView.as_view(), name='customer-create'),
    path('customers/<int:pk>/', customers_views.CustomerDetailView.as_view(), name='customer'),
    path('customers/update/<int:pk>/', customers_views.CustomerUpdateView.as_view(), name='customer-update'),
    path('customers/delete/<int:pk>/', customers_views.CustomerDelete.as_view(), name='customer-delete'),
    # Sale CRUD routes, mirroring the customer URL structure.
    path('sales/', sales_views.SaleFilterListView.as_view(), name='sales'),
    path('sales/create/', sales_views.SaleCreateView.as_view(), name='sale-create'),
    path('sales/<int:pk>/', sales_views.SaleDetailView.as_view(), name='sale'),
    path('sales/update/<int:pk>/', sales_views.SaleUpdateView.as_view(), name='sale-update'),
    path('sales/delete/<int:pk>/', sales_views.SaleDelete.as_view(), name='sale-delete'),
]
| 1,148 | 389 |
############### for ###################
nums = [1, 2, 3, 4, 5]
# simple loop: visits every element in order
for num in nums:
    print(num)
# break keyword: stops the whole loop at the first match
for num in nums:
    if num == 3:
        print("found")
        break
    print(num)
# continue keyword: skips the rest of this iteration only
for num in nums:
    if num == 3:
        print("found")
        continue
    print(num)
# nested loop: the inner loop runs fully for every outer element
for num in nums:
    for letter in 'abc':
        print(num, letter)
# range(): yields 0..9 (the end value is exclusive)
for i in range(10):
    print(i)
# range() with a start value: yields 1..10
for i in range(1, 11):
    print(i)
############## while #################
x = 0
# Runs until the condition fails or break fires (here: at x == 5).
while x < 10:
    if x == 5:
        break
    print(x)
    x = x + 1
### infinity loop ###
# while True:
#     print(x)
#     x = x + 1
| 671 | 258 |
# Method I:
# Accepted; overall time complexity is O((m+n) log(m+n)) due to the sort.
# 66.68%; 144ms.
class Solution(object):
    def findMedianSortedArrays(self, nums1, nums2):
        """Median of two sorted arrays via sorting the concatenation.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: float
        """
        merged = sorted(nums1 + nums2)
        total = len(merged)
        mid = int(total / 2)
        if total % 2 == 1:
            # Odd count: the single middle element is the median.
            return merged[mid]
        # Even count: average of the two middle elements.
        return (merged[mid - 1] + merged[mid]) / 2
# Method II:
# Accepted; overall time complexity is O(m+n) due to the two-pointer merge loop.
# 86.20%; 124ms.
class Solution(object):
    def findMedianSortedArrays(self, nums1, nums2):
        """Median of two sorted arrays via a linear two-pointer merge.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: float
        """
        n1, n2 = len(nums1), len(nums2)
        total = n1 + n2
        merged = []
        i = j = 0
        # Standard merge of the two pre-sorted inputs.
        while i < n1 and j < n2:
            a, b = nums1[i], nums2[j]
            if a < b:
                merged.append(a)
                i += 1
            elif a > b:
                merged.append(b)
                j += 1
            else:
                # Equal values: take one from each side.
                merged.extend([a, b])
                i += 1
                j += 1
        # At most one of these remainders is non-empty.
        merged.extend(nums1[i:])
        merged.extend(nums2[j:])
        half = int(total / 2)
        if total % 2 == 1:
            return merged[half]
        return (merged[half - 1] + merged[half]) / 2
# Method III:
# Target time complexity: O(logn)
# To be continued...
| 1,704 | 618 |
# pylint: disable = C0111, C0103, C0411, C0301, W0102, C0330, W0603
"""This module is used to extract the key_holds times and digraph
up_down times from the raw events of the subjects."""
import read_write
import numpy as np
import operator
import time
import general_purpose
import sys
import py_node_communication as pynocomm
# All limits are in milliseconds
KEY_HOLD_UPPER_LIMIT = 400  # longest key-hold time accepted
KEY_HOLD_LOWER_LIMIT = 0  # shortest key-hold time accepted
DIGRAPH_UP_DOWN_UPPER_LIMIT = 800  # longest key1-up -> key2-down gap accepted
# Negative lower bound: presumably key2 may be pressed before key1 is
# released (overlapping keystrokes) -- confirm with the extraction logic.
DIGRAPH_UP_DOWN_LOWER_LIMIT = -400
def _my_search_event(eventlist, event, key=''):
"""Searches a list of events for specific event and the specific key if specified
\n eventlist: A list of raw events of a subject
\n event: Can be 'keystrokeDown' or 'keystrokeUp'
\n key: It specifies a spefic key (e.g. KeyE or Space)
"""
if key == '':
for i, val in enumerate(eventlist):
if val['event'] == event:
return i, val
else:
for i, val in enumerate(eventlist):
if val['event'] == event and val['key'] == key:
return i, val
pynocomm.send_to_node(
'Returning -1 from my searchevent' + str(event) + str(key))
return -1, {}
def _digraph_all(subject_events_data, ignore_space=False, sortByDigraph=True):
    """Extracts the subject's digraph timings of key_holds and up_down by the raw events.
    \n subject_events_data: The raw events list
    \n ignore_space: Boolean. If True it ignores space
    \n sortByDigraph: Boolean. If True it sorts data by digraph
    \n Returns: An list of dicts [{'digraph', 'points'}]
    where points is a nx3 numpy array with x,y,z as key_hold_1, key_hold_2 and up_down timings of the digraph
    """
    ret = []
    # work with a copy because the pop method changes the list of dict :/
    events = subject_events_data[:]
    if ignore_space is True:
        # NOTE(review): space filtering reads evt['data'] while key matching
        # below reads evt['key'] -- presumably both carry the key name; confirm
        # against the raw event schema.
        events = [evt for evt in events if evt['data'] != 'Space']
    while True:
        # A digraph needs the key-1 down/up pair plus at least a key-2 down.
        if len(events) <= 2:
            break
        # The next keyDown event will be the first
        key_1_down_event = events[0]
        if key_1_down_event['event'] != 'keystrokeDown':
            pynocomm.send_to_node(
                '...digraph_all: Continuing, first event is not keydown ->' + str(events[0]))
            events.pop(0)
            continue
        # Find the respective keyUp event of key_1
        # DEBUGGING
        # pynocomm.send_to_node(str(key_1_down_event))
        # DEBUGGING
        key_1_up_event_index, key_1_up_event = _my_search_event(
            events[1:], 'keystrokeUp', key_1_down_event['key'])
        if key_1_up_event_index == -1:
            pynocomm.send_to_node(
                '...digraph_all: Continuing, Couldnt find keystrokeUp event for key = ' + str(key_1_down_event['key']))
            events.pop(0)
            continue
        else:
            # Search started at events[1:], so shift the index back into the
            # coordinates of the full events list.
            key_1_up_event_index += 1
        # Find the following keyDown event after the keyDown of key_1
        key_2_down_event_index, key_2_down_event = _my_search_event(
            events[1:], 'keystrokeDown')
        if key_2_down_event_index == -1:
            # NOTE(review): unhandled case -- key_2_down_event is {} here and the
            # lookups below would raise KeyError; presumably unreachable because
            # len(events) > 2 was checked, but worth confirming.
            pynocomm.send_to_node('1993: What now?')
        else:
            key_2_down_event_index += 1
        # Find the respective keyUp event of key_2
        key_2_up_event_index, key_2_up_event = _my_search_event(
            events[key_2_down_event_index + 1:], 'keystrokeUp', key_2_down_event['key'])
        if key_2_up_event_index == -1:
            # Just pop and continue (it's noise)
            events.pop(0)
            events.pop(key_1_up_event_index - 1)  # index has changed now
            pynocomm.send_to_node('1994: Removed Noise')
            continue
        else:
            key_2_up_event_index += key_2_down_event_index + 1
        # Calculate
        # Here if I want down_down: "down_down": key_2_down_event['timestamp'] - key_1_down_event['timestamp'],
        digraph_obj = {
            "digraph": key_1_down_event['key'] + key_2_down_event['key'],
            "up_down": key_2_down_event['timestamp'] - key_1_up_event['timestamp'],
            "key_holds": [key_1_up_event['timestamp'] - key_1_down_event['timestamp'], key_2_up_event['timestamp'] - key_2_down_event['timestamp']]
        }
        # One 1x3 row: (key_hold_1, key_hold_2, up_down) for this occurrence.
        xyz = np.array([[digraph_obj['key_holds'][0],
                         digraph_obj['key_holds'][1], digraph_obj['up_down']]])
        # Store appropriately
        # Keep the sample only if all three timings fall inside the configured
        # outlier limits (module-level constants, overridable via main()).
        if (general_purpose.is_not_extreme_outlier(digraph_obj['up_down'], DIGRAPH_UP_DOWN_LOWER_LIMIT, DIGRAPH_UP_DOWN_UPPER_LIMIT)
                and general_purpose.is_not_extreme_outlier(digraph_obj['key_holds'][0], KEY_HOLD_LOWER_LIMIT, KEY_HOLD_UPPER_LIMIT)
                and general_purpose.is_not_extreme_outlier(digraph_obj['key_holds'][1], KEY_HOLD_LOWER_LIMIT, KEY_HOLD_UPPER_LIMIT)):
            if ret == []:
                ret.append({"digraph": digraph_obj['digraph'],
                            "points": xyz})
            else:
                # Linear scan for an existing entry of this digraph; append the
                # new row to it, or start a new entry.
                tmpi = -1
                for i, val in enumerate(ret):
                    if val['digraph'] == digraph_obj['digraph']:
                        tmpi = i
                        break
                if tmpi != -1:
                    ret[tmpi]['points'] = np.append(
                        ret[tmpi]['points'], xyz, axis=0)
                else:
                    ret.append({"digraph": digraph_obj['digraph'],
                                "points": xyz})
        # Update and remove the 1st key down and up for next iteration
        events.pop(0)
        events.pop(key_1_up_event_index - 1)  # index has changed now
    # Sort by Digraph
    if sortByDigraph is True:
        ret = sorted(ret, key=operator.itemgetter('digraph'))
    return ret
def one(doc, ignore_space=False, logg=True):
    """Extracts digraph up_down and key_holds times from subject doc events
    \nReturns: Object with 'id', 'subject', 'track_code' and 'data': as calculated from digraph_all func
    """
    started_at = time.time()
    timings = _digraph_all(doc['sessions']['data'],
                           ignore_space=ignore_space, sortByDigraph=True)
    result = {
        "_id": doc['_id'],
        "subject": doc['subject'],
        "track_code": doc['track_code'],
        "data": timings,
    }
    if logg is True:
        # Report the per-subject extraction time back to the node.js side.
        pynocomm.send_to_node(
            '-Subject Timings of "' + doc['subject'] + '" extracted in ' + str(time.time() - started_at) + ' seconds.')
    return result
def all(docs, write_to_json=True, ignore_space=False, filename='./trained-projects/subjects-data'):
    """Just some wrapper that takes all docs
    \nReturns: [{'_id':'', 'subject': '', 'track_code': '', data: {[...]]}}]
    """
    # NOTE: the name shadows the builtin all(); kept because callers
    # (e.g. main) use this public name.
    results = [one(subject_doc, ignore_space=ignore_space)
               for subject_doc in docs]
    if write_to_json is True:
        read_write.write_timings_to_local(results, filename)
    return results
def main():
    """It is called by node.js to extract data of all subjects.

    Receives docs, the write-to-json flag and timing limits from the node
    process, overrides the module-level outlier limits, runs the extraction
    over all docs and sends the (JSON-serializable) result back.
    """
    _DATA_ = pynocomm.receive_from_node()
    DOCS = _DATA_['docs']
    WRITE_EXTRACTED_TO_JSON = _DATA_['writeExtractedToJson']
    TIM_LIMITS = _DATA_['timing_limits']
    # Replace the module defaults with the caller-supplied limits so that
    # _digraph_all filters with the configured bounds.
    global KEY_HOLD_LOWER_LIMIT
    KEY_HOLD_LOWER_LIMIT = TIM_LIMITS['key_hold']['min']
    global KEY_HOLD_UPPER_LIMIT
    KEY_HOLD_UPPER_LIMIT = TIM_LIMITS['key_hold']['max']
    global DIGRAPH_UP_DOWN_LOWER_LIMIT
    DIGRAPH_UP_DOWN_LOWER_LIMIT = TIM_LIMITS['digraph_up_down']['min']
    global DIGRAPH_UP_DOWN_UPPER_LIMIT
    DIGRAPH_UP_DOWN_UPPER_LIMIT = TIM_LIMITS['digraph_up_down']['max']
    # Convert numpy arr to list, to be JSON serializable
    _data = all(DOCS, write_to_json=WRITE_EXTRACTED_TO_JSON,
                filename='./trained-projects/' + DOCS[0]['track_code'])
    for d in _data:
        for p in d['data']:
            # np.ndarray is not JSON serializable; tolist() makes it so.
            p['points'] = p['points'].tolist()
    pynocomm.send_to_node(_data)
if __name__ == '__main__':
    main()
| 7,830 | 2,678 |
import errno
import os
import shutil
import struct
from binascii import unhexlify
from collections import defaultdict
from configparser import ConfigParser
from datetime import datetime
from functools import partial
from itertools import islice
from zlib import crc32
import msgpack
import logging
logger = logging.getLogger(__name__)
from .constants import * # NOQA
from .hashindex import NSIndex
from .helpers import Error, ErrorWithTraceback, IntegrityError, InternalOSError
from .helpers import Location
from .helpers import ProgressIndicatorPercent
from .helpers import bin_to_hex
from .locking import UpgradableLock, LockError, LockErrorT
from .lrucache import LRUCache
from .platform import SyncFile, sync_dir
# Upper bound for a single log-entry size; _read rejects anything larger.
MAX_OBJECT_SIZE = 20 * 1024 * 1024
# Every segment file starts with this magic; see LoggedIO.iter_objects.
MAGIC = b'BORG_SEG'
MAGIC_LEN = len(MAGIC)
# Log-entry tags (see the Repository class docstring).
TAG_PUT = 0
TAG_DELETE = 1
TAG_COMMIT = 2
# Factory for a {segment: freeable_bytes} mapping defaulting to 0.
FreeSpace = partial(defaultdict, int)
class Repository:
    """
    Filesystem based transactional key value store
    Transactionality is achieved by using a log (aka journal) to record changes. The log is a series of numbered files
    called segments. Each segment is a series of log entries. The segment number together with the offset of each
    entry relative to its segment start establishes an ordering of the log entries. This is the "definition" of
    time for the purposes of the log.
    Log entries are either PUT, DELETE or COMMIT.
    A COMMIT is always the final log entry in a segment and marks all data from the beginning of the log until the
    segment ending with the COMMIT as committed and consistent. The segment number of a segment ending with a COMMIT
    is called the transaction ID of that commit, and a segment ending with a COMMIT is called committed.
    When reading from a repository it is first checked whether the last segment is committed. If it is not, then
    all segments after the last committed segment are deleted; they contain log entries whose consistency is not
    established by a COMMIT.
    Note that the COMMIT can't establish consistency by itself, but only manages to do so with proper support from
    the platform (including the hardware). See platform.base.SyncFile for details.
    A PUT inserts a key-value pair. The value is stored in the log entry, hence the repository implements
    full data logging, meaning that all data is consistent, not just metadata (which is common in file systems).
    A DELETE marks a key as deleted.
    For a given key only the last entry regarding the key, which is called current (all other entries are called
    superseded), is relevant: If there is no entry or the last entry is a DELETE then the key does not exist.
    Otherwise the last PUT defines the value of the key.
    By superseding a PUT (with either another PUT or a DELETE) the log entry becomes obsolete. A segment containing
    such obsolete entries is called sparse, while a segment containing no such entries is called compact.
    Sparse segments can be compacted and thereby disk space freed. This destroys the transaction for which the
    superseded entries where current.
    On disk layout:
    dir/README
    dir/config
    dir/data/<X // SEGMENTS_PER_DIR>/<X>
    dir/index.X
    dir/hints.X
    """
    class DoesNotExist(Error):
        """Repository {} does not exist."""
    class AlreadyExists(Error):
        """Repository {} already exists."""
    class InvalidRepository(Error):
        """{} is not a valid repository. Check repo config."""
    class CheckNeeded(ErrorWithTraceback):
        """Inconsistency detected. Please run "borg check {}"."""
    class ObjectNotFound(ErrorWithTraceback):
        """Object with key {} not found in repository {}."""
    def __init__(self, path, create=False, exclusive=False, lock_wait=None, lock=True):
        # Creation/opening is deferred to __enter__; here we only record the
        # parameters and initialize empty state.
        self.path = os.path.abspath(path)
        self._location = Location('file://%s' % self.path)
        self.io = None
        self.lock = None
        self.index = None
        self._active_txn = False
        self.lock_wait = lock_wait
        self.do_lock = lock
        self.do_create = create
        self.exclusive = exclusive
    def __del__(self):
        if self.lock:
            self.close()
            # NOTE(review): intentionally loud -- reaching __del__ with the
            # lock still held means the repository was never closed explicitly.
            assert False, "cleanup happened in Repository.__del__"
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.path)
    def __enter__(self):
        # Create on first use if requested (only once), then open.
        if self.do_create:
            self.do_create = False
            self.create(self.path)
        self.open(self.path, self.exclusive, lock_wait=self.lock_wait, lock=self.do_lock)
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Roll back on error so a half-finished transaction is never committed.
        if exc_type is not None:
            self.rollback()
        self.close()
    @property
    def id_str(self):
        # Hex string form of the repository id.
        return bin_to_hex(self.id)
    def create(self, path):
        """Create a new empty repository at `path`
        """
        if os.path.exists(path) and (not os.path.isdir(path) or os.listdir(path)):
            raise self.AlreadyExists(path)
        if not os.path.exists(path):
            os.mkdir(path)
        with open(os.path.join(path, 'README'), 'w') as fd:
            fd.write('This is a Borg repository\n')
        os.mkdir(os.path.join(path, 'data'))
        config = ConfigParser(interpolation=None)
        config.add_section('repository')
        config.set('repository', 'version', '1')
        config.set('repository', 'segments_per_dir', str(DEFAULT_SEGMENTS_PER_DIR))
        config.set('repository', 'max_segment_size', str(DEFAULT_MAX_SEGMENT_SIZE))
        config.set('repository', 'append_only', '0')
        config.set('repository', 'id', bin_to_hex(os.urandom(32)))
        self.save_config(path, config)
    def save_config(self, path, config):
        # Serialize the ConfigParser state to dir/config.
        config_path = os.path.join(path, 'config')
        with open(config_path, 'w') as fd:
            config.write(fd)
    def save_key(self, keydata):
        """Persist key material in the repository config."""
        assert self.config
        keydata = keydata.decode('utf-8')  # remote repo: msgpack issue #99, getting bytes
        self.config.set('repository', 'key', keydata)
        self.save_config(self.path, self.config)
    def load_key(self):
        """Return the key material stored in the repository config, as bytes."""
        keydata = self.config.get('repository', 'key')
        return keydata.encode('utf-8')  # remote repo: msgpack issue #99, returning bytes
    def destroy(self):
        """Destroy the repository at `self.path`
        """
        if self.append_only:
            raise ValueError(self.path + " is in append-only mode")
        self.close()
        os.remove(os.path.join(self.path, 'config'))  # kill config first
        shutil.rmtree(self.path)
    def get_index_transaction_id(self):
        """Return the transaction id of the newest index.X file, or None."""
        indices = sorted((int(name[6:]) for name in os.listdir(self.path) if name.startswith('index.') and name[6:].isdigit()))
        if indices:
            return indices[-1]
        else:
            return None
    def check_transaction(self):
        """Verify index and segment transaction ids agree; replay if not."""
        index_transaction_id = self.get_index_transaction_id()
        segments_transaction_id = self.io.get_segments_transaction_id()
        if index_transaction_id is not None and segments_transaction_id is None:
            raise self.CheckNeeded(self.path)
        # Attempt to automatically rebuild index if we crashed between commit
        # tag write and index save
        if index_transaction_id != segments_transaction_id:
            if index_transaction_id is not None and index_transaction_id > segments_transaction_id:
                # Index is newer than the segments: replay everything.
                replay_from = None
            else:
                replay_from = index_transaction_id
            self.replay_segments(replay_from, segments_transaction_id)
    def get_transaction_id(self):
        self.check_transaction()
        return self.get_index_transaction_id()
    def break_lock(self):
        UpgradableLock(os.path.join(self.path, 'lock')).break_lock()
    def open(self, path, exclusive, lock_wait=None, lock=True):
        """Open an existing repository: acquire the lock, read the config."""
        self.path = path
        if not os.path.isdir(path):
            raise self.DoesNotExist(path)
        if lock:
            self.lock = UpgradableLock(os.path.join(path, 'lock'), exclusive, timeout=lock_wait).acquire()
        else:
            self.lock = None
        self.config = ConfigParser(interpolation=None)
        self.config.read(os.path.join(self.path, 'config'))
        if 'repository' not in self.config.sections() or self.config.getint('repository', 'version') != 1:
            raise self.InvalidRepository(path)
        self.max_segment_size = self.config.getint('repository', 'max_segment_size')
        self.segments_per_dir = self.config.getint('repository', 'segments_per_dir')
        self.append_only = self.config.getboolean('repository', 'append_only', fallback=False)
        self.id = unhexlify(self.config.get('repository', 'id').strip())
        self.io = LoggedIO(self.path, self.max_segment_size, self.segments_per_dir)
    def close(self):
        if self.lock:
            if self.io:
                self.io.close()
                self.io = None
            self.lock.release()
            self.lock = None
    def commit(self, save_space=False):
        """Commit transaction
        """
        self.io.write_commit()
        if not self.append_only:
            self.compact_segments(save_space=save_space)
        self.write_index()
        self.rollback()
    def open_index(self, transaction_id, auto_recover=True):
        """Load the on-disk index for *transaction_id*; rebuild it if corrupt."""
        if transaction_id is None:
            return NSIndex()
        index_path = os.path.join(self.path, 'index.%d' % transaction_id).encode('utf-8')
        try:
            return NSIndex.read(index_path)
        except RuntimeError as error:
            assert str(error) == 'hashindex_read failed'  # everything else means we're in *deep* trouble
            logger.warning('Repository index missing or corrupted, trying to recover')
            try:
                os.unlink(index_path)
            except OSError as e:
                raise InternalOSError(e) from None
            if not auto_recover:
                raise
            self.prepare_txn(self.get_transaction_id())
            # don't leave an open transaction around
            self.commit()
            return self.open_index(self.get_transaction_id())
        except OSError as e:
            raise InternalOSError(e) from None
    def prepare_txn(self, transaction_id, do_cleanup=True):
        """Begin a transaction: upgrade the lock, load index and hints state."""
        self._active_txn = True
        try:
            self.lock.upgrade()
        except (LockError, LockErrorT):
            # if upgrading the lock to exclusive fails, we do not have an
            # active transaction. this is important for "serve" mode, where
            # the repository instance lives on - even if exceptions happened.
            self._active_txn = False
            raise
        if not self.index or transaction_id is None:
            try:
                self.index = self.open_index(transaction_id, False)
            except RuntimeError:
                self.check_transaction()
                self.index = self.open_index(transaction_id, False)
        if transaction_id is None:
            self.segments = {}  # XXX bad name: usage_count_of_segment_x = self.segments[x]
            self.compact = FreeSpace()  # XXX bad name: freeable_space_of_segment_x = self.compact[x]
        else:
            if do_cleanup:
                self.io.cleanup(transaction_id)
            hints_path = os.path.join(self.path, 'hints.%d' % transaction_id)
            index_path = os.path.join(self.path, 'index.%d' % transaction_id)
            try:
                with open(hints_path, 'rb') as fd:
                    hints = msgpack.unpack(fd)
            except (msgpack.UnpackException, msgpack.ExtraData, FileNotFoundError) as e:
                logger.warning('Repository hints file missing or corrupted, trying to recover')
                if not isinstance(e, FileNotFoundError):
                    os.unlink(hints_path)
                # index must exist at this point
                os.unlink(index_path)
                self.check_transaction()
                self.prepare_txn(transaction_id)
                return
            except OSError as os_error:
                raise InternalOSError(os_error) from None
            if hints[b'version'] == 1:
                # v1 hints only listed sparse segments; recompute their sizes.
                logger.debug('Upgrading from v1 hints.%d', transaction_id)
                self.segments = hints[b'segments']
                self.compact = FreeSpace()
                for segment in sorted(hints[b'compact']):
                    logger.debug('Rebuilding sparse info for segment %d', segment)
                    self._rebuild_sparse(segment)
                logger.debug('Upgrade to v2 hints complete')
            elif hints[b'version'] != 2:
                raise ValueError('Unknown hints file version: %d' % hints[b'version'])
            else:
                self.segments = hints[b'segments']
                self.compact = FreeSpace(hints[b'compact'])
    def write_index(self):
        """Write hints and index for the current transaction, prune old ones."""
        hints = {b'version': 2,
                 b'segments': self.segments,
                 b'compact': self.compact}
        transaction_id = self.io.get_segments_transaction_id()
        hints_file = os.path.join(self.path, 'hints.%d' % transaction_id)
        # Write-to-temp + fsync + rename so the hints file appears atomically.
        with open(hints_file + '.tmp', 'wb') as fd:
            msgpack.pack(hints, fd)
            fd.flush()
            os.fsync(fd.fileno())
        os.rename(hints_file + '.tmp', hints_file)
        self.index.write(os.path.join(self.path, 'index.tmp'))
        os.rename(os.path.join(self.path, 'index.tmp'),
                  os.path.join(self.path, 'index.%d' % transaction_id))
        if self.append_only:
            with open(os.path.join(self.path, 'transactions'), 'a') as log:
                print('transaction %d, UTC time %s' % (transaction_id, datetime.utcnow().isoformat()), file=log)
        # Remove old auxiliary files
        current = '.%d' % transaction_id
        for name in os.listdir(self.path):
            if not name.startswith(('index.', 'hints.')):
                continue
            if name.endswith(current):
                continue
            os.unlink(os.path.join(self.path, name))
        self.index = None
    def compact_segments(self, save_space=False):
        """Compact sparse segments by copying data into new segments
        """
        if not self.compact:
            return
        index_transaction_id = self.get_index_transaction_id()
        segments = self.segments
        unused = []  # list of segments, that are not used anymore
        def complete_xfer():
            # complete the transfer (usually exactly when some target segment
            # is full, or at the very end when everything is processed)
            nonlocal unused
            # commit the new, compact, used segments
            self.io.write_commit()
            # get rid of the old, sparse, unused segments. free space.
            for segment in unused:
                assert self.segments.pop(segment) == 0
                self.io.delete_segment(segment)
                del self.compact[segment]
            unused = []
        for segment, freeable_space in sorted(self.compact.items()):
            if not self.io.segment_exists(segment):
                del self.compact[segment]
                continue
            segment_size = self.io.segment_size(segment)
            if segment_size > 0.2 * self.max_segment_size and freeable_space < 0.15 * segment_size:
                logger.debug('not compacting segment %d for later (only %d bytes are sparse)',
                             segment, freeable_space)
                continue
            segments.setdefault(segment, 0)
            for tag, key, offset, data in self.io.iter_objects(segment, include_data=True):
                # Only copy entries that are still current per the index.
                if tag == TAG_PUT and self.index.get(key, (-1, -1)) == (segment, offset):
                    try:
                        new_segment, offset = self.io.write_put(key, data, raise_full=save_space)
                    except LoggedIO.SegmentFull:
                        complete_xfer()
                        new_segment, offset = self.io.write_put(key, data)
                    self.index[key] = new_segment, offset
                    segments.setdefault(new_segment, 0)
                    segments[new_segment] += 1
                    segments[segment] -= 1
                elif tag == TAG_DELETE:
                    # DELETEs newer than the committed index must be preserved.
                    if index_transaction_id is None or segment > index_transaction_id:
                        try:
                            self.io.write_delete(key, raise_full=save_space)
                        except LoggedIO.SegmentFull:
                            complete_xfer()
                            self.io.write_delete(key)
            assert segments[segment] == 0
            unused.append(segment)
        complete_xfer()
    def replay_segments(self, index_transaction_id, segments_transaction_id):
        """Rebuild the index by replaying segment files after a crash."""
        self.prepare_txn(index_transaction_id, do_cleanup=False)
        try:
            segment_count = sum(1 for _ in self.io.segment_iterator())
            pi = ProgressIndicatorPercent(total=segment_count, msg="Replaying segments %3.0f%%", same_line=True)
            for i, (segment, filename) in enumerate(self.io.segment_iterator()):
                pi.show(i)
                if index_transaction_id is not None and segment <= index_transaction_id:
                    continue
                if segment > segments_transaction_id:
                    break
                objects = self.io.iter_objects(segment)
                self._update_index(segment, objects)
            pi.finish()
            self.write_index()
        finally:
            self.rollback()
    def _update_index(self, segment, objects, report=None):
        """some code shared between replay_segments and check"""
        self.segments[segment] = 0
        for tag, key, offset, size in objects:
            if tag == TAG_PUT:
                try:
                    # If this PUT supersedes an older PUT, mark the old segment for compaction and count the free space
                    s, _ = self.index[key]
                    self.compact[s] += size
                    self.segments[s] -= 1
                except KeyError:
                    pass
                self.index[key] = segment, offset
                self.segments[segment] += 1
            elif tag == TAG_DELETE:
                try:
                    # if the deleted PUT is not in the index, there is nothing to clean up
                    s, offset = self.index.pop(key)
                except KeyError:
                    pass
                else:
                    if self.io.segment_exists(s):
                        # the old index is not necessarily valid for this transaction (e.g. compaction); if the segment
                        # is already gone, then it was already compacted.
                        self.segments[s] -= 1
                        size = self.io.read(s, offset, key, read_data=False)
                        self.compact[s] += size
            elif tag == TAG_COMMIT:
                continue
            else:
                msg = 'Unexpected tag {} in segment {}'.format(tag, segment)
                if report is None:
                    raise self.CheckNeeded(msg)
                else:
                    report(msg)
        if self.segments[segment] == 0:
            # Entirely superseded segment: its whole size is freeable.
            self.compact[segment] += self.io.segment_size(segment)
    def _rebuild_sparse(self, segment):
        """Rebuild sparse bytes count for a single segment relative to the current index."""
        self.compact[segment] = 0
        if self.segments[segment] == 0:
            self.compact[segment] += self.io.segment_size(segment)
            return
        for tag, key, offset, size in self.io.iter_objects(segment, read_data=False):
            if tag == TAG_PUT:
                if self.index.get(key, (-1, -1)) != (segment, offset):
                    # This PUT is superseded later
                    self.compact[segment] += size
            elif tag == TAG_DELETE:
                # The outcome of the DELETE has been recorded in the PUT branch already
                self.compact[segment] += size
    def check(self, repair=False, save_space=False):
        """Check repository consistency
        This method verifies all segment checksums and makes sure
        the index is consistent with the data stored in the segments.
        """
        if self.append_only and repair:
            raise ValueError(self.path + " is in append-only mode")
        error_found = False
        def report_error(msg):
            nonlocal error_found
            error_found = True
            logger.error(msg)
        logger.info('Starting repository check')
        assert not self._active_txn
        try:
            transaction_id = self.get_transaction_id()
            current_index = self.open_index(transaction_id)
        except Exception:
            transaction_id = self.io.get_segments_transaction_id()
            current_index = None
        if transaction_id is None:
            transaction_id = self.get_index_transaction_id()
        if transaction_id is None:
            transaction_id = self.io.get_latest_segment()
        if repair:
            self.io.cleanup(transaction_id)
        segments_transaction_id = self.io.get_segments_transaction_id()
        self.prepare_txn(None)  # self.index, self.compact, self.segments all empty now!
        segment_count = sum(1 for _ in self.io.segment_iterator())
        pi = ProgressIndicatorPercent(total=segment_count, msg="Checking segments %3.1f%%", step=0.1, same_line=True)
        for i, (segment, filename) in enumerate(self.io.segment_iterator()):
            pi.show(i)
            if segment > transaction_id:
                continue
            try:
                objects = list(self.io.iter_objects(segment))
            except IntegrityError as err:
                report_error(str(err))
                objects = []
                if repair:
                    self.io.recover_segment(segment, filename)
                    objects = list(self.io.iter_objects(segment))
            self._update_index(segment, objects, report_error)
        pi.finish()
        # self.index, self.segments, self.compact now reflect the state of the segment files up to <transaction_id>
        # We might need to add a commit tag if no committed segment is found
        if repair and segments_transaction_id is None:
            report_error('Adding commit tag to segment {}'.format(transaction_id))
            self.io.segment = transaction_id + 1
            self.io.write_commit()
        if current_index and not repair:
            # current_index = "as found on disk"
            # self.index = "as rebuilt in-memory from segments"
            if len(current_index) != len(self.index):
                report_error('Index object count mismatch. {} != {}'.format(len(current_index), len(self.index)))
            elif current_index:
                for key, value in self.index.iteritems():
                    if current_index.get(key, (-1, -1)) != value:
                        report_error('Index mismatch for key {}. {} != {}'.format(key, value, current_index.get(key, (-1, -1))))
        if repair:
            self.compact_segments(save_space=save_space)
            self.write_index()
        self.rollback()
        if error_found:
            if repair:
                logger.info('Completed repository check, errors found and repaired.')
            else:
                logger.error('Completed repository check, errors found.')
        else:
            logger.info('Completed repository check, no problems found.')
        return not error_found or repair
    def rollback(self):
        """Abort the current transaction by discarding the in-memory index."""
        self.index = None
        self._active_txn = False
    def __len__(self):
        if not self.index:
            self.index = self.open_index(self.get_transaction_id())
        return len(self.index)
    def __contains__(self, id):
        if not self.index:
            self.index = self.open_index(self.get_transaction_id())
        return id in self.index
    def list(self, limit=None, marker=None):
        """Return a list of up to *limit* object ids, starting after *marker*."""
        if not self.index:
            self.index = self.open_index(self.get_transaction_id())
        return [id_ for id_, _ in islice(self.index.iteritems(marker=marker), limit)]
    def get(self, id_):
        """Return the data stored for object *id_*; raise ObjectNotFound."""
        if not self.index:
            self.index = self.open_index(self.get_transaction_id())
        try:
            segment, offset = self.index[id_]
            return self.io.read(segment, offset, id_)
        except KeyError:
            raise self.ObjectNotFound(id_, self.path) from None
    def get_many(self, ids, is_preloaded=False):
        for id_ in ids:
            yield self.get(id_)
    def put(self, id, data, wait=True):
        """Store *data* under *id*, superseding any previous entry."""
        if not self._active_txn:
            self.prepare_txn(self.get_transaction_id())
        try:
            segment, offset = self.index[id]
        except KeyError:
            pass
        else:
            # Mark the superseded PUT as freeable and log an explicit DELETE.
            self.segments[segment] -= 1
            size = self.io.read(segment, offset, id, read_data=False)
            self.compact[segment] += size
            segment, size = self.io.write_delete(id)
            self.compact[segment] += size
            self.segments.setdefault(segment, 0)
        segment, offset = self.io.write_put(id, data)
        self.segments.setdefault(segment, 0)
        self.segments[segment] += 1
        self.index[id] = segment, offset
    def delete(self, id, wait=True):
        """Delete object *id*; raise ObjectNotFound if it does not exist."""
        if not self._active_txn:
            self.prepare_txn(self.get_transaction_id())
        try:
            segment, offset = self.index.pop(id)
        except KeyError:
            raise self.ObjectNotFound(id, self.path) from None
        self.segments[segment] -= 1
        size = self.io.read(segment, offset, id, read_data=False)
        self.compact[segment] += size
        segment, size = self.io.write_delete(id)
        self.compact[segment] += size
        self.segments.setdefault(segment, 0)
    def preload(self, ids):
        """Preload objects (only applies to remote repositories)
        """
class LoggedIO:
class SegmentFull(Exception):
"""raised when a segment is full, before opening next"""
header_fmt = struct.Struct('<IIB')
assert header_fmt.size == 9
put_header_fmt = struct.Struct('<IIB32s')
assert put_header_fmt.size == 41
header_no_crc_fmt = struct.Struct('<IB')
assert header_no_crc_fmt.size == 5
crc_fmt = struct.Struct('<I')
assert crc_fmt.size == 4
_commit = header_no_crc_fmt.pack(9, TAG_COMMIT)
COMMIT = crc_fmt.pack(crc32(_commit)) + _commit
def __init__(self, path, limit, segments_per_dir, capacity=90):
self.path = path
self.fds = LRUCache(capacity,
dispose=self.close_fd)
self.segment = 0
self.limit = limit
self.segments_per_dir = segments_per_dir
self.offset = 0
self._write_fd = None
def close(self):
self.close_segment()
self.fds.clear()
self.fds = None # Just to make sure we're disabled
def close_fd(self, fd):
if hasattr(os, 'posix_fadvise'): # only on UNIX
os.posix_fadvise(fd.fileno(), 0, 0, os.POSIX_FADV_DONTNEED)
fd.close()
def segment_iterator(self, reverse=False):
data_path = os.path.join(self.path, 'data')
dirs = sorted((dir for dir in os.listdir(data_path) if dir.isdigit()), key=int, reverse=reverse)
for dir in dirs:
filenames = os.listdir(os.path.join(data_path, dir))
sorted_filenames = sorted((filename for filename in filenames
if filename.isdigit()), key=int, reverse=reverse)
for filename in sorted_filenames:
yield int(filename), os.path.join(data_path, dir, filename)
def get_latest_segment(self):
for segment, filename in self.segment_iterator(reverse=True):
return segment
return None
def get_segments_transaction_id(self):
"""Return the last committed segment.
"""
for segment, filename in self.segment_iterator(reverse=True):
if self.is_committed_segment(segment):
return segment
return None
def cleanup(self, transaction_id):
"""Delete segment files left by aborted transactions
"""
self.segment = transaction_id + 1
for segment, filename in self.segment_iterator(reverse=True):
if segment > transaction_id:
os.unlink(filename)
else:
break
def is_committed_segment(self, segment):
"""Check if segment ends with a COMMIT_TAG tag
"""
try:
iterator = self.iter_objects(segment)
except IntegrityError:
return False
with open(self.segment_filename(segment), 'rb') as fd:
try:
fd.seek(-self.header_fmt.size, os.SEEK_END)
except OSError as e:
# return False if segment file is empty or too small
if e.errno == errno.EINVAL:
return False
raise e
if fd.read(self.header_fmt.size) != self.COMMIT:
return False
seen_commit = False
while True:
try:
tag, key, offset, _ = next(iterator)
except IntegrityError:
return False
except StopIteration:
break
if tag == TAG_COMMIT:
seen_commit = True
continue
if seen_commit:
return False
return seen_commit
def segment_filename(self, segment):
return os.path.join(self.path, 'data', str(segment // self.segments_per_dir), str(segment))
def get_write_fd(self, no_new=False, raise_full=False):
if not no_new and self.offset and self.offset > self.limit:
if raise_full:
raise self.SegmentFull
self.close_segment()
if not self._write_fd:
if self.segment % self.segments_per_dir == 0:
dirname = os.path.join(self.path, 'data', str(self.segment // self.segments_per_dir))
if not os.path.exists(dirname):
os.mkdir(dirname)
sync_dir(os.path.join(self.path, 'data'))
self._write_fd = SyncFile(self.segment_filename(self.segment))
self._write_fd.write(MAGIC)
self.offset = MAGIC_LEN
return self._write_fd
def get_fd(self, segment):
try:
return self.fds[segment]
except KeyError:
fd = open(self.segment_filename(segment), 'rb')
self.fds[segment] = fd
return fd
def close_segment(self):
if self._write_fd:
self.segment += 1
self.offset = 0
self._write_fd.close()
self._write_fd = None
def delete_segment(self, segment):
if segment in self.fds:
del self.fds[segment]
try:
os.unlink(self.segment_filename(segment))
except FileNotFoundError:
pass
def segment_exists(self, segment):
return os.path.exists(self.segment_filename(segment))
def segment_size(self, segment):
return os.path.getsize(self.segment_filename(segment))
def iter_objects(self, segment, include_data=False, read_data=True):
"""
Return object iterator for *segment*.
If read_data is False then include_data must be False as well.
Integrity checks are skipped: all data obtained from the iterator must be considered informational.
The iterator returns four-tuples of (tag, key, offset, data|size).
"""
fd = self.get_fd(segment)
fd.seek(0)
if fd.read(MAGIC_LEN) != MAGIC:
raise IntegrityError('Invalid segment magic [segment {}, offset {}]'.format(segment, 0))
offset = MAGIC_LEN
header = fd.read(self.header_fmt.size)
while header:
size, tag, key, data = self._read(fd, self.header_fmt, header, segment, offset,
(TAG_PUT, TAG_DELETE, TAG_COMMIT),
read_data=read_data)
if include_data:
yield tag, key, offset, data
else:
yield tag, key, offset, size
offset += size
header = fd.read(self.header_fmt.size)
def recover_segment(self, segment, filename):
if segment in self.fds:
del self.fds[segment]
with open(filename, 'rb') as fd:
data = memoryview(fd.read())
os.rename(filename, filename + '.beforerecover')
logger.info('attempting to recover ' + filename)
with open(filename, 'wb') as fd:
fd.write(MAGIC)
while len(data) >= self.header_fmt.size:
crc, size, tag = self.header_fmt.unpack(data[:self.header_fmt.size])
if size < self.header_fmt.size or size > len(data):
data = data[1:]
continue
if crc32(data[4:size]) & 0xffffffff != crc:
data = data[1:]
continue
fd.write(data[:size])
data = data[size:]
    def read(self, segment, offset, id, read_data=True):
        """
        Read entry from *segment* at *offset* with *id*.

        If read_data is False the size of the entry is returned instead and integrity checks are skipped.
        The return value should thus be considered informational.
        """
        if segment == self.segment and self._write_fd:
            # the entry may still be buffered in the open write segment,
            # so flush before reading it back
            self._write_fd.sync()
        fd = self.get_fd(segment)
        fd.seek(offset)
        header = fd.read(self.put_header_fmt.size)
        size, tag, key, data = self._read(fd, self.put_header_fmt, header, segment, offset, (TAG_PUT, ), read_data)
        if id != key:
            raise IntegrityError('Invalid segment entry header, is not for wanted id [segment {}, offset {}]'.format(
                segment, offset))
        return data if read_data else size
    def _read(self, fd, fmt, header, segment, offset, acceptable_tags, read_data=True):
        # some code shared by read() and iter_objects()
        #
        # *fmt* must be either put_header_fmt (crc, size, tag, key) or
        # header_fmt (crc, size, tag); with the latter the 32-byte key is
        # read from the payload instead.  Returns (size, tag, key, data);
        # data is None when read_data is False.
        try:
            hdr_tuple = fmt.unpack(header)
        except struct.error as err:
            raise IntegrityError('Invalid segment entry header [segment {}, offset {}]: {}'.format(
                segment, offset, err)) from None
        if fmt is self.put_header_fmt:
            crc, size, tag, key = hdr_tuple
        elif fmt is self.header_fmt:
            crc, size, tag = hdr_tuple
            key = None
        else:
            raise TypeError("_read called with unsupported format")
        if size > MAX_OBJECT_SIZE or size < fmt.size:
            raise IntegrityError('Invalid segment entry size [segment {}, offset {}]'.format(
                segment, offset))
        # remaining payload bytes after the header
        length = size - fmt.size
        if read_data:
            data = fd.read(length)
            if len(data) != length:
                raise IntegrityError('Segment entry data short read [segment {}, offset {}]: expected {}, got {} bytes'.format(
                    segment, offset, length, len(data)))
            # the crc covers the header minus its own 4-byte crc field,
            # plus the full payload
            if crc32(data, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
                raise IntegrityError('Segment entry checksum mismatch [segment {}, offset {}]'.format(
                    segment, offset))
            if key is None and tag in (TAG_PUT, TAG_DELETE):
                # header format without key: first 32 payload bytes are the key
                key, data = data[:32], data[32:]
        else:
            if key is None and tag in (TAG_PUT, TAG_DELETE):
                key = fd.read(32)
                length -= 32
                if len(key) != 32:
                    raise IntegrityError('Segment entry key short read [segment {}, offset {}]: expected {}, got {} bytes'.format(
                        segment, offset, 32, len(key)))
            # skip the payload without reading it; verify the seek moved
            # exactly as far as expected (detects truncated files)
            oldpos = fd.tell()
            seeked = fd.seek(length, os.SEEK_CUR) - oldpos
            data = None
            if seeked != length:
                raise IntegrityError('Segment entry data short seek [segment {}, offset {}]: expected {}, got {} bytes'.format(
                    segment, offset, length, seeked))
        if tag not in acceptable_tags:
            raise IntegrityError('Invalid segment entry header, did not get acceptable tag [segment {}, offset {}]'.format(
                segment, offset))
        return size, tag, key, data
    def write_put(self, id, data, raise_full=False):
        """Append a PUT entry (crc, header, 32-byte id, data) to the current
        write segment and return ``(segment, offset_of_entry)``.

        NOTE(review): *id* is assumed to be exactly 32 bytes - confirm that
        callers guarantee this; no length check is done here.
        """
        fd = self.get_write_fd(raise_full=raise_full)
        size = len(data) + self.put_header_fmt.size
        offset = self.offset
        header = self.header_no_crc_fmt.pack(size, TAG_PUT)
        # crc covers header (sans the crc field itself), id, then data
        crc = self.crc_fmt.pack(crc32(data, crc32(id, crc32(header))) & 0xffffffff)
        fd.write(b''.join((crc, header, id, data)))
        self.offset += size
        return self.segment, offset
    def write_delete(self, id, raise_full=False):
        """Append a DELETE entry for *id* to the current write segment and
        return ``(segment, entry_size)``.

        NOTE(review): unlike write_put() this returns the entry *size*
        rather than the entry offset - confirm that callers expect this.
        """
        fd = self.get_write_fd(raise_full=raise_full)
        # a DELETE entry is just a header plus the 32-byte id, no data
        header = self.header_no_crc_fmt.pack(self.put_header_fmt.size, TAG_DELETE)
        crc = self.crc_fmt.pack(crc32(id, crc32(header)) & 0xffffffff)
        fd.write(b''.join((crc, header, id)))
        self.offset += self.put_header_fmt.size
        return self.segment, self.put_header_fmt.size
    def write_commit(self):
        """Write a COMMIT tag and close the segment.

        The leading close_segment() ends the current data segment first, so
        the COMMIT entry ends up in a segment of its own.
        """
        self.close_segment()
        fd = self.get_write_fd()
        header = self.header_no_crc_fmt.pack(self.header_fmt.size, TAG_COMMIT)
        crc = self.crc_fmt.pack(crc32(header) & 0xffffffff)
        fd.write(b''.join((crc, header)))
        self.close_segment()
| 37,836 | 10,666 |
from django.db.models import F
from rest_framework import serializers
from care.facility.models.uccbedrequest import UCCBedRequest
from care.users.api.serializers.user import UserBaseMinimumSerializer
from config.serializers import ChoiceField
from care.facility.api.serializers import TIMESTAMP_FIELDS
class UCCBedRequestSerializer(serializers.ModelSerializer):
    """Serializer for ``UCCBedRequest`` objects.

    Internal bookkeeping fields (soft-delete flag, modification timestamp,
    external id) are hidden from API consumers; timestamp fields are
    exposed read-only.
    """

    class Meta:
        model = UCCBedRequest
        exclude = (
            "deleted",
            "modified_date",
            "external_id",
        )
        # BUG FIX: TIMESTAMP_FIELDS is itself a tuple; the previous code
        # nested it inside read_only_fields, producing a tuple-in-a-tuple
        # instead of a flat tuple of field names. Unpack it instead.
        read_only_fields = (*TIMESTAMP_FIELDS, "created_date")

    def create(self, validated_data):
        """Delegate creation to ModelSerializer (kept as an explicit hook
        point for future request-creation side effects)."""
        return super().create(validated_data)
| 848 | 248 |
#!/usr/bin/env python
"""
Attempt to create a "manor" akin to::
###############################################
#.........#......#........#...........#.......#
#.........#......#........#...........#.......#
#.........#......#........#...........#.......#
#.........#......#........#...........#.......#
#########+####+######+###########+#####.......#
#.......+......+......................+.......#
#.......######+######+#.......#######+#########
#.......#......#......#<<#....#.......#.......#
#.......#......#......#<<#....#.......#.......#
#.......#......#......####....+.......#.......#
#.......#......#......#..+....#.......#.......#
##########################....#################
##++##
"""
import random, copy, room
from library import shape, collection
from library.coord import *
from library.random_util import *
from library.feature import *
# Specific build styles:
# Identifiers for the available manor floor-plan layouts; build_random()
# picks one of these and dispatches to the matching build_* function.
BASE_SHAPE = "single-corridor"
L_LAYOUT = "L-corridors"
Z_LAYOUT = "Z-corridors"
N_LAYOUT = "N-corridors"
H_LAYOUT = "H-corridors"
O_LAYOUT = "O-corridors"
U_LAYOUT = "U-corridors"
class BuilderCollection (collection.ShapeCollection):
    """A ShapeCollection that indexes its shapes into corridors and rooms.

    Every mutating operation inherited from ShapeCollection is wrapped so
    that the corridor/room index is rebuilt automatically afterwards.
    """
    # indices (into self) of corridor shapes
    corridors = None
    # indices (into self) of room shapes
    rooms = None
    # list of (side, placement) tuples for attached legs
    legs = None
    # index of the MainCorridor shape, if any
    main_corridor = None

    def __init__ (self, c=None):
        # BUG FIX: the default used to be the mutable literal [] (shared
        # between calls); use None and create a fresh list instead.
        if c is None:
            c = []
        if c != [] and isinstance(c, BuilderCollection):
            # preserve leg bookkeeping when copying another collection
            self.legs = c.legs
        collection.ShapeCollection.__init__(self, c)
        self.rebuild()

    def copy (self):
        """Return a copy sharing shapes shallowly but with its own legs list."""
        my_copy = BuilderCollection(copy.copy(self._shapes))
        my_copy.legs = copy.deepcopy(self.legs)
        return my_copy

    def rebuild (self):
        """Recompute the corridor/room indices from the current shapes."""
        self.corridors = []
        self.rooms = []
        if not self.legs:
            self.legs = []
        for index, sh in enumerate(self):
            if isinstance(sh.shape, MainCorridor):
                self.main_corridor = index
            if isinstance(sh.shape, Corridor):
                self.corridors.append(index)
            else:
                self.rooms.append(index)

    def corridor (self, index):
        """Return the corridor shape at *index* (must be a corridor)."""
        assert index in self.corridors
        return self[index]

    def get_corridors (self):
        """Return the list of corridor indices."""
        return self.corridors

    def get_room (self, index):
        """Return the room shape at *index* (must be a room)."""
        assert index in self.rooms
        return self[index]

    def get_rooms (self):
        """Return the list of room indices, or None if there are none."""
        if not self.rooms:
            return None
        return self.rooms

    def mark_leg (self, leg):
        """Record that *leg* has been attached."""
        self.legs.append(leg)

    def count_legs (self):
        """Return the number of attached legs."""
        return len(self.legs)

    def leg_at (self, side, placement):
        """Return True if a leg is attached at (side, placement)."""
        return (side, placement) in self.legs

    def get_leg (self, side, placement):
        """Return the leg at (side, placement), or None."""
        for leg in self.legs:
            if leg == (side, placement):
                return leg
        return None

    def _rebuild_wrap (function):
        def wrapper (self, *args, **kwargs):
            # BUG FIX: the wrapper used to discard the wrapped method's
            # return value, so e.g. the wrapped pop() returned None
            # instead of the popped element.
            result = function(self, *args, **kwargs)
            self.rebuild()
            return result
        wrapper.__name__ = function.__name__
        wrapper.__doc__ = function.__doc__ + "\n\nCalling this function automatically rebuilds the BuilderCollection index."
        return wrapper

    # Wrap all inherited mutators so the index stays in sync.
    # (duplicate re-wrapping of append/prioritise removed)
    __setitem__ = _rebuild_wrap(collection.ShapeCollection.__setitem__)
    append = _rebuild_wrap(collection.ShapeCollection.append)
    extend = _rebuild_wrap(collection.ShapeCollection.extend)
    insert = _rebuild_wrap(collection.ShapeCollection.insert)
    pop = _rebuild_wrap(collection.ShapeCollection.pop)
    prioritise = _rebuild_wrap(collection.ShapeCollection.prioritise)
    reverse = _rebuild_wrap(collection.ShapeCollection.reverse)
    reversed = _rebuild_wrap(collection.ShapeCollection.reversed)
    sort = _rebuild_wrap(collection.ShapeCollection.sort)
class Corridor (shape.Shape):
    """Marker subclass: a shape representing a corridor (not a room)."""
    pass
class MainCorridor (Corridor):
    """Marker subclass: the manor's primary corridor."""
    pass
def join_row_rooms (row, left_corr=False, right_corr=False, check_offset=False):
    """
    Given a list of rooms, joins them together as a ShapeCollection.

    :``row``: A list of Room objects that should be placed in a row. *Required*.
    :``left_corr``: If true, leaves a gap between the first and second rooms
                    to make space for a corridor. *Default False*.
    :``right_corr``: If true, leaves a gap between the last and second-last rooms
                     to make space for a corridor. *Default False*.
    :``check_offset``: If true, compares the room heights to see if they
                       need to be offset from the top. *Default False*.
    """
    # needs at least a first, one middle, and a last room
    assert(len(row) > 2)

    first_room = row[0].as_shape()
    second_room = row[1].as_shape()

    # Does some weird stuff to offset everything
    offset_both = False
    if check_offset and first_room.height() == second_room.height():
        offset_both = True

    # Join the first two rooms.
    top_offset = 0
    if check_offset:
        top_offset = 2
    overlap = 1
    if left_corr:
        # overlap of -1 leaves a one-tile gap for the corridor
        overlap = -1
    row_collection = shape.adjoin(first_room, second_room, top_offset=top_offset, overlap=overlap, collect=True, offset_both=offset_both)

    # Join the middle rooms.
    for curr in row[2:-1]:
        room_shape = curr.as_shape()
        to = top_offset
        # taller (or equal-height, un-offset) rooms are not pushed down
        if check_offset and (room_shape.height() == first_room.height() and not offset_both or room_shape.height() > first_room.height()):
            to = 0
        row_collection = shape.adjoin(row_collection, room_shape, top_offset=to, overlap=1, collect=True, offset_both=offset_both)

    # Join the last room.
    last_room = row[-1].as_shape()
    if check_offset and (last_room.height() == first_room.height() and not offset_both or last_room.height() > first_room.height()):
        top_offset = 0
    overlap = 1
    if right_corr:
        overlap = -1
    row_collection = shape.adjoin(row_collection, last_room, top_offset=top_offset, overlap=overlap, collect=True)
    return row_collection
ROOM_WIDTH_LIST = [7, 8, 9, 10, 11, 12]
def random_room_height ():
    """
    Returns a random value for the height of a room.

    Distribution: 8 half the time, otherwise 6 with probability 1/3,
    else the default of 7.
    """
    if coinflip():
        return 8
    if one_chance_in(3):
        return 6
    return 7
def base_builder (min_rooms=0, top_left=None, top_right=None, bottom_left=None, bottom_right=None, tl_corr=False, tr_corr=False, bl_corr=False, br_corr=False,top_height=None, bottom_height=None):
    """
    Attempts to build a basic rectangular manor. It returns ShapeCollection
    and a list of Room objects.

    :``min_rooms``: The minimum number of rooms. *Default None*.
    :``top_left``: The width of the top left room. Random, if none. *Default None*.
    :``top_right``: The width of the top right room. Random, if none. *Default None*.
    :``bottom_left``: The width of the bottom left room. Random, if none. *Default None*.
    :``bottom_right``: The width of the bottom right room. Random, if none. *Default None*.
    :``tl_corr``: If true, leaves a gap for a corridor between the top-left two rooms. *Default False*.
    :``tr_corr``: If true, leaves a gap for a corridor between the top-right two rooms. *Default False*.
    :``bl_corr``: If true, leaves a gap for a corridor between the bottom-left two rooms. *Default False*.
    :``br_corr``: If true, leaves a gap for a corridor between the bottom-right two rooms. *Default False*.
    :``top_height``: The height of the top row rooms. Random, if none. *Default None*.
    :``bottom_height``: The height of the bottom row rooms. Random, if none. *Default None*.
    """
    # Pick random widths for any corner rooms that were not specified.
    if top_left == None:
        top_left = random.choice(ROOM_WIDTH_LIST)
    if top_right == None:
        top_right = random.choice(ROOM_WIDTH_LIST)
    if bottom_left == None:
        bottom_left = random.choice(ROOM_WIDTH_LIST)
    if bottom_right == None:
        bottom_right = random.choice(ROOM_WIDTH_LIST)

    # tl_corr = True
    # tr_corr = True
    # bl_corr = True
    # br_corr = True
    print "tl: %s, tr: %s, bl: %s, br: %s" % (top_left, top_right, bottom_left, bottom_right)
    print "tl: %s, tr: %s, bl: %s, br: %s" % (tl_corr, tr_corr, bl_corr, br_corr)

    # Top row of rooms
    row1 = []
    # Corridor, then bottom row of rooms
    row2 = []

    max_length = 6*12 # currently unused
    # manor_width = random.randint(max_length/2, max_length)

    # Decide the row heights.
    if top_height == None:
        top_height = random_room_height()
    if bottom_height == None:
        bottom_height = random_room_height()
    print "top_height: %s, bottom_height: %s" % (top_height, bottom_height)

    # first rooms on either row
    height1 = top_height
    height2 = bottom_height
    check_overlap = False
    # The narrower corner room gets the extra height (ties: coinflip).
    if top_left < bottom_left or top_left == bottom_left and coinflip():
        height1 += 2
    else:
        height2 += 2
        check_overlap = True

    first = room.Room(width=top_left, height=height1)
    row1.append(first)
    first = room.Room(width=bottom_left, height=height2)
    row2.append(first)
    # print "first rooms: height1=%s, height2=%s" % (height1, height2)

    # Row lengths start with the two corner rooms; corridor gaps add width.
    length1 = top_left + top_right - 2
    if tl_corr:
        length1 += 2
    if tr_corr:
        length1 += 2
    length2 = bottom_left + bottom_right - 2
    if bl_corr:
        length2 += 2
    if br_corr:
        length2 += 2

    print "Row 1:"
    print "room 1: w=%s, length1: %s" % (top_left, length1)
    # Fill the top row with up to 5 middle rooms of random width.
    while len(row1) <= 5:
        # If we have four rooms, one in three chance of not adding any more
        # rooms.
        if len(row1) > 3 and one_chance_in(3):
            break
        new_room = room.Room(width=random.choice(ROOM_WIDTH_LIST), height=top_height)
        row1.append(new_room)
        length1 += new_room.width - 1
        print "room %s: w=%s, length1: %s" % (len(row1), new_room.width, length1)
    print "room %s: w=%s" % (len(row1)+1, top_right)

    # The top row's final length defines the manor width.
    manor_width = length1

    print "\nRow 2:"
    print "room 1: w=%s, length2: %s" % (bottom_left, length2)
    # Fill the bottom row until it spans the manor width, fudging the last
    # widths so the row comes out flush.
    while length2 < manor_width:
        dist_left = manor_width - length2 + 1
        if dist_left < 14:
            new_width = dist_left
        else:
            new_width = random.choice(ROOM_WIDTH_LIST)
            next_width = dist_left - new_width
            if next_width < 7:
                new_width = random.choice((6,7,8))
        new_room = room.Room(width=new_width, height=bottom_height)
        row2.append(new_room)
        length2 += new_width - 1
        print "room %s: w=%s, length2: %s" % (len(row2), new_width, length2)
    print "room %s: w=%s" % (len(row2)+1, bottom_right)

    # last rooms on either row
    height1 = top_height
    height2 = bottom_height
    if top_right < bottom_right or top_right == bottom_right and coinflip():
        height1 += 2
        check_overlap = False
    else:
        height2 += 2
        # check_overlap = True
    # print "last rooms: height1=%s, height2=%s" % (height1, height2)

    last = room.Room(width=top_right, height=height1)
    row1.append(last)
    last = room.Room(width=bottom_right, height=height2)
    row2.append(last)

    print "\nrow1: %s rooms, row2: %s rooms, manor width: %s" % (len(row1), len(row2), manor_width)

    # Try to get the minimum number of rooms.
    # NOTE(review): this retries with default arguments and min_rooms-1,
    # dropping all the width/corridor parameters - confirm this is intended.
    if len(row1) + len(row2) < min_rooms:
        return base_builder(min_rooms - 1)

    # Now, start drawing it! YAY!

    # First row
    row1_collection = join_row_rooms(row1, tl_corr, tr_corr)
    # second row
    row2_collection = join_row_rooms(row2, bl_corr, br_corr, True)

    # Finally, make a corridor!
    overlap = 3
    if check_overlap:
        overlap = 1
    my_collection = shape.underneath(row1_collection, row2_collection, overlap=overlap, collect=True)
    m = BuilderCollection(my_collection)

    # The main corridor runs between the two rows, spanning everything
    # except the narrower of the two corner rooms on each side.
    noncorr_left = min(top_left, bottom_left)
    noncorr_right = min(top_right, bottom_right)
    corridor_length = my_collection.width() - noncorr_left - noncorr_right
    # print "noncorr_left: %s, noncorr_right: %s, corridor_length: %s" % (noncorr_left, noncorr_right, corridor_length)
    corridor = MainCorridor(shape.Row(width=corridor_length, fill="."))
    m.append(collection.ShapeCoord(corridor, coord.Coord(noncorr_left, top_height)))

    return m
class Placement (object):
    """A named side/placement value that knows its own opposite.

    Two Placement instances exist per axis (e.g. "left"/"right"); each
    stores both names and an index selecting which one it represents.
    """
    def __init__ (self, side1, side2, this_side):
        self.sides = [side1, side2]
        self.this_side = this_side

    def opposite (self):
        """Return the name of the other side on this axis."""
        # this_side is 0 or 1, so 1 - this_side selects the other entry
        return self.sides[1 - self.this_side]

    def __hash__ (self):
        return hash(str(self))

    def __str__ (self):
        return self.sides[self.this_side]

    def __repr__ (self):
        return "<Placement %s>" % self

    def __cmp__ (self, other):
        # Python 2 rich-comparison fallback: order by string name.
        return cmp(str(self), str(other))
# Shared placement singletons used throughout the builder functions.
SIDE_LEFT = Placement("left", "right", 0)
SIDE_RIGHT = Placement("left", "right", 1)
PLACE_TOP = Placement("top", "bottom", 0)
PLACE_BOTTOM = Placement("top", "bottom", 1)
class Leg (object):
    """
    The representation of a manor leg (or "wing") that is attached to the
    base manor.

    A Leg records where it is attached (horizontal and vertical placement)
    and its size; the size can be copied from an existing leg collection.
    """
    def __init__ (self, h_placement, v_placement, width=None, height=None, leg=None):
        if leg is not None:
            # take dimensions from an existing leg shape
            width, height = leg.size()
        else:
            # without a template leg, at least one dimension must be given
            assert width is not None or height is not None
        self.placement = (h_placement, v_placement)
        self.width = width
        self.height = height

    def __repr__ (self):
        return "<Leg h:%s w:%s %s>" % (self.height, self.width, self.placement)

    def __cmp__ (self, other):
        # Python 2 comparison: a Leg compares by placement, and may also be
        # compared directly against a (side, placement) tuple.
        if isinstance(other, Leg):
            return cmp(self.placement, other.placement)
        elif isinstance(other, tuple):
            return cmp(self.placement, other)
def attach_leg (base, leg, side=SIDE_LEFT, placement=PLACE_TOP, corr_offset = None, x_offset = None):
    """
    Take a result of base_builder() and attach a leg.

    :``base``: The base shape collection.
    :``leg``: The leg shape collection.
    :``side``: Which side the leg should be placed on. *Default ``SIDE_LEFT``*.
    :``placement``: Whether the leg should be placed above or below. *Default ``PLACE_TOP``*.
    :``corr_offset``: A number by which to vertically offset the corridor placement.
                      If none, uses the default room height. *Default None*.
    :``x_offset``: A number by which to horizontally offset the corridor placement.
                   *Default None*.
    """
    assert not base.leg_at(side, placement)

    old_leg = leg.copy()

    no_vert_offset = False
    vert_offset = 0

    # If the opposite side already has a leg at this placement, align with it
    # instead of offsetting below/above the base.
    if base.leg_at(side.opposite(), placement):
        l = base.get_leg(side.opposite(), placement)
        vert_offset = base.height() - l.height
        no_vert_offset = True
    else:
        vert_offset = base.height() - 1

    # Find the corridor
    corridor, start = base.corridor(base.main_corridor)
    assert corridor is not None

    # Find the corridor's end point
    stop = coord.Coord(start)
    stop.x = corridor.width()

    if side == SIDE_RIGHT:
        # shift the leg so its right edge lines up with the base
        offs = leg[0].width() - start.x
        leg.offset(coord.Coord(stop.x-offs-1, 0))
        if x_offset == None:
            x_offset = stop.x + start.x
    elif side == SIDE_LEFT and x_offset == None:
        x_offset = start.x

    print "vert_offset: %s, x_offset: %s, no_vert_offset: %s" % (vert_offset, x_offset, no_vert_offset)

    if corr_offset == None:
        corr_offset = room.Room().height

    # A new vertical corridor connects the leg to the main corridor.
    ncorr_height = leg.height() + corr_offset - 1
    new_corridor = Corridor(shape.Column(height=ncorr_height, fill="."))

    corridor_offset = None

    if placement == PLACE_BOTTOM:
        if no_vert_offset:
            base.place_on(leg, offset=coord.Coord(0, vert_offset))
        else:
            left_offset = 0
            if side == SIDE_RIGHT:
                left_offset = base.width()-leg.width()
            base = shape.underneath(base, leg, left_offset=left_offset, overlap=1, collect=True)
        # door tile at the far end of the vertical corridor
        new_corridor[coord.Coord(0, new_corridor.height()-1)] = "#"
        corridor_offset = coord.Coord(x_offset, vert_offset - corr_offset + 1)
        base.append(new_corridor, corridor_offset)
    elif placement == PLACE_TOP:
        if no_vert_offset:
            base.place_on(leg)
        else:
            left_offset = 0
            if side == SIDE_RIGHT:
                left_offset = leg.width()-base.width()
                # print "leg width (%s) - base width (%s) = left_offset (%s)" % (leg.width(), base.width(), left_offset)
            base = shape.underneath(leg, base, left_offset=left_offset, overlap=1, collect=True)
        # door tile at the top of the vertical corridor
        new_corridor[POS_ORIGIN] = "#"
        corridor_offset = coord.Coord(x_offset, 0)
        base.append(new_corridor, corridor_offset)

    # NOTE(review): `start` is recomputed here but never used afterwards -
    # possibly leftover from an earlier version.
    if placement == PLACE_TOP:
        start = coord.Coord(corridor_offset.x - 1, leg.height() - 1)
    elif placement == PLACE_BOTTOM:
        start = coord.Coord(corridor_offset.x - 1, vert_offset - corr_offset + 1)

    base = BuilderCollection(base)
    base.mark_leg(Leg(side, placement, leg=old_leg))

    return base
def build_leg (rooms_tall=2, rooms_wide=2, width_left=12, width_right=12, make_corridor=True, do_cleanup=True):
    """
    Create and return a "leg" to be used with add_leg.

    :``rooms_tall``: How many rooms tall to make the leg. *Default 2*.
    :``rooms_wide``: How many rooms wide to make the leg. *Max 2. Default 2*.
    :``width_left``: The width of the leftmost rooms. *Default 12*.
    :``width_right``: The width of the rightmost rooms. *Default 12*.
    :``make_corridor``: Include a corridor when building. *Default True*.
    :``do_cleanup``: Perform corridor, etc, clean-up when built. *Default True*.

    NOTE(review): ``make_corridor`` and ``do_cleanup`` are currently unused.
    """
    assert rooms_wide >= 1 and rooms_wide <= 2
    assert rooms_tall >= 1
    leg_rooms = collection.ShapeCollection()

    if width_left == None:
        width_left = random.choice(ROOM_WIDTH_LIST)
    if width_right == None:
        width_right = random.choice(ROOM_WIDTH_LIST)

    # default height of 7 for every room in a column
    heights = []
    for r in xrange(rooms_tall):
        heights.append(7)

    for column in xrange(rooms_wide):
        this_col = collection.ShapeCollection()

        width = width_left
        if column > 0:
            width = width_right

        # Jiggle the heights a little so the leg is not perfectly uniform:
        # either shrink one room and grow another, or just grow one.
        height_list = heights[:]
        if len(heights) > 1 and one_chance_in(5):
            indices = range(len(height_list))
            small = random.choice(indices)
            indices.remove(small)
            large = random.choice(indices)
            height_list[small] -= 1
            height_list[large] += 2
        else:
            large = random.choice(xrange(len(height_list)))
            height_list[large] += 1

        # Stack the column's rooms vertically, then glue the column onto
        # the leg with a one-tile gap (overlap=-1).
        for row in xrange(rooms_tall):
            new_room = room.Room(width=width,height=height_list[row]).as_shape()
            # print "new_room height: %s, this_col height: %s" % (new_room.height(), this_col.height())
            this_col = shape.underneath(new_room, this_col, offset_second=False, overlap=1, collect=True)

        # print "leg_rooms width: %s, this_col width: %s" % (leg_rooms.width(), this_col.width())
        leg_rooms = shape.adjoin(leg_rooms, this_col, overlap=-1, collect=True)

    return leg_rooms
def build_L (base=None, min_rooms=0, rooms=2, rooms_wide=2):
    """
    Modifies the results of base_builder() to result in an L shape in any
    orientation.

    :``base``: The base shape collection. If None, a new base will be built from
               base_builder. *Default None*.
    :``min_rooms``: The minimum number of rooms in the whole manor. *Default 0*.
    :``rooms``: How many rooms to build along the sides of the new axis. *Default 2*.
    :``rooms_wide``: How many rooms wide to make the leg. *Max 2. Default 2*.
    """
    # Choose a random corner for the leg.
    side = random.choice([SIDE_LEFT, SIDE_RIGHT])
    placement = random.choice([PLACE_TOP, PLACE_BOTTOM])

    # exactly one of these is True: the corner the leg attaches to
    tlc = (side == SIDE_LEFT and placement == PLACE_TOP)
    trc = (side == SIDE_RIGHT and placement == PLACE_TOP)
    blc = (side == SIDE_LEFT and placement == PLACE_BOTTOM)
    brc = (side == SIDE_RIGHT and placement == PLACE_BOTTOM)

    # Pick corner-room widths on the leg side; the two stacked corner rooms
    # are widened to match each other so the leg lines up.
    if tlc or blc: # left side
        tlw = random.choice(ROOM_WIDTH_LIST)
        blw = random.choice(ROOM_WIDTH_LIST)
        trw = None
        brw = None
        if tlc:
            if blw < tlw:
                blw = tlw
            left = tlw
        else:
            if tlw < blw:
                tlw = blw
            left = blw
        right = None
    else: # right side
        tlw = None
        blw = None
        trw = random.choice(ROOM_WIDTH_LIST)
        brw = random.choice(ROOM_WIDTH_LIST)
        if trc:
            if brw < trw:
                brw = trw
            right = trw
        else:
            if trw < brw:
                trw = brw
            right = brw
        left = None

    # Row height on the attachment side doubles as the corridor offset.
    tht = None
    bht = None
    corr_offset = random_room_height()
    if placement == PLACE_TOP:
        tht = corr_offset
    else:
        bht = corr_offset

    if base is None:
        base = base_builder(min_rooms=min_rooms-4, top_left=tlw, top_right=trw, bottom_left=blw, bottom_right=brw, tl_corr=tlc, tr_corr=trc, bl_corr=blc, br_corr=brc, top_height=tht, bottom_height=bht)

    # Draw the new rooms.
    new_rooms = build_leg(rooms, rooms_wide, width_left=left, width_right=right)

    offset = None
    if side == SIDE_RIGHT:
        offset = base.width() - right - 1
    base = attach_leg(base, new_rooms, side=side, placement=placement, corr_offset=corr_offset, x_offset=offset)
    return base
def build_Z (base=None, min_rooms=0):
    """
    Placeholder for the Z-shaped layout builder. Not implemented: simply
    returns *base* unchanged, or a fresh base manor when *base* is None.

    :``base``: The base shape collection. If None, a new base will be built from
               base_builder. *Default None*.
    """
    if base is not None:
        return base
    return base_builder(min_rooms=min_rooms)
def build_N (base=None, min_rooms=0):
    """
    Placeholder for the N-shaped layout builder. Not implemented: simply
    returns *base* unchanged, or a fresh base manor when *base* is None.

    :``base``: The base shape collection. If None, a new base will be built from
               base_builder. *Default None*.
    """
    if base is not None:
        return base
    return base_builder(min_rooms=min_rooms)
def build_O (base=None, min_rooms=0):
    """
    Placeholder for the O-shaped layout builder. Not implemented: simply
    returns *base* unchanged, or a fresh base manor when *base* is None.

    :``base``: The base shape collection. If None, a new base will be built from
               base_builder. *Default None*.
    """
    if base is not None:
        return base
    return base_builder(min_rooms=min_rooms)
def build_H (base=None, min_rooms=0):
    """
    Modifies the results of base_builder() to result in an H-shaped layout:
    a base manor with U-legs attached both above and below.

    :``base``: The base shape collection. If None, a new base will be built from
               base_builder. *Default None*.
    """
    # One width for all outer leg rooms, one for all inner leg rooms.
    leg_outer = random.choice(ROOM_WIDTH_LIST)
    leg_inner = random.choice(ROOM_WIDTH_LIST)
    top_h = random_room_height()
    bottom_h = random_room_height()

    if base is None:
        base = base_builder(min_rooms=min_rooms-16, top_left=leg_outer, top_right=leg_outer,
                            bottom_left=leg_outer, bottom_right=leg_outer,
                            tl_corr=True, tr_corr=True, bl_corr=True, br_corr=True,
                            top_height=top_h, bottom_height=bottom_h)

    # Attach matching U-legs on both sides of the base.
    base = build_U(base, min_rooms=min_rooms, placement=PLACE_TOP, outer=leg_outer, inner=leg_inner, room_height=top_h)
    base = build_U(base, min_rooms=min_rooms, placement=PLACE_BOTTOM, outer=leg_outer, inner=leg_inner, room_height=bottom_h)

    return base
def build_U (base=None, min_rooms=0, rooms=2, rooms_wide=2, placement=None, outer=None, inner=None, room_height=None):
    """
    Modifies the results of base_builder() to result in an U-shaped layout.

    :``base``: The base shape collection. If None, a new base will be built from
               base_builder. *Default None*.
    :``rooms``: How many rooms to build along the sides of the new axis. *Default 2*.
    :``rooms_wide``: How many rooms wide to make the leg. *Max 2. Default 2*.
    :``placement``: The vertical orientation of the manor legs. Random, if none. *Default None*.
    :``inner``: The width of the inner manor legs' rooms. Random, if none. *Default None*.
    :``outer``: The width of the outer manor legs' rooms. Random, if none. *Default None*.
    :``room_height``: The height of the base manor rooms on the side facing the legs.
                      Random, if none. *Default None*.
    """
    if placement is None:
        placement = random.choice([PLACE_TOP, PLACE_BOTTOM])

    if outer == None:
        outer = random.choice(ROOM_WIDTH_LIST) # outer leg
    if inner == None:
        inner = random.choice(ROOM_WIDTH_LIST) # inner leg

    tht = None
    bht = None
    if room_height == None:
        room_height = random_room_height()
    if placement == PLACE_TOP:
        tht = room_height
    else:
        bht = room_height

    if base is None:
        # The leg-side corner rooms match the outer leg width; the opposite
        # side gets at least as wide a corner room.
        tlc = (placement == PLACE_TOP)
        trc = tlc
        blc = not tlc
        brc = blc
        noleg = random.choice(ROOM_WIDTH_LIST) # opposite side
        if noleg < outer:
            noleg = outer
        if tlc: # top
            tlw = outer
            trw = outer
            blw = noleg
            brw = noleg
        else: # bottom
            tlw = noleg
            trw = noleg
            blw = outer
            brw = outer
        base = base_builder(min_rooms=min_rooms-8, top_left=tlw, top_right=trw, bottom_left=blw, bottom_right=brw, tl_corr=tlc, tr_corr=trc, bl_corr=blc, br_corr=brc, top_height=tht, bottom_height=bht)

    leg_width = outer + inner + 1
    distance = base.width() - 2 * leg_width
    print "base width=%s, outer=%s, inner=%s, leg width=%s, distance=%s" % (base.width(), outer, inner, leg_width, base.width() - 2*leg_width)

    # If the two legs would leave too small a gap between them, shrink the
    # inner rooms so the legs fit.
    # NOTE(review): the nesting of the inner-adjustment block below was
    # reconstructed; confirm against the original source.
    if distance < 5 and distance != -1:
        if distance % 2 == 0 or base.width() % 2 == 0:
            if distance < 0:
                inner -= 2 + (-distance)
            inner -= 2
        else:
            inner = base.width()/2 - outer
        leg_width = outer + inner + 1
        distance = base.width() - 2 * leg_width
        print "base width=%s, outer=%s, inner=%s, leg width=%s, distance=%s" % (base.width(), outer, inner, leg_width, base.width() - 2*leg_width)

    # Build and attach a leg on each side; widths mirror each other.
    new_rooms_L = build_leg(rooms, rooms_wide, width_left=outer, width_right=inner)
    new_rooms_R = build_leg(rooms, rooms_wide, width_left=inner, width_right=outer)

    base = attach_leg(base, new_rooms_L, side=SIDE_LEFT, placement=placement, corr_offset=room_height)
    base = attach_leg(base, new_rooms_R, side=SIDE_RIGHT, placement=placement, corr_offset=room_height, x_offset=base.width() - outer - 1)
    return base
def builder_by_type (type = None, min_rooms=0):
    """
    Creates and returns a manor of a given layout type.

    :``type``: The layout type in a character representation. *Default None*.
               ``B``: base manor.
               ``L``: L-shaped layout.
               ``U``: U-shaped layout.
               ``H``: H-shaped layout.
               ``None``: random layout.

    NOTE(review): the parameter name shadows the builtin ``type``; kept for
    interface compatibility.
    """
    if type == None:
        return build_random(min_rooms=min_rooms)
    if type == 'B':
        return base_builder(min_rooms=min_rooms)
    if type == 'L':
        return build_L(min_rooms=min_rooms)
    if type == 'U':
        return build_U(min_rooms=min_rooms)
    if type == 'H':
        return build_H(min_rooms=min_rooms)
    # The other types don't exist yet and fall back on the base_builder.
    if type == 'O':
        return build_O(min_rooms=min_rooms)
    if type == 'N':
        return build_N(min_rooms=min_rooms)
    if type == 'Z':
        return build_Z(min_rooms=min_rooms)
    else:
        # any unknown type also falls back on the plain base manor
        return base_builder(min_rooms=min_rooms)
def build_random (base=None, min_rooms=0):
    """
    Creates and returns a manor of a random layout type.

    :``base``: The base shape collection. If None, a new base will be built from
               base_builder. *Default None*.
    """
    candidates = [Z_LAYOUT, N_LAYOUT, O_LAYOUT, L_LAYOUT, U_LAYOUT, H_LAYOUT]
    layout = random.choice(candidates)

    # Larger manors are steered towards layouts with more legs.
    if min_rooms > 25:
        layout = H_LAYOUT
    elif min_rooms > 20:
        layout = random.choice(candidates[-2:])
    elif min_rooms > 15:
        layout = random.choice(candidates[-3:])

    dispatch = {
        L_LAYOUT: build_L,
        Z_LAYOUT: build_Z,
        N_LAYOUT: build_N,
        H_LAYOUT: build_H,
        O_LAYOUT: build_O,
        U_LAYOUT: build_U,
    }
    builder = dispatch.get(layout)
    if builder is None:
        # unreachable with the layouts above; kept as a safe fallback
        return base_builder(min_rooms=min_rooms)
    return builder(base, min_rooms=min_rooms)
| 28,802 | 9,577 |
from torchvision import datasets, transforms
from core.data.data_loaders.base import BaseDataLoader
class CIFAR10Loader(BaseDataLoader):
    """ CIFAR10 data loading + transformations """

    def __init__(self, data_dir,
                 batch_size=128,
                 training=True,
                 validation_split=0.0,
                 shuffle=False,
                 transformations="DefaultTransformations",
                 **kwargs):
        # Download/prepare CIFAR-10 and hand the dataset to the generic
        # BaseDataLoader machinery.
        print("[INFO][DATA] \t Preparing Cifar10 dataset ...")
        # Resolve the named transformation pipeline via the base class.
        _transf = BaseDataLoader.get_transformations(
            self, name=transformations)
        # Train and test phases use different transform chains.
        self.trans = _transf.get_train_trans() if training is True \
            else _transf.get_test_trans()
        self.data_dir = data_dir
        # download=True fetches the dataset on first use.
        self.dataset = datasets.CIFAR10(
            self.data_dir, train=training, download=True, transform=self.trans)
        super().__init__(self.dataset, batch_size, shuffle,
                         validation_split, **kwargs)

    def get_class_names(self):
        """Return the ten CIFAR-10 class names (index-aligned with labels)."""
        return('plane', 'car', 'bird', 'cat',
               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
| 1,126 | 322 |
# RT Util - Content Data
from typing import TypedDict, Any
from textwrap import shorten
from discord import Embed
from orjson import loads
from .utils import is_json
# Public API of this module.
__all__ = (
    "ContentData", "disable_content_json", "enable_content_json",
    "convert_content_json", "to_text"
)
class ContentData(TypedDict):
    "Typed shape of JSON-style data that can be passed to `send`."
    # kwargs-style message payload (e.g. "content", "embeds", ...)
    content: dict[str, Any]
    # Discord user ID of the author
    author: int
    # True while embeds are stored as plain dicts (JSON-serializable form)
    json: bool
_acj_check_embeds = lambda data, type_: \
"embeds" in data["content"] and data["content"]["embeds"] \
and isinstance(data["content"]["embeds"][0], type_)
def disable_content_json(data: ContentData) -> ContentData:
    "Put a ContentData into the form usable with `send` etc. (inflate embed dicts)."
    if data["json"] and _acj_check_embeds(data, dict):
        # Replace each raw embed dict with a real Embed, in place.
        embeds = data["content"]["embeds"]
        for position, raw_embed in enumerate(embeds):
            embeds[position] = Embed.from_dict(raw_embed)
        data["json"] = False
    return data
def enable_content_json(data: ContentData) -> ContentData:
    "Put a ContentData into JSON-serializable form (deflate Embed objects)."
    if not data["json"] and _acj_check_embeds(data, Embed):
        # Replace each Embed object with its plain-dict form, in place.
        embeds = data["content"]["embeds"]
        for position, embed_obj in enumerate(embeds):
            embeds[position] = embed_obj.to_dict()
        data["json"] = True
    return data
def convert_content_json(content: str, author: int, force_author: bool = False) -> ContentData:
    """Parse *content* into a ContentData.

    JSON input is loaded as-is; plain text is wrapped in a minimal
    ContentData. When *force_author* is set, the stored author is
    overwritten with *author* regardless of what the JSON contained.
    """
    if is_json(content):
        data = loads(content)
    else:
        # BUG FIX: `author` was previously assigned the boolean
        # *force_author* instead of the author id.
        data = ContentData(
            content={"content": content}, author=author, json=True
        )
    if force_author:
        data["author"] = author
    return data
def to_text(data: ContentData) -> str:
    "Render a ContentData as a short (max 35 chars) one-line preview."
    # NOTE(review): "embeds", if present, is presumably a list, which
    # str.join would reject - confirm intended keys with callers.
    parts = [data['content'].get(key, "") for key in ("content", "embeds")]
    return shorten("".join(parts), 35)
import simpy
# Default link capacity (units transferable per time unit).
DEF_LINK_CAPACITY = 5
# Default propagation delay, in simulation time units.
DEF_LINK_DELAY = 5
class Link:
    """
    A link is the physical implementation of an interface.

    Messages are first serialized through the link in capacity-sized chunks
    (modelling bandwidth) and then delivered after a fixed propagation delay.
    """
    def __init__(self, env, capacity, delay):
        self.env = env
        self.delay = delay
        # delivered messages wait here until collected via get()
        self.msg = simpy.Store(env)
        # models the link's bandwidth; put/get pairs model contention
        self.capacity = simpy.Container(env, capacity)

    def latency(self, value):
        """
        Deliver *value* into the message store after the propagation delay.
        """
        yield self.env.timeout(self.delay)
        self.msg.put(value)

    def queue(self, value, size):
        """Serialize *size* units through the link, then start delivery."""
        remaining = size
        while remaining > 0:
            # never push more than the link can carry at once
            chunk = min(remaining, self.capacity.capacity)
            # handle contention of multiple senders
            self.capacity.put(chunk)
            # a chunk takes chunk/capacity time units to transfer
            yield self.env.timeout(chunk / self.capacity.capacity)
            self.capacity.get(chunk)
            remaining -= chunk
        self.env.process(self.latency(value))

    def put(self, value, size):
        """Asynchronously send *value* of *size* across the link."""
        self.env.process(self.queue(value, size))

    def get(self):
        """Return a get-event yielding the next delivered message."""
        return self.msg.get()
| 1,270 | 370 |
#!/usr/bin/env python
#
# lingui.zeng@huawei.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
"""
Test 'parser' module
"""
import logging
import os
import unittest
import yaml
import mock
from dovetail import parser
from dovetail.utils.dovetail_config import DovetailConfig as dt_cfg
@mock.patch('dovetail.parser.Parser.logger')
class TestParser(unittest.TestCase):
    """Unit tests for dovetail.parser.Parser.parse_cmd."""

    test_path = os.path.dirname(os.path.realpath(__file__))

    def setUp(self):
        """Test case setup"""
        dt_cfg.load_config_files()
        logging.disable(logging.CRITICAL)

    def test_parser_cmd(self, mock_logger):
        """Test whether the command is correctly parsed."""
        mock_cmd = "python /functest/ci/run_tests.py "\
                   "-t {{validate_testcase}} -r"
        with open(os.path.join(self.test_path, 'test_testcase.yaml')) as f:
            mock_testcase_yaml = yaml.safe_load(f)
        MockTestcase = type('Testcase', (object,), {})
        mock_testcase = MockTestcase()
        # Bug fix: dict.values() is a non-subscriptable view on Python 3;
        # materialize it before indexing the first testcase.
        mock_testcase.testcase = list(mock_testcase_yaml.values())[0]
        output = parser.Parser.parse_cmd(mock_cmd, mock_testcase)
        expected_output = ("python /functest/ci/run_tests.py -t "
                           "tempest_smoke_serial -r")
        self.assertEqual(expected_output, output)

    def test_parser_cmd_fail(self, mock_logger):
        """Test that a missing testcase renders as 'None' in the command."""
        mock_cmd = "python /functest/ci/run_tests.py "\
                   "-t {{validate_testcase}} -r"
        mock_testcase_yaml = {}
        MockTestcase = type('Testcase', (object,), {})
        mock_testcase = MockTestcase()
        mock_testcase.testcase = mock_testcase_yaml.values()
        output = parser.Parser.parse_cmd(mock_cmd, mock_testcase)
        expected_output = ("python /functest/ci/run_tests.py -t "
                           "None -r")
        self.assertEqual(expected_output, output)
if __name__ == '__main__':
unittest.main()
| 2,163 | 685 |
from rest_framework import serializers
from .models import * # Student, StarInfo, SexInfo, GeoInfo, AnhuiGeoInfo, ProvinceInfo
class SexInfoSerializer(serializers.ModelSerializer):
    """Serializes SexInfo rows as name/value pairs (for charting)."""
    class Meta:
        model = SexInfo
        fields = ['name', 'value']
# Star-sign (constellation) info serializer
class StarInfoSerializer(serializers.ModelSerializer):
    """Serializes StarInfo rows as name/value pairs."""
    class Meta:
        model = StarInfo
        fields = ['name', 'value']
# Serializers for out-of-province data (GeoInfo)
class Geo1Serializer(serializers.ModelSerializer):
    """Exposes only the 'value' field of GeoInfo."""
    class Meta:
        model = GeoInfo
        fields = ['value']
class Geo2Serializer(serializers.ModelSerializer):
    """Exposes only the 'coords' field of GeoInfo."""
    class Meta:
        model = GeoInfo
        fields = ['coords']
# Serializers for in-province data (AnhuiGeoInfo)
class Geo3Serializer(serializers.ModelSerializer):
    """Exposes only the 'value' field of AnhuiGeoInfo."""
    class Meta:
        model = AnhuiGeoInfo
        fields = ['value']
class Geo4Serializer(serializers.ModelSerializer):
    """Exposes only the 'coords' field of AnhuiGeoInfo."""
    class Meta:
        model = AnhuiGeoInfo
        fields = ['coords']
# Province info serializer
class ProvinceInfoSerializer(serializers.ModelSerializer):
    """Serializes ProvinceInfo rows as name/value pairs."""
    class Meta:
        model = ProvinceInfo
        fields = ['name', 'value']
| 1,125 | 386 |
from pymongo import MongoClient
# Connect to the default local MongoDB instance (localhost:27017).
client = MongoClient()
# NOTE(review): the database is named 'yelp' but the collections below
# follow a MovieLens-style layout - confirm the intended dataset.
db = client['yelp']
movie_collection = db.movies
ratings_collection = db.ratings
tags_collection = db.tags
links_collection = db.links
def findUser(userId):
    """Return the user document matching *userId*, or None if absent."""
    query = {"user_id": userId}
    return db.user.find_one(query)
def find_movie(name):
for movie in movie_collection.find({"title": {'$regex': "/^%s" % (name)}}):
print movie
def test():
    # Smoke test: for the first movie, print one matching rating, tag and
    # link document, then stop (every loop breaks after one item).
    for movie in movie_collection.find():
        for r in ratings_collection.find({"movieId": movie["movieId"]}):
            print movie, r
            break
        for t in tags_collection.find({"movieId": movie["movieId"]}):
            print t
            break
        for l in links_collection.find({"movieId": movie["movieId"]}):
            print l
            break
        break
# print ratings_collection.find({"userId": 295}).count()
# for rating in ratings_collection.find():
# print rating["movieId"]
| 923 | 282 |
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
# X = input("Enter first string: ").lower()
# Y = input("Enter second string: ").lower()
# X ="I love horror movies"
# Y ="Lights out is a horror movie"
def cosineSimilarity(X, Y):
    """Cosine similarity between two strings over binary keyword vectors.

    Tokenizes both strings, drops English stopwords, builds binary
    presence vectors over the union vocabulary and returns their cosine.

    :param X: first string
    :param Y: second string
    :return: similarity in [0, 1]; 0.0 when either string contributes no
        non-stopword tokens (previously this raised ZeroDivisionError).
    """
    # tokenization
    x_tokens = word_tokenize(X)
    y_tokens = word_tokenize(Y)
    # sw contains the set of stopwords (set lookup is O(1))
    sw = set(stopwords.words('english'))
    # remove stop words from each string
    X_set = {w for w in x_tokens if w not in sw}
    Y_set = {w for w in y_tokens if w not in sw}
    # vocabulary containing keywords of both strings
    rvector = X_set.union(Y_set)
    # binary presence vectors over the shared vocabulary
    l1 = [1 if w in X_set else 0 for w in rvector]
    l2 = [1 if w in Y_set else 0 for w in rvector]
    # cosine formula: dot product over product of norms
    c = sum(a * b for a, b in zip(l1, l2))
    denominator = float((sum(l1) * sum(l2)) ** 0.5)
    if denominator == 0.0:
        # Guard: one of the inputs had no non-stopword tokens.
        return 0.0
    return c / denominator
#!/usr/bin/env python
# Connection details for the Cisco DevNet always-on IOS-XE sandbox.
iosxe_hostname = "ios-xe-mgmt.cisco.com"
iosxe_netconf_port = 10000
iosxe_username = "developer"
iosxe_password = "C1sco12345"
from ncclient import manager
import xmltodict
import xml.dom.minidom
# Create an XML filter for targeted NETCONF queries
# (ietf-interfaces YANG model: request every <interface> entry).
netconf_filter = """
<filter>
<interfaces xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces">
<interface></interface>
</interfaces>
</filter>"""
# Open a NETCONF session (hostkey_verify=False is acceptable for a lab
# sandbox only) and fetch the filtered running configuration.
with manager.connect(
    host=iosxe_hostname,
    port=iosxe_netconf_port,
    username=iosxe_username,
    password=iosxe_password,
    hostkey_verify=False,
) as m:
    netconf_reply = m.get_config(source="running", filter=netconf_filter)
# print(xml.dom.minidom.parseString(netconf_reply.xml).toprettyxml())
# Convert the XML reply into nested dicts and report each interface's
# administrative 'enabled' flag.
netconf_data = xmltodict.parse(netconf_reply.xml)["rpc-reply"]["data"]
interfaces = netconf_data["interfaces"]["interface"]
for interface in interfaces:
    print(f"Interface {interface['name']} enabled status is {interface['enabled']}")
# with manager.connect(
#     host=iosxe_hostname,
#     port=iosxe_netconf_port,
#     username=iosxe_username,
#     password=iosxe_password,
#     hostkey_verify=False,
# ) as m:
#     c = m.get_config(source="running")
#     for capability in m.server_capabilities:
#         print(capability)
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from crawl_movies.models.mongo import MovieDetail, WorkerDetail
class CrawlMoviesPipeline(object):
    """Persist crawled movie items into MongoDB via the mongoengine models."""

    def process_item(self, item, spider):
        """
        Receive an item from the spider and store it if it is unseen.
        :param item: scraped movie item (needs a 'movie_id' key)
        :param spider: the spider that produced the item
        :return: the item, so later pipelines keep processing it
        """
        # Is the current record already stored?
        saved = MovieDetail.objects(movie_id=item['movie_id'])
        # Insert only when missing
        if not saved:
            self.save_movie_data(item)
        return item

    def save_movie_data(self, item):
        """
        Save movie detail.
        :param item: scraped movie item
        :return: None
        """
        movie = MovieDetail()
        movie.movie_id = item.get('movie_id', '')
        movie.magnet = self.splite_magnet(item.get('magnet', ''))
        # NOTE(review): the document is built but never .save()d here -
        # confirm whether persistence is completed elsewhere.
        pass

    def split_words(self, words: str, flag):
        """
        Split a person-name field into individual names.
        :param words: raw string, possibly containing ' <br> ' separators
        :param flag: 'name' triggers the name-specific cleanup
        :return: a list of names when a separator is present, otherwise
            the cleaned string
        """
        if flag == 'name':
            words = words.replace(' ', '')
            # Bug fix: the split result used to be discarded (the call's
            # return value was never assigned). After the space-stripping
            # above the separator can only appear as '<br>'.
            if words.find('<br>') != -1:
                words = words.split('<br>')
        return words

    def splite_magnet(self, url):
        """
        Clean a magnet link by stripping HTML-escaped ampersands.
        :param url: raw magnet url (may be falsy)
        :return: cleaned url, or '' for falsy input
        """
        return url.replace('amp;', '') if url else ''
| 1,375 | 464 |
from typing import List
class Solution:
    def findLucky(self, arr: List[int]) -> int:
        """Return the largest "lucky" integer in *arr* - a value whose
        frequency equals the value itself - or -1 when none exists.
        """
        # Frequency table; renamed to avoid shadowing the builtin ``dict``.
        counts = {}
        for value in arr:
            counts[value] = counts.get(value, 0) + 1
        lucky = -1
        for value, freq in counts.items():
            if value == freq:
                lucky = max(lucky, value)
        return lucky
if __name__ == "__main__":
    # Smoke test: 3 occurs exactly three times, so the answer is 3.
    solver = Solution()
    print(solver.findLucky([1, 2, 2, 3, 3, 3]))
| 427 | 153 |
import json
import os
import cauldron
from cauldron.test import support
from cauldron.test.support.flask_scaffolds import FlaskResultsTest
EXAMPLE_PROJECTS_DIRECTORY = os.path.realpath(os.path.join(
os.path.dirname(os.path.realpath(cauldron.__file__)),
'resources',
'examples'
))
class TestSyncOpen(FlaskResultsTest):
    """Tests for the /sync-open endpoint (remote project opening)."""
    def test_no_args(self):
        """ should error without arguments """
        opened = self.post('/sync-open')
        # The endpoint responds 200 even for errors; the error is carried
        # in the response payload's code.
        self.assertEqual(opened.flask.status_code, 200)
        response = opened.response
        self.assert_has_error_code(response, 'INVALID_ARGS')
    def test_missing_definition(self):
        """ should error without cauldron.json definition argument """
        opened = self.post('/sync-open', {'source_directory': 'abc'})
        response = opened.response
        self.assert_has_error_code(response, 'INVALID_ARGS')
    def test_missing_source_directory(self):
        """ should error without source directory argument """
        opened = self.post('/sync-open', {'definition': 'abc'})
        response = opened.response
        self.assert_has_error_code(response, 'INVALID_ARGS')
    def test_open(self):
        """ should open project remotely """
        source_directory = os.path.join(
            EXAMPLE_PROJECTS_DIRECTORY,
            'hello_text'
        )
        source_path = os.path.join(source_directory, 'cauldron.json')
        # Use a real bundled example project definition as the payload.
        with open(source_path, 'r') as f:
            definition = json.load(f)
        opened = self.post('/sync-open', dict(
            definition=definition,
            source_directory=source_directory
        ))
        response = opened.response
        self.assert_has_success_code(response, 'PROJECT_OPENED')
        # The opened project should remember where it was synced from.
        project = cauldron.project.get_internal_project()
        self.assertEqual(project.remote_source_directory, source_directory)
| 1,882 | 561 |
"""
@author: Junguang Jiang
@contact: JiangJunguang1123@outlook.com
"""
import torch.nn as nn
class LeNet(nn.Sequential):
    """LeNet feature extractor for 28x28 single-channel images.

    The classification head is intentionally omitted; create a matching
    one with :meth:`copy_head`. ``out_features`` reports the feature size.
    """
    def __init__(self, num_classes=10):
        super(LeNet, self).__init__(
            nn.Conv2d(1, 20, kernel_size=5),   # 1x28x28 -> 20x24x24
            nn.MaxPool2d(2),                   # -> 20x12x12
            nn.ReLU(),
            nn.Conv2d(20, 50, kernel_size=5),  # -> 50x8x8
            nn.Dropout2d(p=0.5),
            nn.MaxPool2d(2),                   # -> 50x4x4
            nn.ReLU(),
            nn.Flatten(start_dim=1),
            nn.Linear(50 * 4 * 4, 500),
            nn.ReLU(),
            nn.Dropout(p=0.5),
        )
        self.num_classes = num_classes
        self.out_features = 500
    def copy_head(self):
        # Fresh classification head mapping features -> class logits.
        return nn.Linear(500, self.num_classes)
class DTN(nn.Sequential):
    """DTN feature extractor for 32x32 three-channel images.

    The classification head is intentionally omitted; create a matching
    one with :meth:`copy_head`. ``out_features`` reports the feature size.
    """
    def __init__(self, num_classes=10):
        super(DTN, self).__init__(
            nn.Conv2d(3, 64, kernel_size=5, stride=2, padding=2),    # 3x32x32 -> 64x16x16
            nn.BatchNorm2d(64),
            nn.Dropout2d(0.1),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2),  # -> 128x8x8
            nn.BatchNorm2d(128),
            nn.Dropout2d(0.3),
            nn.ReLU(),
            nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2), # -> 256x4x4
            nn.BatchNorm2d(256),
            nn.Dropout2d(0.5),
            nn.ReLU(),
            nn.Flatten(start_dim=1),
            nn.Linear(256 * 4 * 4, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Dropout(),
        )
        self.num_classes = num_classes
        self.out_features = 512
    def copy_head(self):
        # Fresh classification head mapping features -> class logits.
        return nn.Linear(512, self.num_classes)
def lenet(pretrained=False, **kwargs):
    """LeNet model from
    `"Gradient-based learning applied to document recognition" <http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf>`_
    Args:
        pretrained (bool): accepted for API symmetry but ignored here.
        num_classes (int): number of classes. Default: 10
    .. note::
        The input image size must be 28 x 28.
    """
    return LeNet(**kwargs)
def dtn(pretrained=False, **kwargs):
    """ DTN model
    Args:
        pretrained (bool): accepted for API symmetry but ignored here.
        num_classes (int): number of classes. Default: 10
    .. note::
        The input image size must be 32 x 32.
    """
    return DTN(**kwargs)
import itertools
class mind:
    """
    Class solving the Mastermind game at three difficulty levels.

    Supported modes:
        1) Guessing 4 positions with 6 colors
        2) Guessing 5 positions with 7 colors
        3) Guessing 6 positions with 8 colors

    The master class takes care of setting up the game, keeping the
    correct solution and scoring the individual guesses.

    In the first round a fixed guess is used for each game mode. It
    should eliminate the largest possible number of "potential"
    solutions for the following rounds.

    After the first round is scored (handled by master), all candidates
    that cannot be correct are removed from the set of possible
    solutions - i.e. those for which (had they been the sought
    solution) the last played guess could not have received the score
    it actually got.

    This repeats until the set of solutions is small enough (currently
    under 1000 items). At that point the best-next-guess selection
    comes into play: the guess considered best is the one most similar
    to the others (highest sum of scores against the rest).
    """
    def __init__(self):
        # Code name of this solver
        self.codename = "up to 32 characters"
        # Game mode - one of 4 (4 of 6), 5 (5 of 7), 6 (6 of 8)
        self.game_mode = 0
        # Set of all remaining candidate solutions of the current puzzle
        self.possibilities = set()
        # Is this the first guess?
        self.first_guess = True
        # Score of the last attempted solution
        self.last_score = 0
        # Cache of mutual scores between two candidates
        self.cache = {}
    def init(self, numbers, positions):
        """
        Method called after every difficulty (game type) change to set
        up everything that is needed.

        Parameters:
        ----------
        numbers:integer
            Number of digits that may appear at a single position, in
            the range 0 ... numbers-1
        positions:integer
            Number of positions.
        """
        self.game_mode = positions
        self.possibilities = set(itertools.product(range(numbers), repeat=positions))
        self.first_guess = True
        self.cache = {}
    def pick_best_guess(self):
        """
        Method that tries to find the best next guess.

        Because it is relatively slow (it compares every pair of
        candidates in the solution set), it should only be called once
        the solution set has become small.

        Returns:
        ------
        set
            The best possible guess.
        """
        best = {}
        if len(self.possibilities) == 1:
            return self.possibilities.pop()
        # Compare each candidate against every other one
        for guess in self.possibilities:
            for compare in self.possibilities:
                # Never compare a candidate with itself
                if guess == compare:
                    continue
                # Create the counter lazily
                if not guess in best:
                    best[guess] = 0
                # Add the mutual score to the counter.
                best[guess] += self.get_score( guess, compare)
        # Return the guess with the highest total score
        return max(best, key=lambda key: best[key])
    def count_matching_colors(self, a, b):
        """
        Count the number of identical colors (at any position) shared
        by candidates a and b.

        Parameters:
        ---------
        a:set
            Candidate A
        b:set
            Candidate B

        Returns:
        ------
        integer
            Number of matching colors.
        """
        count = 0
        a_iterator = iter(sorted(a))
        b_iterator = iter(sorted(b))
        # NOTE(review): these two next() calls sit outside the try
        # block, so empty inputs would raise StopIteration - confirm
        # callers never pass empty candidates.
        a_value = next(a_iterator)
        b_value = next(b_iterator)
        try:
            # Classic merge walk over the two sorted sequences.
            while True:
                if a_value == b_value:
                    count += 1
                    a_value = next(a_iterator)
                    b_value = next(b_iterator)
                elif a_value < b_value:
                    a_value = next(a_iterator)
                else:
                    b_value = next(b_iterator)
        except StopIteration:
            return count
    def get_score( self, guess, compare):
        """
        Method returning the mutual score of two candidate solutions,
        encoded as blacks * 10 + whites.

        Parameters:
        ----------
        guess:set
            Candidate A
        compare:set
            Candidate B
        """
        # Check the cache in case this pair was already computed.
        # Unfortunately no way was found to build a unique key from
        # guess and compare that is independent of their order, so both
        # concatenation orders are tried.
        #
        # Memoize probably would not help much...
        a = guess + compare
        b = compare + guess
        if a in self.cache:
            return self.cache[a]
        elif b in self.cache:
            return self.cache[b]
        # Compute the score
        key = a
        blacks = sum(1 for a, b in zip(guess, compare) if a == b)
        color_matches = self.count_matching_colors(guess, compare)
        whites = color_matches - blacks
        # Store the result in the cache
        self.cache[key] = blacks * 10 + whites
        return blacks * 10 + whites
    def guess(self):
        # Produce the next guess and remove it from the candidate set.
        guess = 0
        if self.first_guess:
            self.first_guess = False
            if self.game_mode == 4:
                guess = (0, 0, 1, 1)
            elif self.game_mode == 5:
                guess = (0, 0, 1, 1, 2)
            elif self.game_mode == 6:
                guess = (0, 0, 1, 1, 2, 2)
            self.possibilities.remove(guess)
        # Time to look for the best solution
        # Did not prove useful
        """
        if len(self.possibilities) < 1000:
            guess = self.pick_best_guess()
        else:
        """
        # NOTE(review): this pop() also overwrites the fixed first-round
        # guess selected above - confirm that is intended.
        guess = self.possibilities.pop()
        return guess
    def eval(self, guess, black, white):
        # Record the score the played guess received ...
        self.last_score = black * 10 + white
        # ... and drop every candidate that cannot be the solution (its
        # score against the played guess differs from the actual score).
        self.possibilities = set(filter(
            lambda n: self.get_score(guess,n) == self.last_score,
            self.possibilities
        ))
#!/usr/bin/env python
import argparse
from yattag import Doc
import matplotlib.pyplot as plt, mpld3
import collections
class HtmlReportGenerator:
    """Generates standalone html reports containing scalar result values
    and interactive (mpld3) renderings of matplotlib plots."""

    def __init__(self, args):
        """Constructor

        :param args: mapping with at least the key 'filename_out'
            (output path). When using argparse, pass ``vars(namespace)``
            so that the subscripting in :meth:`save` works.
        """
        self.args = args
        self.report = ""     # the html string
        self.variables = {}  # dictionary containing all the result variables
        self.title = ""
        self.plots = collections.OrderedDict()        # matplotlib figures, by title
        self.plots_mpld3 = collections.OrderedDict()  # mpld3 html snippets, by title

    def generate(self):
        """Generates the report from the elements added until now; the
        html is returned and also kept in ``self.report``."""
        doc, tag, text = Doc().tagtext()
        self.convert_plots()
        with tag('html'):
            with tag('body'):
                with tag('h1', id='report_title'):
                    text(self.title)
                with tag('ul', id='values'):
                    for k, v in self.variables.items():
                        with tag('li', id='values'):
                            text(k + ": " + str(v))
                for k, v in self.plots_mpld3.items():
                    with tag('h2'):
                        text(k)
                    doc.asis(v)
        self.report = doc.getvalue()
        return self.report

    def save(self):
        """Saves the report to args['filename_out']."""
        # Bug fix: use a context manager so the handle is closed even if
        # write() raises (the previous open/write/close leaked on error).
        with open(self.args["filename_out"], 'w') as f:
            f.write(self.report)

    def convert_plots(self):
        """Convert every matplotlib figure to its mpld3 html form."""
        for k, v in self.plots.items():
            self.plots_mpld3[k] = mpld3.fig_to_html(v)

    def create_add_plot(self, time, plot_values, title):
        """
        Helps generating a matplotlib plot and adds the result to the plots list

        :param time: list of time values (x axis)
        :param plot_values: list of (label, values) pairs, one per curve
            (the old docstring said "dict", but the code indexes v[0]/v[1])
        :param title: a string used as the plot's key and heading
        """
        fig = plt.figure(len(self.plots))
        for v in plot_values:
            plt.plot(time, v[1])
        plt.xlabel("time [s]")
        plt.legend([v[0] for v in plot_values])
        self.plots[title] = fig

    def test(self):
        """Smoke test: build and save a tiny report with two values and
        one plot."""
        self.variables = {
            "a": 0.3,
            "b": 0.2}
        self.title = "Test Report"
        f = plt.figure(1)
        plt.plot([1, 2, 3, 4])
        self.plots["Test plot"] = f
        self.generate()
        self.save()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='generates html reports from your results')
    parser.add_argument('-o', dest='filename_out', default='report.html')
    args = parser.parse_args()
    # Bug fix: HtmlReportGenerator.save() subscripts its args
    # (self.args["filename_out"]), but argparse.Namespace is not
    # subscriptable - convert it to a plain dict first.
    rg = HtmlReportGenerator(vars(args))
    rg.test()
| 3,245 | 955 |
class ScreenGrabListener(object):
    """
    Basic listener class to process a grabbed image via the screenshooter.
    """
    def process(self, grabbed_image):
        """
        Abstract method. Processes a grabbed image.
        :param grabbed_image: The grabbed image as PIL.Image.
        :raises NotImplementedError: always; subclasses must override.
        """
        # Bug fix: the error previously carried an empty message, which
        # made failures hard to diagnose.
        raise NotImplementedError(
            "%s must implement process()" % type(self).__name__)
"""
Interfaceing with the Student Web Service,
for notice resource
"""
import copy
import logging
from restclients.models.sws import Notice, NoticeAttribute
from restclients.sws import get_resource
from dateutil import parser
import pytz
notice_res_url_prefix = "/student/v5/notice/"
logger = logging.getLogger(__name__)
def get_notices_by_regid(regid):
    """
    Returns a list of restclients.models.sws.Notice objects
    for the passed regid.
    """
    url = "{0}{1}.json".format(notice_res_url_prefix, regid)
    return _notices_from_json(get_resource(url))
def _notices_from_json(notice_data):
    """Build Notice objects from the decoded SWS JSON payload.

    Returns None when the payload carries no "Notices" list; otherwise a
    list of Notice objects with parsed attributes, with Short financial
    aid notices linked to their Long counterparts.
    """
    notices_list = notice_data.get("Notices")
    if notices_list is None:
        return None
    notices = []
    for notice in notices_list:
        notice_obj = Notice()
        notice_obj.notice_category = notice.get("NoticeCategory")
        notice_obj.notice_content = notice.get("NoticeContent")
        notice_obj.notice_type = notice.get("NoticeType")
        notice_attribs = []
        try:
            for notice_attrib in notice.get("NoticeAttributes"):
                attribute = NoticeAttribute()
                attribute.data_type = notice_attrib.get("DataType")
                attribute.name = notice_attrib.get("Name")
                # Currently known data types
                if attribute.data_type == "url":
                    attribute._url_value = notice_attrib.get("Value")
                elif attribute.data_type == "date":
                    # Convert to UTC datetime
                    date = parser.parse(notice_attrib.get("Value"))
                    localtz = pytz.timezone('America/Los_Angeles')
                    local_dt = localtz.localize(date)
                    utc_dt = local_dt.astimezone(pytz.utc)
                    attribute._date_value = utc_dt
                elif attribute.data_type == "string":
                    attribute._string_value = notice_attrib.get("Value")
                else:
                    # Bug fixes: use logger.warning (warn is a deprecated
                    # alias) with lazy args, and look up the capitalized
                    # "Value" key - the old lowercase key always logged
                    # None for the value.
                    logger.warning(
                        "Unknown attribute type: %s\nWith Value:%s",
                        attribute.data_type,
                        notice_attrib.get("Value"))
                    continue
                notice_attribs.append(attribute)
        except TypeError:
            # "NoticeAttributes" may be absent (None): treat as no attributes.
            pass
        notice_obj.attributes = notice_attribs
        notices.append(notice_obj)
    return _associate_short_long(notices)
def _associate_short_long(notices):
"""
If a notice is type ${1}Short, associate with its Long notice
in an attribute called long_notice.
"""
for notice in notices:
if notice.notice_type is not None and\
notice.notice_category == "StudentFinAid" and\
notice.notice_type.endswith("Short"):
notice.long_notice = _find_notice_by_type(notices,
notice.notice_type[:-5])
return notices
def _find_notice_by_type(notices, type):
for notice in notices:
if notice.notice_type == type:
return notice
return None
| 3,055 | 862 |
# -*- coding: utf-8 -*-
import asyncio
import logging
import math
import platform
import time
import urllib
import uuid as _uuid
from datetime import datetime
import aiohttp
import requests
# Client identity advertised to the Vulcan mobile API (mimics the
# official Android app).
APP_NAME = "DzienniczekPlus 2.0"
APP_VERSION = "1.4.2"
APP_OS = "Android"
APP_USER_AGENT = "Dart/2.10 (dart:io)"
# Module-level logger, INFO by default, writing to stderr.
log = logging.getLogger("client")
log.setLevel(logging.INFO)
handler = logging.StreamHandler()
log.addHandler(handler)
# Shared strftime pattern for hour:minute values.
TIME_FORMAT_H_M = "%H:%M"
class VulcanAPIException(Exception):
    """Raised for Vulcan API errors (e.g. an invalid client token)."""
    pass
def default_device_model():
    """Fallback device-model string advertised to the Vulcan API."""
    python_version = platform.python_version()
    return "Vulcan API (Python {})".format(python_version)
async def get_base_url(token):
    """Resolve the REST base URL for *token* from the routing table.

    The first three characters of the token identify the school symbol.
    """
    symbol = token[0:3]
    components = await get_components()
    try:
        return components[symbol]
    except KeyError:
        raise VulcanAPIException("Invalid token!")
async def get_components():
    """Download the school-symbol -> API-base-URL routing table.

    Falls back to an empty mapping for unexpected content types, and
    always injects the "FK1" fakelog test endpoint.
    """
    log.info("Getting Vulcan components...")
    async with aiohttp.ClientSession() as session:
        async with session.get(
            "http://komponenty.vulcan.net.pl/UonetPlusMobile/RoutingRules.txt"
        ) as r:
            if r.headers["Content-Type"] == "text/plain":
                r_txt = await r.text()
                # Each line of the routing file is "<symbol>,<base-url>".
                components = (c.split(",") for c in r_txt.split())
                components = {a[0]: a[1] for a in components}
            else:
                components = {}
    components.update({"FK1": "http://api.fakelog.tk"})
    return components
def get_firebase_token():
    """Register with Google's GCM endpoint and return a push token.

    Mimics the official mobile app's registration (hard-coded sender,
    app id and Android id), which the Vulcan API expects during device
    registration.
    """
    log.info("Getting Firebase token...")
    # Android ID / security-token pair used for the AidLogin auth scheme.
    aid = "4609707972546570896:3626695765779152704"
    device = aid.split(":")[0]
    app = "pl.edu.vulcan.hebe"
    data = {
        "sender": "987828170337",
        "X-scope": "*",
        "X-gmp_app_id": "1:987828170337:android:ac97431a0a4578c3",
        "app": app,
        "device": device,
    }
    headers = {
        "Authorization": "AidLogin {}".format(aid),
        "User-Agent": "Android-GCM/1.5",
        "app": app,
    }
    r = requests.post(
        "https://android.clients.google.com/c2dm/register3", data=data, headers=headers
    )
    # The response body looks like "token=<value>".
    # NOTE(review): an error response (e.g. "Error=...") would be split
    # and returned verbatim here - confirm callers handle that case.
    token = r.text.split("=")[1]
    return token
def millis():
    """Current Unix time in whole milliseconds."""
    now_ms = time.time() * 1000
    return math.floor(now_ms)


def now_datetime():  # UTC+0
    """Naive datetime for the current UTC moment."""
    return datetime.utcnow()


def now_iso(dt=None):  # ISO 8601, local timezone
    """Format *dt* (default: local now) as 'YYYY-MM-DD HH:MM:SS'."""
    moment = dt if dt is not None else datetime.now()
    return moment.strftime("%Y-%m-%d %H:%M:%S")


def now_gmt(dt=None):  # RFC 2822, UTC+0
    """Format *dt* (default: UTC now) as an RFC 2822 GMT timestamp."""
    moment = dt if dt is not None else datetime.utcnow()
    return moment.strftime("%a, %d %b %Y %H:%M:%S GMT")


def uuid(seed=None):
    """Random UUID4 string, or a deterministic UUID5 when *seed* is given."""
    if seed is None:
        return str(_uuid.uuid4())
    return str(_uuid.uuid5(_uuid.NAMESPACE_X500, str(seed)))


def urlencode(s):
    """Percent-encode str(s) for safe use inside a URL."""
    return urllib.parse.quote(str(s))
| 2,654 | 990 |
#!/usr/bin/python3.6
import sys,etcdget,subprocess,time
# Debug breadcrumb: record the raw CLI arguments for later inspection.
with open('Data/chkuser2','w') as f:
    f.write(str(sys.argv))
y=[]
z=[]
# Snapshot the current 'updlogged' marker for this user (argv[1] = user).
x=etcdget.etcdget('updlogged/'+sys.argv[1])
# argv[2] is treated as a numeric threshold value below.
z.append(sys.argv[2])
y.append(x[0])
# Kick off the external check script with the original arguments.
cmdline=['./pump.sh','UnixChkUser2']+sys.argv[1:]
result=subprocess.run(cmdline,stdout=subprocess.PIPE)
# Poll every 2s until either the 'logged' value drops to <= 100 or the
# 'updlogged' marker changes from its initial snapshot.
# NOTE(review): inside the loop y tracks 'updlogged' while z tracks
# 'logged' - confirm this mixed exit condition is intended.
while float(z[0]) > 100 and str(y[0])==str(x[0]):
    time.sleep(2)
    y=etcdget.etcdget('updlogged/'+sys.argv[1])
    z=etcdget.etcdget('logged/'+sys.argv[1])
# Report the final 'logged' value.
y=etcdget.etcdget('logged/'+sys.argv[1])
print(y[0])
| 518 | 252 |
# Generated by Django 3.1.5 on 2021-02-04 19:45
from django.db import migrations, models
import django.db.models.manager
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Blog table with a
    # plain integer primary key (not auto-incrementing) and a manager
    # exposed as "blogs".
    initial = True
    dependencies = []
    operations = [
        migrations.CreateModel(
            name="Blog",
            fields=[
                ("id", models.IntegerField(primary_key=True, serialize=False)),
                ("slug", models.SlugField(default="")),
                ("title", models.TextField()),
                ("date", models.DateField()),
                ("content", models.TextField()),
            ],
            managers=[
                ("blogs", django.db.models.manager.Manager()),
            ],
        ),
    ]
| 715 | 196 |
import numpy as np
from scipy.optimize import fmin_cg
from utils.optimize import gradient_desc, computeNumericalGradient
from utils.utility import sigmoid, sigmoid_grad, ravel, unravel
import sys
np.set_printoptions(threshold=sys.maxsize)
np.seterr(divide = 'ignore')
class NeuralNetwork:
    """Fully connected feed-forward network trained per-sample with
    backpropagation and regularized gradient descent.

    Parameters
    ----------
    hidden_layers : int
        Number of hidden layers.
    units : int
        Number of units in each hidden layer.
    """

    def __init__(self, hidden_layers, units):
        self.hidden_layers = hidden_layers
        self.units = units

    def _forward_prop(self, X, y, theta, i):
        """Forward-propagate sample *i*; return its cost contribution
        plus the per-layer pre-activations ``z_`` and activations ``a_``
        needed by backpropagation."""
        m, n = X.shape
        a_ = []
        z_ = []
        J = 0
        a = np.array(X[i, :].T).reshape(n, 1)
        a_.append(a)
        z_.append(np.concatenate((np.ones((1, 1)), a), axis=0))
        for j in range(self.hidden_layers+1):
            z = np.array(theta[j] @ a)
            a = sigmoid(z)
            # Prepend the bias unit before feeding the next layer.
            a = np.concatenate((np.ones((1, 1)), a), axis=0)
            a_.append(a)
            z_.append(z)
        a_L = a[1:]
        # Cross-entropy contribution of this sample (scaled by 1/m).
        J = ((((y[i, :])@np.log(a_L)) + ((1-y[i, :])@np.log(1-a_L)))/m)
        return J, np.array(z_), np.array(a_)

    def _back_prop(self, X, y, theta, z, a):
        """Backpropagate one sample's error; return per-layer gradients."""
        y = np.array(y).reshape(10, 1)
        a_L = a[-1][1:, :]
        theta_grad = []
        delta = a_L - y
        theta_grad.append(np.array(delta * a[len(theta)-1].T))
        # Walk backwards through the hidden layers.
        for j in range(len(theta)-1, 0, -1):
            delta = (theta[j][:, 1:].T @ delta) * sigmoid_grad(z[j])
            theta_grad.append(np.array(delta * a[j-1].T))
        return theta_grad[::-1]

    def _cost_function(self, theta, X, y, lambda_):
        """Regularized cost and (raveled) gradient over all samples."""
        m, n = X.shape
        J = 0
        theta = np.array(unravel(theta, self.units, self.hidden_layers, n-1))
        theta_grad = np.array([np.zeros(x.shape) for x in theta])
        for i in range(m):
            j, z, a = self._forward_prop(X, y, theta, i)
            J -= j
            theta_grad += np.array(self._back_prop(X, y[i, :].T, theta, z, a))
        # L2 regularization term (bias column excluded).
        reg_sum = 0
        for i in range(self.hidden_layers+1):
            reg_sum += sum(sum(np.array(theta[i])**2)[1:])
        J += (lambda_/(2*m))*reg_sum
        reg_sum = 0
        for i in range(self.hidden_layers+1):
            reg_sum += sum(sum(np.array(theta[i]))[1:])
        # Regularize every gradient column except the bias column.
        for i in range(len(theta)):
            theta_grad[i] = np.c_[(1/m)*theta_grad[i][:, 0], ((1/m)*theta_grad[i][:, 1:]+(lambda_/m)*theta[i][:, 1:])]
        theta_grad = ravel(theta_grad)
        return J, theta_grad

    def fit(self, X, y, lambda_=1, iter=500, alpha=1, disp_curve=False):
        """ Fit the Nural Network according to the given training data.
        Parameters
        ----------
        X : Numpy array, shape: (n_samples, n_features).
            Training Data. Consists of feature vectors with n_features.
        y : Numpy array (vector), shape: (n_samples, 1).
            Target Values or Labels. It is a vector with n_samples elements.
        lambda_ : Scalar, Real-Number.
            Regularization Parameter.
        alpha : Scalar, Real-Number.
            It defines the step-size to be taken during gradient descent.
        iter : Scalar, Positive Integer.
            It defines the number of times to run gradient descent.
        disp_curve : forwarded to gradient_desc. Bug fix: this name was
            previously referenced without ever being defined, which made
            fit() raise NameError unconditionally. Default False -
            TODO confirm the expected type with utils.optimize.gradient_desc.
        Return
        ------
        self : object
        """
        m, n = X.shape
        X = np.concatenate((np.ones((m, 1)), X), axis=1)
        # Small random (deterministically bounded via sin) initial weights.
        theta = np.random.randn((n+1)*self.units + self.units*(self.units+1)*(self.hidden_layers-1) + 10*(self.units+1), 1) * 2
        theta = np.sin(theta)
        # One-hot encode the labels.
        y_ = np.zeros((m, max(y)[0]+1))
        for i in range(m):
            y_[i, y[i][0]] = 1
        y = y_
        self.optimal_theta = gradient_desc(self._cost_function, X, y, theta, lambda_, iter, alpha, disp_curve)
        return self

    def predict(self, X):
        """Predict class indices for each row of X using the fitted weights."""
        m, n = X.shape
        X = np.concatenate((np.ones((m, 1)), X), axis=1)
        m, n = X.shape
        y_predict = np.zeros((X.shape[0], 1))
        theta = unravel(self.optimal_theta, self.units, self.hidden_layers, n-1)
        for i in range(m):
            a = np.array(X[i, :].T).reshape(n, 1)
            for j in range(self.hidden_layers+1):
                z = np.array(theta[j] @ a)
                a = sigmoid(z)
                a = np.concatenate((np.ones((1, 1)), a), axis=0)
            a_L = a[1:]
            # Argmax over the output activations.
            y_predict[i] = list(a_L).index(max(a_L))
        return y_predict
| 4,269 | 1,630 |
from dpsutil.attrdict import DefaultDict, AttrDict
from dpsutil.attrdict.decorator import _get_vars_cls
from os import environ
class Environment(DefaultDict):
    """
    Environment: Auto pair environment parameters fit with default, which provided before.
    Implement attrdict.DefaultDict
    Auto check and broadcast to default value's type.

    Example:
        env = Environment(HOME="./", DEFAULT_KEY="default_value")
        env  # {'HOME': '/home/user_name', 'DEFAULT_KEY': 'default_value'}

        # Get item with both lower and upper key.
        env.home  # '/home/user_name'
        env.HOME  # '/home/user_name'

    **Note: KEY must be upper or lower. Otherwise, raise KeyError

    Compare (without space character):
        # Regularly way. -> 433 characters
        configs = {
            'kafka_host': f'{os.environ.get('KAFKA_HOST', 'localhost')}:{os.environ.get('KAFKA_PORT', '9092')}',
            'kafka_user_name': os.environ.get('KAFKA_USER_NAME'),
            'kafka_password': os.environ.get('KAFKA_PASSWORD'),
            'redis_host': os.environ.get('REDIS_HOST', 'localhost'),
            'redis_port': os.environ.get('REDIS_PORT', '6379'),
            'redis_password': os.environ.get('REDIS_PASSWORD'),
            'redis_expire_time': int(os.environ.get('REDIS_EXPIRE_TIME', 60))
        }

        # With Environment -> 185 characters
        configs = Environment(KAFKA_HOST='localhost', KAFKA_PORT='9092', KAFKA_USER_NAME=None, KAFKA_PASSWORD=None,
                              REDIS_HOST='localhost', REDIS_PORT='6379', REDIS_PASSWORD=None, REDIS_EXPIRE_TIME=60)
    ==================
    Supported decorator.
    @environment.decorator
    Decorator create EnvDict base on attribute of class.

    @environment.env_decorator
    class CustomEnv:
        KAFKA_HOST = 'localhost'
        KAFKA_PORT = '9092'
        KAFKA_USER_NAME = None
        KAFKA_PASSWORD = None
        REDIS_HOST = 'localhost'
        REDIS_PORT = '6379'
        REDIS_PASSWORD = None
        REDIS_EXPIRE_TIME = 60
    """

    def __init__(self, *args, **default_params):
        super().__init__(*args, **default_params)

    def setdefault(self, _k=None, _v=None, **kwargs):
        """Register defaults; any key present in os.environ wins over it."""
        if _k:
            kwargs.update({_k: _v})
        for k, v in kwargs.items():
            k = self._cvt_key(k)
            super().setdefault(k, v)
            if k in environ:
                super().__setitem__(k, environ[k])

    @staticmethod
    def _cvt_key(key):
        """Normalize *key* to upper case; reject mixed-case names."""
        if key.islower():
            key = key.upper()
        if not key.isupper():
            raise KeyError(f"Environment name must be {key.upper()} or {key.lower()}. But got: {key}")
        return key

    def _cvt_value(self, key, value):
        """Cast *value* to the registered default's type, when both exist."""
        if value is not None and self.get_default(key) is not None \
                and not isinstance(value, self.get_default(key).__class__):
            try:
                value = self.get_default(key).__class__(value)
            except ValueError:
                # Message typo fixed ("is't" -> "isn't").
                raise ValueError("Type of default isn't same as set value. Change default before set.")
        return value

    def __getitem__(self, key):
        key = self._cvt_key(key)
        return super().__getitem__(key)

    def __setitem__(self, key, value):
        key = self._cvt_key(key)
        value = self._cvt_value(key, value)
        return super().__setitem__(key, value)

    def __delitem__(self, key):
        # Message grammar fixed; behavior (always raising) unchanged.
        raise AttributeError("Clearing values isn't supported. Get the default value via get_default instead.")

    def __getattr__(self, key):
        key = self._cvt_key(key)
        return super().__getattr__(key)

    def __setattr__(self, key, value):
        key = self._cvt_key(key)
        value = self._cvt_value(key, value)
        # Bug fix: this previously called super().__setattr__(value, value),
        # losing the attribute name and storing the value under itself.
        return super().__setattr__(key, value)

    def __delattr__(self, key, **kwargs):
        key = self._cvt_key(key)
        return super().__delattr__(key)

    def get_lower_dict(self):
        """Return an AttrDict copy of self with all keys lower-cased."""
        curr = AttrDict(self)
        for k in list(curr.keys()):
            value = curr.pop(k)
            curr[k.lower()] = value
        return curr
def env_decorator(_cls):
    """
    Decorator create EnvDict base on attribute of class.

    Note: the decorated name becomes a zero-argument factory - calling it
    returns a fresh ``Environment`` built from the class' attributes.

    @environment.env_decorator
    class CustomEnv:
        KAFKA_HOST = 'localhost'
        KAFKA_PORT = '9092'
        KAFKA_USER_NAME = None
        KAFKA_PASSWORD = None
        REDIS_HOST = 'localhost'
        REDIS_PORT = '6379'
        REDIS_PASSWORD = None
        REDIS_EXPIRE_TIME = 60
    """
    def instance():
        # Build the Environment lazily from the class' own attributes.
        return Environment(_get_vars_cls(_cls))
    return instance
__all__ = ['Environment', 'env_decorator']
| 4,597 | 1,501 |
#!/usr/bin/env python3
import requests
import sys
import datetime
from dateutil.parser import parse
import argparse
class User:
    # Class-level cache mapping user API URL -> fetched profile JSON so
    # that repeated PR authors/reviewers trigger only one API call each.
    cache = {}

    def __init__(self, userurl, token):
        """Fetch the GitHub user info for *userurl*, reusing the cache."""
        headers = {'Authorization': 'token {}'.format(token)}
        if userurl in User.cache:
            self.info = User.cache[userurl]
        else:
            self.info = requests.get(userurl, headers=headers).json()
            # Bug fix: the cache was checked but never populated, so
            # every lookup hit the network.
            User.cache[userurl] = self.info

    def __str__(self):
        """Display name, falling back to login, then the string "None"."""
        try:
            if not self.info['name']:
                return self.info['login']
            return self.info['name']
        except (KeyError, TypeError):
            # Narrowed from a bare except: error payloads may lack keys.
            return "None"
class ScyllaPR:
    """Wraps one GitHub pull-request JSON entry with date/state helpers."""
    def __init__(self, json, token):
        self.token = token
        self.created_at = parse(json['created_at']).date()
        self.url = json['html_url']
        # closed_at / merged_at are null for open/unmerged PRs; parse(None)
        # raises TypeError, which is translated into None here.
        try:
            self.closed_at = parse(json['closed_at']).date()
        except TypeError:
            self.closed_at = None
        try:
            self.merged_at = parse(json['merged_at']).date()
        except TypeError:
            self.merged_at = None
        self.title = json['title']
        self.user = User(json['user']['url'], self.token)
        self.reviewers = [ User(x['url'], self.token) for x in json['requested_reviewers'] ]
    def timeToClose(self):
        # Days from creation to close (only meaningful once closed).
        return (self.closed_at - self.created_at).days
    def openFor(self):
        # Days the PR has been open so far.
        return (datetime.date.today() - self.created_at).days
    def isOpen(self):
        return not self.closed_at
    def isAbandoned(self):
        # Closed without being merged.
        return self.closed_at and not self.merged_at
    def isMerged(self):
        return self.merged_at
    def needsAttention(self, days=15):
        # Open for longer than *days* days.
        return self.isOpen() and datetime.date.today() - datetime.timedelta(days=days) > self.created_at
    def __str__(self):
        s = ""
        s += "\tAuthor : {}\n".format(self.user)
        s += "\tTitle : {}\n".format(self.title)
        s += "\tURL : {}\n".format(self.url)
        if self.isOpen():
            s += "\tCreated at : {} ({} days ago)\n".format(self.created_at, self.openFor())
        else:
            s += "\tCreated at : {} and Closed at {} ({} after days)\n".format(self.created_at, self.closed_at, self.timeToClose())
        return s
def read_all(dummy):
    """Accept every PR: the default filter used by getGithubData."""
    return True
def getGithubData(url, token, add_criteria = read_all):
    """Collect ScyllaPR objects from a paginated GitHub API endpoint.

    Follows the RFC-5988 ``Link: rel="next"`` headers until the last page.
    Exits the process when any page returns a non-200 status.

    :param url: first page of the pulls listing
    :param token: GitHub API token
    :param add_criteria: predicate on the raw JSON entry; entries for which
        it returns False are skipped
    """
    headers = {'Authorization': 'token {}'.format(token)}
    collected = []
    next_url = url
    while True:
        resp = requests.get(next_url, headers=headers)
        if resp.status_code != 200:
            print("Can't contact github API", file=sys.stderr)
            sys.exit(-1)
        payload = resp.json()
        collected.extend(ScyllaPR(entry, token)
                         for entry in payload if add_criteria(entry))
        if 'next' not in resp.links:
            return collected
        next_url = resp.links['next']['url']
def printHistogram(closedPR, action = "merge", actor=True):
    """Print average, peak, and a text histogram of days-to-<action>.

    BUG FIXES:
    - durations longer than the largest bin (120 days) were silently
      dropped from the histogram; they are now counted in the last bin so
      the bar totals match len(closedPR).
    - an empty PR list raised ZeroDivisionError; it now prints a notice.

    :param closedPR: objects exposing timeToClose() -> int days
    :param action: verb used in the printed labels ("merge"/"abandon")
    :param actor: kept for interface compatibility (unused)
    """
    bin_edges = (0, 1, 2, 3, 4, 5, 6, 7, 15, 21, 30, 60, 120)
    bins = {k: 0 for k in bin_edges}
    sorted_keys = sorted(bins.keys())
    data = [pr.timeToClose() for pr in closedPR]
    if not data:
        print("\tNo pull requests to report for {}".format(action))
        return
    for days in data:
        for k in sorted_keys:
            if days <= k:
                bins[k] += 1
                break
        else:
            # Overflow: longer than the largest edge -> last bin.
            bins[sorted_keys[-1]] += 1
    print("\tAverage time to {}: {:d} days".format(action, int(sum(data) / len(data))))
    print("\tPeak time to {}: {:d} days".format(action, int(max(data))))
    print("\tHistogram of {} time: in days".format(action))
    # Trim empty trailing bins so the output stays compact.
    while bins[sorted_keys[-1]] == 0:
        sorted_keys.pop()
    for k in sorted_keys:
        print("\t\t{:3d}: {}".format(k, bins[k] * '@'))
def printStats(days, openPR, abandonedPR = None, mergedPR = None):
    """Print summary statistics for merged, abandoned, and open PRs.

    :param days: look-back window in days; falsy means the repo's whole life
    :param openPR: currently-open PR objects (sorted in place, oldest first)
    :param abandonedPR: closed-but-unmerged PRs, or None/empty to skip
    :param mergedPR: merged PRs, or None/empty to skip
    """
    if days:
        # BUG FIX: `.format(days)` passed the value positionally, leaving the
        # named {days} placeholder unfilled and raising KeyError at runtime.
        period = "for the past {days} days".format(days=days)
    else:
        period = "For the entire life of the repository"
    if mergedPR:
        print("Merged Pull Requests {period}: {m}\n".format(period=period, m=len(mergedPR)))
        printHistogram(mergedPR, "merge")
    if abandonedPR:
        print("\nAbandoned Pull Requests {period}: {m}\n".format(period=period, m=len(abandonedPR)))
        printHistogram(abandonedPR, "abandon")
    print("\nCurrently Open Pull Requests: {m}\n".format(m=len(openPR)))
    attDay = 15
    openPR.sort(key=lambda x: x.openFor(), reverse=True)
    needsAttention = [str(x) for x in openPR if x.needsAttention(attDay)]
    if len(needsAttention) > 0:
        print("Pull Requests needing attention: (open for more than {} days):".format(attDay))
        for entry in needsAttention:
            print(entry)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Parse github statistics about our repos')
    parser.add_argument('--repo', type=str, default='scylla', help='Repository')
    parser.add_argument('--period', type=int, help='days to look back')
    parser.add_argument('--token', type=str, required=True, help='github authentication token. Without it, this will be rate limited and fail anyway')
    parser.add_argument('--open-only', action='store_true', help='Only look at open PRs')
    args = parser.parse_args()
    # BUG FIX: additional query-string parameters must be joined with '&'.
    # The old URLs repeated '?', which made sort/direction part of the
    # `state` value instead of separate parameters.
    open_pr_url = 'https://api.github.com/repos/scylladb/{}/pulls?state=open&sort=created_at&direction=desc'
    closed_pr_url = 'https://api.github.com/repos/scylladb/{}/pulls?state=closed&sort=closed_at&direction=desc'
    openPR = getGithubData(open_pr_url.format(args.repo), args.token)
    abandonedPR = []
    mergedPR = []
    if not args.open_only:
        def shouldIncludePR(data):
            # Keep closed PRs whose close date falls inside the window.
            days = args.period
            if not days:
                return True
            return datetime.date.today() - datetime.timedelta(days=days) < parse(data['closed_at']).date()
        closedPR = getGithubData(closed_pr_url.format(args.repo), args.token, shouldIncludePR)
        for x in closedPR:
            if x.isOpen():
                raise Exception("Not expecting an open PR")
            if x.isAbandoned():
                abandonedPR.append(x)
            elif x.isMerged():
                mergedPR.append(x)
    printStats(args.period, openPR, abandonedPR, mergedPR)
| 6,192 | 1,986 |
"""
Objective
In this challenge, we go further with normal distributions.
Task
The final grades for a Physics exam taken by a large group of students have a mean of mu=70 and
a standard deviation of sigma=10. If we can approximate the distribution of these grades by a normal distribution,
what percentage of the students:
1. Scored higher than 80 (i.e., have a grade > 80)?
2. Passed the test (i.e., have a grade >= 60)?
3. Failed the test (i.e., have a grade < 60)?
Find and print the answer to each question on a new line, rounded to a scale of 2 decimal places.
"""
from math import erf, sqrt
def cdf_norm(mu=70, sigma=10, lower=-float('inf'), upper=80):
    """Probability that a Normal(mu, sigma) variable lies in (lower, upper].

    Computed as Phi(upper) - Phi(lower) using the error function.
    """
    def phi(x):
        # Standard normal CDF evaluated at the standardized point.
        return 0.5 * (1 + erf((x - mu) / sigma / sqrt(2)))
    return phi(upper) - phi(lower)
# Answer the three questions as percentages rounded to 2 decimals:
# scored above 80, passed (>= 60), failed (< 60).
p_above_80 = 1 - cdf_norm()
p_below_60 = cdf_norm(upper=60)
for prob in (p_above_80, 1 - p_below_60, p_below_60):
    print(round(100 * prob, 2))
import numpy as np
from astropy import units as u
def C3toDV(C3, muE, rOrbit, outputUnits=None):
    """Delta-v to reach characteristic energy C3 from a circular parking orbit.

    Assumes all units are SI base if not stated.

    :param C3: characteristic energy (v_infinity squared)
    :param muE: gravitational parameter of the central body
    :param rOrbit: radius of the circular parking orbit
    :param outputUnits: optional astropy unit to convert the result to
        (requires the inputs to carry units)
    :return: required delta-v
    """
    v_circ_sq = muE / rOrbit
    # Departure speed at periapsis minus the circular orbital speed.
    DV = np.sqrt(C3 + 2.0 * v_circ_sq) - np.sqrt(v_circ_sq)
    if outputUnits is not None:
        DV = DV.to(outputUnits)
    return DV
from setuptools import setup, find_packages
# Runtime dependencies; tqdm is the only hard requirement.
requirements = [
    "tqdm",
]

setup(
    name='vmz_interface',
    version='0.1.0',
    # Ship every package except the test suite.
    packages=find_packages(exclude=['tests']),
    url='https://github.com/fksato/vmz_interface',
    author='Fukushi Sato',
    author_email='f.kazuo.sato@gmail.com',
    description='Facebook VMZ interface',
    install_requires=requirements,
    classifiers=[
        'Development Status :: Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: Apache License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)
| 654 | 235 |
from flask import Blueprint, request, jsonify, make_response
from api.v2.models.parties import PartiesModelDb
from flask_jwt_extended import jwt_required
from . import check_user, id_conversion, resource_handler
parties_api_v2 = Blueprint('parties_v2', __name__, url_prefix="/api/v2")
@parties_api_v2.route("/parties", methods=['POST'])
@jwt_required
def api_create_parties():
    """Create a political party from the JSON request body."""
    payload = request.get_json(force=True)
    return resource_handler('party', payload)
@parties_api_v2.route("/parties", methods=['GET'])
@jwt_required
def api_get_parties():
    """Return every registered political party."""
    all_parties = PartiesModelDb().get_resource('party')
    return make_response(jsonify({"status": 200, "data": all_parties}), 200)
@parties_api_v2.route("/parties/<party_id>/name", methods=['PATCH'])
@jwt_required
def api_edit_party(party_id):
    """Rename a party; admin only.

    Expects a JSON body containing at least a 'name' key. Returns 400 on
    bad id/data, 409 on a duplicate name, 401 for non-admins, 200 on success.
    """
    # check_user() includes the marker string for non-admin callers.
    if 'Requires Admin Privilege' not in check_user():
        pid = id_conversion(party_id)
        updated_party_data = request.get_json(force=True)
        # Require at least the 'name' field in the payload.
        if {'name'} <= set(updated_party_data):
            model_result = PartiesModelDb().edit_resource('party', updated_party_data['name'], pid)
            # The model reports failures via marker strings in its result.
            if 'Invalid Id' in model_result or 'Invalid Data' in model_result:
                return make_response(jsonify({"status": 400, "error": "Invalid Data ,Check id or data being updated"}),
                                     400)
            elif 'Party Exists' in model_result:
                return make_response(jsonify({"status": 409, "error": "Party with similar name exists"}), 409)
            return make_response(
                jsonify({"status": 200, "message": "{} Updated Successfully".format(model_result[0][1])}),
                200)
        return make_response(jsonify({"status": 400, "error": "Please Check All Input Fields Are Filled"}), 400)
    return make_response(jsonify({"status": 401, "error": "Unauthorized Access,Requires Admin Rights"}), 401)
@parties_api_v2.route("/parties/<party_id>", methods=['GET'])
@jwt_required
def api_specific_party_get(party_id):
    """Return a single party by id, or 404 when it does not exist.

    FIX: removed a leftover debug `print(party)` that wrote the raw DB row
    to stdout on every request.
    """
    oid = id_conversion(party_id)
    party = PartiesModelDb().get_specific_resource('party', oid)
    # The model returns a list of rows on success; anything else is a miss.
    if isinstance(party, list) and len(party) >= 1:
        response_body = {
            "id": party[0][0],
            "name": party[0][1],
            "hqAddress": party[0][2],
            "logoUrl": party[0][3]
        }
        return make_response(jsonify({"status": 200, "data": [response_body]}), 200)
    return make_response(jsonify({"status": 404, "error": "Party Not Found"}), 404)
@parties_api_v2.route("/parties/<party_id>", methods=['DELETE'])
@jwt_required
def api_specific_party_delete(party_id):
    """Delete a party by id; admin only."""
    # Guard clause: reject non-admin callers immediately.
    if 'Requires Admin Privilege' in check_user():
        return make_response(jsonify({"status": 401, "error": "Unauthorized Access,Requires Admin Rights"}), 401)
    oid = id_conversion(party_id)
    deleted = PartiesModelDb().delete_resource('party', oid)
    if isinstance(deleted, list):
        return make_response(jsonify({"status": 200, "message": "{} Deleted".format(deleted[0][0])}), 200)
    return make_response(jsonify({"status": 404, "error": "Party Not Found"}), 404)
| 3,132 | 1,061 |
from pathlib import Path
import re
import time
from capture import getClipboardImage, NoImageData
PATHNAME_REGEX = re.compile(r'(\w+\.*)*\w')
def regexMatch(regex, s):
    """Return True when *s* fully matches the compiled pattern *regex*."""
    # `is not None` is the idiomatic null test; `!= None` invokes __eq__.
    return regex.fullmatch(s) is not None
def imgCompare(img1, img2):
    """True when both images exist, share dimensions, and have identical pixels."""
    if not (img1 and img2):
        return False
    if img1.size != img2.size:
        return False
    return img1.tobytes() == img2.tobytes()
def main():
    """Poll the clipboard and save each new image into a numbered workspace.

    Prompts for a workspace name under ./saves/, resumes the image counter
    from the workspace's .wdinfo file, then loops saving every distinct
    clipboard image as <n>.png until interrupted.
    """
    iwdir = input('Working directory: ')
    if not regexMatch(PATHNAME_REGEX, iwdir):
        raise Exception('Invalid directory name.')
    wdir = Path('saves') / iwdir
    wdir.mkdir(parents=True, exist_ok=True)
    ## Init workspace
    # .wdinfo stores the next image number so a workspace can be resumed.
    wdinfo = Path(wdir) / '.wdinfo'
    if wdinfo.is_file():
        with wdinfo.open() as file:
            try:
                wdcnt = int(file.readline())
            except:
                raise Exception('Error while loading workspace.')
    else:
        wdcnt = 0
    ## Wait for input
    lstimg = None
    while True:
        try:
            img = getClipboardImage()
        except NoImageData:
            # Nothing image-like on the clipboard yet; poll again shortly.
            time.sleep(.1)
            continue
        except (KeyboardInterrupt, SystemExit):
            break
        else:
            # Skip duplicates: only save when the clipboard image changed.
            if imgCompare(img, lstimg):
                time.sleep(.2)
                continue
            img_path = wdir / '{}.png'.format(wdcnt)
            img.save(img_path.resolve())
            wdcnt += 1
            lstimg = img
        finally:
            # NOTE(review): the counter file is rewritten on every loop
            # iteration, including no-op ones -- presumably deliberate so a
            # crash never loses the count; confirm before changing.
            with wdinfo.open('w') as file:
                file.write('{}\n'.format(wdcnt))
main() | 1,307 | 565 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["results"]
import os
import triangle
import numpy as np
import cPickle as pickle
import matplotlib.pyplot as pl
def results(fn):
    """Plot the light-curve data, posterior predictive draws, and a corner plot.

    :param fn: path to a pickle containing a (model, sampler) tuple; output
        figures are written next to it as *-results.pdf and *-triangle.png.
    """
    # FIX: use a context manager so the pickle file handle is closed
    # deterministically instead of being leaked.
    with open(fn, "rb") as f:
        model, sampler = pickle.load(f)
    mu = np.median(model.f)
    # Convert flux to parts-per-million relative to the median.
    ppm = lambda f: (f / mu - 1) * 1e6

    # Plot the data.
    fig = pl.figure(figsize=(6, 6))
    ax = fig.add_subplot(111)
    ax.plot(model.t, ppm(model.f), ".k")
    ax.set_xlim(np.min(model.t), np.max(model.t))
    ax.set_xlabel("time since transit [days]")
    ax.set_ylabel("relative flux [ppm]")
    fig.subplots_adjust(left=0.2, bottom=0.2, top=0.9, right=0.9)

    # Plot the predictions: overlay ten random posterior draws.
    samples = sampler.flatchain
    t = np.linspace(model.t.min(), model.t.max(), 1000)
    for i in np.random.randint(len(samples), size=10):
        model.vector = samples[i]
        ax.plot(t, ppm(model.predict(t)), color="#4682b4", alpha=0.5)
    fig.savefig(os.path.splitext(fn)[0] + "-results.pdf")

    # Plot the corner plot.
    fig = triangle.corner(samples, labels=model.labels,
                          truths=model.true_vector)
    fig.savefig(os.path.splitext(fn)[0] + "-triangle.png")
if __name__ == "__main__":
    import sys
    # Usage: results.py <pickle-file>
    results(sys.argv[1])
| 1,288 | 509 |
import torch
import torch.nn as nn
import numpy as np
class Encoder(nn.Module):
    """Convolutional encoder: (N, 3, H, W) images -> (N, latent_dim) vectors.

    Three stride-2 conv stages halve the spatial size each time; the
    flattened feature map (10240 values for 64x80 inputs) is projected to
    the latent dimension.
    """

    def __init__(self, latent_dim, input_shape, name="encoder", **kwargs):
        super().__init__()
        layers = []
        for c_in, c_out in ((3, 32), (32, 64), (64, 128)):
            layers.append(nn.Conv2d(in_channels=c_in, out_channels=c_out,
                                    kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)))
            layers.append(nn.ReLU())
        layers.append(nn.Flatten())
        # 128 channels x 8 x 10 spatial positions for 64x80 inputs.
        layers.append(nn.Linear(in_features=10240, out_features=latent_dim))
        layers.append(nn.ReLU())
        self.dense_proj = nn.Sequential(*layers)

    def forward(self, x):
        return self.dense_proj(x)
class Decoder(nn.Module):
    """Transposed-conv decoder: (N, latent_dim) vectors -> (N, 3, 64, 80) images."""

    def __init__(self, output_shape, latent_dim, name="decoder", **kwargs):
        super().__init__()
        # Project the latent vector up to a 128-channel 8x10 feature map.
        self.dense_proj = nn.Sequential(
            nn.Linear(in_features=latent_dim, out_features=8 * 10 * 128),
            nn.ReLU(),
        )
        # Three stride-2 upsampling stages (8x10 -> 64x80), then a stride-1
        # conv down to 3 channels with a sigmoid to keep pixels in [0, 1].
        upsample = []
        for c_in, c_out in ((128, 128), (128, 64), (64, 32)):
            upsample.append(nn.ConvTranspose2d(in_channels=c_in, out_channels=c_out,
                                               kernel_size=(3, 3), stride=(2, 2),
                                               padding=(1, 1), output_padding=(1, 1)))
            upsample.append(nn.ReLU())
        upsample.append(nn.ConvTranspose2d(in_channels=32, out_channels=3,
                                           kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)))
        upsample.append(nn.Sigmoid())
        self.conv_proj = nn.Sequential(*upsample)

    def forward(self, inputs):
        features = self.dense_proj(inputs)
        features = features.view(-1, 128, 8, 10)
        return self.conv_proj(features)
class AutoEncoder(nn.Module):
    """End-to-end encoder/decoder pair trained as a single module."""

    def __init__(
        self,
        input_shape=(64, 80, 3),
        latent_dim=16,
        name="autoencoder",
        **kwargs
    ):
        super().__init__()
        self.encoder = Encoder(latent_dim=latent_dim, input_shape=input_shape)
        self.decoder = Decoder(output_shape=input_shape, latent_dim=latent_dim)

    def forward(self, inputs):
        latent = self.encoder(inputs)
        return self.decoder(latent)
| 2,681 | 1,030 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from os import access, X_OK
from os.path import isabs, isdir
class MetaMapLite(abc.ABC):
    """ Abstract base class for extracting concepts from text using
        MetaMapLite. To use this you will need to have downloaded the
        recent MetaMapLite software from NLM. metamap_home should point
        to the public_mm_lite folder which contains metamaplite.sh.
        Subclasses need to override the extract_concepts method.
    """
    # BUG FIX: the previous `__metaclass__ = abc.ABCMeta` attribute is
    # Python 2 syntax with no effect under Python 3, so the class was
    # silently instantiable. Inheriting abc.ABC restores the abstract
    # contract.

    def __init__(self, metamap_home):
        """Validate and store the MetaMapLite installation directory."""
        self.metamap_home = str(metamap_home)
        assert isdir(self.metamap_home), "metamap_home: {0} :: Please provide public_mm_lite directory path" \
            " which contains metamaplite.sh".format(self.metamap_home)
        assert access(self.metamap_home, X_OK), "User doesn't have executable access to metamap_home: {}".format(self.metamap_home)
        assert isabs(self.metamap_home), "metamap_home: {0} should be an absolute path".format(self.metamap_home)

    @abc.abstractmethod
    def extract_concepts(self, sentences=None, ids=None, filename=None):
        """ Extract concepts from a list of sentences using MetaMapLite. """
        return

    @staticmethod
    def get_instance(metamap_home, backend='subprocess', **extra_args):
        """Factory returning a concrete backend implementation."""
        extra_args.update(metamap_home=metamap_home)
        assert isabs(metamap_home), "metamap_home: {0} should be an absolute path".format(metamap_home)
        if backend == 'subprocess':
            from .SubprocessBackendLite import SubprocessBackendLite
            return SubprocessBackendLite(**extra_args)
        raise ValueError("Unknown backend: %r (known backends: "
                         "'subprocess')" % backend)
| 2,277 | 674 |
# -*- coding: utf-8 -*-
"""
This example demonstrates fitting a model to a (simulated) dataset using
numpyext.chi2_fit, which wraps Minuit.
"""
import numpy as np
from matplotlib import pyplot
from pyik.fit import ChiSquareFunction
from pyik.mplext import cornertext
np.random.seed(1)
def model(x, pars):
    """Piecewise model: linear up to x_break, linear plus quadratic beyond.

    Needs to be vectorized; scalars are promoted to 1-d arrays.

    :param x: scalar or array of abscissa values
    :param pars: (a0, a1, x_break) parameter vector
    :return: array of model values with the same length as x
    """
    a0, a1, x_break = pars
    x = np.atleast_1d(x)
    y = np.empty_like(x)
    below = x <= x_break
    y[below] = a0 * x[below]
    tail = x[~below]
    y[~below] = a0 * tail + a1 * (tail - x_break) ** 2
    return y
# Simulate a dataset of n measurements
n = 20
parsTrue = (2.0, 0.5, 13.0)
xs = np.linspace(0, 20, n)
ys = model(xs, parsTrue)

# Add some noise to the points
eys = 1.5 * np.ones(n)
ys += np.random.randn(n) * eys

# Perform a fit to the data points; reuse the errors used to generate the noise
# Note: fits to data without xerrors are much faster
starts = (1.0, 1.0, 10.0)  # starting values
# define bounds for parameter "x_break"
lower_bounds = (-np.inf, -np.inf, 0.0)
upper_bounds = (np.inf, np.inf, 20.0)
pars, cov, chi2, ndof = \
    ChiSquareFunction(model, xs, ys, eys) \
    .Minimize(starts,
              lower_bounds=lower_bounds,
              upper_bounds=upper_bounds)

# Generate a plot of the fit: fitted curve plus the noisy data points.
new_xs = np.linspace(0., 20, 1000)
figure = pyplot.figure()
pyplot.plot(new_xs, model(new_xs, pars), 'b')
pyplot.errorbar(xs, ys, eys, fmt='ok')
pyplot.xlim(-1, 21)
pyplot.xlabel("x")
pyplot.ylabel("y")

# Annotate the plot with fitted vs. true parameter values and chi2/ndof.
s = "Fit Example:\n"
for i, label in enumerate(("a_0", "a_1", "x_{brk}")):
    s += "$%s = %.2f \pm %.2f$ (True: $%.1f$)\n" % (label, pars[i], cov[i, i]**0.5, parsTrue[i])
s += "$\\chi^2 / n_{dof} = %.3f$" % (chi2 / ndof)
cornertext(s)
pyplot.show()
| 1,779 | 759 |
#############################################################
## ##
## Copyright (c) 2003-2017 by The University of Queensland ##
## Centre for Geoscience Computing ##
## http://earth.uq.edu.au/centre-geoscience-computing ##
## ##
## Primary Business: Brisbane, Queensland, Australia ##
## Licensed under the Open Software License version 3.0 ##
## http://www.apache.org/licenses/LICENSE-2.0 ##
## ##
#############################################################
__docformat__ = "restructuredtext en"
import esys.lsm.doc.Util
import esys.lsm.util.InstallInfo
__esysParticleOverviewSection = \
"""
{pkgName:s} Overview
==================================
The Lattice Solid Model (LSM) [MoraPAGEOPH1994]_, [PlaceJCompPhys1999]_ is a particle-based
model similar to the Distinct Element Model [CundallGeotech1979]_.
The model consists of particles which are characterized by
their shape, mass, position, orientation and velocity. The particles
interact with their nearest neighbours by imparting contact forces.
Typically, Discrete Element Model (DEM) particles are spherical and the
contact forces consist of a linear elastic normal component and linear
elastic tangential component. {pkgName:s} is a parallel implementation
of the LSM with a Python_ scripting interface.
.. _Python: http://www.python.org
Particle Types
--------------
Currently, there exist three types of {pkgName:s} spherical particles:
*non-rotational*, *rotational*, *thermal-rotational*:
Non-rotational Spheres
Non-rotational spherical particles possess no rotational
degrees of freedom. Objects of class `esys.lsm.NRotSphere`
represent non-rotational spherical particles.
Rotational Spheres
Rotational spherical particles possess orientation information.
Particles of this type change orientation according to the
applied moments. Objects of class `esys.lsm.RotSphere`
represent rotational spherical particles.
Thermal Rotational Spheres
Thermal rotational spherical particles are the same as "Rotational Spheres"
with the addition of thermal properties (temperature and thermal expansion).
Objects of class `esys.lsm.RotThermalSphere`
represent thermal rotational spherical particles.
Inter-particle Interactions
---------------------------
Interactions between model particles are also classified as
*non-rotational* and *rotational*. Two spherical particles
involved in a non-rotational interaction have
all forces applied at the centre of mass.
Two spherical particles involved in a
rotational interaction experience moments
due to forces which are, typically, applied
at a *contact point*. The inter-particle interactions
include:
Non-rotational Elastic
Purely linear elastic repulsion of particles when in contact.
Non-rotational Bonded
Linear elastic attraction and repulsion while bond remains intact.
Bond *breaks* when a threshold separation distance is reached.
Non-rotational Friction
Linear elastic repulsion, linear elastic shear rigidity and Coulomb
dynamic friction law.
Rotational Elastic
Linear elastic repulsion as well as linear elastic shear rigidity.
Rotational Bonded
Linear elastic tension, compression, shear, torsion and bending
forces while bond remains intact. Bond *breaks* if a threshold
force limit is reached.
Rotational Friction
Linear elastic repulsion, linear elastic shear rigidity and Coulomb
dynamic friction law.
Thermal Non-rotational Elastic
Linear elastic repulsion as well as heat transfer.
Thermal Rotational Bonded
Same as "Rotational Bonded" with addition of heat transfer.
Thermal Rotational Friction
Same as "Rotational Friction" with addition of heat transfer and
heat generation during frictional slip.
Fixed objects
-------------
Particles not only interact with other particles, but also with
*fixed* objects within the model. These fixed objects are not
subject to the laws of motion and provide a means of imposing
particular types of boundary conditions. Fixed objects include:
Walls
An infinite plane characterized by position and normal direction.
Linear Mesh
A piecewise linear mesh which can be used to represent a surface in 2D.
Triangular mesh
A triangular mesh which can be used to represent a surface in 3D.
"""
__citSection = \
"""
References
==========
.. [CundallGeotech1979] P.A. Cundall and O.A.D Strack
(1979)
"A Discrete Numerical Model for Granular Assemblies",
*Ge\'otechnique*,
**vol. 29**,
pp. 47-65.
.. [MoraPAGEOPH1994] P. Mora and D. Place
(1994)
"Simulation of the Stick-Slip Instability",
*Pure Appl. Geophys.*,
**vol. 143**,
pp. 61-87.
.. [PlaceJCompPhys1999] D. Place and P. Mora
(1999)
"The Lattice Solid Model to Simulate the Physics of Rocks and Earthquakes:
Incorporation of Friction",
*J. Comp. Physics*,
**vol. 150**,
pp. 332-372.
"""
# Assemble the module docstring from the named section texts above so the
# Sphinx build can present the overview and citations as one document.
__doc__ = \
    esys.lsm.doc.Util.setSectionDoc("ESySParticleOverviewSection",__esysParticleOverviewSection) \
    + esys.lsm.doc.Util.setSectionDoc("CitationSection",__citSection) \
    + ("\n:summary: {0:s} overview.\n".format(esys.lsm.util.InstallInfo.pkgName))
| 5,406 | 1,550 |
import numpy as np
import sympy as sp
import bmcs_utils.api as bu
from bmcs_cross_section.pullout import MATS1D5BondSlipD
s_x, s_y = sp.symbols('s_x, s_y')
kappa_ = sp.sqrt( s_x**2 + s_y**2 )
get_kappa = sp.lambdify( (s_x, s_y), kappa_, 'numpy' )
def get_tau_s(s_x_n1, s_y_n1, Eps_n, bs, **kw):
    """Damage-model stress update for the slip components (s_x, s_y).

    :param s_x_n1, s_y_n1: trial slip values (scalars or arrays)
    :param Eps_n: previous state array (s_x, s_y, kappa)
    :param bs: bond-slip material model providing E_b and omega_fn_
    :return: (Eps_n1, Sig_n1) -- updated state and stress arrays
    """
    _, _, kappa_n = Eps_n
    kappa_trial = get_kappa(s_x_n1, s_y_n1)
    # Broadcast the stored history variable to the trial grid shape, then
    # enforce irreversibility: kappa can only grow.
    kappa_prev = np.broadcast_to(kappa_n, kappa_trial.shape)
    kappa_n1 = np.max(np.array([kappa_prev, kappa_trial], dtype=np.float_), axis=0)
    omega_n1 = bs.omega_fn_(kappa_n1)
    # Secant (damaged) stiffness applied to each slip component.
    secant = (1 - omega_n1) * bs.E_b
    return (
        np.array([s_x_n1, s_y_n1, kappa_n1]),
        np.array([secant * s_x_n1, secant * s_y_n1, omega_n1])
    )
def plot_tau_s(ax, Eps_n, s_min, s_max, n_s, bs, **kw):
    """Plot the damaged stress surface ||tau||(s_x, s_y) on a 3D axis.

    Also draws the current elastic-limit circle both in the slip plane
    (z = 0) and lifted to the corresponding stress level.

    :param ax: matplotlib 3D axes
    :param Eps_n: state array (s_x, s_y, kappa) defining the current damage
    :param s_min: lower slip bound for both axes
    :param s_max: upper slip bound for both axes
    :param n_s: number of grid points per axis
    :param bs: bond-slip material model (provides E_b, omega_fn_)
    """
    # A complex step makes np.mgrid treat it as a point count, not a stride.
    n_s_i = complex(0,n_s)
    s_x_n1, s_y_n1 = np.mgrid[s_min:s_max:n_s_i, s_min:s_max:n_s_i]
    Eps_n1, Sig_n1 = get_tau_s(s_x_n1, s_y_n1, Eps_n, bs, **kw)
    s_x_n1, s_y_n1, _ = Eps_n1
    tau_x_n1, tau_y_n1, _ = Sig_n1
    tau_n1 = np.sqrt(tau_x_n1**2 + tau_y_n1**2)
    ax.plot_surface(s_x_n1, s_y_n1, tau_n1, alpha=0.2)
    phi=np.linspace(0,2*np.pi,100)
    _, _, kappa_n = Eps_n
    kappa_0 = bs.omega_fn_.kappa_0
    E_b = bs.E_b
    # Radius of the current elastic domain: at least the damage threshold.
    r = max(kappa_0, kappa_n)
    omega_n = bs.omega_fn_(r)
    # Stress magnitude at the elastic limit (damaged secant stiffness).
    f_t = (1-omega_n)*E_b*r
    s0_x, s0_y = r*np.sin(phi), r*np.cos(phi)
    ax.plot(s0_x, s0_y, 0, color='gray')
    ax.plot(s0_x, s0_y, f_t, color='gray')
    ax.set_xlabel(r'$s_x$ [mm]');ax.set_ylabel(r'$s_y$ [mm]');
    ax.set_zlabel(r'$\| \tau \| = \sqrt{\tau_x^2 + \tau_y^2}$ [MPa]');
class Explore(bu.Model):
    """Interactive explorer for the 2D bond-slip damage model.

    Drives the material law along piecewise-linear slip paths (s_x, s_y)
    and plots the resulting stress surface together with the loading
    trajectory.
    """
    name = 'Damage model explorer'

    bs = bu.Instance(MATS1D5BondSlipD, ())

    tree = ['bs']

    def __init__(self, *args, **kw):
        super(Explore, self).__init__(*args, **kw)
        self.reset_i()

    def reset_i(self):
        """Clear the loading history and recorded state/stress arrays."""
        self.s_x_0, self.s_y_0 = 0, 0
        self.t0 = 0
        self.Sig_record = []
        self.Eps_record = []
        # NOTE: a dead local `iter_record = []` was removed here; it was
        # never stored on self and never read.
        self.t_arr = []
        self.s_x_t, self.s_y_t = [], []
        self.Eps_n1 = np.zeros((3,), dtype=np.float_)

    def get_response_i(self, update_progress=lambda t: t):
        """Ramp the slip linearly to (s_x_1, s_y_1) and record the response.

        :param update_progress: optional callback receiving the pseudo-time
            of each completed increment (default: no-op).
        """
        n_steps = self.n_steps
        t1 = self.t0 + n_steps + 1
        # NOTE(review): n_steps is a Float trait while np.linspace expects an
        # integer sample count -- confirm upstream keeps it integral.
        ti_arr = np.linspace(self.t0, t1, n_steps + 1)
        si_x_t = np.linspace(self.s_x_0, self.s_x_1, n_steps + 1)
        si_y_t = np.linspace(self.s_y_0, self.s_y_1, n_steps + 1)
        for t_n1, s_x_n1, s_y_n1 in zip(ti_arr, si_x_t, si_y_t):
            self.Eps_n1, self.Sig_n1 = get_tau_s(s_x_n1, s_y_n1, self.Eps_n1, self.bs)
            self.Sig_record.append(self.Sig_n1)
            self.Eps_record.append(self.Eps_n1)
            update_progress(t_n1)
        self.t_arr = np.hstack([self.t_arr, ti_arr])
        self.s_x_t = np.hstack([self.s_x_t, si_x_t])
        self.s_y_t = np.hstack([self.s_y_t, si_y_t])
        # The end of this ramp becomes the start of the next one.
        self.t0 = t1
        self.s_x_0, self.s_y_0 = self.s_x_1, self.s_y_1
        return

    def plot_Sig_Eps(self, ax1, Sig_arr):
        """Overlay the recorded stress trajectory on the 3D axes."""
        tau_x, tau_y, kappa = Sig_arr.T
        tau = np.sqrt(tau_x ** 2 + tau_y ** 2)
        ax1.plot3D(self.s_x_t, self.s_y_t, tau, color='orange', lw=3)

    def subplots(self, fig):
        """Provide a single 3D axes for the interactive view."""
        ax_sxy = fig.add_subplot(1, 1, 1, projection='3d')
        return ax_sxy

    def update_plot(self, ax):
        """Advance the loading, then redraw the surface and trajectory."""
        self.get_response_i()
        Sig_arr = np.array(self.Sig_record, dtype=np.float_)
        Eps_arr = np.array(self.Eps_record, dtype=np.float_)
        plot_tau_s(ax, Eps_arr[-1, ...],
                   self.s_min, self.s_max, 500, self.bs)
        self.plot_Sig_Eps(ax, Sig_arr)
        ax.plot(self.s_x_t, self.s_y_t, 0, color='red')

    # Interactive traits (BC=True marks boundary-condition inputs).
    n_s = bu.Int(500, BC=True)
    s_x_1 = bu.Float(0, BC=True)
    s_y_1 = bu.Float(0, BC=True)
    n_steps = bu.Float(20, BC=True)
    s_min = bu.Float(-0.1, BC=True)
    s_max = bu.Float(0.1, BC=True)

    def run(self, update_progress=lambda t: t):
        """Run the response computation, reporting progress via the callback."""
        try:
            # BUG FIX: get_response_i() previously accepted no arguments, so
            # passing update_progress here raised TypeError on every run().
            self.get_response_i(update_progress)
        except ValueError:
            print('No convergence reached')
            return

    t = bu.Float(0)
    t_max = bu.Float(1)

    def reset(self):
        self.reset_i()

    ipw_view = bu.View(
        bu.Item('s_max'),
        bu.Item('n_s'),
        bu.Item('s_x_1', editor=bu.FloatRangeEditor(low_name='s_min', high_name='s_max')),
        bu.Item('s_y_1', editor=bu.FloatRangeEditor(low_name='s_min', high_name='s_max')),
        bu.Item('n_steps'),
    )
| 4,483 | 2,172 |
import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
# Validation schema for the hanime.tv video API response: we need the video
# name and visibility flag, plus the per-server stream list (height, URL, id).
_post_schema = validate.Schema({
    'hentai_video': validate.Schema({
        'name': validate.text,
        'is_visible': bool
    }),
    'videos_manifest': validate.Schema({
        'servers': validate.Schema([{
            'streams': validate.Schema([{
                'height': validate.text,
                'url': validate.text,
                'id': int
            }])
        }])
    }),
})
@pluginmatcher(re.compile(
    r"https?://hanime\.tv/videos/hentai/(?P<videoid>[a-zA-Z0-9_-]+)"
))
class hanimetv(Plugin):
    """Streamlink plugin resolving hanime.tv VOD pages to HLS streams."""

    def get_title(self):
        # self.title is populated by _get_streams() from the API response.
        return self.title

    def get_author(self):
        return "hanime"

    def get_category(self):
        return "VOD"

    def _get_streams(self):
        """Query the hanime.tv API and yield (quality, HLSStream) pairs."""
        videoid = self.match.group("videoid")
        api_call = "https://hw.hanime.tv/api/v8/video?id={0}".format(videoid)
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; rv:78.0) Gecko/20100101 Firefox/78.0",
            "Referer": self.url,
        }
        res = self.session.http.get(api_call, headers=headers)
        data = self.session.http.json(res, schema=_post_schema)
        if not data:
            self.logger.info("Not a valid url.")
            return
        self.title = data["hentai_video"]["name"]
        self.logger.info("Video Name: {0}".format(self.title))
        # Streams come from the first listed server only.
        for stream in data["videos_manifest"]["servers"][0]["streams"]:
            if (stream["url"]):
                # Direct manifest URL available.
                q = "{0}p".format(stream["height"])
                s = HLSStream(self.session, stream["url"])
                yield q, s
            else:
                # Fall back to the cached m3u8 endpoint keyed by stream id.
                q = "{0}p".format(stream["height"])
                u = "https://weeb.hanime.tv/weeb-api-cache/api/v8/m3u8s/{0}.m3u8".format(stream["id"])
                s = HLSStream(self.session, u)
                yield q, s

__plugin__ = hanimetv
| 2,042 | 667 |
from ansiblelint import AnsibleLintRule
import os
class PlaybookExtension(AnsibleLintRule):
    """Flag playbooks whose file name does not end in '.yml'."""
    id = 'E101'
    shortdesc = 'Playbooks should have the ".yml" extension'
    description = ''
    tags = ['playbook']
    done = []  # paths already reported, so each file is flagged only once

    def match(self, file, text):
        """Return True exactly once per playbook lacking the .yml extension."""
        if file['type'] != 'playbook':
            return False
        path = file['path']
        _, extension = os.path.splitext(path)
        if extension == ".yml" or path in self.done:
            return False
        self.done.append(path)
        return True
| 558 | 167 |
# -*- coding: utf-8 -*-
"""
(c) 2017 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <pingou@pingoured.fr>
"""
from __future__ import unicode_literals, absolute_import
import datetime
import unittest
import shutil
import sys
import os
import six
import json
import pygit2
from mock import patch, MagicMock
sys.path.insert(
0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
)
import pagure.lib.query
import tests
class PagureFlaskAppIndextests(tests.Modeltests):
    """ Tests for the index page of flask app controller of pagure """

    def test_index_logged_out(self):
        """ Test the index endpoint when logged out. """
        # With no projects yet, the index shows an empty "All Projects" badge.
        output = self.app.get("/")
        self.assertEqual(output.status_code, 200)
        output_text = output.get_data(as_text=True)
        self.assertIn("<title>Home - Pagure</title>", output_text)
        self.assertIn(
            '<h3 class="m-0 font-weight-bold">All Projects '
            '<span class="badge badge-secondary">0</span></h3>',
            output_text,
        )
        tests.create_projects(self.session)
        # A non-numeric ?page= value must not break the view; the badge now
        # reflects the three projects created above.
        output = self.app.get("/?page=abc")
        self.assertEqual(output.status_code, 200)
        self.assertIn(
            '<h3 class="m-0 font-weight-bold">All Projects '
            '<span class="badge badge-secondary">3</span></h3>',
            output.get_data(as_text=True),
        )

    def test_index_logged_in(self):
        """
        Test the index endpoint when logged in.
        It should redirect to the userdash.
        """
        tests.create_projects(self.session)
        # Add a 3rd project with a long description
        item = pagure.lib.model.Project(
            user_id=2,  # foo
            name="test3",
            description="test project #3 with a very long description",
            hook_token="aaabbbeeefff",
        )
        self.session.add(item)
        self.session.commit()
        user = tests.FakeUser(username="foo")
        with tests.user_set(self.app.application, user):
            output = self.app.get("/", follow_redirects=True)
            self.assertEqual(output.status_code, 200)
            output_text = output.get_data(as_text=True)
            # The dashboard shows only the user's own project count...
            self.assertIn(
                '<span class="btn btn-outline-secondary disabled '
                'opacity-100 border-0 ml-auto font-weight-bold">'
                "1 Projects</span>\n",
                output_text,
            )
            # ...not the global "All Projects" listing.
            self.assertNotIn(
                '<h3 class="m-0 font-weight-bold">All Projects '
                '<span class="badge badge-secondary">3</span></h3>',
                output_text,
            )
if __name__ == "__main__":
    # Verbose mode lists each test method as it runs.
    unittest.main(verbosity=2)
| 2,712 | 850 |
'''
***************************************************************************************
© 2019 Arizona Board of Regents on behalf of the University of Arizona with rights
granted for USDOT OSADP distribution with the Apache 2.0 open source license.
***************************************************************************************
M_MessageDistributor.py
Created by: Niraj Vasant Altekar
University of Arizona
College of Engineering
This code was developed under the supervision of Professor Larry Head
in the Systems and Industrial Engineering Department.
***************************************************************************************
'''
# Toggle for running against the simulation config paths instead of the
# on-device /nojournal paths.
TESTING = False

import json
import socket
import sys
from MessageDistributor import MessageDistributor
import datetime

# Master config: network addresses and port assignments for all components.
if TESTING:
    configFile = open("../../../config/simulation-tools/nojournal/bin/mmitss-phase3-master-config.json", 'r')
else:
    configFile = open("/nojournal/bin/mmitss-phase3-master-config.json", 'r')
masterConfig = json.load(configFile)
configFile.close()

# Component-specific config for the message distributor itself.
if TESTING:
    configFile = open("../../../config/simulation-tools/nojournal/bin/mmitss-message-distributor-config.json", 'r')
else:
    configFile = open("/nojournal/bin/mmitss-message-distributor-config.json", 'r')
config = json.load(configFile)
configFile.close()

msgDist = MessageDistributor(config)

# UDP socket on which all inbound messages arrive.
receivingSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if TESTING:
    receivingSocket.bind(("127.0.0.1", masterConfig["PortNumber"]["MessageDistributor"]))
else:
    receivingSocket.bind((masterConfig["MessageDistributorIP"], masterConfig["PortNumber"]["MessageDistributor"]))

# Optional CSV logging of every raw BSM received, one timestamped file per run.
rawBsmLogging = config["raw_bsm_logging"]
if rawBsmLogging == True:
    logfile = open(("rawBsmLog_" + ('{:%m%d%Y_%H%M%S}'.format(datetime.datetime.now())) + ".csv"), 'w')
    logfile.write("timestamp,secMark,temporaryId,latitude,longitude,elevation,speed,heading,type,length,width\n")
# Main receive loop: timestamp each inbound message, hand it to the
# infrastructure-side distributor, then fan BSM/MAP/SSM messages out to
# the registered clients.
while True:
    data, addr = receivingSocket.recvfrom(40960)
    msg = json.loads(data.decode())
    msg = msgDist.timestampMessage(msg)
    messageType = msgDist.distributeMsgToInfrastructureAndGetType(msg)
    if messageType == "BSM":
        msgDist.distributeBsmToClients(msg)
        if rawBsmLogging == True:
            # One CSV row per BSM, flattened from the nested JSON structure.
            logfile.write(str(msg["Timestamp_posix"]) + "," +
                str(msg["BasicVehicle"]["secMark_Second"]) + "," +
                str(msg["BasicVehicle"]["temporaryID"]) + "," +
                str(msg["BasicVehicle"]["position"]["latitude_DecimalDegree"]) + "," +
                str(msg["BasicVehicle"]["position"]["longitude_DecimalDegree"]) + "," +
                str(msg["BasicVehicle"]["position"]["elevation_Meter"]) + "," +
                str(msg["BasicVehicle"]["speed_MeterPerSecond"]) + "," +
                str(msg["BasicVehicle"]["heading_Degree"]) + "," +
                str(msg["BasicVehicle"]["type"]) + "," +
                str(msg["BasicVehicle"]["size"]["length_cm"]) + "," +
                str(msg["BasicVehicle"]["size"]["width_cm"]) + "\n"
                )
    elif messageType == "MAP":
        msgDist.distributeMapToClients(msg)
    elif messageType == "SSM":
        msgDist.distributeSsmToClients(msg)

# NOTE(review): this cleanup is unreachable -- the `while True` loop above
# never breaks, so the log file and socket are only released on process exit.
if rawBsmLogging == True:
    logfile.close()
receivingSocket.close()
import wx
from cgui import SimplePanel
from gui.pref.prefcontrols import mark_pref,get_pref
class NoobContactLayoutPanel(wx.Panel):
    """Preference panel offering a grid of preset buddy-list layouts.

    Six ``LayoutItem`` thumbnails are shown in a 2x3 grid; clicking one
    selects it and writes its preference dictionary into the
    ``buddylist.layout.*`` preferences — through ``self.links`` when a live
    link exists for the key, otherwise via ``mark_pref``.
    """

    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
        self.links = parent.links

        self.Sizer = wx.BoxSizer(wx.VERTICAL)
        self.grid = wx.FlexGridSizer(2, 3)
        self.grid.AddGrowableRow(1, 1)
        self.Sizer.Add(self.grid, 0,
                       wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_CENTER_HORIZONTAL)

        from gui import skin
        icons = skin.get('AppDefaults.ez.icons')

        # Each preset pairs a thumbnail icon with the pref values it implies.
        items = self.items = [
            LayoutItem(self, icons.one, dict(show_extra=True,
                                             extra_info='both',
                                             show_buddy_icon=True,
                                             buddy_icon_pos='left',
                                             buddy_icon_size=32,
                                             show_status_icon=True,
                                             status_icon_pos='bleft',
                                             show_service_icon=True,
                                             service_icon_pos='bright')),
            LayoutItem(self, icons.two, dict(show_extra=True,
                                             extra_info='both',
                                             show_buddy_icon=True,
                                             buddy_icon_pos='right',
                                             buddy_icon_size=32,
                                             show_status_icon=True,
                                             status_icon_pos='bleft',
                                             show_service_icon=True,
                                             service_icon_pos='bright')),
            LayoutItem(self, icons.three, dict(show_extra=True,
                                               extra_info='both',
                                               show_buddy_icon=True,
                                               buddy_icon_pos='right',
                                               buddy_icon_size=32,
                                               show_status_icon=True,
                                               status_icon_pos='left',
                                               show_service_icon=True,
                                               service_icon_pos='bright')),
            LayoutItem(self, icons.four, dict(show_extra=True,
                                              extra_info='idle',
                                              show_buddy_icon=False,
                                              show_status_icon=True,
                                              status_icon_pos='right',
                                              show_service_icon=True,
                                              service_icon_pos='left')),
            LayoutItem(self, icons.five, dict(show_extra=True,
                                              extra_info='idle',
                                              show_buddy_icon=False,
                                              show_status_icon=True,
                                              status_icon_pos='left',
                                              show_service_icon=True,
                                              service_icon_pos='right')),
            LayoutItem(self, icons.six, dict(show_extra=True,
                                             extra_info='idle',
                                             show_buddy_icon=False,
                                             show_status_icon=True,
                                             status_icon_pos='left',
                                             show_service_icon=False,
                                             )),
        ]
        self.grid.AddMany([(item, 0) for item in items])

        # Pre-select the preset whose prefs all match the current settings,
        # if any.
        self.selection = None
        for item in items:
            if all(item.prefdict[key] == get_pref('buddylist.layout.%s' % key)
                   for key in item.prefdict):
                self.SetSelection(item)
                break

    def SetSelection(self, item):
        """Visually select *item* and apply its layout preferences."""
        oldselection = self.selection
        if oldselection:
            oldselection.selected = False
            oldselection.Refresh()
        self.selection = item
        item.selected = True
        self.Refresh()

        from peak.events import trellis

        @trellis.modifier
        def update():
            # Apply all pref changes inside one trellis modifier so observers
            # see them as a single atomic update.
            links = self.links
            for key, value in item.prefdict.items():
                if key in links:
                    links[key].value = value
                else:
                    mark_pref('buddylist.layout.%s' % key, value)
        update()
class LayoutItem(SimplePanel):
    """A clickable thumbnail representing one preset buddy-list layout.

    Holds the ``prefdict`` of preference values this preset implies and
    paints itself with hover / pressed / selected highlighting.  Clicking it
    asks the parent panel to make it the current selection.
    """

    def __init__(self, parent, bitmap, prefdict):
        SimplePanel.__init__(self, parent, wx.FULL_REPAINT_ON_RESIZE)
        self.prefdict = prefdict
        self.bitmap = bitmap
        # Leave an 8px margin on every side of the thumbnail bitmap.
        self.MinSize = self.bitmap.Size + (16, 16)
        self.selected = False

        self.Bind(wx.EVT_PAINT, self.OnPaint)
        # Repaint on mouse movement so the hover highlight tracks the cursor.
        self.Bind(wx.EVT_MOTION, lambda e: self.Refresh())
        self.Bind(wx.EVT_LEAVE_WINDOW, self.OnMouseLeave)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)

    def OnPaint(self, event):
        dc = wx.AutoBufferedPaintDC(self)
        rect = wx.RectS(self.Size)

        # White background.
        dc.Brush = wx.WHITE_BRUSH
        dc.Pen = wx.TRANSPARENT_PEN
        dc.DrawRectangleRect(rect)

        hovering = rect.Contains(self.ScreenToClient(wx.GetMousePosition()))
        if hovering:
            # Hover: rounded light-blue highlight with a blue border.
            dc.Brush = wx.Brush(wx.Color(238, 239, 255))
            dc.Pen = wx.Pen(wx.Color(128, 128, 255))
            dc.DrawRoundedRectangleRect(rect, 4)
            if wx.GetMouseState().LeftDown():
                # Pressed: darker inset fill inside the hover highlight.
                inner = rect.Deflate(5, 5)
                dc.Pen = wx.TRANSPARENT_PEN
                dc.Brush = wx.Brush(wx.Color(200, 200, 255))
                dc.DrawRectangleRect(inner)
        if self.selected:
            # Selected: solid blue inset behind the bitmap.
            inner = rect.Deflate(4, 4)
            dc.Pen = wx.TRANSPARENT_PEN
            dc.Brush = wx.Brush(wx.Color(128, 128, 255))
            dc.DrawRectangleRect(inner)

        # Thumbnail is drawn last, on top of any highlight, with transparency.
        dc.DrawBitmap(self.bitmap, 8, 8, True)

    def OnLeftUp(self, event):
        self.Parent.SetSelection(self)

    def OnLeftDown(self, event):
        self.Refresh()

    def OnMouseLeave(self, event):
        self.Refresh()
| 7,281 | 1,961 |
def selection_7():
    """Plot and save the stacked dijet-mass histogram for selection 7.

    MadAnalysis5-generated code: it builds a "stack" by drawing step
    histograms of progressively larger running sums of the per-sample
    weights (full background total first, signal alone last), then writes
    image files under the job output directories.
    """
    # Library import
    import numpy
    import matplotlib
    import matplotlib.pyplot as plt
    import matplotlib.gridspec as gridspec
    # Library versions (recorded by the generator; currently unused).
    matplotlib_version = matplotlib.__version__
    numpy_version = numpy.__version__
    # Histogram binning: 160 uniform bins over [120, 2000] GeV.
    xBinning = numpy.linspace(120.0,2000.0,161,endpoint=True)
    # Creating data sequence: middle of each bin (one entry per bin).
    xData = numpy.array([125.875,137.625,149.375,161.125,172.875,184.625,196.375,208.125,219.875,231.625,243.375,255.125,266.875,278.625,290.375,302.125,313.875,325.625,337.375,349.125,360.875,372.625,384.375,396.125,407.875,419.625,431.375,443.125,454.875,466.625,478.375,490.125,501.875,513.625,525.375,537.125,548.875,560.625,572.375,584.125,595.875,607.625,619.375,631.125,642.875,654.625,666.375,678.125,689.875,701.625,713.375,725.125,736.875,748.625,760.375,772.125,783.875,795.625,807.375,819.125,830.875,842.625,854.375,866.125,877.875,889.625,901.375,913.125,924.875,936.625,948.375,960.125,971.875,983.625,995.375,1007.125,1018.875,1030.625,1042.375,1054.125,1065.875,1077.625,1089.375,1101.125,1112.875,1124.625,1136.375,1148.125,1159.875,1171.625,1183.375,1195.125,1206.875,1218.625,1230.375,1242.125,1253.875,1265.625,1277.375,1289.125,1300.875,1312.625,1324.375,1336.125,1347.875,1359.625,1371.375,1383.125,1394.875,1406.625,1418.375,1430.125,1441.875,1453.625,1465.375,1477.125,1488.875,1500.625,1512.375,1524.125,1535.875,1547.625,1559.375,1571.125,1582.875,1594.625,1606.375,1618.125,1629.875,1641.625,1653.375,1665.125,1676.875,1688.625,1700.375,1712.125,1723.875,1735.625,1747.375,1759.125,1770.875,1782.625,1794.375,1806.125,1817.875,1829.625,1841.375,1853.125,1864.875,1876.625,1888.375,1900.125,1911.875,1923.625,1935.375,1947.125,1958.875,1970.625,1982.375,1994.125])
    # Per-sample bin weights (one value per bin, generator-produced data).
    # Creating weights for histo: y8_M_0
    y8_M_0_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.78092644877,4.19643634479,4.34791621285,4.6672559347,4.63450396322,4.58946800245,4.58946800245,4.72047988834,4.83920778492,5.04391160662,4.98249966011,5.07666357809,4.90061973143,5.29774338553,5.28136739979,5.13397952817,5.10532355313,5.01115963515,5.07666357809,5.39600329994,5.51063520009,5.44513125715,5.54339117156,5.21176746041,5.38371931064,5.33459135343,5.64574308241,5.56795515016,5.73171900752,5.21586345684,5.3427793463,5.50654320365,5.53520317869,5.42465927498,5.39190730351,5.39600329994,5.76037498256,5.37143932133,5.44513125715,5.38781530707,5.39600329994,5.62527110024,5.66211906814,5.73990700039,5.53929517513,5.28955539266,5.51473119652,5.17082749607,5.55157916443,5.72762301109,5.5843311359,5.28955539266,5.13397952817,5.36325132847,4.98659565654,5.22405144971,5.10941554956,5.20357946754,5.10941554956,5.05619559592,5.16673549964,5.20767546398,5.33459135343,4.85967576709,4.99068765298,5.00706363871,4.72047988834,4.96612367437,4.63859595966,4.78598383128,4.80645581345,4.79826782058,4.88424374569,4.79417182415,4.83101979205,4.52805605594,4.4871160916,4.63040796679,4.58946800245,4.6549719454,4.45845611657,4.43798813439,4.31926023781,4.42980014153,4.56899602028,4.44208013083,4.26603628417,4.2332843127,4.16368437332,4.44208013083,4.27831627347,4.33154022711,4.02448449457,4.01220050527,3.99992011596,3.92213258372,3.97535533736,3.75836912636,3.90575619798,3.88528581581,3.94260296589,3.72152235846,3.64782882264,3.75427512993,3.7993098907,3.40627783304,3.49225375815])
    # Creating weights for histo: y8_M_1
    y8_M_1_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0121240822392,0.0,0.0,0.0,0.0,0.0121313846429,0.012170493784,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0121753353338,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    # Creating weights for histo: y8_M_2
    y8_M_2_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0201069897442,0.0200520625826,0.0200902467322,0.0301386860576,0.0,0.0401901996181,0.0301534291639,0.0401373219466,0.0401354749263,0.0,0.0,0.0100187051905,0.0,0.0200525088425,0.0,0.0100340928946,0.010029956726,0.0100340928946,0.0201069236316,0.010030973207,0.0,0.0100187051905,0.0,0.020059913452,0.0100546539083,0.0,0.0200717021522,0.0100355638996,0.0301035100306,0.0301212943158,0.0,0.020081296741,0.0100329193962,0.0100459435753,0.0100702895346,0.0200800364699,0.0200909202541,0.0200976843979,0.0100187051905,0.0,0.0,0.0,0.0,0.0,0.0200856932279,0.0301465658512,0.0200554012681,0.0100262833455,0.0,0.0301023365322,0.0301327565851,0.0200707931042,0.0,0.0,0.0,0.0100602900062,0.0,0.0200628802542,0.0,0.0,0.0,0.0100568562838,0.0,0.010018415948,0.0200878294908,0.0100609841883,0.0100584801742,0.010036972924,0.0100407289452,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0200899492255,0.0100546539083,0.0,0.0100329193962,0.0,0.0,0.0100696697291,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0200968290664,0.0,0.0,0.0,0.0200913995704,0.0100546539083,0.0100407289452,0.0,0.0,0.0,0.0100229529241,0.0100271593373,0.0200494552675])
    # Creating weights for histo: y8_M_3
    y8_M_3_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.016467068046,0.033020819304,0.0660267939506,0.022005470001,0.0605417458234,0.0549595614727,0.0330115119394,0.0385177017189,0.0495202173402,0.0274719724628,0.0439881245678,0.0329905937288,0.0550531226281,0.0330729624838,0.0384914493443,0.0329847558053,0.0770149280482,0.0769908775905,0.0550048185839,0.0494478222126,0.0384611993937,0.0550147718983,0.0495028701351,0.0660128999362,0.0494861323166,0.0494600911961,0.0604524097483,0.0549908839438,0.054978127451,0.0440215595791,0.044014409443,0.0440062842884,0.0220022768152,0.0495135140877,0.0605064014008,0.0495056326877,0.0549466424769,0.0605267142873,0.0880243500509,0.0769583363463,0.0384865458135,0.0825579491536,0.0714909198046,0.0494663881909,0.0385217805465,0.093498672974,0.0494654131723,0.027476477861,0.0385032308185,0.0275048671512,0.0440315535192,0.0605509678738,0.0385348864209,0.0440292378502,0.0660262251897,0.0109989608662,0.0495139609712,0.033022789654,0.0440291565986,0.0660339034608,0.0495150578671,0.0274811335746,0.0440349254584,0.0330179226864,0.0494890167465,0.0440275721935,0.0660108280217,0.0220213424905,0.0165016040157,0.0274971766924,0.0440586509099,0.0329953266314,0.0274735812434,0.0604587473689,0.0659462736683,0.0330062224637,0.0385075696511,0.0385122172395,0.0549511113119,0.0384953697315,0.0330256943968,0.0385095725017,0.0550025435406,0.0329908252958,0.0439449799968,0.0440170501182,0.0164954248356,0.0164961114112,0.00551630153443,0.0384828529308,0.0275246153395,0.0110027024999,0.0495168047753,0.0110260298188,0.0109935170126,0.0329819810649,0.0219834630197,0.0219787341797,0.0165017908942,0.021995618251,0.0165183743348,0.0110160968173,0.027530579203,0.0110268870226,0.0110078538479,0.0275157670462,0.00549901933056])
    # Creating weights for histo: y8_M_4
    y8_M_4_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00197027550081,0.00394952315682,0.00394805331706,0.00197208323937,0.00394731619262,0.00591465602221,0.0049318955883,0.00789053831836,0.00295908284024,0.00690804643985,0.00492704555802,0.00789948081221,0.00887485797688,0.00690768569379,0.00592003915499,0.00690746924616,0.00887773592873,0.00987371970967,0.0118414852196,0.00691111278131,0.00986770326714,0.0118350198487,0.0138150146498,0.00887579992491,0.0167695168168,0.010855918983,0.00690918479406,0.0108539148383,0.0177644183596,0.020733422518,0.0167638170292,0.0138102888765,0.0138146498955,0.0167698214468,0.0118430484525,0.0236884617958,0.0167671719675,0.0128264021133,0.0236846138379,0.0207369417962,0.0384858120008,0.0246552251464,0.019740096233,0.0226995726873,0.0286270632524,0.0227092967976,0.0315806274797,0.0295947725593,0.0187597087065,0.0404452843598,0.0345472547224,0.0375002056814,0.0246604319144,0.0217110002336,0.0286303580664,0.0315931173097,0.025667294193,0.035510599002,0.037488389244,0.0236896803158,0.0296010094577,0.0355199463331,0.0365179222339,0.0355182428101,0.0305913575837,0.0236793028543,0.0315771522927,0.0325532148749,0.0463906599127,0.0305928967668,0.0246773148297,0.0296191028765,0.0463764705679,0.0305930891647,0.0414399413969,0.0256571492123,0.0345520326035,0.0404506955506,0.031575536952,0.0424380054801,0.0404769899297,0.0434100557645,0.0286233796344,0.0266487839992,0.0296034024066,0.0375073364284,0.0355257543446,0.0246621955618,0.0325592032594,0.0305981436178,0.0315898305124,0.0345392982678,0.0236820525409,0.0286232834354,0.0365091601131,0.0355226399037,0.0305958508762,0.0246805214613,0.0345343841049,0.0325702020057,0.0404531806901,0.0236774911075,0.0325773407693,0.0197404609873,0.0266355245775,0.0226978972223,0.0365152527131])
    # Creating weights for histo: y8_M_5
    y8_M_5_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000251938705986,0.0,0.000252305648909,0.0002524496493,0.000252082346275,0.000505256118684,0.000251550317047,0.0,0.00100821879408,0.000252565441778,0.000504063784247,0.00100804954661,0.00176381272972,0.0012601130476,0.000504233431831,0.000503640065399,0.000503908940815,0.0010087493429,0.0005055806097,0.000757268405308,0.00176452332904,0.000504453493556,0.000504129402652,0.00277454102769,0.00176529874653,0.00100816998039,0.00151229698237,0.00226877236524,0.00151225977193,0.00125991859306,0.00176589811465,0.0017634390249,0.00226803415819,0.0030252505489,0.00227000351057,0.00100852648038,0.00126088086296,0.00151354973375,0.00151175403008,0.00403113468814,0.00251986759464,0.00353011215693,0.00302553582892,0.00302505929526,0.00378131901764,0.00226734436471,0.00151308280278,0.0015113947293,0.00353056508397,0.00579873448009,0.00403280715725,0.00352975885784,0.00403183088342,0.00428482584567,0.00504358666959,0.0035289166216,0.00428863091295,0.00327555475643,0.00428580612063,0.00554881237974,0.00579692597282,0.0025199840273,0.00504172214661,0.00453653644768,0.00428524196239,0.00605145336491,0.00630244776603,0.00302364129753,0.00378220246544,0.00378109975614,0.00655375025356,0.00403377942996,0.00579619776857,0.00554580753691,0.00655393030406,0.00604913671511,0.00529495317508,0.0047889152371,0.00479140393515,0.00579921061364,0.00857274455886,0.00806881121119,0.00756044061894,0.00781866104687,0.00806541425839,0.00831917743612,0.00755932830694,0.00881756522831,0.00781524408846,0.00831736892885,0.00957568187154,0.00781375567097,0.00604803240537,0.0100857049273,0.00831910941704,0.0133673373885,0.0131095930941,0.0141151511418,0.00957541379635,0.0146184042987,0.00882085415082,0.00957963498034,0.0108400685178,0.0103367473419,0.00932927675767,0.0136076367897])
    # Creating weights for histo: y8_M_6
    y8_M_6_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000571674400031,0.0,0.000286439425776,0.0,0.00114685418586,0.0,0.000570006782515,0.0,0.0002863152617,0.000284909568403,0.000574595954751,0.000286450322624,0.000286764731657,0.0,0.000574212165791,0.000284852384948,0.0002863152617,0.000287128726344,0.0,0.000286439425776,0.000287128726344,0.0,0.000570397169566,0.0,0.0,0.0,0.0002863152617,0.000573312626051,0.00114767194926,0.000855710321202,0.0,0.00114175666071,0.0,0.000287276383623,0.000573726606275,0.000284468995872,0.000284540475191,0.00114194260691,0.0002863152617,0.00085747571043,0.000286183799735,0.000860014575872,0.00114628535044,0.000287772740014,0.000574099498388,0.00171888568258,0.0,0.000573215054281,0.000859751651942,0.000855365820875,0.000287128726344,0.00114753198976,0.000861860741729,0.00114346816553,0.000858949384059,0.00143494383419,0.000858809024669,0.00143481187237,0.00114442988728,0.000859505523154,0.000859800337856,0.00114721908029,0.000287772740014,0.000286823114765,0.0017229295126,0.000572128768571,0.00171794695418,0.00114786489344,0.000574257552659,0.00057395194108,0.000571234027442,0.000287772740014,0.00142779090371,0.000858079435757,0.00315403945603,0.00200631252267,0.00143411307455,0.00171937953969,0.00114571851444,0.00171905863254,0.00114306128325,0.00142758196417,0.00114167868327,0.00257856595517,0.00257476605458,0.00171679028883,0.00286293168092,0.000573893557972,0.00171545767439,0.00171991238553,0.00171669531631,0.00171834183993,0.00401011677105,0.00171878471179])
    # Creating weights for histo: y8_M_7
    y8_M_7_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.16142075517e-05,0.0,0.0,0.0,0.0,0.0,0.0,2.15983699309e-05,0.0,0.0,2.16751901451e-05,0.0,6.48229754557e-05,2.15259544109e-05,2.1608621008e-05,0.0,0.0,4.31871677194e-05,4.31500777633e-05,0.0,6.48842053155e-05,0.0,4.32972641202e-05,4.32010816917e-05,2.1608621008e-05,0.0,4.31777380696e-05,0.0,4.32343997878e-05,2.16142075517e-05,2.1655333398e-05,4.32154985786e-05,0.0,6.48049962567e-05,6.4709107195e-05,2.15933994577e-05,0.000108047903684,6.47796409759e-05,0.0,2.15259544109e-05,0.0,4.32198571723e-05,8.64498145473e-05,0.0,6.47386953407e-05,4.32177616945e-05,6.47944350488e-05,0.000129524958026,4.32275685304e-05,4.31157957474e-05,4.31698590732e-05,2.15789867617e-05,6.48095224886e-05,2.15259544109e-05,2.15774067715e-05,4.31813003817e-05,6.47836642932e-05,4.32141155632e-05,0.0,4.32269817966e-05,6.47637991641e-05,2.15912704523e-05,4.3124303387e-05,6.48629571711e-05,4.31932026953e-05,6.48075108299e-05,6.48050381662e-05,6.47414194618e-05,8.64146943402e-05,4.31736728427e-05,6.47325765457e-05,4.31823481206e-05,4.32195218958e-05,8.63922308188e-05,8.64066477057e-05,8.63133570364e-05,0.000108139434152,0.000151167974206,4.31120238874e-05,8.64006965489e-05,0.000107929760649,0.00015134743092,0.000108003270008,0.000108111270931,6.48057087191e-05,6.48629571711e-05])
    # Creating weights for histo: y8_M_8
    y8_M_8_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.83974019316e-05,0.0,0.0,0.0,0.0,0.0,0.0,2.83974019316e-05,0.0,0.0,2.83974019316e-05,0.0,0.0,0.0,0.0,0.0,0.0,2.84292668315e-05,0.0,0.0,0.0,2.83684918849e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.84489114087e-05,0.0,0.0,2.83974019316e-05,0.0,2.83498718548e-05,0.0,0.0,2.67506280594e-05,0.0,0.0,0.0,2.82532080623e-05,0.0,0.0,2.84292668315e-05,0.0,0.0,2.84080928579e-05,5.67658938165e-05,2.84080928579e-05])
    # Creating weights for histo: y8_M_9 (all bins empty for this sample)
    y8_M_9_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    # Creating weights for histo: y8_M_10
    y8_M_10_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0521138287,0.0,0.0,0.0,0.0,0.0,1.0529581672,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.05462838872,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    # Creating weights for histo: y8_M_11
    y8_M_11_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.230020174047,0.230597155343,0.230551273514,0.0,0.0,0.23018625551,0.460704673945,0.461024770794,0.690790519723,0.229982515595,0.92256484756,0.691531007872,0.459685974368,0.229952465686,0.921662581772,0.230020174047,0.23018625551,0.461233045094,0.0,0.0,0.0,0.691049133384,0.229982515595,0.229952465686,0.0,0.230128538167,0.230465312331,0.461073957345,0.460063327437,0.460617444671,0.690580324073,0.229703305066,0.0,0.229479006709,0.690811654569,0.459964954335,0.230673163934,0.230114128045,0.0,0.229932022526,0.23042826871,0.230619673561,0.0,0.0,0.230673163934,0.460930624662,0.230673163934,0.0,0.0,0.0,0.230752246685,0.0,0.230645227511,0.0,0.230020174047,0.460488714246,0.460933698821,0.23058774073,0.230551273514,0.0,0.0,0.229952465686,0.0,0.0,0.230764043772,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.23018625551,0.0,0.0,0.0,0.0,0.230645227511,0.0,0.230645227511,0.0,0.0,0.0,0.0,0.0,0.0,0.23042826871,0.0,0.229952465686,0.0,0.0,0.0,0.0,0.230020174047,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    # Creating weights for histo: y8_M_12
    y8_M_12_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.1384016717,0.138443216656,0.193985782999,0.0830623678981,0.193914271821,0.0553889654108,0.249327202516,0.0553995439874,0.0,0.110761389775,0.0830144373293,0.166014641665,0.13852719132,0.138417289526,0.0554480900371,0.276708559354,0.0830847175454,0.13863259241,0.110806050602,0.193944199576,0.221602676653,0.138488839171,0.166332229768,0.110769506428,0.0276929401566,0.0831032973727,0.221554361409,0.0553778482885,0.138307541602,0.138455103129,0.138503572244,0.304717206551,0.166303263702,0.165999177709,0.0830418646933,0.249127940601,0.138598702498,0.138450871698,0.138508919233,0.22168095812,0.0554508597007,0.0830207844753,0.13835108687,0.166090538143,0.110701880473,0.166112079972,0.138369743633,0.166070535017,0.0,0.138488146755,0.138542616808,0.16611227231,0.0829216535957,0.0554893657195,0.0831335713355,0.166173281846,0.16598894534,0.0831337636732,0.0553688468815,0.0554876731473,0.138561735181,0.138537539091,0.166128851824,0.0552076293743,0.0830163607069,0.193842375968,0.166046800537,0.083064522081,0.0277373240172,0.0830550590634,0.0830507506977,0.193846491996,0.0,0.055405237185,0.138359819005,0.0830155913558,0.138498263721,0.0277244643148,0.0553541522769,0.055427009819,0.027640512731,0.027640512731,0.0276622122767,0.0554012365597,0.0553334567344,0.0553302254601,0.0,0.0,0.027683115544,0.0552821025535,0.055481095196,0.0277205483181,0.110737001347,0.055328263615,0.0277631934454,0.0,0.0554069297572,0.0,0.11093630173,0.0,0.0554069297572,0.110733231527,0.0,0.083088948976,0.0,0.0])
    # Creating weights for histo: y8_M_13
    y8_M_13_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0100450214151,0.0202018191175,0.020154633721,0.0201706251705,0.0201565514812,0.0100975170653,0.0100829700048,0.0201646655487,0.0302947299171,0.0100796988885,0.0100533236174,0.0503771563265,0.0202008723751,0.0403626188016,0.0,0.0100953565507,0.0100671484834,0.0302972606322,0.0705522117657,0.0302903906813,0.0403309818277,0.0,0.0,0.0201751586099,0.0302794424555,0.02018709606,0.0,0.0504437620791,0.0302832415627,0.0100671484834,0.020144614031,0.0,0.0605195101389,0.0302102088859,0.0201372160891,0.0302955856265,0.0504043630316,0.0504252520522,0.0504063475493,0.0201157626644,0.0201972128518,0.0202033970214,0.0100921886051,0.0504656221175,0.0302368026361,0.0100996957865,0.0403580307424,0.0100796988885,0.0302258301349,0.0201121759674,0.0302450805629,0.0201806145162,0.0403226614188,0.0302305820533,0.0201734168467,0.0403646943521,0.0201509438533,0.0201718874936,0.0201687620301,0.0502923379211,0.0302623282667,0.0503909447793,0.0302358741003,0.0100712388959,0.0100953565507,0.0201602656243,0.0100533236174,0.0201530679548,0.0100671484834,0.0302263702635,0.0,0.0201717114967,0.0201792186781,0.0,0.0100796988885,0.0403124111121,0.0201975648457,0.0403432834095,0.0201530679548,0.0100712388959,0.0,0.0201997435669,0.0100450214151,0.0,0.0302386839831,0.0201514657754,0.0504735844635,0.0302405653301,0.0201426537888,0.0,0.0403559794673,0.0302626923983,0.0504655614288,0.0503821024484,0.0201326462365,0.0100592771703,0.0201847413418,0.010103349241,0.0302577523452,0.0302652049069,0.0201010760201,0.0,0.0100733690662,0.0201624868275,0.0100953565507,0.0302478176193])
    # Creating weights for histo: y8_M_14
    y8_M_14_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00283355094161,0.0,0.0,0.0028258606674,0.0,0.0,0.00849404406756,0.0,0.0,0.00283301960937,0.00566026189448,0.00848628376984,0.00566853005501,0.00849465581141,0.00282513619339,0.00565291712087,0.00848269410312,0.0,0.00849725668462,0.0112963041191,0.00566429016999,0.011320377586,0.00848864225398,0.00849404406756,0.00566748354982,0.00283250443641,0.00848682241222,0.0113195465378,0.00848361364261,0.0056608043843,0.022631921437,0.00565841896805,0.00283102971052,0.011334166831,0.00848161297091,0.00849407099968,0.0141481000323,0.0113287688648,0.0282809334006,0.0,0.0113106320064,0.00283301960937,0.0112983047908,0.00565440992976,0.0084840176244,0.00847707683258,0.0113231939162,0.0113078195237,0.0169835481493,0.00566144306027,0.00283201542608,0.0169662769662,0.00282930990235,0.0113075271178,0.00565759946214,0.0113145871804,0.0084839753025,0.0113278685626,0.0169713017301,0.0113231708315,0.00848760729112,0.0113201275021,0.0056624587859,0.00566242031144,0.00283198118381,0.00566600997816,0.0141461686146,0.0056529363581,0.00849297063025,0.005657891868,0.00283622837898,0.0113069038316,0.00565444840421,0.0141587343719,0.0141420480004,0.0113181922369,0.00566055430034,0.00849807234308,0.00566200863477,0.005657891868,0.00849126621186,0.0113077156426,0.00283250443641,0.00282930990235,0.0113202198408,0.0,0.00564705746127,0.0084891001,0.0,0.00565548336707,0.00565157051492,0.0169864991401,0.0028258606674,0.00282544052634,0.00282513619339,0.00848646459978,0.00565943084623,0.00283102971052,0.00282347986807,0.00564901581106,0.00848026636496,0.0084916663462,0.0028309785395,0.0169567814705])
    # Creating weights for histo: y8_M_15
    y8_M_15_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00151881882101,0.0,0.0,0.0,0.0,0.00152260679112,0.0,0.00152162936482,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00303299398069,0.0,0.0,0.00151115660254,0.0,0.0,0.0,0.0,0.00151448717969,0.00152495994197,0.0,0.00151265406217,0.00150849615926,0.00153629548684,0.0,0.0,0.0,0.0,0.00151115660254,0.0,0.0030566070399,0.0,0.0,0.0,0.0,0.00153821488261,0.0,0.0,0.0,0.00152162936482,0.00152162936482,0.0,0.0,0.0,0.0,0.0,0.0,0.00304287815979,0.00150849615926,0.0,0.0030419007335,0.0,0.0,0.0,0.0,0.0,0.00152449663954,0.0,0.0015356572641,0.0,0.0,0.00304599008656,0.0,0.0,0.0,0.00153153127233,0.0])
    # Creating weights for histo: y8_M_16
    y8_M_16_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000180734341243,0.0,0.0,0.0,0.000180766987918,0.0,0.0,0.0,0.0,0.000180657151876,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000180003640899,0.0,0.0,0.0,0.000361281541581,0.0,0.0,0.0,0.000180970259668,0.0,0.0,0.0,0.0,0.000180154593273,0.0,0.0,0.0,0.0,0.0,0.000180970259668,0.0,0.0,0.000180003640899,0.0,0.0,0.0,0.000361441117605,0.0,0.0,0.0,0.000180003640899,0.0,0.0,0.000180657151876,0.0,0.000180612801676,0.0,0.0,0.000180503389116,0.0,0.0,0.0,0.0,0.0,0.000180766987918,0.0,0.0,0.0,0.0,0.0,0.000361110724014,0.00018054716184])
    # Creating a new Canvas; the GridSpec reserves the right 30% for the legend.
    fig = plt.figure(figsize=(12,6),dpi=80)
    frame = gridspec.GridSpec(1,1,right=0.7)
    pad = fig.add_subplot(frame[0])
    # Creating a new Stack: each hist() call draws the running sum of the
    # weight arrays, so the step outlines nest from the full background total
    # (first call, 17 samples) down to the signal alone (last call).
    # NOTE(review): the 'normed' keyword was deprecated in matplotlib 2.1 and
    # removed in 3.1; on modern matplotlib this call raises and 'density=False'
    # is needed instead — confirm the target matplotlib version.
    # NOTE(review): the label strings use '\_' inside non-raw literals, which
    # is an invalid escape sequence (DeprecationWarning on Python >= 3.6).
    pad.hist(x=xData, bins=xBinning, weights=y8_M_0_weights+y8_M_1_weights+y8_M_2_weights+y8_M_3_weights+y8_M_4_weights+y8_M_5_weights+y8_M_6_weights+y8_M_7_weights+y8_M_8_weights+y8_M_9_weights+y8_M_10_weights+y8_M_11_weights+y8_M_12_weights+y8_M_13_weights+y8_M_14_weights+y8_M_15_weights+y8_M_16_weights,\
             label="$bg\_dip\_1600\_inf$", histtype="step", rwidth=1.0,\
             color=None, edgecolor="#e5e5e5", linewidth=1, linestyle="solid",\
             bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
    pad.hist(x=xData, bins=xBinning, weights=y8_M_0_weights+y8_M_1_weights+y8_M_2_weights+y8_M_3_weights+y8_M_4_weights+y8_M_5_weights+y8_M_6_weights+y8_M_7_weights+y8_M_8_weights+y8_M_9_weights+y8_M_10_weights+y8_M_11_weights+y8_M_12_weights+y8_M_13_weights+y8_M_14_weights+y8_M_15_weights,\
             label="$bg\_dip\_1200\_1600$", histtype="step", rwidth=1.0,\
             color=None, edgecolor="#f2f2f2", linewidth=1, linestyle="solid",\
             bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
    pad.hist(x=xData, bins=xBinning, weights=y8_M_0_weights+y8_M_1_weights+y8_M_2_weights+y8_M_3_weights+y8_M_4_weights+y8_M_5_weights+y8_M_6_weights+y8_M_7_weights+y8_M_8_weights+y8_M_9_weights+y8_M_10_weights+y8_M_11_weights+y8_M_12_weights+y8_M_13_weights+y8_M_14_weights,\
             label="$bg\_dip\_800\_1200$", histtype="step", rwidth=1.0,\
             color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\
             bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
    pad.hist(x=xData, bins=xBinning, weights=y8_M_0_weights+y8_M_1_weights+y8_M_2_weights+y8_M_3_weights+y8_M_4_weights+y8_M_5_weights+y8_M_6_weights+y8_M_7_weights+y8_M_8_weights+y8_M_9_weights+y8_M_10_weights+y8_M_11_weights+y8_M_12_weights+y8_M_13_weights,\
             label="$bg\_dip\_600\_800$", histtype="step", rwidth=1.0,\
             color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\
             bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
    pad.hist(x=xData, bins=xBinning, weights=y8_M_0_weights+y8_M_1_weights+y8_M_2_weights+y8_M_3_weights+y8_M_4_weights+y8_M_5_weights+y8_M_6_weights+y8_M_7_weights+y8_M_8_weights+y8_M_9_weights+y8_M_10_weights+y8_M_11_weights+y8_M_12_weights,\
             label="$bg\_dip\_400\_600$", histtype="step", rwidth=1.0,\
             color=None, edgecolor="#c1bfa8", linewidth=1, linestyle="solid",\
             bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
    pad.hist(x=xData, bins=xBinning, weights=y8_M_0_weights+y8_M_1_weights+y8_M_2_weights+y8_M_3_weights+y8_M_4_weights+y8_M_5_weights+y8_M_6_weights+y8_M_7_weights+y8_M_8_weights+y8_M_9_weights+y8_M_10_weights+y8_M_11_weights,\
             label="$bg\_dip\_200\_400$", histtype="step", rwidth=1.0,\
             color=None, edgecolor="#bab5a3", linewidth=1, linestyle="solid",\
             bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
    pad.hist(x=xData, bins=xBinning, weights=y8_M_0_weights+y8_M_1_weights+y8_M_2_weights+y8_M_3_weights+y8_M_4_weights+y8_M_5_weights+y8_M_6_weights+y8_M_7_weights+y8_M_8_weights+y8_M_9_weights+y8_M_10_weights,\
             label="$bg\_dip\_100\_200$", histtype="step", rwidth=1.0,\
             color=None, edgecolor="#b2a596", linewidth=1, linestyle="solid",\
             bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
    pad.hist(x=xData, bins=xBinning, weights=y8_M_0_weights+y8_M_1_weights+y8_M_2_weights+y8_M_3_weights+y8_M_4_weights+y8_M_5_weights+y8_M_6_weights+y8_M_7_weights+y8_M_8_weights+y8_M_9_weights,\
             label="$bg\_dip\_0\_100$", histtype="step", rwidth=1.0,\
             color=None, edgecolor="#b7a39b", linewidth=1, linestyle="solid",\
             bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
    pad.hist(x=xData, bins=xBinning, weights=y8_M_0_weights+y8_M_1_weights+y8_M_2_weights+y8_M_3_weights+y8_M_4_weights+y8_M_5_weights+y8_M_6_weights+y8_M_7_weights+y8_M_8_weights,\
             label="$bg\_vbf\_1600\_inf$", histtype="step", rwidth=1.0,\
             color=None, edgecolor="#ad998c", linewidth=1, linestyle="solid",\
             bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
    pad.hist(x=xData, bins=xBinning, weights=y8_M_0_weights+y8_M_1_weights+y8_M_2_weights+y8_M_3_weights+y8_M_4_weights+y8_M_5_weights+y8_M_6_weights+y8_M_7_weights,\
             label="$bg\_vbf\_1200\_1600$", histtype="step", rwidth=1.0,\
             color=None, edgecolor="#9b8e82", linewidth=1, linestyle="solid",\
             bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
    pad.hist(x=xData, bins=xBinning, weights=y8_M_0_weights+y8_M_1_weights+y8_M_2_weights+y8_M_3_weights+y8_M_4_weights+y8_M_5_weights+y8_M_6_weights,\
             label="$bg\_vbf\_800\_1200$", histtype="step", rwidth=1.0,\
             color=None, edgecolor="#876656", linewidth=1, linestyle="solid",\
             bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
    pad.hist(x=xData, bins=xBinning, weights=y8_M_0_weights+y8_M_1_weights+y8_M_2_weights+y8_M_3_weights+y8_M_4_weights+y8_M_5_weights,\
             label="$bg\_vbf\_600\_800$", histtype="step", rwidth=1.0,\
             color=None, edgecolor="#afcec6", linewidth=1, linestyle="solid",\
             bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
    pad.hist(x=xData, bins=xBinning, weights=y8_M_0_weights+y8_M_1_weights+y8_M_2_weights+y8_M_3_weights+y8_M_4_weights,\
             label="$bg\_vbf\_400\_600$", histtype="step", rwidth=1.0,\
             color=None, edgecolor="#84c1a3", linewidth=1, linestyle="solid",\
             bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
    pad.hist(x=xData, bins=xBinning, weights=y8_M_0_weights+y8_M_1_weights+y8_M_2_weights+y8_M_3_weights,\
             label="$bg\_vbf\_200\_400$", histtype="step", rwidth=1.0,\
             color=None, edgecolor="#89a8a0", linewidth=1, linestyle="solid",\
             bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
    pad.hist(x=xData, bins=xBinning, weights=y8_M_0_weights+y8_M_1_weights+y8_M_2_weights,\
             label="$bg\_vbf\_100\_200$", histtype="step", rwidth=1.0,\
             color=None, edgecolor="#829e8c", linewidth=1, linestyle="solid",\
             bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
    pad.hist(x=xData, bins=xBinning, weights=y8_M_0_weights+y8_M_1_weights,\
             label="$bg\_vbf\_0\_100$", histtype="step", rwidth=1.0,\
             color=None, edgecolor="#adbcc6", linewidth=1, linestyle="solid",\
             bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
    pad.hist(x=xData, bins=xBinning, weights=y8_M_0_weights,\
             label="$signal$", histtype="step", rwidth=1.0,\
             color=None, edgecolor="#7a8e99", linewidth=1, linestyle="solid",\
             bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
    # Axis labels (TeX rendering disabled; mathtext still honored in ylabel).
    plt.rc('text',usetex=False)
    plt.xlabel(r"M [ j_{1} , j_{2} ] ( GeV ) ",\
               fontsize=16,color="black")
    plt.ylabel(r"$\mathrm{Events}$ $(\mathcal{L}_{\mathrm{int}} = 40.0\ \mathrm{fb}^{-1})$ ",\
               fontsize=16,color="black")
    # Boundary of y-axis: 10% headroom above the tallest stacked bin.
    ymax=(y8_M_0_weights+y8_M_1_weights+y8_M_2_weights+y8_M_3_weights+y8_M_4_weights+y8_M_5_weights+y8_M_6_weights+y8_M_7_weights+y8_M_8_weights+y8_M_9_weights+y8_M_10_weights+y8_M_11_weights+y8_M_12_weights+y8_M_13_weights+y8_M_14_weights+y8_M_15_weights+y8_M_16_weights).max()*1.1
    ymin=0 # linear scale
    #ymin=min([x for x in (y8_M_0_weights+y8_M_1_weights+y8_M_2_weights+y8_M_3_weights+y8_M_4_weights+y8_M_5_weights+y8_M_6_weights+y8_M_7_weights+y8_M_8_weights+y8_M_9_weights+y8_M_10_weights+y8_M_11_weights+y8_M_12_weights+y8_M_13_weights+y8_M_14_weights+y8_M_15_weights+y8_M_16_weights) if x])/100. # log scale
    plt.gca().set_ylim(ymin,ymax)
    # Log/Linear scale for X-axis (log variant left commented by the generator).
    plt.gca().set_xscale("linear")
    #plt.gca().set_xscale("log",nonposx="clip")
    # Log/Linear scale for Y-axis (log variant left commented by the generator).
    plt.gca().set_yscale("linear")
    #plt.gca().set_yscale("log",nonposy="clip")
    # Legend, placed just outside the axes on the right.
    plt.legend(bbox_to_anchor=(1.05,1), loc=2, borderaxespad=0.)
    # Saving the image into each job output-format directory.
    plt.savefig('../../HTML/MadAnalysis5job_0/selection_7.png')
    # NOTE(review): this writes a .png into the PDF directory — presumably
    # '.pdf' was intended; verify against the generator's conventions.
    plt.savefig('../../PDF/MadAnalysis5job_0/selection_7.png')
    plt.savefig('../../DVI/MadAnalysis5job_0/selection_7.eps')
# Run standalone: produce and save the selection_7 histogram stack.
if __name__ == '__main__':
    selection_7()
| 35,466 | 29,526 |