index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
20,600 | 6bbe16f4ca831bc7b775eaccdcb43c5dc0de54b8 | """
This script runs the application using a development server.
"""
from os import environ
from lang_ident import APP


def _port_from_env(default=4242):
    """Return SERVER_PORT from the environment, or *default* if unset or not an int."""
    try:
        return int(environ.get('SERVER_PORT', str(default)))
    except ValueError:
        return default


if __name__ == '__main__':
    # HOST = environ.get('SERVER_HOST', 'localhost') # only accept connections from same computer
    HOST = '0.0.0.0'  # bind all interfaces so the app is reachable on the local network
    PORT = _port_from_env()
    # NOTE: development-only key; the real secret lives in the WSGI config.
    APP.secret_key = "This secret key will be in wsgi on production"
    APP.run(HOST, PORT)
20,601 | 9da0e633ac6019bcc81d028dd447f65527be3c61 |
import datetime
import logging
import os
from context import component
class Formatter(logging.Formatter):
def format(self, record):
if len(record.args) == 0:
module = record.module
else:
module = str(record.args["module"])
if hasattr(record, "message"):
message = record.message
else:
if hasattr(record, "msg"):
message = str(record.msg)
else:
message = "No message for " + str(record)
record.module = module
record.message = message
return super(Formatter, self).format(record)
@component
class Logger:
    """Application-wide logging component: writes DEBUG-and-up records to a
    dated file under the package-level ``log/`` directory and to stderr."""

    LOG_FILE_FORMAT = "log_{0:04d}-{1:02d}-{2:02d}.txt"  # year, month, day
    LOG_PATH = "log"  # directory (relative to package root) holding log files
    LOG_FORMAT = "%(asctime)s %(levelname)s %(name)s %(module)s %(message)s"  # record layout
    COMPONENT_PROPERTY_NAME = "logger"

    def __init__(self):
        # Logger name = directory name two levels above this file.
        name = __file__.split(os.path.sep)[-3]
        self.__logger = logging.getLogger(name)
        self.__logger.setLevel(logging.DEBUG)
        today = datetime.datetime.now()
        log_path = os.path.join(os.path.dirname(__file__), "../" + self.LOG_PATH)
        # Fix: create the log directory if missing — logging.FileHandler
        # would otherwise raise FileNotFoundError on a fresh checkout.
        os.makedirs(log_path, exist_ok=True)
        log_file_name = os.path.join(log_path, self.LOG_FILE_FORMAT.format(today.year, today.month, today.day))
        formatter = Formatter(self.LOG_FORMAT)
        file_handler = logging.FileHandler(log_file_name)
        file_handler.setLevel(logging.DEBUG)
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        console_handler.setFormatter(formatter)
        self.__logger.addHandler(file_handler)
        self.__logger.addHandler(console_handler)

    @property
    def logger(self) -> logging.Logger:
        """The configured standard-library logger."""
        return self.__logger
def log_method(func):
    """Decorator for instance methods: logs '<Class>.<method> started/completed'
    through the logger component registered in the application context."""
    from functools import wraps

    @wraps(func)  # fix: preserve the wrapped method's __name__/__doc__
    def wrapper(*args, **kwargs):
        # Imported lazily to avoid a circular import at module load time.
        from app import AppContext
        ctx = AppContext.get_context()
        # NOTE(review): matches on logging.Logger — confirm the context
        # registers the stdlib logger (not the Logger component) under it.
        logger = ctx.match_component_by_type(logging.Logger)
        func_name = args[0].__class__.__name__ + "." + func.__name__
        # strip("__") removes leading/trailing underscores (e.g. "__main__" -> "main").
        module_name = args[0].__class__.__module__.strip("__")
        module_info = {"module": module_name}
        logger.info(func_name + " started", module_info)
        result = func(*args, **kwargs)
        logger.info(func_name + " completed", module_info)
        return result
    return wrapper
|
20,602 | d20113e730dcb7dcf909a7920d0d2d7f040297ce | import os
import skimage.io
import skimage.util
import skimage.filters
import skimage.transform
import numpy as np
from PIL import Image
def gaussian_noise(im, var=0.01):
    """Additive Gaussian noise; ``var`` is the noise variance (useful range 0–0.1)."""
    corrupted = skimage.util.random_noise(im, mode="gaussian", var=var)
    return skimage.util.img_as_ubyte(np.clip(corrupted, 0, 1.0))
def salt_and_pepper(im, amount=0.01):
    """Salt-and-pepper noise; ``amount`` is the fraction of corrupted pixels."""
    corrupted = skimage.util.random_noise(im, mode="s&p", amount=amount)
    return skimage.util.img_as_ubyte(np.clip(corrupted, 0, 1.0))
def chroma_abberation(im, max_shift=20):
    """Simulate chromatic aberration: shift each RGB channel by a random
    (row, col) offset drawn from [1, max_shift)."""
    shifted = im.copy()
    # One (row, col) offset pair per channel, drawn in a single call.
    offsets = np.random.randint(1, max_shift, size=6)
    for channel in range(3):
        dr = offsets[2 * channel]
        dc = offsets[2 * channel + 1]
        shifted[:-dr, :-dc, channel] = shifted[dr:, dc:, channel]
    return shifted
def low_resolution(im, scale=0.2):
    """Downscale by ``scale`` then resize back, simulating a low-resolution source."""
    original_size = im.shape[:2]
    downsampled = skimage.transform.rescale(im, scale)
    restored = skimage.transform.resize(downsampled, original_size)
    return skimage.util.img_as_ubyte(np.clip(restored, 0, 1.0))
def gaussian_blur(im, sigma=4):
    """Gaussian blur; ``sigma`` is the kernel standard deviation (useful range 0–10)."""
    blurred = skimage.filters.gaussian(im, sigma=sigma)
    return skimage.util.img_as_ubyte(np.clip(blurred, 0, 1.0))
def quantization_noise(im, level=16):
    """Posterize *im* down to ``level`` gray values per channel.

    level: number of quantization levels, 32 (mild) down to 2 (harsh).
    """
    # level: 32 ~ 2
    im = skimage.util.img_as_ubyte(im)
    level = int(level)
    # Bin edges evenly spaced over [0, 255].
    # NOTE(review): for level values that don't divide 256 the edges are
    # floats and the last bin may be uneven — confirm acceptable.
    T = np.arange(0, 255, 256/level)
    # Map every pixel to the index of its bin, then back to the bin's
    # lower-edge value.
    noisy = np.digitize(im.flat, T)
    noisy = T[noisy-1].reshape(im.shape).astype(np.uint8)
    noisy = skimage.util.img_as_float(noisy)
    noisy = np.clip(noisy, 0, 1.0)
    return skimage.util.img_as_ubyte(noisy)
def jpeg_compression(im, quality=20):
    """Degrade *im* with JPEG compression artifacts at the given ``quality``
    (lower = stronger artifacts), by round-tripping through an encoder."""
    import tempfile

    quality = int(quality)
    im = skimage.util.img_as_ubyte(im)
    obj = Image.fromarray(im)
    # Fix: use a real temporary file instead of a hard-coded "/tmp/<pid>.jpg"
    # (not portable to Windows, and the file was never cleaned up).
    fd, path = tempfile.mkstemp(suffix=".jpg")
    try:
        os.close(fd)
        obj.save(path, format="JPEG", quality=quality)
        noisy = skimage.io.imread(path)
    finally:
        os.remove(path)
    return skimage.util.img_as_ubyte(noisy)
def f_noise(im, scale=8, clip=True):
    """Add 1/f ("pink"-spectrum) noise to *im*, per channel.

    scale: noise amplitude multiplier (useful range 1–15).
    NOTE(review): the ``clip`` parameter is unused — the output is always
    clipped to [0, 1]; confirm whether it should gate the np.clip call.
    """
    # scale: 1 ~ 15
    def one_f(beta=-1):
        # Synthesize one noise field with power spectrum |f|^beta by giving
        # each frequency a random phase and inverse-FFT'ing back.
        dim = im.shape[:2]
        # FFT-ordered frequency coordinates along rows (0..N/2, then negatives).
        u1 = np.arange(np.floor(dim[0]/2)+1)
        u2 = -1 * np.arange(np.ceil(dim[0]/2)-1, 0, -1)
        u = np.concatenate([u1, u2]) / dim[0]
        u = np.tile(u, (dim[1], 1))
        u = np.swapaxes(u, 0, 1)
        # Same along columns.
        v1 = np.arange(np.floor(dim[1]/2)+1)
        v2 = -1 * np.arange(np.ceil(dim[1]/2)-1, 0, -1)
        v = np.concatenate([v1, v2]) / dim[1]
        v = np.tile(v, (dim[0], 1))
        # Spectral envelope; the 1e-5 epsilon keeps the DC term finite.
        s_f = np.power(np.power(u, 2) + np.power(v, 2) + 1e-5, beta/2)
        s_f[s_f == np.inf] = 0
        # Uniform random phase per frequency.
        phi = np.random.uniform(size=dim)
        x = np.power(s_f, 0.5) * (np.cos(2*np.pi*phi) + 1j*np.sin(2*np.pi*phi))
        x = np.fft.ifft2(x)
        x = np.real(x)
        return x
    im = skimage.util.img_as_float(im)
    noisy = im.copy()
    # beta = -2 gives a steeper (1/f^2, "brown") spectrum per channel.
    if len(noisy.shape) == 3:
        noisy[:, :, 0] = im[:, :, 0] + scale*one_f(-2)
        noisy[:, :, 1] = im[:, :, 1] + scale*one_f(-2)
        noisy[:, :, 2] = im[:, :, 2] + scale*one_f(-2)
    else:
        noisy[:, :] = im[:, :] + scale*one_f(-2)
    noisy = np.clip(noisy, 0, 1.0)
    return skimage.util.img_as_ubyte(noisy)
|
20,603 | 7cf07c71a76600361abf08ad61caa0b0d5d2388a | from bs4 import BeautifulSoup as soup
import urllib.request as req

my_url = req.urlopen('https://www.reddit.com/r/Awww/')
fd = open('./images.html', 'w')


def main():
    """Scrape <img> tags from r/Awww and write them into images.html."""
    page = soup(my_url, 'html.parser')
    images = page.findAll(['img'])
    for each in images:
        # Fix: tolerate <img> tags with no src attribute (the original
        # each['src'] raised KeyError on them).
        url = each.get('src')
        if not url:
            continue
        # Fix: the original emitted '<img alt="" src="{}"' with no closing
        # '>', producing malformed HTML.
        fd.write("""
    <div>
        <img alt="" src="{}">
    </div>
    """.format(url))
    fd.close()


if __name__ == "__main__":
    main()
|
20,604 | fd6168f50165a4e0a9fe066e15e45e73ded4a145 | # app/__init__.py (ROOT PACKAGE)
# PYTHON CODE FOR SECTION : 11 LECTURE : 42 (flask-bcrypt and flask-login)
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# from flask_migrate import Migrate
from flask_bootstrap import Bootstrap
from flask_login import LoginManager
from flask_bcrypt import Bcrypt
# Shared extension instances, created unbound here and attached to the app
# inside create_app() (the application-factory pattern).
db = SQLAlchemy()
# migrate = Migrate()
bootstrap = Bootstrap()
#login_manager = LoginManager()
#bcrypt = Bcrypt()
## NOTES:
# Because the project uses a modular/scalable layout there is no global
# Flask instance; create_app() below builds one per configuration.
# It can be passed 'dev', 'test' or 'prod' — this is an app factory.
def create_app(config_type):
    """Application factory: build and configure a Flask instance.

    config_type: 'dev', 'test' or 'prod' — selects ./config/<config_type>.py.
    """
    app = Flask(__name__)

    # Load the configuration for the requested environment from an
    # absolute path under ./config.
    config_file = os.path.join(os.getcwd(), 'config', config_type + '.py')
    app.config.from_pyfile(config_file)

    # Bind the shared extension instances to this application.
    db.init_app(app)
    # migrate.init_app(app, db)
    bootstrap.init_app(app)
    #login_manager.init_app(app)
    #bcrypt.init_app(app)

    # Register blueprints (imported lazily to avoid circular imports).
    from app.catalog import main
    app.register_blueprint(main)
    from app.auth import authentication
    app.register_blueprint(authentication)

    return app
|
20,605 | fb02a9655bb89c59a21ba6e763f8c36789e3b125 | from django.core.mail import EmailMessage
import logging
from user.models import User
from datetime import datetime, timedelta
from psycopg2 import OperationalError
from .models import Order
from rest_framework.exceptions import ValidationError
from celery import shared_task
logger = logging.getLogger('django')
class Util:
    """Celery task helpers for order-related notification emails."""

    @shared_task
    def send_email(data):
        """Send one email described by ``data``:
        {'email_subject', 'email_body', 'to_email'}."""
        email = EmailMessage(
            subject=data['email_subject'], body=data['email_body'], to=[data['to_email']])
        email.send()

    @shared_task
    def send_delivery_email(email):
        """Mark undelivered orders older than 24h as delivered and notify
        their owners by email. Errors are logged, never raised."""
        try:
            # Fix: iterate the queryset directly instead of the original
            # values('id') + per-id Order.objects.get() round trips
            # (an N+1 query pattern).
            for order_obj in Order.objects.filter(is_delivered=False):
                user = User.objects.get(id=order_obj.owner_id)
                ordered_time = order_obj.created_date
                # Compare as naive datetimes, matching the original logic.
                if datetime.now() - ordered_time.replace(tzinfo=None) > timedelta(hours=24):
                    email_body = ('Hi ' + user.username +
                                  ' your order has been delivered successully')
                    data = {'email_body': email_body, 'to_email': user.email,
                            'email_subject': 'Order Delivered'}
                    Util.send_email(data)
                    order_obj.is_delivered = True
                    order_obj.save()
                    logger.info("Email sent successfully using celery")
        except OperationalError as e:
            logger.error(e)
        except ValidationError as e:
            logger.error(e)
        except Exception as e:
            # Fix: removed the stray debug print; the logger records it.
            logger.error(e)
|
20,606 | 84a382f0efa9111bda498a9b25df8be63c359cc4 | import math
# Read an angle in degrees and print its sine, cosine and tangent.
angulo = float(input('Informe o angulo: '))
rad = math.radians(angulo)
# Fix: the original used math.asin/math.atan (the ARC functions applied to
# the radian value), which is not the sine/tangent of the angle.
seno = math.sin(rad)
cosseno = math.cos(rad)
tangente = math.tan(rad)
print('O angulo é: {:.2f}\n o seno é: {:.2f}\n o cosseno é: {:.2f}\n e a tangente é: {:.2f}'.format(angulo, seno, cosseno, tangente))
|
20,607 | f3522b620bfaaad5759492a629ccdc32baf11829 | import random
import sys
from mitmproxy.flow import FlowWriter
# Shared script state; holds the FlowWriter between hook invocations.
state = {}

def start():
    """mitmproxy script entry point: open the flow-output target.

    Usage: mitmproxy -s "flowriter.py filename" ("-" writes to stdout).
    """
    if len(sys.argv) != 2:
        raise ValueError('Usage: -s "flowriter.py filename"')
    if sys.argv[1] == "-":
        # NOTE(review): sys.stdout is a text stream while the file branch
        # opens in binary mode ("wb") — confirm FlowWriter accepts both.
        f = sys.stdout
    else:
        f = open(sys.argv[1], "wb")
    state["flow_writer"] = FlowWriter(f)

def response(flow):
    """Response hook: record roughly half of all flows, chosen at random."""
    if random.choice([True, False]):
        state["flow_writer"].add(flow)
|
20,608 | 6a753374397a8ee3176b2b1f2366067db68c3798 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""eval resnet."""
import os
import numpy as np
import mindspore as ms
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore_gs import PrunerKfCompressAlgo, PrunerFtCompressAlgo
from mindspore_gs.pruner.scop.scop_pruner import KfConv2d, MaskedConv2dbn
from src.CrossEntropySmooth import CrossEntropySmooth
from src.resnet import resnet50 as resnet
from src.model_utils.config import config
if config.dataset == "cifar10":
from src.dataset import create_dataset1 as create_dataset
else:
from src.dataset import create_dataset2 as create_dataset
ms.set_seed(1)
def eval_net():
    """Evaluate a SCOP-pruned ResNet-50 checkpoint: run top-1 accuracy on the
    eval dataset, then prune the network and report its parameter count."""
    target = config.device_target
    # init context
    ms.set_context(mode=ms.GRAPH_MODE, device_target=target, save_graphs=False)
    if target == "Ascend":
        # NOTE(review): os.getenv returns None when DEVICE_ID is unset and
        # int(None) raises TypeError — assumes the launcher exports it.
        device_id = int(os.getenv('DEVICE_ID'))
        ms.set_context(device_id=device_id)
    # create dataset
    dataset = create_dataset(dataset_path=config.data_path, do_train=False, batch_size=config.batch_size,
                             eval_image_size=config.eval_image_size, target=target)
    # define net, wrapped with the knockoff-feature compression stage so the
    # checkpoint's pruning metadata can be restored onto it
    net = resnet(class_num=config.class_num)
    net = PrunerKfCompressAlgo({}).apply(net)
    # Collect the saved per-layer channel-selection indices from the checkpoint.
    out_index = []
    param_dict = ms.load_checkpoint(config.checkpoint_file_path)
    for key in param_dict.keys():
        if 'out_index' in key:
            out_index.append(param_dict[key])
    # Re-attach the indices to the KfConv2d layers in traversal order.
    for _, (_, module) in enumerate(net.cells_and_names()):
        if isinstance(module, KfConv2d):
            module.out_index = out_index.pop(0)
    ft_algo = PrunerFtCompressAlgo({'prune_rate': config.prune_rate})
    # NOTE(review): relies on the private _recover_conv API (see comment
    # below about the interface arriving in the next version).
    net = ft_algo._recover_conv(net)
    # load checkpoint
    ms.load_param_into_net(net, param_dict)
    net.set_train(False)
    # define loss, model
    if config.dataset == "imagenet2012":
        if not config.use_label_smooth:
            config.label_smooth_factor = 0.0
        loss = CrossEntropySmooth(sparse=True, reduction='mean',
                                  smooth_factor=config.label_smooth_factor,
                                  num_classes=config.class_num)
    else:
        loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    # define model
    model = ms.Model(net, loss_fn=loss, metrics={'top_1_accuracy'})
    # eval model
    res = model.eval(dataset)
    # Chain the pruning indices: each conv2/conv3 layer consumes the channels
    # selected by the layer immediately before it.
    masked_conv_list = []
    for imd, (nam, module) in enumerate(net.cells_and_names()):
        if isinstance(module, MaskedConv2dbn):
            masked_conv_list.append((nam, module))
    for imd in range(len(masked_conv_list)):
        if 'conv2' in masked_conv_list[imd][0] or 'conv3' in masked_conv_list[imd][0]:
            masked_conv_list[imd][1].in_index = masked_conv_list[imd - 1][1].out_index
    # Only use when calculate params, next version will provide the interface.
    net = PrunerFtCompressAlgo({})._pruning_conv(net)
    # calculate params
    total_params = 0
    for param in net.trainable_params():
        total_params += np.prod(param.shape)
    print("result:", res, "prune_rate=", config.prune_rate,
          "ckpt=", config.checkpoint_file_path, "params=", total_params)

if __name__ == '__main__':
    eval_net()
|
20,609 | 89e00955a48437789de454be1e5b04527e646946 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-07-04 08:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter the ``Snmpv3`` field on the ``backup``
    model to a CharField(max_length=100) that allows blank and NULL values."""

    dependencies = [
        ('switch', '0002_auto_20180704_1532'),
    ]

    operations = [
        migrations.AlterField(
            model_name='backup',
            name='Snmpv3',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
    ]
|
20,610 | 585700929b7e6c472ea0ed3a5acd29201e2aed92 | '''
Title: Flow Tables Combine
Purpose: Automate the joining of CSV files exported from FlowJo
Author: Kyle Kroll
Contact: kkroll1 (at) bidmc (dot) harvard (dot) edu
Affiliation: Reeves Lab - Center for Virology and Vaccine Research
Beth Israel Deaconess Medical Center/Harvard Medical School
https://github.com/KrollBio/FTC
'''
from os import listdir
from os.path import isfile
from os.path import join
from os.path import isdir
import pandas as pd
import argparse
import sys
def load_csv(filepath, base_dir=None):
    """Load one FlowJo CSV export and tidy it for concatenation.

    Drops FlowJo's trailing "Mean"/"SD" summary rows, shortens the long
    gating-path column names to their leaf population name, renames the
    first column to "Sample", and removes pandas' "Unnamed" filler columns.

    filepath: file name (or path) of the CSV.
    base_dir: directory prefix prepended to ``filepath``. Defaults to the
        module-level ``file_dir`` set up by the command-line entry point,
        keeping the original call signature working (fix: the function no
        longer hard-depends on that global).
    """
    if base_dir is None:
        base_dir = file_dir
    temp_df = pd.read_csv("{0}{1}".format(base_dir, filepath), delimiter=",")
    # Remove the summary rows FlowJo appends at the bottom of the table.
    temp_df = temp_df.drop(temp_df[temp_df.iloc[:, 0] == "Mean"].index)
    temp_df = temp_df.drop(temp_df[temp_df.iloc[:, 0] == "SD"].index)
    new_colnames = [modify_col_names(x) for x in temp_df.columns]
    new_colnames[0] = "Sample"
    temp_df.columns = new_colnames
    temp_df = temp_df.loc[:, ~temp_df.columns.str.contains('^Unnamed')]
    return temp_df

def modify_col_names(colnames):
    """Return the leaf population name from a full '/'-separated gating path."""
    split_name = colnames.split("/")
    return split_name[len(split_name)-1]
def main():
    """Concatenate every CSV file found in ``file_dir`` and write the
    combined table to ``output_file`` using ``file_sep`` as delimiter."""
    csv_files = [name for name in listdir(file_dir) if isfile(join(file_dir, name))]
    frames = [load_csv(name) for name in csv_files]
    combined = pd.concat(frames, sort=False, ignore_index=True)
    combined.to_csv(output_file, sep=file_sep, index=False)
if __name__ == '__main__':
    # Command-line interface: input directory, output file and output
    # separator are parsed here and shared with main() via module globals.
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", "-i", help="Input Directory", type=str, required=True)
    parser.add_argument("--output", "-o", help="Output filename", type=str, required=True)
    parser.add_argument("--separator", "-s", help="Separator, options: [csv] [tab]", required=False, default=",", type=str)
    args = parser.parse_args()

    file_dir = args.input + "/"
    if not isdir(file_dir):
        print("Input directory does not exist\nRun 'python FTC.py -h' for help.")
        sys.exit(0)

    # Fix: the default separator value "," was rejected by the original
    # validator (only the literals "csv"/"tab" were accepted), so omitting
    # -s always aborted. The literal characters are now accepted too.
    if args.separator in ("csv", ","):
        file_sep = ","
    elif args.separator in ("tab", "\t"):
        file_sep = "\t"
    else:
        print("Invalid file output separator supplied.\nRun 'python FTC.py -h' for help.")
        sys.exit(0)

    output_file = args.output
    main()
20,611 | e4b7df75dd53ccb8b5459d13a3939736bde1f50d | '''
Copyright (C) 2016 Turi
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
from .sframe import SFrame
from ..cython.context import debug_trace as cython_context
from ..util import _is_non_string_iterable
from .sarray import SArray, _create_sequential_sarray
import copy
# Discriminators for GFrame.__type__: which side of the graph a frame views.
VERTEX_GFRAME = 0
EDGE_GFRAME = 1
class GFrame(SFrame):
    """
    GFrame is similar to SFrame but is associated with an SGraph.

    - GFrame can be obtained from either the `vertices` or `edges`
    attributed in any SGraph:

    >>> import graphlab
    >>> g = graphlab.load_sgraph(...)
    >>> vertices_gf = g.vertices
    >>> edges_gf = g.edges

    - GFrame has the same API as SFrame:

    >>> sa = vertices_gf['pagerank']

    >>> # column lambda transform
    >>> vertices_gf['pagerank'] = vertices_gf['pagerank'].apply(lambda x: 0.15 + 0.85 * x)

    >>> # frame lambda transform
    >>> vertices_gf['score'] = vertices_gf.apply(lambda x: 0.2 * x['triangle_count'] + 0.8 * x['pagerank'])

    >>> del vertices_gf['pagerank']

    - GFrame can be converted to SFrame:

    >>> # extract an SFrame
    >>> sf = vertices_gf.__to_sframe__()
    """
    def __init__(self, graph, gframe_type):
        # gframe_type is VERTEX_GFRAME or EDGE_GFRAME. The graph proxy is the
        # single source of truth; the SFrame view is cached lazily and
        # invalidated via __is_dirty__ whenever a mutator runs.
        self.__type__ = gframe_type
        self.__graph__ = graph
        self.__sframe_cache__ = None
        self.__is_dirty__ = False

    def __to_sframe__(self):
        # Return a copy so callers cannot mutate the cached SFrame in place.
        return copy.copy(self._get_cache())

#/**************************************************************************/
#/*                                                                        */
#/*                               Modifiers                                */
#/*                                                                        */
#/**************************************************************************/
    def add_column(self, data, name=""):
        """
        Adds the specified column to this SFrame.  The number of elements in
        the data given must match every other column of the SFrame.

        Parameters
        ----------
        data : SArray
            The 'column' of data.

        name : string
            The name of the column. If no name is given, a default name is chosen.
        """
        # Check type for pandas dataframe or SArray?
        if not isinstance(data, SArray):
            raise TypeError("Must give column as SArray")
        if not isinstance(name, str):
            raise TypeError("Invalid column name: must be str")
        self.__is_dirty__ = True
        # Writes go straight to the graph proxy, not the cached SFrame.
        with cython_context():
            if self._is_vertex_frame():
                graph_proxy = self.__graph__.__proxy__.add_vertex_field(data.__proxy__, name)
                self.__graph__.__proxy__ = graph_proxy
            elif self._is_edge_frame():
                graph_proxy = self.__graph__.__proxy__.add_edge_field(data.__proxy__, name)
                self.__graph__.__proxy__ = graph_proxy

    def add_columns(self, datalist, namelist):
        """
        Adds columns to the SFrame.  The number of elements in all columns must
        match every other column of the SFrame.

        Parameters
        ----------
        datalist : list of SArray
            A list of columns

        namelist : list of string
            A list of column names. All names must be specified.
        """
        if not _is_non_string_iterable(datalist):
            raise TypeError("datalist must be an iterable")
        if not _is_non_string_iterable(namelist):
            raise TypeError("namelist must be an iterable")
        if not all([isinstance(x, SArray) for x in datalist]):
            raise TypeError("Must give column as SArray")
        if not all([isinstance(x, str) for x in namelist]):
            raise TypeError("Invalid column name in list: must all be str")
        # Delegate each pair to add_column (which also marks the cache dirty).
        for (data, name) in zip(datalist, namelist):
            self.add_column(data, name)

    def remove_column(self, name):
        """
        Removes the column with the given name from the SFrame.

        Parameters
        ----------
        name : string
            The name of the column to remove.
        """
        if name not in self.column_names():
            raise KeyError('Cannot find column %s' % name)
        self.__is_dirty__ = True
        try:
            with cython_context():
                if self._is_vertex_frame():
                    # The reserved id columns define the graph structure.
                    assert name != '__id', 'Cannot remove \"__id\" column'
                    graph_proxy = self.__graph__.__proxy__.delete_vertex_field(name)
                    self.__graph__.__proxy__ = graph_proxy
                elif self._is_edge_frame():
                    assert name != '__src_id', 'Cannot remove \"__src_id\" column'
                    assert name != '__dst_id', 'Cannot remove \"__dst_id\" column'
                    graph_proxy = self.__graph__.__proxy__.delete_edge_field(name)
                    self.__graph__.__proxy__ = graph_proxy
        except:
            # Nothing was removed; restore the dirty flag and re-raise.
            self.__is_dirty__ = False
            raise

    def swap_columns(self, column_1, column_2):
        """
        Swaps the columns with the given names.

        Parameters
        ----------
        column_1 : string
            Name of column to swap

        column_2 : string
            Name of other column to swap
        """
        self.__is_dirty__ = True
        with cython_context():
            if self._is_vertex_frame():
                graph_proxy = self.__graph__.__proxy__.swap_vertex_fields(column_1, column_2)
                self.__graph__.__proxy__ = graph_proxy
            elif self._is_edge_frame():
                graph_proxy = self.__graph__.__proxy__.swap_edge_fields(column_1, column_2)
                self.__graph__.__proxy__ = graph_proxy

    def rename(self, names):
        """
        Rename the columns using the 'names' dict.  This changes the names of
        the columns given as the keys and replaces them with the names given as
        the values.

        Parameters
        ----------
        names : dict[string, string]
            Dictionary of [old_name, new_name]
        """
        if (type(names) is not dict):
            raise TypeError('names must be a dictionary: oldname -> newname')
        self.__is_dirty__ = True
        with cython_context():
            # NOTE(review): dict views are passed straight through — confirm
            # the proxy accepts non-list iterables under Python 3.
            if self._is_vertex_frame():
                graph_proxy = self.__graph__.__proxy__.rename_vertex_fields(names.keys(), names.values())
                self.__graph__.__proxy__ = graph_proxy
            elif self._is_edge_frame():
                graph_proxy = self.__graph__.__proxy__.rename_edge_fields(names.keys(), names.values())
                self.__graph__.__proxy__ = graph_proxy

    def add_row_number(self, column_name='id', start=0):
        """
        Add a new column holding sequential row numbers beginning at *start*;
        returns self for chaining.
        """
        if type(column_name) is not str:
            raise TypeError("Must give column_name as str")
        if column_name in self.column_names():
            raise RuntimeError("Column name %s already exists" % str(column_name))
        if type(start) is not int:
            raise TypeError("Must give start as int")
        the_col = _create_sequential_sarray(self.num_rows(), start)
        self[column_name] = the_col
        return self

    def __setitem__(self, key, value):
        """
        A wrapper around add_column(s).  Key can be either a list or a str.  If
        value is an SArray, it is added to the SFrame as a column.  If it is a
        constant value (int, str, or float), then a column is created where
        every entry is equal to the constant value.  Existing columns can also
        be replaced using this wrapper.
        """
        if (key in ['__id', '__src_id', '__dst_id']):
            raise KeyError('Cannot modify column %s. Changing __id column will\
                    change the graph structure' % key)
        else:
            self.__is_dirty__ = True
            super(GFrame, self).__setitem__(key, value)

#/**************************************************************************/
#/*                                                                        */
#/*                          Read-only Accessor                            */
#/*                                                                        */
#/**************************************************************************/
    def num_rows(self):
        """
        Returns the number of rows.

        Returns
        -------
        out : int
            Number of rows in the SFrame.
        """
        # Answered from the graph summary, not the cached SFrame.
        if self._is_vertex_frame():
            return self.__graph__.summary()['num_vertices']
        elif self._is_edge_frame():
            return self.__graph__.summary()['num_edges']

    def num_cols(self):
        """
        Returns the number of columns.

        Returns
        -------
        out : int
            Number of columns in the SFrame.
        """
        return len(self.column_names())

    def column_names(self):
        """
        Returns the column names.

        Returns
        -------
        out : list[string]
            Column names of the SFrame.
        """
        if self._is_vertex_frame():
            return self.__graph__.__proxy__.get_vertex_fields()
        elif self._is_edge_frame():
            return self.__graph__.__proxy__.get_edge_fields()

    def column_types(self):
        """
        Returns the column types.

        Returns
        -------
        out : list[type]
            Column types of the SFrame.
        """
        if self.__type__ == VERTEX_GFRAME:
            return self.__graph__.__proxy__.get_vertex_field_types()
        elif self.__type__ == EDGE_GFRAME:
            return self.__graph__.__proxy__.get_edge_field_types()

#/**************************************************************************/
#/*                                                                        */
#/*                        Internal Private Methods                        */
#/*                                                                        */
#/**************************************************************************/
    def _get_cache(self):
        # Rebuild the cached SFrame view from the graph when absent or stale.
        if self.__sframe_cache__ is None or self.__is_dirty__:
            if self._is_vertex_frame():
                self.__sframe_cache__ = self.__graph__.get_vertices()
            elif self._is_edge_frame():
                self.__sframe_cache__ = self.__graph__.get_edges()
            else:
                raise TypeError
        self.__is_dirty__ = False
        return self.__sframe_cache__

    def _is_vertex_frame(self):
        # True when this frame views the graph's vertices.
        return self.__type__ == VERTEX_GFRAME

    def _is_edge_frame(self):
        # True when this frame views the graph's edges.
        return self.__type__ == EDGE_GFRAME

    @property
    def __proxy__(self):
        # Expose the (possibly rebuilt) cached SFrame's proxy to SFrame APIs.
        return self._get_cache().__proxy__
|
20,612 | 5abdb6c4fd7c50e09f013f10e550d48db817cc1a | #!/usr/bin/env python
import sys
import SocketServer
import threading
class ThreadingUDPServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
    # Allow quick restarts on the same port without the rebind failing.
    allow_reuse_address = True
class EchoHandler(SocketServer.BaseRequestHandler):
    """UDP handler (Python 2): log the datagram and echo it back upper-cased."""
    def handle(self):
        # For UDP servers, self.request is the (datagram, socket) pair.
        data = self.request[0]
        sock = self.request[1]
        cli_ip, cli_port = self.client_address
        srv_ip, srv_port = sock.getsockname()
        print "%s:%s -> %s:%s got:" % (cli_ip, cli_port, srv_ip, srv_port),
        print data,
        sock.sendto(data.upper(), self.client_address)
        print "%s:%s -> %s:%s sent:" % (srv_ip, srv_port, cli_ip, cli_port),
        print data.upper(),
if __name__ == "__main__":
    # Usage: <script> <port> — serve a threaded UDP echo server until Ctrl-C.
    port = int(sys.argv[1])
    server = ThreadingUDPServer(("0.0.0.0", port), EchoHandler)
    # Serve on a daemon thread so KeyboardInterrupt in the main thread exits.
    thread = threading.Thread(target=server.serve_forever)
    thread.daemon = True
    thread.start()
    while True:
        try:
            # long timeout to allow for signals with minimal polling
            thread.join(2**31)
        except KeyboardInterrupt:
            break
20,613 | cdcba545c1902b4220b9b72e644f7ff2bd91d954 | from django.core.mail import send_mail
from django.urls import reverse, reverse_lazy
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from django.conf import settings
import logging
from .models import TripRequest
logger = logging.getLogger(__name__)
class TripRequestEmail:
    """Compose and send notification emails for a TripRequest: always to the
    requestor, optionally to the transportation manager."""

    def __init__(self, triprequest, requestor_subject, requestor_body, manager_notify=False, manager_subject=None, manager_body=None):
        self.triprequest = triprequest
        self.from_email = self.contact_email
        self.requestor_subject = requestor_subject
        self.requestor_body = requestor_body
        # Manager mail only goes out when both subject and body were given.
        self.manager_notify = manager_notify and manager_subject is not None and manager_body is not None
        self.manager_subject = manager_subject
        self.manager_body = manager_body

    @property
    def contact_email(self):
        """From-address (and office contact address) for all trip-request mail."""
        return settings.TP_DEFAULT_FROM_EMAIL

    def _recipients(self, manager_only=False, extra_emails=None):
        # NOTE(review): manager_only is currently ignored — every message is
        # addressed to the trip request's contact email; confirm intended.
        recipients = extra_emails if extra_emails is not None else []
        recipients.append(self.triprequest.contact_email)
        return recipients

    def send_manager(self, emails=None):
        """Send the manager notification; raises ValidationError when no
        manager subject/body was configured."""
        if self.manager_subject is None or self.manager_body is None:
            raise ValidationError(
                # Fix: the original interpolated self.__className__, an
                # attribute that does not exist (the name-mangled lookup
                # raised AttributeError instead of this ValidationError).
                _(f'Unable to email manager with no subject or body provided to {type(self).__name__} object'),
                params={'triprequest': self.triprequest,
                        'requestor_subject': self.requestor_subject, 'requestor_body': self.requestor_body}
            )
        mail = send_mail(
            self.manager_subject,
            self.manager_body,
            self.from_email,
            self._recipients(manager_only=True)
        )

    def send_requestor(self, emails=None):
        """Send the requestor notification, optionally copying *emails*."""
        mail = send_mail(
            self.requestor_subject,
            self.requestor_body,
            self.from_email,
            self._recipients(extra_emails=emails)
        )

    def send(self, emails=None):
        """Send the manager mail (if configured) and then the requestor mail."""
        if self.manager_notify:
            try:
                self.send_manager(emails)
            except ValidationError:
                # Fix: the failure was silently swallowed; log it so
                # misconfigured manager notifications are visible.
                logger.exception('Manager notification failed for trip request %s', self.triprequest.pk)
        self.send_requestor(emails)
class TripRequestCreatedEmail(TripRequestEmail):
    """Emails sent when a trip request is first submitted (requestor + manager)."""
    def __init__(self, triprequest):
        requestor_subject = f'Vehicle Request {triprequest.submitted}'
        requestor_body = f"{triprequest.requestor.first_name},\r\nYour vehicle request ID no. {triprequest.pk} has been submitted and will be processed in due time. When your request has been processed you will be notified. You can click on the link or copy it into the address bar of your Internet browser to verify this for youself: transportation.trbc.org{reverse('request-detail', kwargs={ 'pk': triprequest.pk })}. If you have questions or problems with your request contact the transportation office at ext. 3155 or email {triprequest.manager_fullname}, {self.contact_email}"
        manager_subject = f'New Vehicle Request {triprequest.pk}'
        manager_body = f'New vehicle request from {triprequest.requestor_fullname}'
        # manager_notify=True: the transportation office is notified as well.
        super().__init__(triprequest, requestor_subject, requestor_body, True, manager_subject, manager_body)
class TripRequestCanceledEmail(TripRequestEmail):
    """Emails sent when the user cancels a trip request (requestor + manager)."""
    def __init__(self, triprequest):
        requestor_subject = f'Vehicle Request {triprequest.pk} has been canceled'
        requestor_body = f"{triprequest.requestor.first_name},\r\nYour vehicle request ID no. {triprequest.pk} has been canceled and no longer processed. If you believe there is an error please contact our office. You can click on the link or copy it into the address bar of your Internet browser to verify this for youself: transportation.trbc.org{reverse('request-detail', kwargs={ 'pk': triprequest.pk })}. If you have questions or problems with your request contact the transportation office at ext. 3155 or email {triprequest.manager_fullname}, {self.contact_email}"
        manager_subject = f'Vehicle Request {triprequest.pk} canceled by user'
        manager_body = f"Vehicle request from {triprequest.requestor_fullname} has been canceled by the user. You can verify this for yourself: transportation.trbc.org{reverse('request-detail', kwargs={ 'pk': triprequest.pk })}"
        # manager_notify=True: the transportation office is notified as well.
        super().__init__(triprequest, requestor_subject, requestor_body, True, manager_subject, manager_body)
class TripRequestApprovedEmail(TripRequestEmail):
    """Requestor-only email sent when a trip request is approved."""
    def __init__(self, triprequest):
        subject = f'Your vehicle request has been APPROVED for request {triprequest.pk}'
        body = f"{triprequest.requestor.first_name},\r\nYour vehicle request ID no. {triprequest.pk} has been approved. You can click on the link or copy it into the address bar of your Internet browser to verify this for youself: transportation.trbc.org{reverse('request-detail', kwargs={ 'pk': triprequest.pk })}. If you have questions or problems with your request contact the transportation office at ext. 3155 or email {triprequest.manager_fullname}, {self.contact_email}"
        super().__init__(triprequest, subject, body)
class TripRequestDeniedEmail(TripRequestEmail):
    """Requestor-only email sent when a trip request is denied."""
    def __init__(self, triprequest):
        subject = f'Your vehicle request has been DENIED for request {triprequest.pk}'
        body = f"{triprequest.requestor.first_name},\r\nYour vehicle request ID no. {triprequest.pk} has been denied. You can click on the link or copy it into the address bar of your Internet browser to verify this for youself: transportation.trbc.org{reverse('request-detail', kwargs={ 'pk': triprequest.pk })}. If you have questions or problems with your request contact the transportation office at ext. 3155 or email {triprequest.manager_fullname}, {self.contact_email}"
        super().__init__(triprequest, subject, body)
class TripRequestStatusEmail(TripRequestEmail):
    """Requestor-only email sent when a trip request's status changes."""
    def __init__(self, triprequest, old_status, new_status):
        # Guard: the caller must have already persisted new_status on the model.
        if triprequest.status != new_status:
            raise ValidationError(
                _(f'{triprequest.status} does not equal {new_status}'),
                params={'triprequest': triprequest,
                        'old_status': old_status, 'new_status': new_status}
            )
        self.old_status = old_status
        self.new_status = new_status
        old_status_text = self._get_status_display(self.old_status)
        new_status_text = self._get_status_display(self.new_status)
        subject = f'Your vehicle request\'s status has updated to \'{new_status_text}\' for request {triprequest.pk}'
        body = f"{triprequest.requestor.first_name},\r\nYour vehicle request ID no. {triprequest.pk} has had it's status changed from '{old_status_text}' to '{new_status_text}'. You can click on the link or copy it into the address bar of your Internet browser to verify this for youself and see if you need to provide more information: transportation.trbc.org{reverse('request-detail', kwargs={ 'pk': triprequest.pk })}. If you have questions or problems with your request contact the transportation office at ext. 3155 or email {triprequest.manager_fullname}, {self.contact_email}"
        super().__init__(triprequest, subject, body)

    def _get_status_display(self, status):
        # NOTE(review): indexes STATUS_CHOICES positionally by the status
        # value — valid only if status values are 0-based indices; confirm.
        return TripRequest.STATUS_CHOICES[status][1]
|
20,614 | 6b0a02e79879743729667fbd74735fbc5432722c | from stud.dataset_creation import PreProcessor, SRLDataset
from stud.training import opts
def create_dataset(dataset_type, soruce, opts):
    """Build an SRLDataset for training/evaluation or for live inference.

    Args:
        dataset_type: "train", "dev", "test", or "submit" (the mode used
            when running inside "implementation.py").
        soruce: where the data comes from -- a file path for
            train/dev/test, or the raw sentence when dataset_type is
            "submit".
        opts: option dictionary (e.g. whether POS tags are wanted).

    Returns:
        The SRLDataset instance; for "submit", also the gold predicates
        collected by the pre-processor.
    """
    preprocessor = PreProcessor(dataset_type, opts)
    file_backed = dataset_type in ("train", "dev", "test")
    if file_backed:
        # Labelled corpora are read from disk.
        preprocessor.read_labelled_data(soruce)
    elif dataset_type == "submit":
        # "implementation.py" hands us a sentence instead of a path.
        preprocessor.read_test_data(soruce)
    # Encode everything into lists of torch tensors.
    tokens, predicates, pos_tags, labels = preprocessor.encode_all_data()
    dataset = SRLDataset(x=tokens, pr=predicates, p=pos_tags, y=labels)
    print("{} dataset size is {}".format(dataset_type, len(dataset)))
    if file_backed:
        return dataset
    elif dataset_type == "submit":
        return dataset, preprocessor.list_l_original_predicates
20,615 | 88d73688d0dd4900549adaac5b883dd4ae65cf18 | #Escreva um programa que receba o preço de dois produtos. Calcule um desconto de 8% no
#primeiro produto, 11% no segundo e apresente o valor final a ser pago.
# Read the prices of two products, apply an 8% discount to the first and
# an 11% discount to the second, then report both discounted prices.
preco1 = float(input('Qual é o preço do primeiro produto? R$'))
preco2 = float(input('Qual é o preço do segundo produto? R$'))
desconto1 = preco1 * 8 / 100
desconto2 = preco2 * 11 / 100
preco1_final = preco1 - desconto1
preco2_final = preco2 - desconto2
print('O preço do primeiro produto com desconto é: ', preco1_final)
print('O preço do segundo produto com desconto é: ', preco2_final)
20,616 | 04eca521efdaf86b36c45afa911cfc03642c865e | # 2520 is the smallest number that can be divided by each of the numbers
# from 1 to 10 without any remainder.
# What is the smallest positive number that is evenly divisible by all
# of the numbers from 1 to 20?
# The answer is lcm(1, ..., 20): fold the pairwise least common multiple
# over the range. The original brute force scanned even candidates one by
# one (~10^8 iterations) and used the strings 'True'/'False' as loop
# flags; this computes the same printed value directly.
from math import gcd
from functools import reduce

numbers = [x for x in range(1, 21)]


def lcm(a, b):
    """Least common multiple of two positive integers."""
    return a * b // gcd(a, b)


candidate = reduce(lcm, numbers)
print(candidate)
|
20,617 | 20ed2a926d18dfd1b4c38128c49e798a4327492b | from scrapy.cmdline import execute
# Launch the "login" spider programmatically and export scraped items to
# msg.json (equivalent to the shell command: scrapy crawl login -o msg.json).
execute(["scrapy","crawl","login","-o","msg.json"])
# Post-processing scratch code kept for reference: it pulled the quoted
# path field out of each line of a previously exported file.
# f = open("G:\\scpy\\login\\login\\json.txt")
# wf = open("result.txt","w")
# for line in f:
#     path = line.split("\"")[-2]
#     wf.writelines(path+"\n")
20,618 | 30576c1edd534b02a0325601c9fed3f87308627b | #!/usr/local/bin/python
import markdown2
import lxml.etree as etree
import urllib
import StringIO
import os
import re
import subprocess
import ipdb
import numpy as np
def rtf(unistr):
    """Make a unicode string safe for embedding in RTF output.

    Doubles literal braces (presumably so later str.format-style
    processing leaves them intact -- TODO confirm), then encodes every
    non-ASCII character as an RTF unicode escape (\\uNNNN?).
    NOTE: Python 2 code -- relies on the `unicode` builtin.
    """
    unistr = unistr.replace("{", "{{")
    unistr = unistr.replace("}", "}}")
    return ''.join([c if ord(c) < 128 else u'\\u' + unicode(ord(c)) + u'?' for c in unistr])
def getPath(ele):
    """Return the '/'-joined <Title> texts of all ancestors of `ele`.

    Ancestors without a Title child are skipped, so the result reads like
    a folder path within the Scrivener binder XML hierarchy.
    """
    ps = ele.xpath("ancestor::*")
    joinList = []
    for each in ps:
        if len(each.xpath("Title"))>0:
            joinList.append(each.xpath("Title")[0].text)
    return "/".join(joinList)
rtfHeader=\
u"""{\\rtf1\\ansi\\ansicpg1252\cocoartf1404\cocoasubrtf460
{\\fonttbl\\f0\\fmodern\\fcharset0 Courier;}
{\\colortbl;
\\red255\\green255\\blue255;
\\red230\\green255\\blue79;
\\red255\\green204\\blue102;
\\red255\\green128\\blue190;
\\red128\\green255\\blue105;
\\red143\\green255\\blue255;
\\red0\\green0\\blue0;}
\pard\\tx720\\tx1440\\tx2160\\tx2880\\tx3600\\tx4320\\fi360\sl288\slmult1\pardirnatural
\\f0\\fs28 \cf0"""
rtfTail="}"
def rtf(unistr):
unistr = unistr.replace("{", "{{")
unistr = unistr.replace("}", "}}")
return ''.join([c if ord(c) < 128 else u'\\u' + unicode(ord(c)) + u'?' for c in unistr])
projectPath = "./main.scriv"
projectName = os.path.join(projectPath, "main.scrivx")
docPath = os.path.join(projectPath,"Files","Docs")
paperRoot = "References:Papers"
s=etree.tostring
e = etree.parse(projectName)
root = e.getroot()
paperRootList = paperRoot.split("/")
fileRoot = root.xpath("//Binder")[0]
thisRoot = fileRoot
for eachSub in paperRootList:
thisSub = thisRoot.xpath("./BinderItem[Title='{}']".format(eachSub))
if len(thisSub)==0:
raise Exception("error to go into paperRoot: {}".format(paperRoot))
elif len(thisSub)==1:
thisSub = thisSub[0]
else:
print "Some folder have the same name, don't know which one to use.."
raise Exception()
thisChildren = thisSub.xpath("Children")
if len(thisChildren)==0:
raise Exception("error to go into paperRoot Children: {}".format(paperRoot))
else:
thisChildren = thisChildren[0]
thisRoot = thisChildren
research = thisSub
researchChildren = thisChildren
dtypeAlltext = np.dtype([
("bibcode", "U30"),
("refkey", "U30"),
("ID", "U10"),
("ind", np.int),
("obj", np.object),
("path", "U100")])
alltext = []
alltext = np.array(alltext, dtype=dtypeAlltext)
# count for text in the research folder
allTextTemp = research.xpath(".//BinderItem[@Type='Text']")
for j, eachc in enumerate(allTextTemp):
t = eachc.xpath("MetaData/CustomMetaData/MetaDataItem[FieldID='bibcode']")
if len(t)>0:
thisBibcode = t[0].xpath("Value")[0].text
else:
thisBibcode = "?"
thisID = eachc.get("ID")
refkey = eachc.xpath("Title")[0].text
thisPath = getPath(eachc)
alltext = np.append(alltext,
np.array((thisBibcode, refkey, thisID, 0, eachc, thisPath), dtype=dtypeAlltext))
print "find text in research: {} at {}".format(thisBibcode, thisPath)
allUniqueID, inds = np.unique(alltext["ID"],return_index=True)
allUnique = alltext[inds]
allCommentFound = []
rmConfigReg = re.compile(ur"([^\\])(\{.+?[^\\]\})")
rmOther0 = re.compile(ur"\\partightenfactor\d\?|\\pardirnatural|\\pard|\\slmult\d|\\partightenfactor\d|\\ls\d|\\ilvl\d|\\cf\d|\\tx\d+|\\li\d+\\fi\-\d+|\\sl\d+")
commentColorR=r"\\red230\\green255\\blue79"
commentColor= "\\red230\\green255\\blue79"
commentColorR=r"\\red143\\green255\\blue255"
commentColor= "\\red143\\green255\\blue255"
commentReg = re.compile(commentColorR)
colortblReg = re.compile(r"\{\\colortbl;(.+?)\}")
result = rtfHeader
for each in allUnique:
thisID = each["ID"]
thisTitle = each["refkey"]
thisFile = os.path.join(docPath,thisID+".rtf")
#print "finding in:", thisTitle, thisFile
if not os.path.exists(thisFile):
print "file not exists: {} for {}".format(thisFile, thisTitle)
continue
with open(thisFile, "r") as f:
thisText = f.read()
textForColor = "".join(thisText.split("\n"))
colorInds = colortblReg.findall(textForColor)
if len(colorInds):
colorInds = colorInds[0].split(";")
else:
raise Exception("no color table for {}".format(textForColor))
colorInds = [eachColor for eachColor in colorInds if len(eachColor)>15]
colortoDo = ";".join(colorInds)
find = commentReg.findall(colortoDo)
if len(find):
colorInd = colorInds.index(commentColor)+1
print "finding color in:", thisTitle, thisFile, colorInd
else:
continue
thisText = "?".join(thisText.split("\n"))
#colorInd = "6"
colorStr = ur"\\cb{}(.+?)\\cb".format(colorInd)
colorReg = re.compile(colorStr)
thisRegResult = colorReg.findall(thisText)
if len(thisRegResult):
print "\nfind grammar in {} with ID:{}".format(thisTitle, thisID)
result += rtf("\\cb6 {}:\\cb1\\\n".format(thisTitle))
thisRegResult = [rmConfigReg.sub(r"\1", eachReg) for eachReg in thisRegResult]
thisRegResult = [rmOther0.sub("", eachReg).split("\\?")[0].strip() for eachReg in thisRegResult]
thisRegResult = [eachReg for eachReg in thisRegResult if eachReg]
result += (rtf("\\\n".join(thisRegResult)) + "\\\n")
result += rtfTail
with open("findgrammar.rtf", "w") as f:
f.write(result.encode("utf8"))
subprocess.call("open findgrammar.rtf", shell=True)
|
20,619 | 68d71971e1677ea2adad3394ec2b24232b96bf37 | #encoding=utf8
from django.conf import settings
from django.utils import timezone
from .models import Task,Slave
from .master import master
import uuid
import os
def handle_upload_file(request,content,style):
    """Persist an uploaded content/style image pair, record a Task row,
    and dispatch the style-transfer job to a worker node.

    Args:
        request: Django request (only request.user is read here).
        content: uploaded content image (Django UploadedFile).
        style: uploaded style image (Django UploadedFile).
    NOTE: Python 2 code (print statement below).
    """
    # print settings.CONTENT_DIR
    # print content,style
    # Store each upload under a random uuid4 name, keeping its extension.
    content_storage = os.path.join(settings.CONTENT_DIR,str(uuid.uuid4())+os.path.splitext(content.name)[1])
    style_storage = os.path.join(settings.STYLE_DIR, str(uuid.uuid4())+os.path.splitext(style.name)[1])
    with open(content_storage,'wb') as destination:
        for chunk in content.chunks():
            destination.write(chunk)
        destination.close()  # NOTE(review): redundant -- `with` already closes the file
    with open(style_storage,'wb') as destination:
        for chunk in style.chunks():
            destination.write(chunk)
        destination.close()  # NOTE(review): redundant -- `with` already closes the file
    task = Task(content=content_storage,style=style_storage,user=request.user,sub_time=timezone.now())
    task.save()
    m = master()
    # 'ratio' presumably weights style vs. content loss -- TODO confirm
    # against the worker implementation.
    args = {'task_id':task.id,'content': content_storage, 'style': style_storage, 'model': 'vgg16', 'ratio': 1e4}
    # m.dispatch('10.0.0.64', 8667, args)
    print "New task:{0}".format(args)
    # Hand the job to the (currently local) worker node.
    m.dispatch('127.0.0.1', 8666, args)
def handle_output_file(args,output):
    """Save a worker's result image and attach it to the originating Task.

    Args:
        args: dict sent back by the worker; must contain 'task_id'.
        output: produced image as a chunked file-like object.
    NOTE: Python 2 code (print statement below).
    """
    # print settings.OUTPUT_DIR
    # print args,output
    # Random uuid4 file name, original extension preserved.
    output_storage = os.path.join(settings.OUTPUT_DIR,str(uuid.uuid4())+os.path.splitext(output.name)[1])
    with open(output_storage,'wb') as destination:
        for chunk in output.chunks():
            destination.write(chunk)
        destination.close()  # NOTE(review): redundant -- `with` already closes the file
    task = Task.objects.get(pk=args['task_id'])
    task.output = output_storage
    task.save()
    print "task:{0} get the output.Save to{1}".format(task.id,task.output)
class Enemy:
    """Simple enemy entity: a fixed spawn position plus sprite and hitbox
    slots that are attached later through the setters."""

    def __init__(self):
        self.actualPos = [651, 50]  # [x, y] spawn position
        self.sprite = None          # drawable image, set via setSprite
        self.width = 38             # sprite width
        self.length = 40            # sprite height
        self.hitbox = None          # collision rect, set via setHitbox

    def setHitbox(self, aux):
        """Attach the collision rectangle."""
        self.hitbox = aux

    def getHitbox(self):
        """Return the collision rectangle (None until set)."""
        return self.hitbox

    def setSprite(self, image):
        """Attach the drawable image."""
        self.sprite = image

    def getSprite(self):
        """Return the drawable image (None until set)."""
        return self.sprite

    def getPostion(self):
        """Return the [x, y] position.

        Misspelled name kept for existing callers; prefer getPosition().
        """
        return self.actualPos

    def getPosition(self):
        """Correctly-spelled alias for getPostion()."""
        return self.actualPos
|
20,621 | f11cc8aacc3fae136813ee3fe28bbf385e835d48 | # -*- coding: utf-8 -*-
# 1. После запуска предлагает пользователю ввести целые неотрицательные числа,
# разделенные любым не цифровым литералом (пробел, запятая, %, буква и т.д.).
# 2. Получив вводные данные, выделяет полученные числа, суммирует их,
# и печатает полученную сумму.
def sum_of_numbers(text):
    """Extract every (optionally negative) integer embedded in `text` and
    return their sum.

    Any non-digit character acts as a separator; a '-' immediately
    followed by a digit is treated as the sign of that number.
    """
    pieces = []
    for i, ch in enumerate(text):
        if ch.isnumeric():
            pieces.append(ch)
        elif ch == "-" and i + 1 < len(text) and text[i + 1].isnumeric():
            # Keep the minus attached to the following number, but make
            # sure it starts a fresh token. (The original re-tested
            # isnumeric() here in a redundant inner `if`.)
            pieces.append(" ")
            pieces.append(ch)
        else:
            pieces.append(" ")
    return sum(int(token) for token in "".join(pieces).split(" ") if token)


def function(my_input):
    """Print the sum of all integers embedded in `my_input`.

    Kept print-only (not returning) to preserve the original interface.
    """
    print(sum_of_numbers(my_input))
# Entry point: prompt for a line of text and print the sum of the numbers
# it contains. (Prompt text is Russian for "Enter your text:")
print("Введите Ваш текст: ")
function(input())
|
20,622 | 66606dc199e8ed45be83a5f60d809d50b611a517 | from google.appengine.ext import vendor
# add lib to vendor
vendor.add('lib')
|
20,623 | a2339c869b754f8184526081b03797754af2e218 | # Debug script for serial communication between uC and RasPi where current differnce is pre-calculated.
import serial
arduino = serial.Serial('/dev/ttyACM0',9600)
def curr():
    """Read one line from the Arduino serial port and parse the reported
    current difference out of it as a float.

    Lines look like "Difference in Current:<value>"; the label is
    stripped before conversion.
    """
    line = arduino.readline()
    data = []
    data.append(line.replace('\r\n',''))
    #print data
    currDiff = float(data[0].replace('Difference in Current:',''))
    # print currDiff
    return currDiff
def tot():
    """Accumulate five consecutive current readings and return their sum.

    NOTE: Python 2 code (xrange); the parenthesised prints emit tuples.
    """
    total = 0
    for x in xrange(0,5):
        current = curr()
        total = total+current
        print ("Current",current)
        print ("Total",total)
    return total
total = 0  # NOTE(review): module-level leftover; tot() uses its own local `total`

def listtotal():
    """Take one summed reading and return it repeated five times, each
    copy wrapped in its own single-element list."""
    arr = []
    totalval = tot()
    for i in xrange(0,5):
        arr.append([totalval])
    print arr
    return arr

# Poll the sensor forever.
while 1:
    listtotal()
|
20,624 | 5422fb029f0fbd1f039c29ad21150887e94a18ed | from base import *
########## TEST SETTINGS
TEST_RUNNER = 'discover_runner.DiscoverRunner'
TEST_DISCOVER_TOP_LEVEL = SITE_ROOT
TEST_DISCOVER_ROOT = SITE_ROOT
TEST_DISCOVER_PATTERN = "test_*.py"
########## TEST DATABASE (MySQL -- not actually in-memory despite the template heading)
DATABASES = {
"default": {
"ENGINE": "django.db.backends.mysql",
"NAME": "tq",
"USER": "tq",
"PASSWORD": "tq",
"HOST": "",
"PORT": "",
},
}
|
20,625 | 4c9f60fc7d04e13610d02e34cae1fd90ca209a5d | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Bill',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
('bill_date', models.DateField()),
('add_datetime', models.DateTimeField(auto_now_add=True)),
('amount', models.DecimalField(max_digits=8, decimal_places=2)),
],
),
migrations.CreateModel(
name='BillRow',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
('amount', models.DecimalField(max_digits=8, decimal_places=2)),
('label', models.CharField(max_length=127)),
('bill', models.ForeignKey(to='expenses_app.Bill')),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
('name', models.CharField(max_length=127)),
],
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
('name', models.CharField(max_length=127)),
('surname', models.TextField(max_length=127)),
],
),
migrations.CreateModel(
name='Workspace',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
],
),
migrations.AddField(
model_name='person',
name='workspace',
field=models.ForeignKey(to='expenses_app.Workspace'),
),
migrations.AddField(
model_name='category',
name='workspace',
field=models.ForeignKey(to='expenses_app.Workspace'),
),
migrations.AddField(
model_name='billrow',
name='category',
field=models.ForeignKey(to='expenses_app.Category'),
),
migrations.AddField(
model_name='bill',
name='person',
field=models.ForeignKey(to='expenses_app.Person'),
),
migrations.AddField(
model_name='bill',
name='workspace',
field=models.ForeignKey(to='expenses_app.Workspace'),
),
]
|
20,626 | 7fe1f1872a743c988c4f8478076391de9c9b6f0c | __author__="Andrew Pennebaker (andrew.pennebaker@gmail.com)"
__date__="1 Jan 2006 - 6 Apr 2006"
__copyright__="Copyright 2006 Andrew Pennebaker"
class Card:
    """Base class for cards; concrete card types are expected to override
    __str__ with a printable representation."""

    def __init__(self):
        pass

    def compare(self, other):
        # Two cards are considered equal exactly when they are instances
        # of the same class.
        return self.__class__==other.__class__

    def __str__(self):
        # Placeholder: returns None, so str(card) on this base class would
        # raise TypeError -- subclasses must override.
        pass
|
20,627 | 8809598de396140be8203fdd14a632a9f337609a | import json
def escape_c_string(s):
    """Quote and escape `s` for use as a C string literal.

    JSON string syntax is close enough to C's: quotes are added and
    special characters escaped. NOTE(review): non-ASCII comes out as
    \\uXXXX universal character names -- confirm the target compiler
    (C99+/C++) accepts those.
    """
    return json.dumps(s)
# Shared counter. A one-element list is used instead of a plain int so
# inc_i can mutate it in place without a `global` declaration.
global_i = [0]

def inc_i():
    """Increment the shared counter and return its new value."""
    global_i[0] += 1
    return global_i[0]
# Indentation units used by the generator: one level, and pre-built
# two- and three-level variants.
INDENT = ' '
INDENT_TWO = INDENT * 2
INDENT_THREE = INDENT * 3
|
20,628 | 5cebc7e0b3a91f1c1fa6f288f2916ded80674e32 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'untitled.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from accountpage import Ui_MainWindow
from clientpage import Ui_mainWindow
import classes as sc
import uuid
import re
import sys
class Controller(object):
listBankCustomer = []
listFilter = []
test = 'yes'
def __init__(self):
self.clientPage = QtWidgets.QMainWindow()
self.ui = Ui_mainWindow()
self.ui.setupUi(self.clientPage)
self.ui.btn_add_client.clicked.connect(self.openAccountPage)
self.ui.btn_delete.clicked.connect(self.btnDeleteClicked)
# self.ui.btn_delete.clicked.connect(self.ui.listView.clear)
# self.ui.btn_delete.clicked.connect(self.reloadData)
self.ui.btn_modify.clicked.connect(self.btnModifyClicked)
self.ui.btn_done.clicked.connect(self.btnFilterClicked)
self.reloadData()
self.clientPage.show()
def openAccountPage(self):
self.classID = QtWidgets.QMainWindow()
self.ui2 = Ui_MainWindow()
self.ui2.setupUi(self.classID)
self.ui2.btnDone.clicked.connect(self.btnAddClicked)
self.classID.show()
def addAccount(self):
self.classID.close()
def btnAddClicked(self):
try:
name = self.ui2.namefield.text()
birth = self.ui2.dateEdit.date()
birth = str(birth.toPyDate())
id = self.ui2.account_id_field.text()
account = self.ui2.accnumfield1.text()
account2 = self.ui2.accnumfield2.text()
balance = self.ui2.balancefield.text()
if len(name) == 0:
raise sc.MissingDataException('NAME')
if len(account) == 0 or len(account2) == 0:
raise sc.MissingDataException('ACCOUNT NUMBER')
if len(balance) == 0:
raise sc.MissingDataException('BALANCE')
if len(id) == 8 and re.compile('[0-9a-zA-Z]{8}').match(id):
True
else:
if len(id) == 0:
id = uuid.uuid4().hex[:8]
else:
raise sc.FormatException('ID NUMBER')
if len(account) != 8 or not re.compile('\d{8}').match(account):
raise sc.FormatException('ACCOUNT NUMBER')
if len(account2) != 8 or not re.compile('\d{8}').match(account2):
raise sc.FormatException('ACCOUNT NUMBER')
except sc.MissingDataException as mde:
msg = QtWidgets.QMessageBox()
msg.setWindowTitle('Warning!')
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText(mde.__str__())
msg.exec()
except sc.FormatException as fe:
msg = QtWidgets.QMessageBox()
msg.setWindowTitle('Warning!')
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText(fe.__str__())
msg.exec()
else:
if self.test == 'yes':
Customer = sc.BankCustomers(name.title(), birth, id, account, account2, balance)
if Customer not in self.listBankCustomer:
self.listBankCustomer.append(Customer)
self.listBankCustomer.sort()
self.saveToFile()
msg = QtWidgets.QMessageBox()
else:
msg.setWindowTitle('Warning!')
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText("This customer already exist")
msg.exec()
else:
for Customer in self.listBankCustomer:
if Customer.getID() == self.ui2.account_id_field.text():
self.listBankCustomer.remove(Customer)
customer = sc.BankCustomers(name.title(), birth, id, account, account2, balance)
self.listBankCustomer.append(customer)
self.listBankCustomer.sort()
self.saveToFile()
self.classID.close()
self.ui.listView.clear()
self.reloadData()
def btnDeleteClicked(self):
if not self.ui.listView.currentItem():
msg = QtWidgets.QMessageBox()
msg.setWindowTitle("Warning!")
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText("You should select any person from the list!")
msg.exec()
else:
item = self.ui.listView.currentItem()
tmp = item.text()
tmp = tmp.split('|')
id = tmp[2].split(": ")
id = id[1]
for Customer in self.listBankCustomer:
if id.strip() == Customer.getID():
self.listBankCustomer.remove(Customer)
self.saveToFile()
for Customer in self.listBankCustomer:
self.ui.listView.addItem(Customer.__str__())
self.ui.listView.clear()
self.reloadData()
def btnModifyClicked(self):
if not self.ui.listView.currentItem():
msg = QtWidgets.QMessageBox()
msg.setWindowTitle("Warning!")
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText("You should select any person from the list!")
msg.exec()
else:
self.openAccountPage()
item = self.ui.listView.currentItem()
tmp = item.text()
tmp = tmp.split('|')
id = tmp[2].split(": ")
id = id[1]
for Customer in self.listBankCustomer:
if id.strip() == Customer.getID():
self.ui2.namefield.setText(Customer.getName())
new = Customer.getBirthday().split('-')
# print(new)
self.ui2.dateEdit.setDate(QtCore.QDate(int(new[0]), int(new[1]), int(new[2])))
self.ui2.account_id_field.setText(Customer.getID())
self.ui2.accnumfield1.setText(Customer.getAccount())
self.ui2.accnumfield2.setText(Customer.getAccount2())
self.ui2.balancefield.setText(Customer.getBalance())
self.test = 'no'
def btnFilterClicked(self):
try:
filter = self.ui.filter_field.text()
if len(str(filter)) == 0:
raise sc.MissingDataException('Filter By')
else:
filter = int(filter)
except sc.MissingDataException as mde:
msg = QtWidgets.QMessageBox()
msg.setWindowTitle('Warning!')
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText(mde.__str__())
msg.exec()
else:
self.ui.listView.clear()
for Customer in self.listBankCustomer:
if filter <= int(Customer.getBalance()):
self.ui.listView.addItem(Customer.__str__())
self.ui.filter_field.clear()
def saveToFile(self):
outFile = open("database.txt", "w")
for Customer in self.listBankCustomer:
print('{};{};{};{};{};{}\n'.format(Customer.getName(), Customer.getBirthday(), Customer.getID(), Customer.getAccount(), Customer.getAccount2(), Customer.getBalance()),
file=outFile)
outFile.close()
def reloadData(self):
duplicate = []
inFile = open("database.txt", "r")
for line in inFile:
if line.count(";") == 5:
tmp = line.split(';')
example = sc.BankCustomers(tmp[0].title(), tmp[1], tmp[2], tmp[3], tmp[4], tmp[5][:-1])
if example not in self.listBankCustomer:
self.listBankCustomer.append(example)
inFile.close()
self.listBankCustomer.sort()
for Customer in self.listBankCustomer:
if Customer.__str__() not in duplicate:
duplicate.append(Customer.__str__())
self.ui.listView.addItem(Customer.__str__())
app = QtWidgets.QApplication(sys.argv)
cntrl = Controller()
sys.exit(app.exec_())
|
20,629 | 6b96c02ce52147fe665483247365c3cae88ad969 | # -*- coding: utf-8 -*-
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
setup(
name='injector',
version='0.1.0',
description='Library for injecting tracers (or other things) into code easily.',
url='https://github.com/pirogoeth/injector',
author='Sean Johnson',
author_email='sean.johnson@maio.me',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Utilities',
],
packages=find_packages('src'),
package_dir={
'': 'src',
},
scripts=[],
install_requires=[],
include_package_data=True,
test_suite='nose.collector',
tests_require=[
'nose',
'coverage',
],
zip_safe=True
)
|
20,630 | d942010f5ad6745aa52cf51239bdd2b3ce91e2a0 | """
写一个函数,输入 n ,求斐波那契(Fibonacci)数列的第 n 项。斐波那契数列的定义如下:
F(0) = 0, F(1) = 1
F(N) = F(N - 1) + F(N - 2), 其中 N > 1.
斐波那契数列由 0 和 1 开始,之后的斐波那契数就是由之前的两数相加而得出。
答案需要取模 1e9+7(1000000007),如计算初始结果为:1000000008,请返回 1。
示例 1:
输入:n = 2
输出:1
示例 2:
输入:n = 5
输出:5
提示:
0 <= n <= 100
"""
"""
递归 会超时
"""
def fib(n: int) -> int:
    """Return the n-th Fibonacci number modulo 1e9+7.

    Iterative pairwise stepping: O(n) time, O(1) space. The original
    naive double recursion recomputed subproblems exponentially (the
    file's own comment notes it times out). Results are identical: the
    modulus is applied at every step, exactly as the recursion did.
    """
    a, b = 0, 1  # F(0), F(1)
    for _ in range(n):
        a, b = b, (a + b) % 1000000007
    return a
"""
动态规划
"""
def fib1(n: int) -> int:
    """Return the n-th Fibonacci number modulo 1e9+7, bottom-up DP.

    Only the last two values are kept (O(1) space instead of the full dp
    dict), and the modulus is applied at every step so intermediates stay
    bounded. Results match reducing only at the end, because
    (a + b) mod p == ((a mod p) + (b mod p)) mod p.
    """
    if n == 0:
        return 0
    prev, curr = 0, 1  # F(0), F(1)
    for _ in range(2, n + 1):
        prev, curr = curr, (prev + curr) % 1000000007
    return curr
if __name__ == '__main__':
    # Quick manual check: both implementations should print 5 for n = 5.
    n = 5
    print(fib(n))
    print(fib1(n))
20,631 | 10cc3d75a302f30147e8e7222e13ce833a0949be | # -*- coding: utf-8 -*-
# @Time : 2020/11/27 21:12
# @Author : Wxl
# @Email : 154831156@qq.com
# @File : 3.Python中常用的内容模块.py
# @Software: PyCharm
'''1.sys:与python解释器及其环境操作相关的标准库'''
import sys
# 获取所占的字节数,一个字节占8位
print(sys.getsizeof(45)) # 28
print(sys.getsizeof(True)) # 28
print(sys.getsizeof(False)) # 24
'''2.time:提供与时间相关的各种函数的标准库'''
import time
print(time.time()) # 1606483024.143392
# 转化成本地的时间
print(time.localtime(time.time())) # time.struct_time(tm_year=2020, tm_mon=11, tm_mday=27, tm_hour=21, tm_min=17, tm_sec=33, tm_wday=4, tm_yday=332, tm_isdst=0)
'''3.urllib:用于读取来自网上(服务器)的数据标准库'''
import urllib.request
print(urllib.request.urlopen('http://www.baidu.com/more/').read())
'''4.schedule模块的使用'''
import schedule
def show():
print('哈哈...')
schedule.every(3).seconds.do(show) # 每三秒执行一次show方法
while True:
schedule.run_pending()
time.sleep(1) # 睡眠1秒 |
20,632 | c96e019ffa8e1caef8a8f0d0dab660d755296123 | import random
def guess(x):
# generates a random number which will be stored in variable random_number
random_number = random.randint(1, x)
guess = 0 # initializing the guess with zero so that the while loop can function!
while guess != random_number:
guess = int(input(f"guess a number between 1 & {x}:\n"))
if guess > random_number:
print("sorry your guess is too high,try again")
elif guess < random_number:
print("your guess is too low ")
print(f"yay! you have a correct guess {guess}")
guess(10)
|
20,633 | b54ad4074f92579910f60e56b76e1f0181917056 | import matplotlib
matplotlib.use('Agg')
import parse_midas_data
import pylab
import sys
import numpy
from numpy.random import normal
import diversity_utils
import gene_diversity_utils
import stats_utils
import os
# plotting tools
import matplotlib.colors as colors
from matplotlib.colors import LogNorm
import matplotlib.cm as cmx
from math import log10,ceil
import matplotlib as mpl
mpl.rcParams['font.size'] = 8
mpl.rcParams['lines.linewidth'] = 1.0
mpl.rcParams['legend.frameon'] = False
mpl.rcParams['legend.fontsize'] = 'small'
################################################################################
#
# Standard header to read in argument information
#
################################################################################
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("species_name", help="name of species to process")
parser.add_argument("--debug", help="Loads only a subset of SNPs for speed", action="store_true")
parser.add_argument("--chunk-size", type=int, help="max number of records to load", default=1000000000)
args = parser.parse_args()
species_name = args.species_name
debug = args.debug
chunk_size = args.chunk_size
#############################################################################
# Minimum median coverage of sample to look at
min_coverage = 20
sys.stderr.write("Loading core genes...\n")
core_genes = parse_midas_data.load_core_genes(species_name)
sys.stderr.write("Done! %d core genes\n" % len(core_genes))
#################
# Load metadata #
#################
# Load subject and sample metadata
sys.stderr.write("Loading HMP metadata...\n")
subject_sample_map = parse_midas_data.parse_subject_sample_map()
sys.stderr.write("Done!\n")
# Load time metadata
subject_sample_time_map_all_samples = parse_midas_data.parse_subject_sample_time_map()
######################
# Load coverage data #
######################
# Load genomic coverage distributions
sample_coverage_histograms, samples = parse_midas_data.parse_coverage_distribution(species_name)
median_coverages = numpy.array([stats_utils.calculate_median_from_histogram(sample_coverage_histogram) for sample_coverage_histogram in sample_coverage_histograms])
sample_coverage_map = {samples[i]: median_coverages[i] for i in xrange(0,len(samples))}
# prune time meta data so that the highest coverage sample is retained for those subjects with >1 sample per time pt
subject_sample_time_map = parse_midas_data.prune_subject_sample_time_map(subject_sample_time_map_all_samples,sample_coverage_map)
###############################################################
# Compute Pi within patients to figure out which are haploid #
###############################################################
# Load pi information for species_name
sys.stderr.write("Loading within-sample diversity for %s...\n" % species_name)
samples, total_pis, total_pi_opportunities = parse_midas_data.parse_within_sample_pi(species_name, allowed_variant_types=set(['4D']), allowed_genes=core_genes, debug=debug)
sys.stderr.write("Done!\n")
pis = total_pis/total_pi_opportunities
######################
# compute median cov #
######################
median_coverages = numpy.array([sample_coverage_map[samples[i]] for i in xrange(0,len(samples))])
##########################################################
# load SNP info
##########################################################
# note that this loads info for all samples. Later the desired samples are selected out.
sys.stderr.write("Loading %s...\n" % species_name)
samples, allele_counts_map, passed_sites_map, final_line_number = parse_midas_data.parse_snps(species_name, debug)
sys.stderr.write("Done!\n")
###############################################################
# Indexes for SNP samples that have high coverage and low pis #
###############################################################
# Only plot samples above a certain depth threshold that are "haploids"
high_cov_samples_low_pis = samples[(median_coverages>=min_coverage)*(pis<=1e-03)]
desired_samples_low_pis=(median_coverages>=min_coverage)*(pis<=1e-03)
# get the time info for the high_cov_samples_low_pis -- this is for all visno combos and all samples irrespective of coverage and pis
time_pair_idxs, visno1, visno2, day = parse_midas_data.calculate_all_time_pairs(subject_sample_time_map, high_cov_samples_low_pis)
###################################################################
# create subplots to plot the 2D and 1D SFSs side by side
###################################################################
fig_annotation='low_pis'
# compute the 1D sfs
sys.stderr.write("Calculate within person SFS for low pis...\n")
sample_freqs, passed_sites = diversity_utils.calculate_sample_freqs(allele_counts_map, passed_sites_map, variant_type='4D',fold=False)
sfss= []
bins = numpy.linspace(0.04,0.95,21)
bin_locations = bins[1:]-(bins[1]-bins[0])/2
for j in xrange(0,len(samples)):
counts,dummy = numpy.histogram(sample_freqs[j],bins)
if counts.sum()<0.5:
sfs = numpy.zeros_like(bin_locations)
else:
sfs = counts*1.0/(passed_sites[j])
sfss.append(sfs)
# select the desired samples:
sfss=numpy.asarray(sfss)
sfss=sfss[desired_samples_low_pis]
sys.stderr.write("Done!\n")
# compute 2D SFS, plot, along iwth 2D SFS
# these sfs are polarized based on teh consensus allele.
# note that desired samples includes the same as what is in high_cov_samples
sample_freqs_2D, passed_sites_2D, joint_passed_sites_2D = diversity_utils.calculate_sample_freqs_2D(allele_counts_map, passed_sites_map, desired_samples_low_pis, variant_type='4D', fold=False)
xbins = numpy.linspace(0,1,21)
ybins = numpy.linspace(0,1,21)
pylab.figure(figsize=(6, len(time_pair_idxs[0])*3))
color=['b','r']
plot_no=1
for j in range(0, len(time_pair_idxs[0])):
#plot the 2D SFS
pylab.subplot(len(time_pair_idxs[0]), 2, plot_no)
idx1=time_pair_idxs[0][j]
idx2=time_pair_idxs[1][j]
sample_name1=high_cov_samples_low_pis[idx1]
sample_name2=high_cov_samples_low_pis[idx2]
freqs_idx1=numpy.array(sample_freqs_2D[idx1])
freqs_idx2=numpy.array(sample_freqs_2D[idx2])
joint_passed_idx1=numpy.array(joint_passed_sites_2D[idx1])
joint_passed_idx2=numpy.array(joint_passed_sites_2D[idx2])
joint_passed_idx1_idx2=(joint_passed_idx1)*(joint_passed_idx2)
joint_passed_idx1_idx2=numpy.where(joint_passed_idx1_idx2==True)
counts, xbins, ybins = numpy.histogram2d(freqs_idx1[joint_passed_idx1_idx2], freqs_idx2[joint_passed_idx1_idx2], bins=(xbins, ybins))
sfs_2D=counts*1.0/(passed_sites_2D[idx1,idx2])
pylab.xlabel('time point 1')
pylab.ylabel('time point 2')
pylab.xlim([0,1])
pylab.ylim([0,1])
pylab.title(sample_name1+', '+sample_name2)
im=pylab.imshow(sfs_2D.T,interpolation='nearest', origin='low',extent=[xbins[0], xbins[-1], ybins[0], ybins[-1]], norm=LogNorm(vmin=1e-5, vmax=1e-2), cmap='jet')
pylab.colorbar(im)
plot_no+=1
# plot the 1D SFS
pylab.subplot(len(time_pair_idxs[0]), 2, plot_no)
pylab.xlabel('Minor allele frequency')
pylab.ylabel('SFS')
pylab.xlim([0,1])
pylab.ylim([3e-06,3e-02])
colNo=0
for idx in [idx1,idx2]:
if sfss[idx].sum()!=0:
normalized_sfs = sfss[idx]
pylab.semilogy(bin_locations+normal(0,1)*(bin_locations[1]-bin_locations[0])*0.1, normalized_sfs,'.-',alpha=0.5, color=color[colNo])
colNo +=1
pylab.legend(['first time pt', 'second time pt'],'upper right',prop={'size':6})
plot_no+=1
# Flush the figure assembled by the preceding ("low pis") stanza of this
# script to disk before starting the next sample selection.
pylab.savefig('%s/%s_within_person_2D_sfs_time_pair_polarized_%s.png' % (parse_midas_data.analysis_directory,species_name, fig_annotation),bbox_inches='tight')
#######################
# repeat for high pis #
#######################
# Only plot samples above a certain depth threshold that are "haploids"
# This selection keeps well-covered samples whose within-sample diversity
# (pi) exceeds 1e-03 -- the "high pi" subset; ``*`` acts as element-wise
# AND on the two boolean arrays.
high_cov_samples_high_pis = samples[(median_coverages>=min_coverage)*(pis>1e-03)]
desired_samples_high_pis=(median_coverages>=min_coverage)*(pis>1e-03)
# get the time info for the high_cov_samples_low_pis -- this is for all visno combos and all samples irrespective of coverage and pis
# (stale comment above: the mask used here is the high-pi selection)
time_pair_idxs, visno1, visno2, day = parse_midas_data.calculate_all_time_pairs(subject_sample_time_map, high_cov_samples_high_pis)
###################################################################
# create subplots to plot the 2D and 1D SFSs side by side
###################################################################
# Tag appended to the output figure filename for this selection.
fig_annotation='high_pis'
# compute the 1D sfs
sys.stderr.write("Calculate within person SFS for high pis...\n")
sample_freqs, passed_sites = diversity_utils.calculate_sample_freqs(allele_counts_map, passed_sites_map, variant_type='4D',fold=False)
sfss= []
# 21 edges -> 20 frequency bins; bin_locations are the bin midpoints.
bins = numpy.linspace(0.04,0.95,21)
bin_locations = bins[1:]-(bins[1]-bins[0])/2
# Histogram each sample's allele frequencies, normalized by the number of
# sites that passed filtering in that sample; samples with no counts get a
# zero SFS so downstream indexing stays aligned with `samples`.
for j in xrange(0,len(samples)):
    counts,dummy = numpy.histogram(sample_freqs[j],bins)
    if counts.sum()<0.5:
        sfs = numpy.zeros_like(bin_locations)
    else:
        sfs = counts*1.0/(passed_sites[j])
    sfss.append(sfs)
# select the desired samples:
sfss=numpy.asarray(sfss)
sfss=sfss[desired_samples_high_pis]
sys.stderr.write("Done!\n")
# compute 2D SFS, plot, along iwth 2D SFS
# these sfs are polarized based on teh consensus allele.
# note that desired samples includes the same as what is in high_cov_samples
sample_freqs_2D, passed_sites_2D, joint_passed_sites_2D = diversity_utils.calculate_sample_freqs_2D(allele_counts_map, passed_sites_map, desired_samples_high_pis, variant_type='4D', fold=False)
xbins = numpy.linspace(0,1,21)
ybins = numpy.linspace(0,1,21)
# One row of (2D SFS, 1D SFS) panels per longitudinal sample pair.
pylab.figure(figsize=(6, len(time_pair_idxs[0])*3))
color=['b','r']
plot_no=1
for j in range(0, len(time_pair_idxs[0])):
    #plot the 2D SFS
    pylab.subplot(len(time_pair_idxs[0]), 2, plot_no)
    idx1=time_pair_idxs[0][j]
    idx2=time_pair_idxs[1][j]
    sample_name1=high_cov_samples_high_pis[idx1]
    sample_name2=high_cov_samples_high_pis[idx2]
    freqs_idx1=numpy.array(sample_freqs_2D[idx1])
    freqs_idx2=numpy.array(sample_freqs_2D[idx2])
    joint_passed_idx1=numpy.array(joint_passed_sites_2D[idx1])
    joint_passed_idx2=numpy.array(joint_passed_sites_2D[idx2])
    # Keep only sites that passed filtering at both time points.
    joint_passed_idx1_idx2=(joint_passed_idx1)*(joint_passed_idx2)
    joint_passed_idx1_idx2=numpy.where(joint_passed_idx1_idx2==True)
    # NOTE: histogram2d rebinds xbins/ybins to the returned edges (same
    # values), so the bins stay consistent across loop iterations.
    counts, xbins, ybins = numpy.histogram2d(freqs_idx1[joint_passed_idx1_idx2], freqs_idx2[joint_passed_idx1_idx2], bins=(xbins, ybins))
    sfs_2D=counts*1.0/(passed_sites_2D[idx1,idx2])
    pylab.xlabel('time point 1')
    pylab.ylabel('time point 2')
    pylab.xlim([0,1])
    pylab.ylim([0,1])
    pylab.title(sample_name1+', '+sample_name2)
    im=pylab.imshow(sfs_2D.T,interpolation='nearest', origin='low',extent=[xbins[0], xbins[-1], ybins[0], ybins[-1]], norm=LogNorm(vmin=1e-5, vmax=1e-2), cmap='jet')
    pylab.colorbar(im)
    plot_no+=1
    # plot the 1D SFS
    pylab.subplot(len(time_pair_idxs[0]), 2, plot_no)
    pylab.xlabel('Minor allele frequency')
    pylab.ylabel('SFS')
    pylab.xlim([0,1])
    pylab.ylim([3e-06,3e-02])
    colNo=0
    for idx in [idx1,idx2]:
        if sfss[idx].sum()!=0:
            normalized_sfs = sfss[idx]
            # Small random x-jitter (normal(0,1) scaled by a tenth of a
            # bin) keeps the two time points' curves visually separable.
            pylab.semilogy(bin_locations+normal(0,1)*(bin_locations[1]-bin_locations[0])*0.1, normalized_sfs,'.-',alpha=0.5, color=color[colNo])
        colNo +=1
    # NOTE(review): passing the legend location positionally was removed in
    # modern matplotlib; loc='upper right' would be required there.
    pylab.legend(['first time pt', 'second time pt'],'upper right',prop={'size':6})
    plot_no+=1
pylab.savefig('%s/%s_within_person_2D_sfs_time_pair_polarized_%s.png' % (parse_midas_data.analysis_directory,species_name, fig_annotation),bbox_inches='tight')
#######################
# repeat for any pis #
#######################
# Only plot samples above a certain depth threshold that are "haploids"
# Same pipeline as the preceding stanza, but filtering on coverage only
# (no constraint on within-sample diversity).
high_cov_samples_any_pis = samples[(median_coverages>=min_coverage)]
desired_samples_any_pis=(median_coverages>=min_coverage)
# get the time info for the high_cov_samples_low_pis -- this is for all visno combos and all samples irrespective of coverage and pis
time_pair_idxs, visno1, visno2, day = parse_midas_data.calculate_all_time_pairs(subject_sample_time_map, high_cov_samples_any_pis)
###################################################################
# create subplots to plot the 2D and 1D SFSs side by side
###################################################################
# Tag appended to the output figure filename for this selection.
fig_annotation='any_pis'
# compute the 1D sfs
sys.stderr.write("Calculate within person SFS for any pis...\n")
sample_freqs, passed_sites = diversity_utils.calculate_sample_freqs(allele_counts_map, passed_sites_map, variant_type='4D',fold=False)
sfss= []
# 21 edges -> 20 frequency bins; bin_locations are the bin midpoints.
bins = numpy.linspace(0.04,0.95,21)
bin_locations = bins[1:]-(bins[1]-bins[0])/2
# Per-sample 1D SFS, normalized by the sample's passed-site count.
for j in xrange(0,len(samples)):
    counts,dummy = numpy.histogram(sample_freqs[j],bins)
    if counts.sum()<0.5:
        sfs = numpy.zeros_like(bin_locations)
    else:
        sfs = counts*1.0/(passed_sites[j])
    sfss.append(sfs)
# select the desired samples:
sfss=numpy.asarray(sfss)
sfss=sfss[desired_samples_any_pis]
sys.stderr.write("Done!\n")
# compute 2D SFS, plot, along iwth 2D SFS
# these sfs are polarized based on teh consensus allele.
# note that desired samples includes the same as what is in high_cov_samples
sample_freqs_2D, passed_sites_2D, joint_passed_sites_2D = diversity_utils.calculate_sample_freqs_2D(allele_counts_map, passed_sites_map, desired_samples_any_pis, variant_type='4D', fold=False)
xbins = numpy.linspace(0,1,21)
ybins = numpy.linspace(0,1,21)
# One row of (2D SFS, 1D SFS) panels per longitudinal sample pair.
pylab.figure(figsize=(6, len(time_pair_idxs[0])*3))
color=['b','r']
plot_no=1
for j in range(0, len(time_pair_idxs[0])):
    #plot the 2D SFS
    pylab.subplot(len(time_pair_idxs[0]), 2, plot_no)
    idx1=time_pair_idxs[0][j]
    idx2=time_pair_idxs[1][j]
    sample_name1=high_cov_samples_any_pis[idx1]
    sample_name2=high_cov_samples_any_pis[idx2]
    freqs_idx1=numpy.array(sample_freqs_2D[idx1])
    freqs_idx2=numpy.array(sample_freqs_2D[idx2])
    joint_passed_idx1=numpy.array(joint_passed_sites_2D[idx1])
    joint_passed_idx2=numpy.array(joint_passed_sites_2D[idx2])
    # Keep only sites that passed filtering at both time points.
    joint_passed_idx1_idx2=(joint_passed_idx1)*(joint_passed_idx2)
    joint_passed_idx1_idx2=numpy.where(joint_passed_idx1_idx2==True)
    counts, xbins, ybins = numpy.histogram2d(freqs_idx1[joint_passed_idx1_idx2], freqs_idx2[joint_passed_idx1_idx2], bins=(xbins, ybins))
    sfs_2D=counts*1.0/(passed_sites_2D[idx1,idx2])
    pylab.xlabel('time point 1')
    pylab.ylabel('time point 2')
    pylab.xlim([0,1])
    pylab.ylim([0,1])
    pylab.title(sample_name1+', '+sample_name2)
    im=pylab.imshow(sfs_2D.T,interpolation='nearest', origin='low',extent=[xbins[0], xbins[-1], ybins[0], ybins[-1]], norm=LogNorm(vmin=1e-5, vmax=1e-2), cmap='jet')
    pylab.colorbar(im)
    plot_no+=1
    # plot the 1D SFS
    pylab.subplot(len(time_pair_idxs[0]), 2, plot_no)
    pylab.xlabel('Minor allele frequency')
    pylab.ylabel('SFS')
    pylab.xlim([0,1])
    pylab.ylim([3e-06,3e-02])
    colNo=0
    for idx in [idx1,idx2]:
        if sfss[idx].sum()!=0:
            normalized_sfs = sfss[idx]
            # Jitter the x positions slightly so both curves stay visible.
            pylab.semilogy(bin_locations+normal(0,1)*(bin_locations[1]-bin_locations[0])*0.1, normalized_sfs,'.-',alpha=0.5, color=color[colNo])
        colNo +=1
    # NOTE(review): positional legend location is removed in modern
    # matplotlib; loc='upper right' would be required there.
    pylab.legend(['first time pt', 'second time pt'],'upper right',prop={'size':6})
    plot_no+=1
pylab.savefig('%s/%s_within_person_2D_sfs_time_pair_polarized_%s.png' % (parse_midas_data.analysis_directory,species_name, fig_annotation),bbox_inches='tight')
##############################
# NOTE(review): a ~140-line block of superseded plotting code (per-pair
# folded and polarized SFS figures plus the old 2D-SFS loops) used to sit
# here inside a bare triple-quoted string.  A bare string literal is a
# no-op statement, so the block was dead code; it has been removed per
# standard practice (recover it from version control if ever needed).
|
20,634 | 548e5f43f1105eb163e3bea57617fd0f0c799220 | from __future__ import print_function, absolute_import
import time
import numpy as np
import collections
import torch
import torch.nn as nn
from torch.nn import functional as F
from ice.loss import CrossEntropyLabelSmooth, ViewContrastiveLoss
from .utils.meters import AverageMeter
from .evaluation_metrics import accuracy
class ImageTrainer(object):
    """Mean-teacher style trainer combining clustering and contrastive losses.

    Trains ``model_1`` while maintaining an exponential-moving-average copy
    ``model_1_ema`` (updated each step by ``_update_ema_variables``).  Each
    iteration mixes four terms:

    * ``ccloss``  -- label-smoothed cross-entropy of the features' similarity
      to cluster ``centers`` (temperature ``tau_c``);
    * ``vcloss``  -- view-contrastive loss between online features and the
      EMA features of a second augmented view (temperature ``tau_v``);
    * a cross-camera association loss built from per-camera proxy memories;
    * a KL term aligning pairwise-similarity distributions, scaled by
      ``scale_kl``.

    NOTE(review): the camera ids and pseudo labels suggest this is an
    unsupervised person re-ID trainer (ICE-style) -- confirm against the
    surrounding project.
    """

    def __init__(self, model_1, model_1_ema, num_cluster=500, alpha=0.999, num_instance=4, tau_c=0.5, tau_v=0.09,
                 scale_kl=2.0):
        """Store models, loss modules and hyper-parameters.

        alpha is the EMA decay; tau_c / tau_v are softmax temperatures for
        the cluster and view-contrastive terms; beta is the temperature of
        the cross-camera term and bg_knn its number of mined hard negatives.
        """
        super(ImageTrainer, self).__init__()
        self.model_1 = model_1
        self.model_1_ema = model_1_ema
        self.alpha = alpha
        self.tau_c = tau_c
        self.tau_v = tau_v
        self.scale_kl = scale_kl
        self.ccloss = CrossEntropyLabelSmooth(num_cluster)
        self.vcloss = ViewContrastiveLoss(num_instance=num_instance, T=tau_v)
        self.kl = nn.KLDivLoss(reduction='batchmean')
        # First epoch at which the cross-camera loss is enabled (0 = always).
        self.crosscam_epoch = 0
        # Temperature for the cross-camera similarity logits.
        self.beta = 0.07
        # Number of hardest negatives kept per anchor in the camera loss.
        self.bg_knn = 50
        # NOTE(review): self.mse is initialized but not used in this class body.
        self.mse = nn.MSELoss(reduction='sum')

    def train(self, epoch, data_loader_target,
              optimizer, print_freq=1, train_iters=200, centers=None, intra_id_labels=None, intra_id_features=None,
              cams=None, all_pseudo_label=None):
        """Run ``train_iters`` optimization steps over ``data_loader_target``.

        Parameters as used here:
        - epoch: gates the cross-camera loss (via ``crosscam_epoch``) and
          offsets the EMA step counter.
        - data_loader_target: loader with a ``.next()`` method yielding
          (imgs_1, imgs_2, img_mutual, pids, cids); see ``_parse_data``.
        - centers: cluster centroid matrix, moved to GPU and used to form
          classification logits.
        - intra_id_labels: accepted but unused in this body.
        - intra_id_features: per-camera prototype features used to (re)build
          the camera proxy memories on every call.
        - cams / all_pseudo_label: per-image camera ids and pseudo labels.
        """
        self.model_1.train()
        self.model_1_ema.train()
        centers = centers.cuda()
        # outliers = outliers.cuda()
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses_ccl = AverageMeter()
        losses_vcl = AverageMeter()
        losses_cam = AverageMeter()
        losses_kl = AverageMeter()
        precisions = AverageMeter()
        self.all_img_cams = torch.tensor(cams).cuda()
        self.unique_cams = torch.unique(self.all_img_cams)
        # print(self.unique_cams)
        self.all_pseudo_label = torch.tensor(all_pseudo_label).cuda()
        self.init_intra_id_feat = intra_id_features
        # print(len(self.init_intra_id_feat))
        # initialize proxy memory: one feature bank and label mapping per camera
        self.percam_memory = []
        self.memory_class_mapper = []
        self.concate_intra_class = []
        for cc in self.unique_cams:
            percam_ind = torch.nonzero(self.all_img_cams == cc).squeeze(-1)
            uniq_class = torch.unique(self.all_pseudo_label[percam_ind])
            # Negative pseudo labels (outliers) are excluded.
            uniq_class = uniq_class[uniq_class >= 0]
            self.concate_intra_class.append(uniq_class)
            cls_mapper = {int(uniq_class[j]): j for j in range(len(uniq_class))}
            self.memory_class_mapper.append(cls_mapper)  # from pseudo label to index under each camera
            if len(self.init_intra_id_feat) > 0:
                # print('initializing ID memory from updated embedding features...')
                proto_memory = self.init_intra_id_feat[cc]
                proto_memory = proto_memory.cuda()
                self.percam_memory.append(proto_memory.detach())
        # Flat list of every (camera, class) pseudo label, aligned with the
        # rows of the concatenated proxy memory built below.
        self.concate_intra_class = torch.cat(self.concate_intra_class)
        if epoch >= self.crosscam_epoch:
            percam_tempV = []
            for ii in self.unique_cams:
                percam_tempV.append(self.percam_memory[ii].detach().clone())
            percam_tempV = torch.cat(percam_tempV, dim=0).cuda()
        end = time.time()
        for i in range(train_iters):
            target_inputs = data_loader_target.next()
            data_time.update(time.time() - end)
            # process inputs
            inputs_1, inputs_weak, targets, inputs_2, cids = self._parse_data(target_inputs)
            b, c, h, w = inputs_1.size()
            # ids for ShuffleBN: EMA forwards run on shuffled batches and are
            # un-shuffled afterwards.
            shuffle_ids, reverse_ids = self.get_shuffle_ids(b)
            f_out_t1 = self.model_1(inputs_1)
            # Cluster logits: feature-centroid similarity over temperature.
            p_out_t1 = torch.matmul(f_out_t1, centers.transpose(1, 0)) / self.tau_c
            # NOTE(review): f_out_t2 is computed but not used below (the loss
            # uses the EMA features of view 2) -- confirm this forward is needed.
            f_out_t2 = self.model_1(inputs_2)
            loss_cam = torch.tensor([0.]).cuda()
            # Cross-camera association loss, accumulated per camera id present
            # in the batch.
            for cc in torch.unique(cids):
                # print(cc)
                inds = torch.nonzero(cids == cc).squeeze(-1)
                percam_targets = targets[inds]
                # print(percam_targets)
                percam_feat = f_out_t1[inds]
                # # intra-camera loss
                # mapped_targets = [self.memory_class_mapper[cc][int(k)] for k in percam_targets]
                # mapped_targets = torch.tensor(mapped_targets).to(torch.device('cuda'))
                # # percam_inputs = ExemplarMemory.apply(percam_feat, mapped_targets, self.percam_memory[cc], self.alpha)
                # percam_inputs = torch.matmul(F.normalize(percam_feat), F.normalize(self.percam_memory[cc].t()))
                # percam_inputs /= self.beta # similarity score before softmax
                # loss_cam += F.cross_entropy(percam_inputs, mapped_targets)
                # cross-camera loss
                if epoch >= self.crosscam_epoch:
                    associate_loss = 0
                    # target_inputs = percam_feat.mm(percam_tempV.t().clone())
                    target_inputs = torch.matmul(F.normalize(percam_feat), F.normalize(percam_tempV.t().clone()))
                    temp_sims = target_inputs.detach().clone()
                    target_inputs /= self.beta
                    for k in range(len(percam_feat)):
                        # Proxies sharing the anchor's pseudo label = positives.
                        ori_asso_ind = torch.nonzero(self.concate_intra_class == percam_targets[k]).squeeze(-1)
                        temp_sims[k, ori_asso_ind] = -10000.0  # mask out positive
                        # Top bg_knn most similar non-positive proxies = hard negatives.
                        sel_ind = torch.sort(temp_sims[k])[1][-self.bg_knn:]
                        concated_input = torch.cat((target_inputs[k, ori_asso_ind], target_inputs[k, sel_ind]), dim=0)
                        concated_target = torch.zeros((len(concated_input)), dtype=concated_input.dtype).to(
                            torch.device('cuda'))
                        # Soft target: uniform mass over the positives, zero on negatives.
                        concated_target[0:len(ori_asso_ind)] = 1.0 / len(ori_asso_ind)
                        associate_loss += -1 * (
                                F.log_softmax(concated_input.unsqueeze(0), dim=1) * concated_target.unsqueeze(
                            0)).sum()
                    loss_cam += 0.5 * associate_loss / len(percam_feat)
            # Teacher forwards (no gradients), with ShuffleBN applied.
            with torch.no_grad():
                inputs_1 = inputs_1[shuffle_ids]
                # NOTE(review): f_out_t1_ema is computed but unused below.
                f_out_t1_ema = self.model_1_ema(inputs_1)
                f_out_t1_ema = f_out_t1_ema[reverse_ids]
                inputs_2 = inputs_2[shuffle_ids]
                f_out_t2_ema = self.model_1_ema(inputs_2)
                f_out_t2_ema = f_out_t2_ema[reverse_ids]
                inputs_weak = inputs_weak[shuffle_ids]
                f_out_weak_ema = self.model_1_ema(inputs_weak)
                f_out_weak_ema = f_out_weak_ema[reverse_ids]
            loss_ccl = self.ccloss(p_out_t1, targets)
            loss_vcl = self.vcloss(F.normalize(f_out_t1), F.normalize(f_out_t2_ema), targets)
            # KL between the student-vs-EMA similarity distribution and the
            # EMA weak-view self-similarity distribution (scaled by 10).
            loss_kl = self.kl(F.softmax(
                torch.matmul(F.normalize(f_out_t1), F.normalize(f_out_t2_ema).transpose(1, 0)) / self.scale_kl,
                dim=1).log(),
                F.softmax(torch.matmul(F.normalize(f_out_weak_ema),
                                       F.normalize(f_out_weak_ema).transpose(1, 0)) / self.scale_kl,
                          dim=1)) * 10
            loss = loss_ccl + loss_vcl + loss_cam + loss_kl
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            self._update_ema_variables(self.model_1, self.model_1_ema, self.alpha, epoch * len(data_loader_target) + i)
            prec_1, = accuracy(p_out_t1.data, targets.data)
            losses_ccl.update(loss_ccl.item())
            losses_cam.update(loss_cam.item())
            losses_vcl.update(loss_vcl.item())
            losses_kl.update(loss_kl.item())
            precisions.update(prec_1[0])
            # print log #
            batch_time.update(time.time() - end)
            end = time.time()
            if (i + 1) % print_freq == 0:
                print('Epoch: [{}][{}/{}]\t'
                      'Time {:.3f} ({:.3f})\t'
                      'Data {:.3f} ({:.3f})\t'
                      'Loss_ccl {:.3f}\t'
                      'Loss_hard_instance {:.3f}\t'
                      'Loss_cam {:.3f}\t'
                      'Loss_kl {:.3f}\t'
                      'Prec {:.2%}\t'
                      .format(epoch, i + 1, len(data_loader_target),
                              batch_time.val, batch_time.avg,
                              data_time.val, data_time.avg,
                              losses_ccl.avg,
                              losses_vcl.avg,
                              losses_cam.avg,
                              losses_kl.avg,
                              precisions.avg))

    def _update_ema_variables(self, model, ema_model, alpha, global_step):
        """In-place EMA update: ema = alpha * ema + (1 - alpha) * param."""
        # alpha = min(1 - 1 / (global_step + 1), alpha)
        for ema_param, param in zip(ema_model.parameters(), model.parameters()):
            ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)

    def _parse_data(self, inputs):
        """Unpack a loader batch and move every tensor to the GPU.

        Returns (inputs_1, inputs_2, targets, inputs_mutual, cids); note the
        second and third loader items swap position in the return order.
        """
        imgs_1, imgs_2, img_mutual, pids, cids = inputs
        inputs_1 = imgs_1.cuda()
        inputs_2 = imgs_2.cuda()
        inputs_mutual = img_mutual.cuda()
        targets = pids.cuda()
        cids = cids.cuda()
        return inputs_1, inputs_2, targets, inputs_mutual, cids

    def get_shuffle_ids(self, bsz):
        """generate shuffle ids for ShuffleBN

        Returns (forward_inds, backward_inds) where backward_inds inverts
        the permutation: x[forward][backward] == x.
        """
        forward_inds = torch.randperm(bsz).long().cuda()
        backward_inds = torch.zeros(bsz).long().cuda()
        value = torch.arange(bsz).long().cuda()
        backward_inds.index_copy_(0, forward_inds, value)
        return forward_inds, backward_inds
|
20,635 | 8ca06704040276f35e4170c6e9c5cc9e3e726042 | #Built-in Libraries
import math
from random import uniform
from random import randrange
import argparse
import os
import string
import ctypes
#external libraries
import numpy
import ogr
import osr
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.cm as cm
import Image
from matplotlib.image import imread
from mpl_toolkits.basemap import Basemap
from osgeo import gdal
#Constants / Globals
# NOTE(review): ``global`` at module scope is a no-op in Python; these
# statements only document intent.  velocity, angle and num are actually
# bound as module globals inside the __main__ block below.
global velocity
global angle
global num
def create_shapefile(xdata, ydata, shapefile):
    """Write the landing coordinates to an ESRI point shapefile.

    Parameters
    ----------
    xdata, ydata : sequence of float
        Landing longitude / latitude values (degrees), index-aligned.
    shapefile : list
        Single-element list holding the output path (argparse nargs=1).

    Side effects: creates ``<name>.shp`` (plus .shx/.dbf) via OGR and a
    matching ``.prj`` carrying the Moon_2000 GCS definition.
    """
    output = shapefile[0]
    driverName = "ESRI Shapefile"
    drv = ogr.GetDriverByName(driverName)
    ds = drv.CreateDataSource(output)
    layer = ds.CreateLayer("point_out", geom_type=ogr.wkbPoint)
    # Attribute schema: the landing coordinates plus the iteration number.
    field_x = ogr.FieldDefn()
    field_x.SetName('xCoord')
    field_x.SetType(ogr.OFTReal)
    field_x.SetWidth(15)
    field_x.SetPrecision(6)
    layer.CreateField(field_x)
    field_y = ogr.FieldDefn()
    field_y.SetName('yCoord')
    field_y.SetType(ogr.OFTReal)
    field_y.SetWidth(15)
    field_y.SetPrecision(6)
    layer.CreateField(field_y)
    field_itnum = ogr.FieldDefn()
    field_itnum.SetName('IterNum')
    field_itnum.SetType(ogr.OFTInteger)
    field_itnum.SetWidth(10)
    field_itnum.SetPrecision(1)
    layer.CreateField(field_itnum)
    # One point feature per landing coordinate.
    # BUG FIX: was range(len(xdata+1)), which only worked by accident for
    # numpy arrays (element-wise +1 keeps the length) and raises for lists.
    for index in range(len(xdata)):
        feat = ogr.Feature(layer.GetLayerDefn())
        feat.SetField('IterNum', index)
        feat.SetField('xCoord', xdata[index])
        feat.SetField('yCoord', ydata[index])
        pt = ogr.Geometry(ogr.wkbPoint)
        pt.AddPoint_2D(xdata[index], ydata[index])
        feat.SetGeometry(pt)
        layer.CreateFeature(feat)
    spatialRef = osr.SpatialReference()
    spatialRef.SetGeogCS("GCS_Moon_2000",
                         "D_Moon_2000",
                         "Moon_localradius", 1737400.0, 0.0,
                         "Prime Meridian", 0.0,
                         "Degree", 0.0174532925199433)
    # Release the datasource so OGR flushes the .shp/.dbf to disk.
    ds = None
    #Output the .prj file.
    spatialRef.MorphToESRI()
    # os.path.splitext (os is imported at module top) handles dotted
    # directory names, unlike the previous output.split('.')[0].
    basename = os.path.splitext(output)[0]
    with open(basename + ".prj", 'w') as prj_file:
        prj_file.write(spatialRef.ExportToWkt())
def init(shared_x, shared_y):
    """Publish the shared landing-coordinate buffers as module globals.

    The --fast worker processes read ``xarr``/``yarr`` from module scope,
    so the buffers are stored here once before the workers are spawned.
    """
    global xarr, yarr
    xarr = shared_x
    yarr = shared_y
def f(v):
    """Identity helper: return *v* unchanged to the caller."""
    return v
def random_azimuth():
    """Return a uniformly distributed azimuth, in degrees, on [0, 360].

    A weighted draw (e.g. normalvariate(mean, std) for a Gaussian) could
    model directional venting, but would need to be fit to data first.
    """
    return uniform(0.0, 360.0)
def strom_multi(xarr,yarr,i):
    """Worker body for the --fast (multiprocessing) path.

    Fills slice ``i`` of the shared coordinate buffers ``xarr``/``yarr``
    with simulated landing positions.  Relies on module globals published
    by ``init`` (the shared arrays) and bound in __main__ (``xpt``,
    ``ypt``, ``dtm``, ``dev``, ``gtinv``).
    """
    # ``i`` is a slice object, so xarr[i] is the sub-array this worker owns.
    for index in range(len(xarr[i])):
        #distance and coordinates
        distance, angle, elevation = calc_distance()
        azimuth = random_azimuth()
        # Planar landing offset in meters from the vent (azimuth in degrees).
        Xcoordinate = distance * math.sin(azimuth * math.pi/180) #Conversion to radians
        Ycoordinate = distance * math.cos(azimuth* math.pi/180)
        #The WAC visible spectrum data is 100mpp or 0.003297790480378 degrees / pixel.
        Xcoordinate /= 100
        Xcoordinate *= 0.003297790480378
        Ycoordinate /= 100
        Ycoordinate *= 0.003297790480378
        x = float(Xcoordinate)
        y = float(Ycoordinate)
        #Randomly select the origin point along the linear vent
        rand_index = randrange(0,10)
        xorigin, yorigin = (xpt[rand_index], ypt[rand_index])
        # ``distance`` is rebound here from a scalar to check_topography's
        # return value (new_distance, impacted_flag).
        # NOTE(review): check_topography's no-impact path can return a bare
        # False, which would make distance[1] below raise -- confirm.
        distance = check_topography(dtm, xorigin, yorigin, x+xorigin, y+yorigin, distance,elevation, dev, gtinv)
        if distance[1] == True:
            # Impacted early: recompute the offset from the shortened range.
            x = (distance[0] * math.sin(azimuth * math.pi/180))
            y = (distance[0] * math.cos(azimuth* math.pi/180))
            #Convert back to degrees
            x /= 100
            x *= 0.003297790480378
            y /= 100
            y *= 0.003297790480378
        else:
            pass
        xarr[i][index] = x+xorigin
        yarr[i][index] = y+yorigin
def calc_height(distance, ejectionangle, g, ejectionvelocity):
    '''
    Sample the ballistic trajectory height every 100 m of ground range.

    height@x = initital_height + distance(tan(theta)) - ((g(x^2))/(2(v(cos(theta))^2))
    initial_height = 0, a planar surface is fit to some reference elevation.

    Parameters
    ----------
    distance : float
        Total theoretical travel distance in meters.
    ejectionangle : float
        Ejection angle in radians.
    g : float
        Gravitational acceleration (m/s^2).
    ejectionvelocity : float
        Ejection speed (m/s).

    Returns
    -------
    numpy.ndarray
        Height above the launch plane at each 100 m ground-range step.
    '''
    # BUG FIX: numpy.linspace requires an integer ``num``; the original
    # passed the float distance/100 and relied on old numpy truncating it.
    samples = int(distance / 100)
    trajectory = numpy.linspace(0, distance, samples, endpoint=True)
    elevation = (trajectory * math.tan(ejectionangle)) - ((g*(trajectory**2)) / (2*((ejectionvelocity * math.cos(ejectionangle))**2)))
    return elevation
def calc_distance():
    """Simulate one random shot; return (range_m, angle_rad, height_profile).

    Uses the planar-surface range equation d = v^2 * sin(2*theta) / g with
    theta and v drawn uniformly from the module-level ``angle`` and
    ``velocity`` ranges, and g the lunar gravitational acceleration.
    """
    g = 1.6249
    # Random ejection angle from the configured range (degrees -> radians).
    shot_angle = uniform(angle[0], angle[1]) * (math.pi / 180)
    # Random ejection speed from the configured range.
    shot_speed = uniform(velocity[0], velocity[1])
    # Total theoretical travel distance over a plane.
    shot_range = (shot_speed * shot_speed * math.sin(2 * shot_angle)) / g
    # Height profile of the projectile over that planar surface.
    profile = calc_height(shot_range, shot_angle, g, shot_speed)
    return shot_range, shot_angle, profile
def stromboli2():
    '''distance = (velocity^2*(sin(2theta))) / gravity

    Generator driving the animation: yields one simulated ejection per
    step as (x_offset_deg, y_offset_deg, angle_rad, azimuth_deg,
    height_profile, range_m), then finally yields the sentinel ``False``
    so the consumer (``run``) knows to draw the density map.

    Reads the module-global iteration count ``num``.
    '''
    p = 0
    while p <= num:
        p+=1
        # NOTE(review): ``g`` is assigned but unused here; calc_distance
        # uses its own local value.
        g = 1.6249 #Gravitational acceleration on the moon
        distance, angle, elevation = calc_distance()
        azimuth = random_azimuth()
        # Planar landing offset in meters from the vent.
        Xcoordinate = distance * math.sin(azimuth * math.pi/180) #Conversion to radians
        Ycoordinate = distance * math.cos(azimuth* math.pi/180)
        #The WAC visible spectrum data is 100mpp or 0.003297790480378 degrees / pixel.
        Xcoordinate /= 100
        Xcoordinate *= 0.003297790480378
        Ycoordinate /= 100
        Ycoordinate *= 0.003297790480378
        yield Xcoordinate, Ycoordinate, angle, azimuth, elevation, distance
        # On the final pass p has exceeded num: emit the end-of-run sentinel.
        if p > num:
            done = False
            yield done
def check_topography(dtm, originx, originy, destx, desty, distance,elevation, dev, gtinv):
    '''
    Check for an early impact due to variation in topography by mimicking
    a topographic profile extracted under the flight path.

    1. Generate arrays of X and Y coordinates along the ground track.
    2. Transform them from GCS into raster (pixel) space.
    3. Extract the terrain elevations under the track from the dtm.
    4. Compare them to the analytical trajectory heights.
    5. If an impact occurs before the full ballistic range, report the
       shortened range; otherwise report the full range.

    Parameters
    ----------
    dtm: digital terrain model storing terrain elevation, 2-D ndarray
    originx, originy: launch coordinates (degrees), scalars
    destx, desty: planar landing coordinates (degrees), scalars
    distance: total possible travel distance in meters, scalar
    elevation: trajectory heights sampled every 100 m (modified in place)
    dev: geotransform determinant (unused here; kept for the call sites)
    gtinv: tuple built in __main__; gtinv[0]/gtinv[3] carry the raster
           origin (geotransform[0]/geotransform[3]) and the remaining
           entries are inverse-transform coefficients

    Returns
    -------
    (new_distance, impacted): the impact range in meters and True when the
    projectile meets the terrain early; otherwise the unchanged input
    ``distance`` and False (also used when the track leaves the DTM).

    BUG FIX: the no-impact path returned a bare ``False`` and the
    out-of-bounds path returned ``None``; every caller indexes the result
    with [0]/[1], so both paths now return ``(distance, False)``.
    '''
    # Sample the line of flight at 100 m spacing; numpy.linspace requires
    # an integer ``num`` (the original passed a float).
    steps = int(distance / 100)
    xpt = numpy.linspace(originx, destx, num=steps, endpoint=True)
    ypt = numpy.linspace(originy, desty, num=steps, endpoint=True)
    # Shift into raster space.  gtinv[0]/gtinv[3] equal geotransform[0]/[3]
    # by construction, which removes the hidden module-global dependency.
    xpt -= gtinv[0]
    ypt -= gtinv[3]
    xsam = numpy.round_((gtinv[1] * xpt + gtinv[2] * ypt), decimals=0)
    ylin = numpy.round_((gtinv[4] * xpt + gtinv[5] * ypt), decimals=0)
    try:
        dtmvector = dtm[ylin.astype(int), xsam.astype(int)]
    except IndexError:
        # Flight path leaves the DTM: fall back to the planar landing spot.
        print("Total distance travel exceeds model dimensions.")
        return distance, False
    #Compute elevation of projectile from a plane at the origin height
    dtmvectormin = dtmvector.min()
    elevation -= abs(dtmvector[0])
    #Compare the projectile elevation to the dtm
    dtmvector += abs(dtmvectormin)
    elevation -= dtmvector
    elevation += dtmvectormin
    #Ignore the first 2.5km of ejection distance to ensure that we get a valid elevation check.
    impact = numpy.where(elevation[250:] <= 0)
    try:
        #We are working at 100mpp, so the new distance is index +1
        return ((impact[0][0]) + 1) * 100, True
    except IndexError:
        # No impact before the full ballistic range: land at max distance.
        return distance, False
def density(m, xdata, ydata, shapefile, ppg):
    '''
    This function converts the lat/lon of the input map to meters
    assuming an equirectangular projection. It then creates a grid at
    100mpp, bins the input data into the grid (density) and creates a
    histogram. Finally, a mesh grid is created and the histogram is
    plotted in 2D over the basemap.
    If the shapefile flag is set to true a shapefile is created by calling
    the shapefile function.
    Parameters:
    m: A basemap mapping object
    xdata: An array of x landing coordinates, ndarray
    ydata: An array of y landing coordinates, ndarray
    shapefile: A flag on whether or not to generate a shapefile
    ppg: The number of pixels per grid cell (string or int)
    '''
    #Convert from DD to m to create a mesh grid.
    # NOTE(review): xmin/xmax/ymin/ymax are computed but never used below;
    # the bin edges come from the data extent instead.
    xmax = (m.xmax) / 0.003297790480378
    xmin = (m.xmin) / 0.003297790480378
    ymax = (m.ymax) / 0.003297790480378
    ymin = (m.ymin) / 0.003297790480378
    #Base 100mpp
    # Grid dimensions in cells (1516 x 2123 px region at 100 m/px).
    # NOTE(review): integer division is the Python 2 behavior; under
    # Python 3 these would become floats and break linspace below.
    nx = 1516 / int(ppg)
    ny = 2123 / int(ppg)
    #Convert to numpy arrays
    xdata = numpy.asarray(xdata)
    ydata = numpy.asarray(ydata)
    #Bin the data & calculate the density
    lon_bins = numpy.linspace(xdata.min(), xdata.max(), nx+1)
    lat_bins = numpy.linspace(ydata.min(), ydata.max(), ny+1)
    # Per-cell landing counts (local name shadows this function's name).
    density, _, _ = numpy.histogram2d(ydata, xdata, [lat_bins, lon_bins])
    #If the user wants a shapefile, pass the numpy arrays
    if shapefile != None:
        print "Writing model output to a shapefile."
        create_shapefile(xdata, ydata, shapefile)
    #Create a grid of equally spaced polygons
    lon_bins_2d, lat_bins_2d = numpy.meshgrid(lon_bins, lat_bins)
    # Keep the color scale readable for sparse runs.
    if density.max() <= 3:
        maxden = 5
    else:
        maxden = density.max()
    #Mask the density array so that 0 is not plotted
    density = numpy.ma.masked_where(density <=0, density)
    plt.pcolormesh(lon_bins_2d,lat_bins_2d, density, cmap=cm.RdYlGn_r, vmin=0, vmax=maxden, alpha=0.5)
    plt.colorbar(orientation='horizontal')
if __name__ == '__main__':
    '''This is the main section which handles program flow.'''
    #Parse all of the arguments.
    parser = argparse.ArgumentParser(description='Stromboli Ejection Simulation Tool v1')
    parser.add_argument('--velocity', '-v', action='store',nargs='+',default=[350,425], dest='velocity', help='A range of ejection velocities. ')
    parser.add_argument('--angle','-a', action='store', nargs='+',default=[30, 60], dest='angle', help='Optional: A range of ejection angles. Example: -a 30 60')
    parser.add_argument('-i', '--iterations', action='store', type=int, dest='i',default=500, help='The number of ejection iterations to perform.')
    parser.add_argument('--shapefile', action='store',nargs=1, default=None, dest='shapefile', help='Use this flag to generate a shapefile, in Moon_2000GCS, of the point data.')
    parser.add_argument('--fast', action='store', default=None, nargs=1, dest='multi', help='Use this flag to forgo creating a visualization and just create a shapefile. This uses all available processing cores and is substantially faster.')
    parser.add_argument('--ppg', action='store', default=10, dest='ppg', help='The number of pixels per grid cell. Default is 10, which generates a 1000m grid square using 100mpp WAC Vis.')
    args = parser.parse_args()
    #Assign the user variables to the globals, not great form, but it works.
    # A single supplied value collapses to a degenerate [v, v] range.
    try:
        velocity = [float(args.velocity[0]),float(args.velocity[1])]
    except:
        velocity = [float(args.velocity[0]),float(args.velocity[0])]
    num = args.i
    try:
        angle = [float(args.angle[0]),float(args.angle[1])]
    except:
        angle = [float(args.angle[0]),float(args.angle[0])]
    #Read the input DTM and get geotransformation info
    ds = gdal.Open('wac_dtm.tif')
    dtm = ds.ReadAsArray()
    geotransform = ds.GetGeoTransform()
    # Determinant and (partial) inverse of the affine geotransform; note
    # gtinv[0]/gtinv[3] carry the raster origin, not inverse coefficients.
    dev = (geotransform[1]*geotransform[5] - geotransform[2]*geotransform[4])
    gtinv = ( geotransform[0] , geotransform[5]/dev, - geotransform[2]/dev, geotransform[3], - geotransform[4]/dev, geotransform[1]/dev)
    #Set the approximate ejection coordinates (linear vent, 10 points)
    xpt = numpy.linspace(-97.788,-97.855,num=10, endpoint=True)
    ypt = numpy.linspace(-30.263,-29.851,num=10, endpoint=True)
    #If the user wants to process quickly then we omit the visualization and multiprocess to generate a shapefile
    if args.multi is not None:
        import multiprocessing
        cores = multiprocessing.cpu_count()
        cores *= 2
        step = num // cores
        # Shared double buffers so worker processes can write results in place.
        xarray = numpy.frombuffer(multiprocessing.RawArray(ctypes.c_double, num))
        yarray = numpy.frombuffer(multiprocessing.RawArray(ctypes.c_double, num))
        init(xarray,yarray)
        jobs = []
        for i in range(0, num+1, step):
            p = multiprocessing.Process(target=strom_multi, args=(xarr,yarr,slice(i, i+step)), )
            jobs.append(p)
        for job in jobs:
            job.start()
        for job in jobs:
            job.join()
        create_shapefile(xarr, yarr, args.multi)
    else:
        #Visualization - setup the plot
        fig = plt.figure(figsize=(15,10))
        ax1 = fig.add_subplot(1,2,1)
        #Points that hit underlying topography
        pt, = ax1.plot([], [],'ro', markersize=3)
        xdata, ydata = [], []
        #Points that travel the total theoretical distance
        ptmax, = ax1.plot([],[], 'yo', markersize=3)
        # BUG FIX: was ``datamax, ydatamax = [],[]`` -- the appends below
        # target ``xdatamax``, which was never defined and raised a
        # NameError the first time a particle landed at full range.
        xdatamax, ydatamax = [],[]
        #Map
        lon_min = -102.5
        lon_max = -93.5
        lat_min = -34.5
        lat_max = -25.5
        m = Basemap(projection='cyl',llcrnrlat=lat_min,urcrnrlat=lat_max,
                    llcrnrlon=lon_min,urcrnrlon=lon_max,resolution=None, rsphere=(1737400.0,1737400.0))
        m.drawmeridians(numpy.arange(lon_min+0.5, lon_max+1, 1), labels=[0,0,0,1], fontsize=10)
        m.drawparallels(numpy.arange(lat_min+0.5,lat_max+1, 1), labels=[1,0,0,0], fontsize=10)
        #Read the input image
        im = imread('wac_global_vis.png')
        m.imshow(im, origin='upper', cmap=cm.Greys_r, alpha=0.9)
        def run(data):
            # Animation callback: one ejection per frame; the stromboli2
            # sentinel (False) triggers the density map on the second axes.
            if data == False:
                density(m2,xdata, ydata, args.shapefile, args.ppg)
            else:
                #x, y are in degrees from the false origin 0,0
                x,y, angle, azimuth, elevation, distance = data
                rand_index = randrange(0,10)
                #Randomly select the origin point along the linear vent
                xorigin, yorigin = (xpt[rand_index], ypt[rand_index])
                distance = check_topography(dtm, xorigin, yorigin, x+xorigin, y+yorigin, distance,elevation, dev, gtinv)
                if distance[1] == True:
                    x = (distance[0] * math.sin(azimuth * math.pi/180))
                    y = (distance[0] * math.cos(azimuth* math.pi/180))
                    #Convert back to degrees
                    x /= 100
                    x *= 0.003297790480378
                    y /= 100
                    y *= 0.003297790480378
                    xdata.append(x + xorigin)
                    ydata.append(y + yorigin)
                    pt.set_data(xdata, ydata)
                else:
                    print('Particle landed at the maximum theoretical distance.')
                    # BUG FIX: x and y were divided by 100 and scaled to
                    # degrees a second time here, although stromboli2
                    # already yields them in degrees (strom_multi's
                    # parallel branch leaves them untouched).  The
                    # redundant conversion is removed so max-range points
                    # plot at the correct location.
                    xdatamax.append(x + xorigin)
                    ydatamax.append(y + yorigin)
                    #Set the point
                    ptmax.set_data(xdatamax, ydatamax)
                print('Angle: %f, Azimuth: %f, xCoordinate: %f, yCoordinate: %f' %(angle, azimuth,x+xorigin,y+yorigin))
            return pt,
        #Plot the volcano as approximated by a linear function.
        plt.plot(xpt, ypt, 'bo', markersize=4)
        #Run the animation
        ani = animation.FuncAnimation(fig, run,stromboli2, interval=1, repeat=False, blit=False)
        plt.title('Sample Deposition Using ' + (str(num+2)) + " Points")
        ax2 = fig.add_subplot(1,2,2)
        gridsize = str(int(args.ppg) * 100)
        ax2.set_title('Impacts /' + gridsize + ' m')
        m2 = Basemap(projection='cyl',llcrnrlat=lat_min,urcrnrlat=lat_max,
                     llcrnrlon=lon_min,urcrnrlon=lon_max,resolution=None, rsphere=(1737400.0,1737400.0))
        # Basemap draws on the current axes (now ax2); m and m2 share
        # identical parameters, so drawing via m here is equivalent.
        m.drawmeridians(numpy.arange(lon_min+0.5, lon_max+1, 1), labels=[0,0,0,1], fontsize=10)
        m.drawparallels(numpy.arange(lat_min+0.5,lat_max+1, 1), labels=[1,0,0,0], fontsize=10)
        m2.imshow(im, origin='upper', cmap=cm.Greys_r)
        plt.show()
        #Save the animation
        #ani.save('simulation.mp4', fps=10)
|
20,636 | e914421bff98849fec998dc86573e2b23b080a68 | from . import db
class Sorcerer(db.Model):
    """A sorcerer belonging to at most one guild."""
    __tablename__ = 'sorcerers'
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255))
    # Guild membership; the row follows its guild on update/delete (CASCADE).
    guild_id = db.Column(db.Integer, db.ForeignKey('guilds.id', onupdate='CASCADE', ondelete='CASCADE'))
|
20,637 | ffc7a3311bb500e3e479e6a58d2962c1d26e11e8 | i = 0
# Print the integers 0 through 9 (Python 2 print-statement demo).
while (i < 10):
    print i
    i = i + 1
def fib(n):
    """Print the first n Fibonacci numbers, starting 1, 1, 2, 3, ...

    Uses single-argument print() call syntax, which behaves identically
    under Python 2 (where `print n1` was a syntax error on Python 3) and
    Python 3.
    """
    n1 = 1
    n2 = 1
    i = 0
    while (i < n):
        print(n1)
        i = i + 1
        # Advance the pair: (n1, n2) -> (n2, n1 + n2); the tuple swap
        # replaces the old temp-variable shuffle.
        n1, n2 = n2, n1 + n2
fib(5)
|
def xml_get1(node,xpath):
    """Return the first node matching *xpath*; raises IndexError when none match."""
    matches = node.xpath(xpath)
    return matches[0]
def xml_get1_or_none(node,xpath):
    """Return the first node matching *xpath*, or None when nothing matches."""
    matches = node.xpath(xpath)
    if matches:
        return matches[0]
    return None
def xml_get_text(node,xpath):
    """Return the .text of the first node matching *xpath*; IndexError when none match."""
    return node.xpath(xpath)[0].text
def xml_get_text_or_none(node,xpath):
    """Return the .text of the first match, or None when the xpath finds nothing."""
    matches = node.xpath(xpath)  # empty result list means "not found"
    if matches:
        return matches[0].text
    return None
def xml_xpath_text(node,xpath):
    """Return the .text of every node matching *xpath*, in match order."""
    return [match.text for match in node.xpath(xpath)]
def xml_xpath_text_first(node,xpath):
    """Return the text of the first match, or None when there are no matches.

    BUG fix: this previously called xml_xpath_list_text(), a name not
    defined anywhere in this module, so every call raised NameError; the
    intended helper is xml_xpath_text().
    """
    try:
        return xml_xpath_text(node,xpath)[0]
    except IndexError:
        return None
def arglist_to_dict(**args):
    """Return the received keyword arguments as a plain dict."""
    return dict(args)
# Short alias.
a2d = arglist_to_dict
|
20,639 | 5cde02606c0967f0ab76f26327551e78a5a9d812 | from dictionaries import primeNumbers
from random import randint
from math import gcd
def encrypting(e, n, text):
    """RSA-style encryption: return [ord(ch) ** e mod n for each ch in text].

    Uses the three-argument pow() for modular exponentiation, which stays
    exact and avoids materializing the huge intermediate of
    (value ** e) % n that the original computed.
    """
    return [pow(ord(character), e, n) for character in text]
def mdc(n):
    """Return the smallest integer >= 2 that is coprime to *n*.

    (The __main__ demo below uses this to pick an exponent coprime to the
    totient.)
    """
    candidate = 2
    while gcd(n, candidate) != 1:
        candidate = candidate + 1
    return candidate
def totient(p0, p1):
    """Euler's totient of p0*p1 for distinct primes p0, p1: (p0-1)*(p1-1)."""
    return (p0 - 1) * (p1 - 1)
def caculateN(p0, p1):
    """Return the modulus n = p0 * p1. (Name keeps the original's typo.)"""
    return p0 * p1
def generatePrimes():
    """Pick two distinct primes at random from the shared prime table."""
    pool = primeNumbers()
    last = len(pool) - 1
    first = randint(0, last)
    second = randint(0, last)
    # Redraw until the two indices differ (same randint call pattern as before).
    while first == second:
        second = randint(0, last)
    return [pool[first], pool[second]]
if __name__ == '__main__':
    # Demo: build a toy RSA-like key from two random primes and encrypt a
    # sample phrase, printing every intermediate value.
    primes = generatePrimes()
    p0 = primes[0]
    p1 = primes[1]
    print(primes)
    n = caculateN(p0, p1)
    print(n)
    t = (totient(p0, p1))
    print(t)
    b = (mdc(t))
    print(b)
    # NOTE(review): RSA encrypts modulo n, but this passes the totient t as
    # the modulus -- likely should be encrypting(b, n, ...); confirm intent.
    x = encrypting(b, t, 'meu passaro')
    print(x)
20,640 | 52adb1041c0b64dd5f681e1249987ef66039ccb4 | from random import choice, uniform
from client.beagle.beagle_api import api as bgl
from math import sin, cos, floor
from itertools import chain
class explosions(bgl.basic_sprite_renderer):
    """Renders and ages sprite explosions for destroyed items.

    Textures are generated lazily once per process and shared through the
    class attributes below.
    """
    # Class-level texture cache, populated on first instantiation.
    textures = None
    num_textures = 16
    primitive = bgl.primitive.unit_uv_square
    shader = bgl.assets.get("beagle-2d/shader/beagle-2d")
    def generate_texture():
        """Build one random explosion texture (no 'self': called on the class)."""
        size = choice( [ 32,64,128,256 ] )
        num_parts = int(uniform(8.0,size))
        colors = [ [1.0,0.0,1.0,1.0],
        [0.5,0.0,1.0,1.0],[1.0,1.0,1.0,1.0] ]
        # Start from a fully transparent size*size RGBA image.
        image = []
        for i in range(0, size*size):
            image.append([0.0,0.0,0.0,0.0])
        for i in range(0, num_parts):
            # Scatter particles at random polar offsets around the centre.
            r = uniform(0.0,3.14*2)
            d = uniform(0.0,0.7)
            x,y = cos(r)*d, sin(r)*d
            x,y = x +1, y+ 1
            x,y = x * size, y * size
            x,y = floor(x / 2.0), floor(y / 2.0)
            image[ int( (y*size)+x) ] = choice( colors )
        return bgl.texture.from_data(size,size,list(chain(*image)))
    def add_explosion(self, dead_item):
        """Start an explosion animation at *dead_item*'s position."""
        dead_item.explosion_life = 1.0
        self.dead_items.append( dead_item )
        self.explosion_impulse = 1.0
        dead_item.texture = choice( explosions.textures )
    def render(self):
        """Draw every active explosion, fading and growing it each frame."""
        for renderable in self.dead_items:
            dead_item = renderable
            sparams = renderable.get_shader_params()
            el = dead_item.explosion_life
            sparams["filter_color"] = [ el,el,el,el*0.5]
            sparams["rotation_local"] = 0.0
            explosions.primitive.render_shaded( explosions.shader, sparams )
            renderable.size = renderable.size * 1.1
    def generate_textures():
        """Populate the class-level texture cache (no 'self': called on the class)."""
        explosions.textures = []
        for i in range( 0, explosions.num_textures ):
            explosions.textures.append( explosions.generate_texture() )
    def tick(self):
        """Age all explosions and drop the ones that have burnt out."""
        self.explosion_impulse = self.explosion_impulse * 0.98
        # BUG fix: iterate over a snapshot -- the original removed items
        # from self.dead_items while iterating the same list, which skips
        # the element following every removal.
        for dead_item in list(self.dead_items):
            dead_item.explosion_life = dead_item.explosion_life *0.9
            dead_item.tick()
            if dead_item.explosion_life < 0.1:
                self.dead_items.remove(dead_item)
        return True
    def __init__(self):
        self.explosion_impulse = 1.0
        self.dead_items = []
        if not explosions.textures:
            explosions.generate_textures()
|
20,641 | 4beeb43d19e0f35720e23cfe6b894f28ae6da220 | import datetime
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from sqlalchemy.sql.sqltypes import DateTime, TIMESTAMP
from .database import Base
class User(Base):
    """Application user account; one user owns many messages."""
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True, index=True)
    name = Column(String, unique=True, index=True)
    email = Column(String)
    # NOTE(review): declared as a plain String -- confirm the value is
    # hashed before it reaches the database.
    password = Column(String)
    # Dates and age preferences are stored as strings (no DB-level validation).
    date_of_birth = Column(String)
    minimum_age_pref = Column(String)
    maximum_age_pref = Column(String)
    location = Column(String)
    # One-to-many: Message.owner is the other side of this relationship.
    messages = relationship("Message", back_populates='owner')
class Message(Base):
    """A message sent by (owned by) a single user."""
    __tablename__ = 'messages'
    id = Column(Integer, primary_key=True, index=True)
    message = Column(String)
    # Timestamp applied when the row is created (naive UTC via utcnow).
    send_at = Column(DateTime, default=datetime.datetime.utcnow)
    # BUG fix: back_populates='messages' on User requires a foreign-key
    # column linking messages -> users; without it SQLAlchemy cannot
    # determine the join condition and fails at mapper configuration time.
    owner_id = Column(Integer, ForeignKey('users.id'))
    owner = relationship('User', back_populates='messages')
20,642 | 865139e97c768de113f0b3d493ca8aa0373c23c6 | import csv
# Copy data/ratings.csv to data/new_ratings.csv, keeping the header row plus
# every row whose second column parses below 1000 (presumably an item id --
# TODO confirm against the data schema).
# newline='' is the documented requirement for files handed to the csv
# module; without it rows come out double-spaced on Windows.
with open('data/ratings.csv', 'r', newline='') as infile:
    with open('data/new_ratings.csv', 'w', newline='') as outfile:
        reader = csv.reader(infile)
        writer = csv.writer(outfile)
        # Pass the header straight through (replaces the old count flag);
        # an empty input file simply produces an empty output file.
        header = next(reader, None)
        if header is not None:
            writer.writerow(header)
        for row in reader:
            if int(row[1]) < 1000:
                writer.writerow(row)
|
20,643 | 3e7580ac89e6e186583d1ca95828f4e9397a9248 | myList = []
myDict = {}
# The two expressions below just evaluate type() and discard the result --
# they are only meaningful when pasted into an interactive shell.
type(myList)
type(myDict)
# Individual item names...
myItem1 = 'bed'
myItem2 = 'pillow'
myItem3 = 'picture'
# ...collected into a list...
myBedroomItems = ['bed', 'pillow', 'picture']
# ...and their quantities...
myItem1Quantity = 1
myItem2Quantity = 2
myItem3Quantity = 4
# ...combined into a name -> quantity mapping.
myBedroomInventory = {'bed': 1, 'pillow': 2, 'picture': 4}
|
20,644 | 7ec625612e20a943264b800380c343ba01fd7bae | #!/usr/bin/env python
from nose.tools import assert_equal
from pbtranscript.io import SMRTLinkIsoSeqFiles
def test_SMRTLinkIsoSeqFiles():
    """Check every derived path exposed by SMRTLinkIsoSeqFiles('/root')."""
    f = SMRTLinkIsoSeqFiles('/root')
    combined = '/root/tasks/pbtranscript.tasks.separate_flnc-0/combined'
    # (attribute name, expected path), asserted in the original order.
    expected = [
        ('tasks_dir', '/root/tasks'),
        ('combined_dir', combined),
        ('ccs_ds', '/root/tasks/pbcoretools.tasks.gather_ccsset-1/file.consensusreadset.xml'),
        ('ccs_fa_gz', '/root/tasks/pbcoretools.tasks.bam2fasta_ccs-0/ccs.gz'),
        ('flnc_gather_dir', '/root/tasks/pbcoretools.tasks.gather_contigset-2'),
        ('isoseq_flnc_ds', '/root/tasks/pbcoretools.tasks.gather_contigset-2/file.contigset.xml'),
        ('isoseq_flnc_fa', '/root/tasks/pbcoretools.tasks.gather_contigset-2/file.contigset.fasta'),
        ('nfl_gather_dir', '/root/tasks/pbcoretools.tasks.gather_contigset-3'),
        ('isoseq_nfl_ds', '/root/tasks/pbcoretools.tasks.gather_contigset-3/file.contigset.xml'),
        ('isoseq_nfl_fa', '/root/tasks/pbcoretools.tasks.gather_contigset-3/file.contigset.fasta'),
        ('draft_gather_dir', '/root/tasks/pbcoretools.tasks.gather_contigset-1'),
        ('isoseq_draft_ds', '/root/tasks/pbcoretools.tasks.gather_contigset-1/file.contigset.xml'),
        ('isoseq_draft_fa', '/root/tasks/pbcoretools.tasks.gather_contigset-1/file.contigset.fasta'),
        ('hq_isoforms_fa', combined + '/all.polished_hq.fasta'),
        ('hq_isoforms_fq', combined + '/all.polished_hq.fastq'),
        ('lq_isoforms_fa', combined + '/all.polished_lq.fasta'),
        ('lq_isoforms_fq', combined + '/all.polished_lq.fastq'),
        ('consensus_isoforms_fa', combined + '/all.consensus_isoforms.fasta'),
        ('ccs_report_json', '/root/tasks/pbreports.tasks.ccs_report-0/ccs_report.json'),
        ('classify_report_json', '/root/tasks/pbreports.tasks.isoseq_classify-0/isoseq_classify_report.json'),
        ('cluster_report_csv', combined + '/cluster_report.csv'),
        ('hq_lq_prefix_pickle', combined + '/all.hq_lq_pre_dict.pickle'),
    ]
    for attribute, path in expected:
        assert_equal(getattr(f, attribute), path)
|
20,645 | 6eeafce71e03684fbf43caaa036cce9fad8dac04 | """ Almost a replicate of Conv2DBNActiv in chainercv.links.connection. """
import chainer
from chainer.functions import relu
from chainer.links import BatchNormalization
from chainer.links import Convolution2D
try:
from chainermn.links import MultiNodeBatchNormalization
except ImportError:
pass
class Conv2DBNActiv(chainer.Chain):
    """ A Conv2DBNActiv that allow you to use custom BN function.

    Pipeline: Convolution2D -> optional BatchNormalization -> optional
    activation. When ``bn_kwargs`` carries a ChainerMN communicator under
    the key "comm", MultiNodeBatchNormalization is used instead of the
    plain BatchNormalization.
    """
    def __init__(
        self,
        in_channels,
        out_channels,
        ksize=None,
        stride=1,
        pad=0,
        dilate=1,
        groups=1,
        nobias=True,
        initialW=None,
        initial_bias=None,
        activ=relu,
        use_bn=True,
        bn_kwargs=None,
    ):
        """Build the chain.

        BUG fix: ``bn_kwargs`` previously defaulted to a mutable ``{}``
        shared across every instance (and stored on ``self``); it now
        defaults to None and a fresh dict is created per instance.
        """
        super().__init__()
        if bn_kwargs is None:
            bn_kwargs = {}
        if ksize is None:
            # Two-argument form: (out_channels, ksize) were given positionally.
            out_channels, ksize, in_channels = in_channels, out_channels, None
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.ksize = ksize
        self.stride = stride
        self.pad = pad
        self.dilate = dilate
        self.groups = groups
        self.nobias = nobias
        self.initialW = initialW
        self.initial_bias = initial_bias
        self.use_bn = use_bn
        self.bn_kwargs = bn_kwargs
        self.activ = activ
        with self.init_scope():
            self.conv = Convolution2D(
                in_channels,
                out_channels,
                ksize=ksize,
                stride=stride,
                pad=pad,
                nobias=nobias,
                initialW=initialW,
                initial_bias=initial_bias,
                dilate=dilate,
                groups=groups,
            )
            # TODO: allow passing customized BN
            if use_bn:
                if "comm" in bn_kwargs:
                    self.bn = MultiNodeBatchNormalization(out_channels, **bn_kwargs)
                else:
                    self.bn = BatchNormalization(out_channels, **bn_kwargs)
            else:
                self.bn = None
    def __call__(self, x):
        """Apply conv, then BN and the activation when configured."""
        h = self.conv(x)
        if self.bn is not None:
            h = self.bn(h)
        if self.activ is None:
            return h
        else:
            return self.activ(h)
|
20,646 | a74c10d88b21df12596beaa6758e374093b816b2 | from utils import file_utils
def parse_line(line):
    """Parse one 'op [+-]N' input line into an Instruction."""
    parts = line.split(' ')
    return Instruction(parts[0], int(parts[1]))
def prepare_data(file_name):
    """Read *file_name* and wrap its parsed instructions in a BootCode VM."""
    return BootCode(prepare_lines(file_name))
def prepare_lines(file_name):
    """Read *file_name* and parse every line into an Instruction."""
    raw_lines = file_utils.get_lines(file_name)
    return [parse_line(raw) for raw in raw_lines]
def solution(input_file):
    """Part 1: accumulator value just before any instruction runs twice."""
    vm = prepare_data(input_file)
    vm.execute_all_instructions()
    return vm.accumulator
def solution2(input_file):
    """Part 2: swap exactly one nop<->jmp so the program terminates, and
    return the accumulator after that terminating run."""
    program = prepare_lines(input_file)
    for index, original in enumerate(program):
        swapped = switch_instructions(original)
        if not swapped:
            # 'acc' instructions cannot be swapped; try the next position.
            continue
        candidate = program.copy()
        candidate[index] = swapped
        vm = BootCode(candidate)
        vm.execute_all_instructions()
        if vm.has_terminated():
            return vm.accumulator
def switch_instructions(instruction):
    """Return a nop<->jmp swapped copy of *instruction*, or None for 'acc'."""
    swaps = {'nop': 'jmp', 'jmp': 'nop'}
    new_operation = swaps.get(instruction.operation)
    if new_operation is None:
        return None
    return Instruction(new_operation, instruction.argument)
class Instruction:
    """One boot-code instruction: an operation ('nop'/'acc'/'jmp') plus its argument."""
    def __init__(self, operation, argument):
        self.operation, self.argument = operation, argument
class BootCode:
    """Tiny VM for nop/acc/jmp instruction lists (Advent of Code 2020, day 8)."""
    def __init__(self, instructions):
        self.instructions = instructions
        self.accumulator = 0
        self.current_position = 0
        # Positions already executed. PERF fix: a set makes the loop check
        # O(1); the original list made execute_all_instructions quadratic.
        self.visited_positions = set()
    def execute_next_instruction(self):
        """Execute the instruction at current_position and advance."""
        self.visited_positions.add(self.current_position)
        instruction = self.instructions[self.current_position]
        if instruction.operation == 'nop':
            self.current_position += 1
        elif instruction.operation == 'acc':
            self.accumulator += instruction.argument
            self.current_position += 1
        elif instruction.operation == 'jmp':
            self.current_position += instruction.argument
    def execute_all_instructions(self):
        """Run until an instruction would repeat (loop) or execution leaves the program."""
        while self.no_loop() and self.is_not_terminated():
            self.execute_next_instruction()
    def no_loop(self):
        """True while the next instruction has not been executed before."""
        return self.current_position not in self.visited_positions
    def is_not_terminated(self):
        """True while current_position still points inside the program."""
        return 0 <= self.current_position < len(self.instructions)
    def has_terminated(self):
        """True once execution has stepped outside the instruction list."""
        return not self.is_not_terminated()
20,647 | 7d27dfcacc31200dd4934f9b25e75fe3c80fcf95 | print("Hello Analytics Vidhya, thank for this free course")
# Announce each iteration index (same output text as before).
for i in range(3):
    print("Print number %d" % i)
|
20,648 | 1ea43c688e181553254babfd0f9adaf7629f7d47 | #!/usr/bin/env python3
"""
Non-parametric Bayesian hidden Markov models. Hidden Markov models are generative
time series models. This package uses a non-parametric Bayesian estimation process
that uses dynamic numbers of latent states to avoid having to specify this number in
advance.
"""
from .hdphmm import HDPHMM
from .chain import Chain
import warnings
# Emitted at import time so every consumer sees the stability caveat once.
warnings.warn(
    "bayesian_hmm is in beta testing and future versions may behave differently"
)
|
20,649 | 7f051de7fe94b527338722fe7f17d8b6171c0ac4 | # -*- coding: utf-8 -*-
from openerp import models, fields, api, _
from openerp.exceptions import except_orm
class characteristic(models.Model):
    """A characteristic line: a (type, value) pair attached to a product,
    stock label or category."""
    _name = 'characteristic'
    @api.one
    @api.depends('characteristic_type_id', 'value')
    def _compute_name(self):
        # Display name is "<type>|||<value>"; the '|||' separator may be
        # parsed elsewhere -- do not change it here.
        self.name = u'{}|||{}'.format(self.characteristic_type_id.name, self.value.name)
    @api.one
    @api.depends('value', 'uom_id')
    def _compute_uom_value(self):
        # Convert the raw related value from the type's UoM to this line's
        # UoM when both are set; otherwise expose the raw value unchanged.
        if self.parent_uom_id and self.uom_id:
            self.uom_value = self.env['product.uom']._compute_qty_obj(self.uom_id, float(self.parent_value), self.parent_uom_id)
        else:
            self.uom_value = self.parent_value
    characteristic_type_id = fields.Many2one('characteristic.type', string="Type", ondelete='cascade', required=True)
    value = fields.Many2one('characteristic.value', string="Value", ondelete="cascade", required=False)
    product_id = fields.Many2one('product.product', string='Product', required=False, ondelete='cascade')
    label_id = fields.Many2one('stock.label', string='Label', required=False, ondelete='cascade', select=True)
    category_id = fields.Many2one('characteristics.categories', string='Category', required=False, ondelete='cascade')
    name = fields.Char(string='Name', compute='_compute_name', store=True)
    model_id = fields.Many2one('characteristic', string='Model', required=False, ondelete='cascade')
    uom_id = fields.Many2one('product.uom', string='UoM', required=False, ondelete='restrict')
    parent_uom_category_id = fields.Many2one(related="characteristic_type_id.uom_id.category_id")
    parent_uom_id = fields.Many2one(related="characteristic_type_id.uom_id")
    # NOTE(review): 'stored=True' is not the ORM keyword ('store=') and is
    # likely ignored -- confirm whether this related field should be stored.
    parent_value = fields.Char(related="value.name", stored=True)
    uom_value = fields.Char(string='Value', compute='_compute_uom_value')
    is_created = fields.Boolean(string='Is created', default=False)
    characteristic_format = fields.Selection(related="characteristic_type_id.format")
    @api.model
    def create(self, vals):
        """Create a characteristic; when attached to a product, enforce
        one-characteristic-per-type and link it to the matching category line."""
        if vals.get('product_id'):
            product_id = self.env['product.product'].browse(vals['product_id'])
            # Abort creation if the product already has a characteristic of the same type.
            for charac in product_id.characteristics_ids:
                if charac.characteristic_type_id.id == vals['characteristic_type_id']:
                    raise except_orm('Error', _("You can't have the same characteristic type twice"))
            # Link the characteristic to the equivalent line defined on the product's category.
            categ_id = self.env['characteristics.categories'].search([('type_id', '=', vals['characteristic_type_id']), ('category_id', '=', product_id.categ_id.id)])
            if categ_id and 'category_id' not in vals:
                vals['category_id'] = categ_id.id
        vals['is_created'] = True
        return super(characteristic, self).create(vals)
    @api.multi
    def delete(self):
        """Unlink this characteristic and ask the client to reload the view."""
        self.unlink()
        return {'type': 'ir.actions.act_window_view_reload'}
class characteristic_type(models.Model):
    """
    Type of characteristic
    """
    _name = 'characteristic.type'
    _description = 'Type of characteristic'
    #===========================================================================
    # COLUMNS
    #===========================================================================
    @api.model
    def _format_get(self):
        # Selection options for the 'format' field below.
        return [
            ('numerical', 'Numerical'),
            ('string', 'String'),
            ('list', 'List'),
        ]
    name = fields.Char(required=True)
    format = fields.Selection('_format_get', string='Characteristic format', required=True)
    uom_id = fields.Many2one('product.uom', string='UoM', required=False, ondelete='restrict')
    value_ids = fields.One2many('characteristic.value', 'type_id', string='Values')
    active = fields.Boolean(string='Active', default=True)
    # Type names must be unique across the table.
    _sql_constraints = [
        ('name_uniq', 'unique (name)', "Type of characteristic name already exists !"),
    ]
    @api.multi
    def name_get(self):
        """Display name; append the UoM in brackets unless called from a
        product context (context key 'from_product')."""
        if self.env.context.get('from_product', False):
            return [(x.id, x.name) for x in self]
        return [(x.id, "%s [%s]" % (x.name, x.uom_id.name) if x.uom_id else x.name) for x in self]
class characteristics_for_category(models.Model):
    """
    Relation table between characteristics and category
    """
    _name = 'characteristics.categories'
    _description = 'Relation table between characteristics and category'
    #===========================================================================
    # COLUMNS
    #===========================================================================
    category_id = fields.Many2one('product.category', string='Category', required=True, ondelete='restrict')
    type_id = fields.Many2one('characteristic.type', string='Type', required=True, ondelete='restrict')
    default_value = fields.Many2one('characteristic.value', string="Value", ondelete="cascade", required=False)
    type_format = fields.Selection(related='type_id.format')
    @api.onchange('default_value')
    def _onchange_field(self):
        # For numerical types, reject a default value that does not parse
        # as a float and show a warning in the UI.
        if self.type_id.format == "numerical" and self.default_value:
            try:
                float(self.default_value.name)
            except (ValueError, TypeError):
                self.default_value = None
                return {
                    'warning': {'title': _('Error'), 'message':_("Invalid format for numeric value")}
                }
    @api.model
    def create(self, vals):
        """Create the relation line and propagate a matching characteristic
        to every product of the category."""
        # Create the corresponding 'characteristic' lines on the products.
        res = super(characteristics_for_category, self).create(vals)
        product_ids = self.env['product.product'].search([('categ_id', '=', vals['category_id'])])
        type_id = self.env['characteristic.type'].browse(vals['type_id'])
        product_ids.write({
            'characteristics_ids': [(0, 0, {
                'characteristic_type_id': vals['type_id'],
                'value': vals.get('default_value'),
                'category_id': res.id,
                'uom_id': type_id.uom_id.id,
            })]
        })
        return res
    @api.multi
    def write(self, vals):
        """Propagate a default-value change to characteristics that still
        carry the old default."""
        if 'default_value' in vals:
            # Update the default values on matching characteristic lines.
            # NOTE(review): uses self.id, so this assumes a single-record
            # recordset despite @api.multi -- confirm callers.
            charac_ids = self.env['characteristic'].search([('category_id', '=', self.id), ('value', '=', self.default_value.id)])
            charac_ids.write({'value': vals['default_value']})
        return super(characteristics_for_category, self).write(vals)
class characteristic_value(models.Model):
    """
    Characteristic value
    """
    _name = 'characteristic.value'
    _description = 'Characteristic value'
    #===========================================================================
    # COLUMNS
    #===========================================================================
    name = fields.Char(required=True, string="Value")
    type_id = fields.Many2one('characteristic.type', string='Type', required=True, ondelete='cascade')
    @api.one
    @api.constrains('name')
    def _check_name(self):
        # Values belonging to a numerical type must parse as floats.
        if self.type_id.format == 'numerical':
            try:
                float(self.name)
            except ValueError:
                raise except_orm('Error', _('Value is not numeric'))
    @api.model
    def default_get(self, fields_list):
        """Block inline creation of list-type values from the product form."""
        if self.env.context.get('from_product') and self.env['characteristic.type'].browse(self.env.context['default_type_id']).format == 'list':
            raise except_orm('Error', 'You can\'t add a new value to the list from a product')
        return super(characteristic_value, self).default_get(fields_list)
|
20,650 | 96a28aafe2a37bc2fab9ce72558a393c0140eb26 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
# SWIG-generated loader: locate and import the compiled _sourcetraildb
# extension, supporting both package-relative and top-level layouts.
if _swig_python_version_info >= (2, 7, 0):
    def swig_import_helper():
        # Try a package-relative import first, then fall back to a
        # top-level import of the extension module.
        import importlib
        pkg = __name__.rpartition('.')[0]
        mname = '.'.join((pkg, '_sourcetraildb')).lstrip('.')
        try:
            return importlib.import_module(mname)
        except ImportError:
            return importlib.import_module('_sourcetraildb')
    _sourcetraildb = swig_import_helper()
    del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
    def swig_import_helper():
        # Python 2.6 path: use the imp module to find the extension
        # living next to this file, closing the file handle on all paths.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_sourcetraildb', [dirname(__file__)])
        except ImportError:
            import _sourcetraildb
            return _sourcetraildb
        try:
            _mod = imp.load_module('_sourcetraildb', fp, pathname, description)
        finally:
            if fp is not None:
                fp.close()
        return _mod
    _sourcetraildb = swig_import_helper()
    del swig_import_helper
else:
    import _sourcetraildb
del _swig_python_version_info
# SWIG-generated compatibility shims for old Python versions.
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.
try:
    import builtins as __builtin__
except ImportError:
    import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    # SWIG attribute setter: route writes through the generated
    # __swig_setmethods__ table; with static=1, unknown attributes raise.
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    # Non-static variant: unknown attributes may be added freely.
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
    # SWIG attribute getter backed by the generated __swig_getmethods__ table.
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
    # repr() showing the proxied native object when one is attached.
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
    _object = object
    _newclass = 1
except __builtin__.Exception:
    class _object:
        pass
    _newclass = 0
# Enum values re-exported from the native _sourcetraildb module.
# Definition kinds:
DEFINITION_IMPLICIT = _sourcetraildb.DEFINITION_IMPLICIT
DEFINITION_EXPLICIT = _sourcetraildb.DEFINITION_EXPLICIT
# Symbol kinds:
SYMBOL_TYPE = _sourcetraildb.SYMBOL_TYPE
SYMBOL_BUILTIN_TYPE = _sourcetraildb.SYMBOL_BUILTIN_TYPE
SYMBOL_MODULE = _sourcetraildb.SYMBOL_MODULE
SYMBOL_NAMESPACE = _sourcetraildb.SYMBOL_NAMESPACE
SYMBOL_PACKAGE = _sourcetraildb.SYMBOL_PACKAGE
SYMBOL_STRUCT = _sourcetraildb.SYMBOL_STRUCT
SYMBOL_CLASS = _sourcetraildb.SYMBOL_CLASS
SYMBOL_INTERFACE = _sourcetraildb.SYMBOL_INTERFACE
SYMBOL_ANNOTATION = _sourcetraildb.SYMBOL_ANNOTATION
SYMBOL_GLOBAL_VARIABLE = _sourcetraildb.SYMBOL_GLOBAL_VARIABLE
SYMBOL_FIELD = _sourcetraildb.SYMBOL_FIELD
SYMBOL_FUNCTION = _sourcetraildb.SYMBOL_FUNCTION
SYMBOL_METHOD = _sourcetraildb.SYMBOL_METHOD
SYMBOL_ENUM = _sourcetraildb.SYMBOL_ENUM
SYMBOL_ENUM_CONSTANT = _sourcetraildb.SYMBOL_ENUM_CONSTANT
SYMBOL_TYPEDEF = _sourcetraildb.SYMBOL_TYPEDEF
SYMBOL_TEMPLATE_PARAMETER = _sourcetraildb.SYMBOL_TEMPLATE_PARAMETER
SYMBOL_TYPE_PARAMETER = _sourcetraildb.SYMBOL_TYPE_PARAMETER
SYMBOL_MACRO = _sourcetraildb.SYMBOL_MACRO
SYMBOL_UNION = _sourcetraildb.SYMBOL_UNION
# Reference kinds:
REFERENCE_TYPE_USAGE = _sourcetraildb.REFERENCE_TYPE_USAGE
REFERENCE_USAGE = _sourcetraildb.REFERENCE_USAGE
REFERENCE_CALL = _sourcetraildb.REFERENCE_CALL
REFERENCE_INHERITANCE = _sourcetraildb.REFERENCE_INHERITANCE
REFERENCE_OVERRIDE = _sourcetraildb.REFERENCE_OVERRIDE
REFERENCE_TEMPLATE_ARGUMENT = _sourcetraildb.REFERENCE_TEMPLATE_ARGUMENT
REFERENCE_TYPE_ARGUMENT = _sourcetraildb.REFERENCE_TYPE_ARGUMENT
REFERENCE_TEMPLATE_DEFAULT_ARGUMENT = _sourcetraildb.REFERENCE_TEMPLATE_DEFAULT_ARGUMENT
REFERENCE_TEMPLATE_SPECIALIZATION = _sourcetraildb.REFERENCE_TEMPLATE_SPECIALIZATION
REFERENCE_TEMPLATE_MEMBER_SPECIALIZATION = _sourcetraildb.REFERENCE_TEMPLATE_MEMBER_SPECIALIZATION
REFERENCE_INCLUDE = _sourcetraildb.REFERENCE_INCLUDE
REFERENCE_IMPORT = _sourcetraildb.REFERENCE_IMPORT
REFERENCE_MACRO_USAGE = _sourcetraildb.REFERENCE_MACRO_USAGE
REFERENCE_ANNOTATION_USAGE = _sourcetraildb.REFERENCE_ANNOTATION_USAGE
# SWIG-generated thin wrappers delegating to the native _sourcetraildb module.
# --- Version and error reporting ---
def getSupportedDatabaseVersion():
    """getSupportedDatabaseVersion() -> int"""
    return _sourcetraildb.getSupportedDatabaseVersion()
def getLastError():
    """getLastError() -> std::string"""
    return _sourcetraildb.getLastError()
def clearLastError():
    """clearLastError()"""
    return _sourcetraildb.clearLastError()
# --- Database lifecycle ---
def open(databaseFilePath):
    """open(std::string databaseFilePath) -> bool"""
    return _sourcetraildb.open(databaseFilePath)
def close():
    """close() -> bool"""
    return _sourcetraildb.close()
def clear():
    """clear() -> bool"""
    return _sourcetraildb.clear()
def isEmpty():
    """isEmpty() -> bool"""
    return _sourcetraildb.isEmpty()
def isCompatible():
    """isCompatible() -> bool"""
    return _sourcetraildb.isCompatible()
def getLoadedDatabaseVersion():
    """getLoadedDatabaseVersion() -> int"""
    return _sourcetraildb.getLoadedDatabaseVersion()
# --- Transactions ---
def beginTransaction():
    """beginTransaction() -> bool"""
    return _sourcetraildb.beginTransaction()
def commitTransaction():
    """commitTransaction() -> bool"""
    return _sourcetraildb.commitTransaction()
def rollbackTransaction():
    """rollbackTransaction() -> bool"""
    return _sourcetraildb.rollbackTransaction()
def optimizeDatabaseMemory():
    """optimizeDatabaseMemory() -> bool"""
    return _sourcetraildb.optimizeDatabaseMemory()
# --- Symbol recording ---
def recordSymbol(serializedNameHierarchy):
    """recordSymbol(std::string serializedNameHierarchy) -> int"""
    return _sourcetraildb.recordSymbol(serializedNameHierarchy)
def recordSymbolDefinitionKind(symbolId, symbolDefinitionKind):
    """recordSymbolDefinitionKind(int symbolId, DefinitionKind symbolDefinitionKind) -> bool"""
    return _sourcetraildb.recordSymbolDefinitionKind(symbolId, symbolDefinitionKind)
def recordSymbolKind(symbolId, symbolKind):
    """recordSymbolKind(int symbolId, SymbolKind symbolKind) -> bool"""
    return _sourcetraildb.recordSymbolKind(symbolId, symbolKind)
def recordSymbolLocation(symbolId, fileId, startLine, startColumn, endLine, endColumn):
    """recordSymbolLocation(int symbolId, int fileId, int startLine, int startColumn, int endLine, int endColumn) -> bool"""
    return _sourcetraildb.recordSymbolLocation(symbolId, fileId, startLine, startColumn, endLine, endColumn)
def recordSymbolScopeLocation(symbolId, fileId, startLine, startColumn, endLine, endColumn):
    """recordSymbolScopeLocation(int symbolId, int fileId, int startLine, int startColumn, int endLine, int endColumn) -> bool"""
    return _sourcetraildb.recordSymbolScopeLocation(symbolId, fileId, startLine, startColumn, endLine, endColumn)
def recordSymbolSignatureLocation(symbolId, fileId, startLine, startColumn, endLine, endColumn):
    """recordSymbolSignatureLocation(int symbolId, int fileId, int startLine, int startColumn, int endLine, int endColumn) -> bool"""
    return _sourcetraildb.recordSymbolSignatureLocation(symbolId, fileId, startLine, startColumn, endLine, endColumn)
# --- Reference recording ---
def recordReference(contextSymbolId, referencedSymbolId, referenceKind):
    """recordReference(int contextSymbolId, int referencedSymbolId, ReferenceKind referenceKind) -> int"""
    return _sourcetraildb.recordReference(contextSymbolId, referencedSymbolId, referenceKind)
def recordReferenceLocation(referenceId, fileId, startLine, startColumn, endLine, endColumn):
    """recordReferenceLocation(int referenceId, int fileId, int startLine, int startColumn, int endLine, int endColumn) -> bool"""
    return _sourcetraildb.recordReferenceLocation(referenceId, fileId, startLine, startColumn, endLine, endColumn)
# --- File, local-symbol, comment and error recording ---
def recordFile(filePath):
    """recordFile(std::string filePath) -> int"""
    return _sourcetraildb.recordFile(filePath)
def recordFileLanguage(fileId, languageIdentifier):
    """recordFileLanguage(int fileId, std::string languageIdentifier) -> bool"""
    return _sourcetraildb.recordFileLanguage(fileId, languageIdentifier)
def recordLocalSymbol(name):
    """recordLocalSymbol(std::string name) -> int"""
    return _sourcetraildb.recordLocalSymbol(name)
def recordLocalSymbolLocation(localSymbolId, fileId, startLine, startColumn, endLine, endColumn):
    """recordLocalSymbolLocation(int localSymbolId, int fileId, int startLine, int startColumn, int endLine, int endColumn) -> bool"""
    return _sourcetraildb.recordLocalSymbolLocation(localSymbolId, fileId, startLine, startColumn, endLine, endColumn)
def recordCommentLocation(fileId, startLine, startColumn, endLine, endColumn):
    """recordCommentLocation(int fileId, int startLine, int startColumn, int endLine, int endColumn) -> bool"""
    return _sourcetraildb.recordCommentLocation(fileId, startLine, startColumn, endLine, endColumn)
def recordError(message, fatal, fileId, startLine, startColumn, endLine, endColumn):
    """recordError(std::string message, bool fatal, int fileId, int startLine, int startColumn, int endLine, int endColumn) -> bool"""
    return _sourcetraildb.recordError(message, fatal, fileId, startLine, startColumn, endLine, endColumn)
# This file is compatible with both classic and new-style classes.
|
def move(n,sp,ep,boo):
    """Print the Tower-of-Hanoi move sequence for *n* disks from peg sp to peg ep.

    Pegs are numbered 1..3, so 6 - sp - ep is always the spare peg.
    'boo' is threaded through unchanged (kept for interface compatibility).
    """
    spare = 6 - sp - ep
    if n > 1:
        move(n - 1, sp, spare, boo)
    print(" ".join((str(sp), str(ep))))
    if n > 1:
        move(n - 1, spare, ep, boo)
# Read the disk count, print the optimal move count (2^n - 1), and only
# print the actual move sequence for small n (its length is exponential).
n = int(input())
boo = True
print(2**n-1)
if n <= 20:
    move(n,1,3,boo)
|
20,652 | 7e6de9e4ff81c2737453d3bfac7cae837b4ac1dc | from django.conf import settings
from edc.core.identifier.classes import BaseIdentifier
class OrderIdentifier(BaseIdentifier):
    """BaseIdentifier configured for lab orders: identifiers follow the
    '056{site}{sequence}' format, use a modulus of 11, and record history
    in mpepu_lab.orderidentifierhistory."""
    def __init__(self):
        # Site code comes from the Django settings of the deployment.
        self.community = settings.SITE_CODE
        super(OrderIdentifier, self).__init__(
            identifier_format='056{site}{sequence}',
            app_name='mpepu_lab',
            model_name='orderidentifierhistory',
            modulus=11)
    # def get_identifier_prep(self, **kwargs):
    #     """ Users may override to pass non-default keyword arguments to get_identifier
    #     before the identifier is created."""
    #     return {'community': self.community}
|
20,653 | c67fdada32ab4c6b38aecad32fe08f5cbc41b6a0 | import redis
from stests.core.cache.enums import StorePartition
from stests.core.utils import env
# Environment variables required by this module.
class EnvVars:
    # Base Redis database index (partition offsets below are added to it).
    # (Comment fixed: this previously said "Redis host", a copy-paste slip.)
    DB = env.get_var('CACHE_REDIS_DB', 1, int)
    # Redis host.
    HOST = env.get_var('CACHE_REDIS_HOST', "localhost")
    # Redis port.
    PORT = env.get_var('CACHE_REDIS_PORT', 6379, int)
# Map: partition type -> cache db index offset.
PARTITION_OFFSETS = {
    StorePartition.INFRA: 0,
    StorePartition.MONITORING: 1,
    StorePartition.ORCHESTRATION: 2,
    StorePartition.STATE: 3,
}
def get_store(partition_type: StorePartition) -> redis.Redis:
    """Returns instance of a redis cache store accessor.

    :returns: An instance of a redis cache store accessor.
    """
    # Each partition maps to its own logical redis database, offset
    # from the configured base index.
    # TODO: 1. cluster connections
    return redis.Redis(
        db=EnvVars.DB + PARTITION_OFFSETS[partition_type],
        host=EnvVars.HOST,
        port=EnvVars.PORT,
    )
|
def consecutiveNumbersSum(self, N):
    """
    Count the ways N can be written as a sum of consecutive positive integers.

    A run of m consecutive integers starting at a sums to m*a + m*(m-1)/2,
    so a run length m works iff N - m*(m-1)/2 is positive and divisible by m.

    :type N: int
    :rtype: int
    """
    count = 0
    length = 1
    while True:
        remainder = N - length * (length - 1) // 2
        if remainder <= 0:
            break
        if remainder % length == 0:
            count += 1
        length += 1
    return count
|
20,655 | 15e5c27d4a87c2640828fa057f4980482ca941e8 | # -*- coding: utf-8 -*-
from time import strptime
from datetime import datetime, date, time
from django.conf import settings
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.core.context_processors import csrf
from django.contrib.auth.decorators import login_required
from contract.models import *
from person.models import *
from employees.models import Employee, Visits as eVisits
from finance.models import *
from finance.forms import *
from .models import *
from .forms import *
# Russian weekday names, Monday-first, split into a list.
day_name = "понедельник вторник среда четверг пятница суббота воскресенье"
day_name = day_name.split()
# Russian alphabet letters used as the guest-list index tabs
# (some rarely-initial letters are omitted from the tuple).
abc = ("А","Б","В","Г","Д","Е","Ё","Ж","З","И","К",
       "Л","М","Н","О","П","Р","С","Т","У","Ф","Х",
       "Ц","Ч","Ш","Щ","Э","Ю","Я",)
@login_required(login_url='/login/')
def guest_visit(request, id=0, ):
    """Record a visit (invitation) for guest `id`.

    GET renders the visit form; POST validates and saves a FormInvitation
    stamped with the current time, then redirects to the guest card.
    """
    try:
        guest = Guest.objects.get(pk=id)
    except Guest.DoesNotExist:
        # BUG FIX: b_url was referenced here before being assigned
        # (NameError on unknown guest); fall back to the guest list page.
        o_name = 'Гость'
        b_url = reverse('r_guest')
        context_dict = dict(request=request, o_name=o_name, b_url=b_url)
        return render_to_response("err404.html", context_dict)
    b_url = reverse('r_guest_card', args=(guest.pk, ))
    if request.method == 'POST':
        # Stamp the invitation with the current time before validating.
        post_val = request.POST.copy()
        post_val['date'] = datetime.now()
        f = FormInvitation(post_val)
        if f.is_valid():
            f.save()
            return HttpResponseRedirect(b_url)
        else:
            return HttpResponse(f.errors)
    context_dict = dict(request=request, g=guest, b_url=b_url)
    context_dict.update(csrf(request))
    return render_to_response('guest_visit.html', context_dict)
@login_required(login_url='/login/')
def cashier(request, ):
    """Render the cashier workspace page."""
    context_dict = dict(
        request=request,
        p_title='Работа с кассой',
        cashhost=settings.CASHIER_HOST,
    )
    return render_to_response("cashier.html", context_dict)
@login_required(login_url='/login/')
def guest_card(request, id=0, act=None ):
    """Guest detail card; act == 'inout' toggles the guest in/out of the club.

    Checking in opens a GuestVisits row; checking out consumes one free
    invitation and closes the open visit.
    """
    b_url = reverse('r_guest')
    p_title = 'Личная карта гостя'
    cashhost = settings.CASHIER_HOST
    try:
        guest = Guest.objects.get(pk=id)
    except Guest.DoesNotExist:
        o_name = 'Гость'
        context_dict = dict(request=request, o_name=o_name, b_url=b_url)
        return render_to_response("err404.html", context_dict)
    # An open visit (is_online == -1) means the guest is currently inside.
    try:
        v = GuestVisits.objects.get(guest=guest, is_online=-1)
        guest.is_online = True
    except GuestVisits.DoesNotExist:
        v = ""
        guest.is_online = False
    if act == 'inout':
        guest.is_online = not guest.is_online
        if guest.is_online:
            # Check in: open a visit with the locker chosen in the POST form.
            v = GuestVisits(date_start=datetime.now(),
                            locker=request.POST['locker'],
                            date_end=None,
                            guest=guest)
            v.save()
        else:
            # Check out: burn the first free invitation and close the visit.
            # NOTE(review): raises IndexError if the guest has no free
            # invitation left — confirm this cannot happen in practice.
            i = Invitation.objects.filter(guest=guest, is_free=True)[0]
            i.is_free = False
            i.save()
            v.out()
            v = ""
    visits = GuestVisits.objects.filter(guest=guest).order_by('date_start')
    credits = Credits.objects.filter(guest=guest).order_by('plan_date')
    context_dict = dict(request=request, b_url=b_url, p_title=p_title, guest=guest,
                        v=v, visits=visits, credits=credits, cashhost = cashhost)
    context_dict.update(csrf(request))
    return render_to_response("guest_card.html", context_dict)
@login_required(login_url='/login/')
def clientinvite(request,):
    """List long-term contracts (half-year / year) with their invitations.

    Optional ?query= filters by client last name (icontains).
    """
    lst = []
    # Contract types lasting roughly six months or one year.
    ct = ContractType.objects.filter(period_days__in=[182, 365])
    if 'query' in request.GET.keys():
        query = request.GET.get('query')
        if len(query) > 0:
            clnts = Client.objects.filter(last_name__icontains=query).order_by("last_name")
            for c in Contract.objects.filter(contract_type__in=ct,
                                             is_current=1, client__in=clnts):
                invites = Invitation.objects.filter(contract=c)
                lst.append((c, invites))
    else:
        for c in Contract.objects.filter(contract_type__in=ct, is_current=1):
            invites = Invitation.objects.filter(contract=c)
            lst.append((c, invites))
    context_dict = dict(lst=lst, )
    return render_to_response("client_invite.html", context_dict)
@login_required(login_url='/login/')
def guest(request, id=-1, act=None):
    """Guest list / creation view.

    act == 'add': POST creates a Guest (plus an Invitation when a contract
    is supplied); GET renders the add form.  Otherwise lists guests filtered
    by ?query= substring or by first letter abc[id].
    """
    b_url = reverse('r_guest')
    p_title = 'Гость'
    lst = []
    if act == 'add':
        if request.method == 'POST':
            post_values = request.POST.copy()
            post_values['manager'] = request.user.pk
            post_values['is_client'] = 0
            post_values['date'] = datetime.now().date()
            # Convert the dd.mm.yyyy form value into a date object.
            d = strptime(post_values['born'],"%d.%m.%Y")
            post_values['born'] = date(d.tm_year, d.tm_mon, d.tm_mday,)
            form = FormGuest(post_values)
            if form.is_valid():
                # try:
                f = form.save()
                # except Exception:
                #     context_dict = dict(form=form)
                #     return render_to_response("form_err.html", context_dict)
            else:
                # NOTE(review): on invalid input `f` becomes an ErrorDict,
                # yet `f.pk` is read below — confirm invalid POSTs cannot
                # reach the contract branch.
                f = form.errors
            if 'contract' in post_values.keys():
                try:
                    c_pk = int(post_values['contract'])
                except ValueError:
                    c_pk = 0
                if c_pk > 0:
                    # Issue a free invitation tied to the new guest.
                    post_values['guest'] = f.pk
                    post_values['date'] = datetime.now()
                    post_values['is_free'] = True
                    fi = FormInvitation(post_values)
                    if fi.is_valid():
                        fi.save()
                    else:
                        fi = fi.errors
            url = reverse('r_guest', args=(0, ))
            return HttpResponseRedirect(url)
        context_dict = dict(request=request, p_title=p_title, b_url=b_url, )
        context_dict.update(csrf(request))
        return render_to_response("guest_add.html", context_dict)
    if 'query' in request.GET.keys():
        query = request.GET.get('query')
        lst = Guest.objects.filter(lastname__icontains=query).order_by("lastname")
    elif id > -1:
        # Filter by first letter of the last name (alphabet index tabs).
        lst = Guest.objects.filter(lastname__istartswith=abc[int(id)]).order_by("lastname")
    else:
        lst = Guest.objects.all().order_by("lastname")
    context_dict = dict(request=request, lst=lst, abc=abc, id=id)
    context_dict.update(csrf(request))
    return render_to_response("guest.html", context_dict)
@login_required(login_url='/login/')
def reminder(request, id=0, act=None):
    """Reminder list / add / delete / mark-read view.

    A reminder is either every-day, bound to a single date, or bound to a
    set of weekdays stored as a comma-separated string in `wdays`.
    """
    b_url = reverse('reminder')
    p_title = 'Напоминание'
    if act == 'add':
        if request.method == 'POST':
            post_values = request.POST.copy()
            post_values['author'] = request.user.pk
            # Parse HH:MM into a time object.
            t = strptime(request.POST['time'],"%H:%M")
            post_values['time'] = time(t.tm_hour, t.tm_min)
            post_values['is_everyday'] = False
            post_values['wdays'] = ""
            # group1 radio: 1 = every day, 2 = single date, 3 = weekdays.
            post_values['group1'] = int(post_values['group1'])
            if post_values['group1'] == 1:
                post_values['is_everyday'] = True
            elif post_values['group1'] == 2:
                d = strptime(request.POST['date'],"%d.%m.%Y")
                post_values['date'] = date(d.tm_year, d.tm_mon, d.tm_mday,)
            elif post_values['group1'] == 3:
                # Collect checked weekday boxes into a "0,1,2," style string.
                for i in xrange(0,7):
                    if "wday" + str(i) in post_values.keys():
                        post_values['wdays'] += str(i) + ","
            form = FormReminder(post_values)
            if form.is_valid():
                form.save()
                return HttpResponseRedirect(b_url)
            else:
                p_title = form.errors
        context_dict = dict(request=request, p_title=p_title, b_url=b_url, week=day_name)
        context_dict.update(csrf(request))
        return render_to_response("reminder_add.html", context_dict)
    elif id > 0:
        try:
            r = Reminder.objects.get(pk=id)
        except Reminder.DoesNotExist:
            o_name = p_title
            context_dict = dict(request=request, o_name=o_name, b_url=b_url)
            return render_to_response("err404.html", context_dict)
        if act == 'del':
            r.delete()
        elif act == 'read':
            r.read(request.user)
    lst = []
    # Tag each reminder with how it should render: 1 = daily, 2 = dated,
    # otherwise a list of weekday indices parsed from `wdays`.
    for r in Reminder.objects.all().order_by('is_everyday','date','wdays'):
        if r.is_everyday:
            lst.append((r,1))
        elif r.date:
            lst.append((r,2))
        else:
            wl = [int(x) for x in r.wdays[:-1].split(',')]
            lst.append((r,wl))
    context_dict = dict(request=request, lst=lst, week=day_name)
    context_dict.update(csrf(request))
    return render_to_response("reminder.html", context_dict)
@login_required(login_url='/login/')
def bithday(request):
    """List current clients whose birthday falls on a given day.

    POST supplies dd.mm in `born_date`; GET defaults to today.
    (NOTE: keeps the original "bithday" spelling — URLconf refers to it.)
    """
    if request.method == 'POST':
        born = strptime(request.POST['born_date'],"%d.%m")
        d = born.tm_mday
        m = born.tm_mon
        rdate = date(datetime.now().year,m,d,)
    else:
        d = datetime.now().day
        m = datetime.now().month
        rdate = datetime.now()
    # Only clients holding a current contract.
    c = Contract.objects.filter(is_current=True).values('client')
    lst = Client.objects.filter(born_date__month=m, born_date__day=d, pk__in=c).order_by("last_name")
    context_dict = dict(request=request, lst=lst, rdate=rdate)
    context_dict.update(csrf(request))
    return render_to_response("bithday.html", context_dict)
@login_required(login_url='/login/')
def clients_login(request,):
    """Front-desk search by card number (numeric input) or last name.

    A single match redirects straight to the person/employee card; several
    matches render a pick list.  (Python 2 code: relies on `long`.)
    """
    lst = []
    employees = []
    if request.method == 'POST':
        # Numeric input is treated as a card number, otherwise a name query.
        try:
            find = long(request.POST.get('lastname'))
        except ValueError:
            find = request.POST.get('lastname')
        if isinstance(find, long):
            res = Contract.objects.filter(card=find, is_current=1)
            # if not find in the current try find in the prospect
            if res.count() < 1:
                res = Contract.objects.filter(card=find, is_current=2)
            employees = Employee.objects.filter(card=find,)
        else:
            # Limit name search to clients with a current/prospect contract.
            ac = Contract.objects.filter(is_current__in=[1, 2]).values('client')
            res = Client.objects.filter(last_name__icontains=find, pk__in=ac)
            employees = Employee.objects.filter(lastname__icontains=find)
        if res.count() + employees.count() == 1:
            if employees:
                url = reverse('e_comein', args=(employees[0].pk, ))
            else:
                try: # if contract
                    url = reverse('person_card',args=[res[0].client.pk])
                except AttributeError:
                    url = reverse('person_card',args=[res[0].pk])
            return HttpResponseRedirect(url)
        else:
            lst = res
    context_dict = dict(request=request, lst=lst, employees=employees)
    context_dict.update(csrf(request))
    return render_to_response("client_login.html", context_dict, )
@login_required(login_url='/login/')
def clients_online(request,):
    """List clients, guests and employees currently inside, with debt counts."""
    lst = [
        (Credits.objects.filter(client=v.contract.client).count(), v)
        for v in Visits.objects.filter(is_online=-1).order_by('date_start')
    ]
    glst = [
        (Credits.objects.filter(guest=gv.guest).count(), gv)
        for gv in GuestVisits.objects.filter(is_online=-1).order_by('date_start')
    ]
    elst = eVisits.objects.filter(date_end__isnull=True).order_by('date_start')
    context_dict = dict(request=request, lst=lst, glst=glst, elst=elst)
    return render_to_response("online.html", context_dict, )
@login_required(login_url='/login/')
def reception_menu(request,):
    """Render the reception menu with today's date split into Y/m/d parts."""
    today = datetime.today()
    context_dict = dict(
        request=request,
        Y=today.year,
        m=today.strftime("%m"),
        d=today.strftime("%d"),
    )
    return render_to_response("reception_menu.html", context_dict, )
|
20,656 | a5a167cb64856842a8bd6ecd618470047b51bb05 | import numpy as np
import math
import scipy.io
from sklearn.model_selection import KFold
# data_train.shape => (330, 33)
# label_train.shape => (330, 1)
# data_test.shape => (21, 33)
def SOM(dataset, labelset, label_set, j,pn = 16,lr = 1e-5, iternum=0, resume=False):
n, d = dataset.shape
if resume:
cv = np.load('{}_{}_center_vector.npy'.format(pn, label_set))
else:
cv = 1 - 2 * np.random.random([j ,d])
change = 10000
cv_old = cv.copy()
while change > 1e-5:
data_idx = np.random.randint(0, n)
label = labelset[data_idx]
if label == label_set:
data = dataset[data_idx]
diff = np.sum(np.square(cv - data), axis=1)
idx = np.argmin(diff)
cv[idx] = cv[idx] + lr * (data - cv[idx])
iternum += 1
if iternum % 1e3 == 0:
change = np.sum(np.abs(cv - cv_old))
cv_old = cv.copy()
print(label_set, change, iternum)
np.save('{}_{}_center_vector.npy'.format(pn, label_set), cv)
return cv
def SOM_c(data, label, m, resume):
    """Build ~m centers total via per-class SOM, proportional to class sizes.

    Returns (centers, targets): stacked center vectors and their class labels.
    NOTE(review): the final concatenations index [0] and [1] only, so exactly
    two classes are assumed — confirm for other datasets.
    """
    n, d = data.shape
    label = label.tolist()
    # Collect the distinct labels, preserving first-seen order.
    labels = []
    for i in label:
        if i not in labels:
            labels.append(i)
    c = []
    target = []
    # Using SOM method to estimate C and its label
    for i in range(len(labels)):
        # Allocate centers proportionally to class frequency.
        j = round(label.count(labels[i])/n * m)
        c.append(SOM(data, label, labels[i], j, m, lr=1e-2,iternum=0,resume=resume))
        target.append(np.array([labels[i]]*j))
    c = np.concatenate((c[0], c[1]))
    target = np.concatenate((target[0], target[1]))
    return c, target
def Gaussian(x, c, t):
    """Radial basis value exp(-||x - c||^2 / (2 t^2)) for input x, center c."""
    sq_dist = np.sum(np.square(x - c))
    return math.exp(-sq_dist / (2 * t ** 2))
def EstimateC(data, label, pn=30, pretrained=False):
    """Greedy forward selection of up to `pn` RBF centers from the data.

    Starts with the single sample that minimises squared training error,
    then repeatedly adds the candidate sample lowering the error most.
    Stops at `pn` centers, when the error rises, or when the improvement
    drops below 0.15.  Saves and returns the chosen (m, d) center matrix.
    """
    print('Getting center vector...')
    if pretrained:
        return np.load('{}_center_vector.npy'.format(pn))
    n, d = data.shape
    e = np.zeros(n)
    # NOTE(review): the candidate pool is hard-coded to 330 indices, which
    # matches data_train.shape[0] for this dataset — confirm for other data.
    candi = [i for i in range(0, 330)]
    # Error of each single-center model (fixed width 0.707).
    for i in range(0, n):
        c = data[i]
        o, w = EstimateOW(data, c, 0.707, label)
        f = np.dot(o, w)
        e[i] = 1/2 * np.sum(np.square(f - label))
    first = np.argmin(e)
    err = e[first]
    old_err = np.Inf
    c = data[first].reshape((1,-1))
    candi.pop(first)
    m = 1
    # print('round:{} error:{:.2f}\n'.format(m, err))
    while m < pn and err <= old_err and np.abs(err - old_err) > 0.15:
        m += 1
        old_err = err
        e = np.Inf * np.ones(n)
        # Try each remaining candidate as the next center.
        for k in range(0, n - m):
            i = candi[k]
            nc = np.concatenate((c, data[i].reshape(1,-1)), axis=0)
            t = EstimateT(nc, m)
            o, w = EstimateOW(data, nc, t, label)
            f = np.dot(o, w)
            e[i] = 1/2 * np.sum(np.square(f - label))
        first = np.argmin(e)
        err = e[first]
        c = np.concatenate((c, data[first].reshape(1,-1)), axis=0)
        candi.pop(candi.index(first))
        # print('round:{} error:{:.2f}\n'.format(m, err))
    print('Number of center vector:{}, saving'.format(m))
    np.save('{}_center_vector.npy'.format(m), c)
    return c
def EstimateT(c, m):
    """Gaussian width heuristic: max pairwise center distance / sqrt(2m)."""
    dmax = 0.0
    for i in range(m):
        for j in range(i + 1, m):
            d = np.sqrt(np.sum(np.square(c[i] - c[j])))
            if d > dmax:
                dmax = d
    return dmax / np.sqrt(2 * m)
def getO(data, c, t):
    """Activation matrix: o[i][j] = Gaussian(data[i], c[j], t), shape (n, m)."""
    m = c.shape[0]
    n = data.shape[0]
    rows = []
    for i in range(n):
        rows.append([Gaussian(data[i], c[j], t) for j in range(m)])
    return np.array(rows).reshape(n, m)
def EstimateOW(data, c, t, label):
    """Least-squares fit of the RBF output weights.

    Returns (o, w): o is the activation matrix from getO, and
    w = pinv(o^T o) o^T label solves the normal equations.
    """
    n, d = data.shape
    m = c.shape[0]
    o = getO(data, c, t)
    w = np.dot(np.dot(np.linalg.pinv((np.dot(o.T,o))),o.T),np.array(label))
    return o, w
def LinearRBF(data, label, pn, pretrained=False, SOM=False):
    """Fit an RBF network: centers (SOM or greedy), width t, weights w.

    Returns (c, w, t).  NOTE: the `SOM` flag parameter shadows the
    module-level SOM function inside this body.
    """
    if SOM:
        c, target = SOM_c(data, label, pn, pretrained)
    else:
        c = EstimateC(data, label, pn=pn, pretrained=pretrained)
    m, _ = c.shape
    t = EstimateT(c, m)
    o, w = EstimateOW(data, c, t, label)
    return c, w, t
def Dataloader():
    """Load train data/labels and test data from the .mat files in cwd."""
    train_data = scipy.io.loadmat('data_train.mat')['data_train']
    train_label = scipy.io.loadmat('label_train.mat')['label_train']
    test_data = scipy.io.loadmat('data_test.mat')['data_test']
    return train_data, train_label, test_data
def Train(data_train, label_train, pn=4, pretrained=False, SOM=False):
    """Fit the RBF net and print training accuracy.

    Outputs and labels are binarised with the unit step before comparison.
    Returns (c, w, t) for later evaluation.
    """
    c, w, t = LinearRBF(data_train, label_train, pn=pn , pretrained=pretrained, SOM=SOM)
    m, d = c.shape
    o = getO(data_train, c, t)
    f = np.dot(o, w)
    # Map real-valued targets/outputs to {0, 1} via the unit step.
    label_train = np.heaviside(label_train, 0.5)
    f = np.heaviside(f, 0.5)
    err = 0
    n, _ = label_train.shape
    for i in range(0, n):
        if label_train[i] != f[i]:
            err += 1
    print('Train accuracy is {:.2f}%'.format(100 * (1 - err/n)))
    return c, w, t
def Evaluate(data_test, label_test, c, w, t, mode='t'):
    """Run the RBF net on data_test.

    mode 't': compare against label_test, print and return accuracy in [0, 1].
    mode 'e': just print the predictions (label_test unused), return None.
    """
    o = getO(data_test, c, t)
    f = np.dot(o, w)
    f = np.heaviside(f, 0.5)
    err = 0
    if mode == 't':
        label_test = np.heaviside(label_test, 0.5)
        print('Truth is {}'.format(label_test.reshape(1, -1)))
        print('Result is {}'.format(f.reshape(1,-1)))
        n, _ = label_test.shape
        for i in range(0, n):
            if label_test[i] != f[i]:
                err += 1
        print('Test accuracy is {:.2f}%'.format(100 * (1 - err/n)))
        return 1 - err/n
    if mode == 'e':
        print('Result is {}'.format(f.reshape(1,-1)))
        return
def Method(train_data, train_label, test_data, getCV=False, pn=16, SOM=False):
    """Either grid-search `pn` by 5-fold CV (getCV=True), or train once with
    pretrained centers and print predictions for test_data."""
    kf = KFold(5, shuffle=True, random_state=42)
    rr = 1
    if getCV:
        best_pn = 0
        best_score = 0
        # Try each candidate center count; score by mean CV accuracy.
        for pn in range(2, 20):
            scores = []
            rr = 1
            for train_index, test_index in kf.split(train_data):
                print('========================== The {}th experiment with pn={} =========================='.format(rr, pn))
                rr += 1
                data_train, label_train = train_data[train_index], train_label[train_index]
                data_test, label_test = train_data[test_index], train_label[test_index]
                print('Start Training...')
                c, w, t = Train(data_train, label_train, pn, pretrained=False, SOM=SOM)
                print('Start Evaluating..')
                score = Evaluate(data_test, label_test, c, w, t, mode='t')
                scores.append(score)
            mean_score = np.mean(np.array(scores))
            print('The mean score with pn={} is {}\n'.format(pn, mean_score))
            if mean_score > best_score:
                best_pn = pn
                best_score = mean_score
        print('The best pn is {}, with the best score: {}'.format(best_pn, best_score))
    else:
        c, w, t = Train(train_data, train_label, pn, pretrained=True, SOM=SOM)
        print('pn is: {}; t is: {:.4f}'.format(pn, t))
        Evaluate(test_data, None, c, w, t, mode='e')
if __name__ == "__main__":
    '''
    These parameters flow through the whole pipeline:
      pn: configured number of center vectors (CVs)
      m:  number of CVs actually used during computation
      c:  the center vectors
      w:  the output weights
      t:  the Gaussian width parameter
    '''
    print('Loading data')
    train_data, train_label, test_data = Dataloader()
    # Use these settings to produce the final predictions.
    getCV = False
    pn = 17
    # # Use these settings for hyper-parameter tuning (CV grid search).
    # getCV = True
    # pn = 15
    # Method2( train_data, train_label, test_data, getCV=getCV, pn=pn)
    Method(train_data, train_label, test_data, getCV=getCV, pn=pn, SOM=True)
20,657 | 795da10567d6778c4afb6dcaa249412c1d8b54de | # -*- coding: utf-8 -*-
# Copyright (c) 2007-2013 NovaReto GmbH
# cklinger@novareto.de
from bgetem.sqlcontainer.models import PloneSQLModel
from bgetem.sqlcontainer.dexterity import ContentFTI, AddForm
from collective.z3cform.datagridfield import DataGridFieldFactory
from five import grok
from nva.hhs import interfaces
from plone.formwidget.autocomplete import AutocompleteFieldWidget
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relation, relationship
from z3c.form.browser.checkbox import CheckBoxFieldWidget
from z3c.form.interfaces import IEditForm
from z3c.form.object import registerFactoryAdapter
from zope.interface import implementer, implementsOnly, implementedBy
from zope.publisher.interfaces.browser import IBrowserPage
from App.class_init import InitializeClass
from .. import Base
@implementer(interfaces.ICategory)
class Category(Base):
    """Product category row; many-to-many with Product via products_categories."""
    __tablename__ = "categories"
    id = Column('id', String(50), primary_key=True)
    name = Column('name', String(50))
    products = relationship(
        "Product", secondary='products_categories', collection_class=set)
class ProductCategory(Base):
    """Association table linking products to categories."""
    __tablename__ = 'products_categories'
    product_id = Column(Integer, ForeignKey('products.id'), primary_key=True)
    category_id = Column(Integer, ForeignKey('categories.id'), primary_key=True)
@implementer(interfaces.IProducer)
class Producer(Base):
    """Manufacturer row exposed to Plone as 'producer' content."""
    grok.title("Producer")
    portal_type = "producer"
    __tablename__ = 'producers'
    id = Column('id', Integer, primary_key=True)
    name = Column('name', String(50))
    street = Column('street', String(50))
    city = Column('city', String(50))
    zip = Column('zip', String(50))
    # NOTE(review): the DB column is spelled 'hompage' — confirm the schema
    # really uses that spelling before "fixing" it.
    www = Column('hompage', String(50))
    email = Column('email', String(50))
    products = relation("Product", backref="producer")

    def getId(self):
        # Plone content id: the producer's name.
        return self.name
class ProducerWrapper(PloneSQLModel):
    """Plone content wrapper around Producer rows."""
    pass


class ProducerFTI(ContentFTI):
    """Factory type information registering Producer as 'producer'."""
    grok.name('producer')
    __model__ = Producer
    schema = interfaces.IProducer
    klass = "nva.hhs.content.Producer"


class ProducerAddForm(AddForm):
    """Default add form for producers."""
    grok.name('producer')
@implementer(interfaces.IProduct)
class Product(Base):
    """Product row exposed to Plone as 'product' content."""
    grok.title("Product")
    portal_type = "product"
    __tablename__ = 'products'
    id = Column('id', Integer, primary_key=True)
    name = Column('name', String(50))
    product_id = Column('product_id', String(50))
    producer_id = Column('producer_id', Integer, ForeignKey('producers.id'))
    categories = relationship(
        "Category", secondary='products_categories', collection_class=set)

    def getId(self):
        # Plone content id: the product's name.
        return self.name
class ProductWrapper(PloneSQLModel):
    """Plone content wrapper around Product rows."""
    pass


class ProductFTI(ContentFTI):
    """Factory type information registering Product as 'product'."""
    grok.name('product')
    __model__ = Product
    schema = interfaces.IProduct
    klass = "nva.hhs.content.Product"


class ProductAddForm(AddForm):
    """Add form for products: checkbox categories, autocomplete producer,
    datagrid hazards."""
    grok.name('product')

    def updateWidgets(self):
        self.fields['categories'].widgetFactory = CheckBoxFieldWidget
        self.fields['producer'].widgetFactory = AutocompleteFieldWidget
        self.fields['hazards'].widgetFactory = DataGridFieldFactory
        #self.fields["variables"].widgetFactory
        # NOTE(review): super(AddForm, ...) skips AddForm.updateWidgets —
        # likely intended to be super(ProductAddForm, self); confirm.
        super(AddForm, self).updateWidgets()
        # Enable/Disable the insert button on the right
        self.widgets['hazards'].allow_insert = True
        # Enable/Disable the delete button on the right
        self.widgets['hazards'].allow_delete = True
        # Enable/Disable the auto-append feature
        self.widgets['hazards'].auto_append = False
        # Enable/Disable the re-order rows feature
        self.widgets['hazards'].allow_reorder = False
from plone.dexterity.browser import add, edit
from zope.publisher.interfaces.browser import IDefaultBrowserLayer
from plone.dexterity.interfaces import IDexterityFTI
from Acquisition import Explicit, ImplicitAcquisitionWrapper
class ProductEditForm(edit.DefaultEditForm):
    """Edit form for products; edits the wrapped SQL row (context.content)."""

    def getContent(self):
        # Edit the underlying SQL model, not the Plone wrapper itself.
        return self.context.content

    def updateWidgets(self):
        self.fields['categories'].widgetFactory = CheckBoxFieldWidget
        self.fields['producer'].widgetFactory = AutocompleteFieldWidget
        self.fields['hazards'].widgetFactory = DataGridFieldFactory
        super(ProductEditForm, self).updateWidgets()
@implementer(interfaces.IHazard)
class Hazard(Base):
    """Hazard row attached to a product (used as datagrid rows)."""
    grok.title("Hazard")
    portal_type = "hazard"
    __tablename__ = 'hazards'
    id = Column('id', String(50), primary_key=True)
    # NOTE(review): attribute names and DB column names diverge here
    # (type -> 'name', timespan -> 'category') — confirm this is intended.
    type = Column('name', String(50))
    timespan = Column('category', String(50))
    product_id = Column('product_id', Integer, ForeignKey('products.id'))
    product = relation("Product", backref="hazards")

    def reindexObject(self, *args, **kwargs):
        # Plone calls this on content objects; SQL rows are not catalogued.
        pass
class HazardFTI(ContentFTI):
    """Factory type information registering Hazard as 'hazard'."""
    grok.name('hazard')
    __model__ = Hazard
    schema = interfaces.IHazard
    klass = "nva.hhs.content.Hazard"


class HazardAddForm(AddForm):
    """Default add form for hazards."""
    grok.name('hazard')


# Let z3c.form construct Hazard instances from IHazard datagrid row data.
registerFactoryAdapter(interfaces.IHazard, Hazard)
|
20,658 | 17fa38cbd88c025be828de433de7d7b95d6eecdc | import time
import random
import string
import urlparse
import urllib
import hashlib
import tornado.web
import tornado.template
import tornado.auth
import tornado.escape
from setting import settings
import functools
from tornado import httpclient
from tornado import escape
class WeiboMixin(tornado.auth.OAuth2Mixin):
    """OAuth2 mixin for Sina Weibo (authorization-code flow, Python 2 era)."""
    _OAUTH_ACCESS_TOKEN_URL = "https://api.weibo.com/oauth2/access_token"
    _OAUTH_AUTHORIZE_URL = "https://api.weibo.com/oauth2/authorize"

    @tornado.web.asynchronous
    def get_authenticated_user(self, redirect_uri, client_id, client_secret,
                               code, callback, extra_fields=None):
        """Exchange the auth `code` for an access token, then run `callback`."""
        http = httpclient.AsyncHTTPClient()
        fields = set()
        if extra_fields:
            fields.update(extra_fields)
        args = {
            "redirect_uri": redirect_uri,
            "code": code,
            "client_id": client_id,
            "client_secret": client_secret,
            "grant_type": "authorization_code"
        }
        # POST the token request; _on_access_token receives the response.
        http.fetch(self._OAUTH_ACCESS_TOKEN_URL,
                   self.async_callback(self._on_access_token, redirect_uri, client_id, client_secret, callback, fields),
                   method="POST", body=urllib.urlencode(args))

    @tornado.web.asynchronous
    def _on_access_token(self, redirect_uri, client_id, client_secret,
                         callback, fields, response):
        # Decode the token JSON and hand the session dict to the caller.
        session = escape.json_decode(response.body)
        callback(session)
class WeiboHandler(tornado.web.RequestHandler,
                   WeiboMixin):
    """Login endpoint: redirects to Weibo, then handles the OAuth callback."""

    @tornado.web.asynchronous
    def get(self):
        redirect_uri = "%s://%s%s" % (self.request.protocol, self.request.host, self.request.path)
        code = self.get_argument("code", None)
        if code:
            # Second leg: Weibo redirected back with an authorization code.
            self.get_authenticated_user(redirect_uri, settings["WeiboAppKey"], settings["WeiboAppSecret"],
                                        code, self._on_auth)
            return
        # First leg: send the user to Weibo's authorize page.
        self.authorize_redirect(redirect_uri,
                                client_id=settings["WeiboAppKey"],
                                extra_params={"response_type": "code"})

    def _on_auth(self, session):
        # Finish the request with the raw session dict as the response body.
        self.finish(session)
class LogoutHandler(tornado.web.RequestHandler):
    """Clear the auth cookie and redirect to ?next= (default "/")."""

    def get(self):
        self.redirect_url = self.get_argument("next", "/")
        self.clear_cookie("user")
        self.redirect(self.redirect_url)
|
20,659 | 33b4d280f01c1fd96e385591c6cd09f4a2342d26 | from random import randint
# Guess-the-number game: keep prompting until the user types the secret
# digit drawn from 0..9 inclusive.
informed_number = -1
secret_number = randint(0, 9)
while informed_number != secret_number:
    informed_number = int(input('Number: '))
print('Secret number {} was matched!'.format(secret_number))
|
20,660 | 8dfe4f1720a838c3ce6bc7dfdb6d109247caca8c | #test
#print('hello python');
#x = 1
# while x < 100:
# x += 1
# print(x)
# if x < 10:
# x += 1
# print(x)
# message = 'hello python'
# print(message)
#message = 'hello python'
#print(message.title())
#转大写
#print(message.upper())
#转小写
#print(message.lower())
#z字符串拼接
# first_name = 'wang'
# last_name = 'zhongyu'
# full_name = first_name + ' ' + last_name
# print(full_name)
#换行符
#print("people:\nzhongyu;\nmeina")
# NOTE(review): `list` shadows the built-in list type and the trailing
# semicolon is unnecessary — rename if this variable is ever used later.
list = ['name','age','sex'];
#去除字符串左右的空格
# name = ' meina '
# print(name)
# print(name.rstrip())
# print(name.lstrip())
#小试:个性化消息: 个性化消息: 将用户的姓名存到一个变量中,并向该用户显示一条消息。显示的消息应非常简单,如“Hello Eric, would you like to learn some Python today?”。
# name = 'John'
# message = 'Hello ' + name + ', would you like to learn some Python today?'
# print(message)
#2-4 调整名字的大小写: 调整名字的大小写: 将一个人名存储到一个变量中,再以小写、大写和首字母大写的方式显示这个人名
# name = 'simith jack'
# print(name.title())
# print(name.upper())
# print(name.lower())
#找一句你钦佩的名人说的名言,将这个名人的姓名和他的名言打印出来
# message = 'Albert Einstein once said, “A person who never made a mistake never tried anything new.”'
# print(message) #注意引号的使用
#计算
# num_1 = 3
# num_2 = 4
# res = num_1*num_2
# res_1 = num_1/num_2
# res_2 = num_1 ** num_1
# print(res)
# print(res_1)
# print(res_2)
# age = 27
# message = 'you are ' + str(age) + ' years old'
# print(message)
#报错
# message_1 = 'you are ' + age + ' years old'
# print(message)
#列表
# list = ['meina','zhongyu','asan'];
# print(list)
# print(list[0])
# print(list[1])
# print(list[0].title())
# print(list[-1].upper())
#内容替换
# list[1] = 'lisi'
# print(list[0])
# print(list[1])
# print(list[2])
#末尾添加元素
# list.append('jack')
# print(list[3])
# print(list)
#指定位置添加元素
# list.insert(0,'john')
# print(list)
#删除指定位置元素
# del(list[0])
# print(list)
#删除末尾元素
# print(list.pop())
#remove
# list = ['xiaoming','xiaohong','xiaolv','xiaohei']
# re_name = 'xiaohei'
# list.remove(re_name)
# print(list)
#排序 (永久改变顺序排序)
# names = ['xiaoming','xiaolan','xiaohei']
# res = names.sort()
# print(names)
#(临时改变书序排序sorted)
#翻转列表
# res = list.reverse()
# print(list)
#获取列表长度
#print(len(list))
#for 循环
# names = ['zhangsan','lisi','wangwu'];
# for name in names:
# print(name)
#range函数
# for value in range(1,7):
# print(value)
#基本函数
# numbers = [1,1,2,4,7,0,9,56]
# print(min(numbers))
# print(max(numbers))
# print(sum(numbers))
#平方数
# values = [value**2 for value in range(1,11)]
# print(values)
#列表切片
# numbers = [1,1,2,4,7,0,9,56]
# print(numbers[1:5])
#复制切片
# numbers = [1,1,2,4,7,0,9,56]
# num = numbers[:];
# print(num)
|
20,661 | aecc70f418887f57f4ea2d09834ee36caace67fb | from connect4 import Connect4
text_file = open("Output4.txt", "w")


def _run_trials(label, h_first, h_second, n_trials=30):
    """Play n_trials Connect4 games with the given heuristic ordering.

    Writes `label` to the output file first, then returns the
    (first-player wins, second-player wins, ties) tally.
    The two originally duplicated trial loops are folded into this helper.
    """
    text_file.write(label)
    x_wins = o_wins = ties = 0
    for _ in range(n_trials):
        game = Connect4(3, h_first, 3, h_second)
        result = game.evaluate()
        if result == "X":
            x_wins += 1
        elif result == "O":
            o_wins += 1
        else:
            ties += 1
    return x_wins, o_wins, ties


# Heuristic 4 moves first.
i_win, j_win, tie = _run_trials("4 goes first", 4, 2)
text_file.write("Heuristic 4: %d,\nHeuristic 2: %d, \nTie: %d\n" % (i_win, j_win, tie))

# Heuristic 2 moves first.
i_win, j_win, tie = _run_trials("2 goes first", 2, 4)
text_file.write("Heuristic 2: %d,\nHeuristic 4: %d, \nTie: %d\n" % (i_win, j_win, tie))

text_file.close()
|
20,662 | 155336265ab13b037fe3a678d1a323783c558281 | from datetime import datetime
from typing import Optional
from fastapi import Depends, HTTPException
from sqlalchemy.orm import Session
from sqlalchemy.exc import IntegrityError
from auth import verify_id_token, UserTokendata
from apps.Tickets import Schemes
from apps.Tickets import Models
from apps.Users.Models import Users
from db import get_session
from sqlalchemy.orm.exc import NoResultFound
import firebase_admin
import firebase_admin.auth
import firebase_admin.exceptions
def maxTickets(uid: str, session:Session) -> int:
    """Return the ticket quota (`ticketmax`) for user `uid`.

    Raises HTTPException(400) when no such user exists.
    """
    userData = session.query(Users).filter(Users.uid == uid).one_or_none()
    if userData:
        return userData.ticketmax
    raise HTTPException(status_code= 400, detail = "No user found. try again.")
def nowTickets(uid: str, session:Session) -> int:
    """Count tickets currently owned by user `uid`."""
    return session.query(Models.Tickets).filter(Models.Tickets.uid == uid).count()
async def create_ticket(
    ticketin: Schemes.TicketIn,
    token: UserTokendata = Depends(verify_id_token),
    session: Session = Depends(get_session)
) -> Schemes.TicketOut:
    """Create a ticket for the authenticated user.

    Raises 422 when the user's quota is exhausted, 400 when volumemax > 100.
    """
    # Enforce the per-user ticket quota before inserting.
    if maxTickets(token.user_id, session) <= nowTickets(token.user_id, session):
        raise HTTPException(status_code= 422)
    if ticketin.volumemax > 100:
        raise HTTPException(status_code=400, detail="the volume of tickets be under 100.")
    timestamp = datetime.now()
    adds = Models.Tickets(
        **ticketin.dict(),
        uid = token.user_id,
        timestamp = timestamp
    )
    session.add(adds)
    session.commit()
    session.refresh(adds)
    # Re-query by (uid, name) to build the response model.
    # NOTE(review): the refreshed `adds` could be used directly, and .one()
    # assumes (uid, name) is unique — confirm that constraint exists.
    return Schemes.TicketOut(
        **session.query(Models.Tickets).filter(
            Models.Tickets.uid == token.user_id,
            Models.Tickets.name == ticketin.name
        ).one().__dict__
    )
async def volume_of_all_tickets(
    token: UserTokendata = Depends(verify_id_token),
    session: Session = Depends(get_session)) -> int:
    """Return the total number of tickets across all users."""
    return session.query(Models.Tickets).count()
async def get_tickets(
    uid: Optional[str] = None,
    skip: Optional[int] = 0,
    limit: Optional[int] = 100,
    token: UserTokendata = Depends(verify_id_token),
    session: Session = Depends(get_session)
):
    """Page through tickets, optionally filtered to one user.

    Raises 400 when `skip` exceeds the total ticket count.
    """
    # NOTE(review): this bound uses the global count even when `uid`
    # filters the query below — confirm that is intended.
    volume = session.query(Models.Tickets).count()
    exception = HTTPException(status_code=400, detail = f"over limit. volume: {volume}")
    if skip > volume:
        raise exception
    query = session.query(Models.Tickets)
    if uid:
        query = query.filter(Models.Tickets.uid == uid)
    query = query.limit(limit)
    query = query.offset(skip)
    return list(map(lambda x: Schemes.TicketOut(**x.__dict__), query.all()))
async def get_ticket(
    ticketid: int,
    token: UserTokendata = Depends(verify_id_token),
    session: Session = Depends(get_session)
):
    """Fetch a single ticket by primary key.

    NOTE(review): .one() raises NoResultFound (surfacing as HTTP 500) for
    unknown ids — consider one_or_none() + HTTPException(404).
    """
    ticket = session.query(Models.Tickets).filter(Models.Tickets.ticketid == ticketid).one()
    return Schemes.TicketOut(**ticket.__dict__)
20,663 | 61917c40385d81ef14bc19f76ef27ada3aebb7db | #!/usr/bin/python2.7
##-*- coding: utf-8 -*-
import re # for judge the name correct
import os # for get size of the file
import sys
import getpass
import subprocess
import time
import random
import socket
import ftplib # package that deal with the ftp
import Utils # some useful class and functions
if sys.version > '2.6':
from hashlib import md5
else:
import md5
# function: conditions ? a : b
def ThreeOperator(conditions, a, b):
    """Ternary helper: return `a` when `conditions` is truthy, else `b`.

    Replaces the legacy `(cond and [a] or [b])[0]` idiom with Python's
    conditional expression — identical behavior, including falsy `a`.
    """
    return a if conditions else b
# the import imformation
def hdfsDirectory(template, startDate, endDate = None, flag = 'DAY'):
    """Expand `template` into HDFS path(s) covering [startDate, endDate).

    flag 'DAY' templates (year, month, day); 'MONTH' templates (year, month).
    Returns '' when startDate is not a Utils.DateTimeUtil; returns None for
    any other flag value.
    """
    if not isinstance(startDate, Utils.DateTimeUtil):
        return ''
    if flag == 'DAY':
        return __hdfsDirectoryDay(template, startDate, endDate)
    elif flag == 'MONTH':
        return __hdfsDirectoryMonth(template, startDate, endDate)
def __hdfsDirectoryDay(template, startDate, endDate = None):
    """Daily paths; collapses whole months/years into '*' wildcards.

    Defaults endDate to startDate + 1 day.  Returns a single path string
    (wildcarded or one-day ranges) or a list of per-day paths.
    """
    if not endDate or not isinstance(endDate, Utils.DateTimeUtil):
        endDate = Utils.DateTimeUtil.dayDelta_s(startDate, days = 1)
    # one month or the one year
    if startDate.firstDay() and endDate.firstDay():
        month = "%02d"%(startDate.month())
        # Exactly one calendar month -> wildcard the day component.
        if (endDate.month() - startDate.month() == 1) and \
           endDate.year() == startDate.year():
            return template%("%04d"%(startDate.year()), month, '*')
    # Exactly one calendar year -> wildcard month and day.
    if startDate.firstDay(flag = 'YEAR') and endDate.firstDay(flag = 'YEAR') and \
       (endDate.year() - startDate.year() == 1):
        return template%("%04d"%(startDate.year()), '*', '*')
    # return the list
    dirs, sd = [], startDate.copy()
    while( sd < endDate ):
        dirs.append(template%("%04d"%(sd.year()), "%02d"%(sd.month()), "%02d"%(sd.day())))
        sd.tomorrow() # add one day
    if len(dirs) == 1:
        return dirs[0]
    return dirs
def __hdfsDirectoryMonth(template, startDate, endDate = None):
    """Monthly paths; collapses a whole year into a '*' wildcard.

    Defaults endDate to startDate + 1 month.  Returns a single path string
    or a list of per-month paths.
    """
    if not endDate or not isinstance(endDate, Utils.DateTimeUtil):
        endDate = Utils.DateTimeUtil.dayDelta_s(startDate, months = 1)
    # one month or the one year
    if startDate.firstMonth() and endDate.firstMonth() and \
       (endDate.year() - startDate.year() == 1):
        return template%("%04d"%(startDate.year()), '*')
    # return the list
    dirs, sd = [], startDate.copy()
    while( sd < endDate ):
        dirs.append(template%("%04d"%(sd.year()), "%02d"%(sd.month())))
        sd.next() # add one month
    if len(dirs) == 1:
        return dirs[0]
    return dirs
# judge if exec or just template the string
def __execOrTemplate(string, params):
    """Render `string`: call it when callable, else apply %-formatting."""
    return string(params) if callable(string) else string % (params)
# generate Params
def __makeParams(params, pParams):
    """Fill template values inside the nested ``params`` dict, in place.

    ``pParams`` maps slash-separated paths (e.g. ``'a/b/c'``) to the value
    fed to the leaf entry; the leaf is either a %-template string or a
    callable (see ``__execOrTemplate``).  Missing path segments are
    skipped silently.  Returns ``params`` for chaining.
    """
    for path in pParams:
        node, levels = params, path.split('/')
        lastLevel = levels[-1]
        for level in levels[:-1]:
            # BUGFIX: descend from the current node, not from the root --
            # the original used ``params[level]``, which broke every path
            # deeper than two levels.
            if level in node: node = node[level]
        if lastLevel in node:
            node[lastLevel] = __execOrTemplate(node[lastLevel],
                                               pParams[path])
    return params
def initResultsCommand(code = 0, command = '', stderr = '',
                       stdout = '', stdin = '',
                       environ = None):
    """Public wrapper that builds the standard command-result dict.

    BUGFIX: ``environ`` used to default to ``os.environ.copy()``, which is
    evaluated once at import time -- a stale snapshot shared by every
    caller.  It now defaults to None and a fresh copy is taken per call.
    """
    if environ is None:
        environ = os.environ.copy()
    return __initResultsCommand__(code = code, command = command,
                                  stderr = stderr, stdout = stdout,
                                  stdin = stdin, environ = environ)
def __initResultsCommand__(code = 0, command = '', stderr = '',
                           stdout = '', stdin = '',
                           environ = None):
    """Build the result dict returned by ``runCommand`` and friends.

    Keys: CODE (exit status), STDERR/STDOUT/STDIN (captured text),
    COMMAND (the command line), DATE (timestamp), ENVIRON (environment
    used), USER (invoking user).

    BUGFIX: ``environ`` now defaults lazily to a fresh copy of
    ``os.environ``; the old ``os.environ.copy()`` default was a single
    import-time snapshot shared between calls.
    """
    if environ is None:
        environ = os.environ.copy()
    return {
        'CODE': code,
        'STDERR': stderr,
        'STDOUT': stdout,
        'STDIN': stdin,
        'COMMAND': command,
        'DATE': Utils.DateTimeUtil.now(),
        'ENVIRON': environ,
        'USER': getpass.getuser(),
    }
def __printRunCommandResults__(t, printString, results):
    # Report the outcome of an executed command; failures also dump the
    # captured stderr text.
    succeeded = results['CODE'] == 0
    if succeeded:
        print('Execute Command SUCCESS [%s]: %s'%(t, printString))
        return
    print('Execute Command FAILED [%s]: %s\n%s'%(t, printString, results['STDERR']))
# execute a command
def runCommand(command, execute = True, exit = False,
               environ = os.environ.copy(),
               stdin = None, stdout = None, stderr = None,
               printString = '', printResults = False, **params):
    """Build, optionally execute, and report a shell command.

    ``command`` may be a string or a list/tuple of tokens (joined with
    spaces).  ``stdout``/``stderr`` may be callables invoked once per
    captured output line.  Returns the result dict produced by
    ``__initResultsCommand__`` with CODE/STDOUT/STDERR filled in
    (CODE=1 immediately when the command is not a string).

    NOTE(review): the ``exit`` parameter is accepted but never used, and
    the ``environ`` default is a single import-time snapshot of
    os.environ shared by all calls -- both look like latent defects.
    """
    # the environment of the execution
    basicEnv = os.environ.copy()
    basicEnv.update(environ)
    # if the command is list or tuple object
    # we use the space to join the list and turn it to a string
    if isinstance(command, (list, tuple)):
        command = ' '.join(command)
    results = __initResultsCommand__(environ = basicEnv,
                                     command = ThreeOperator(printString, printString,
                                                             command))
    # the command must be string till this step
    if not isinstance(command, str):
        results['CODE'] = 1
        return results
    # print the command need to be execute
    printString = ThreeOperator(printString, printString, command)
    print("Execute Command [%s]: "%(Utils.DateTimeUtil.now()) + printString)
    # need to execute the command
    if execute == True:
        # shell=True: the command string is interpreted by the shell
        child = subprocess.Popen(command, shell = True, env = basicEnv,
                                 stdout = subprocess.PIPE, stderr = subprocess.PIPE)
        if isinstance(stdin, str):
            results['STDIN'] = stdin
            # NOTE(review): Popen was created without stdin=subprocess.PIPE,
            # and communicate() expects bytes on Python 3 -- passing a str
            # here likely fails; confirm the intended Python version.
            out = child.communicate(stdin)
        else: out = child.communicate()
        # record the stdout and stderr string
        results['STDOUT'] = out[0]
        results['STDERR'] = out[1]
        # execute function to deal with the stdout
        if hasattr(stdout, '__call__'):
            for line in out[0].splitlines():
                stdout(line)
        # execute function to deal with the stderr
        if hasattr(stderr, '__call__'):
            for line in out[1].splitlines():
                stderr(line)
        results['CODE'] = child.returncode
        # print the results of the command
        if printResults: __printRunCommandResults__(Utils.DateTimeUtil.now(),
                                                    printString,
                                                    results)
    # return the result of the execution of the command
    return results
def Dstring(params):
    """Render ``-D name=value`` option pairs for a command line.

    ``params`` is either a sequence of ``(name, value)`` pairs (entries
    shorter than two elements are ignored) or a mapping of name -> value.
    Numeric values are emitted bare, anything else single-quoted.  Any
    other input type yields an empty string.
    """
    def render(name, value):
        if isinstance(value, (int, float)):
            return "-D %s=%s "%(name, value)
        return "-D %s='%s' "%(name, value)

    pieces = []
    if isinstance(params, (tuple, list)):
        pieces = [render(pair[0], pair[1]) for pair in params if len(pair) >= 2]
    elif isinstance(params, dict):
        pieces = [render(name, params[name]) for name in params]
    return ''.join(pieces)
# Order the option names: names listed in ``sequences`` come first, in
# that preferred order, followed by the remaining keys (set difference,
# so the tail order is not guaranteed).
def __generateCommandSequences(keys, sequences):
    preferred = [name for name in sequences if name in keys]
    remainder = list(set(keys) - set(sequences))
    return tuple(preferred + remainder)
# return the key value string
def paramsString(params, promot, pattern = '', sequences = ()):
    """Serialise ``params`` into command-line option text.

    Each key becomes ``<promot><key> '<value>' `` (or ``pattern`` filled
    with promot/key/value when a pattern is given); tuple/list values
    repeat the key once per item.  ``sequences`` pins preferred keys to
    the front of the output.
    """
    def fragment(key, value):
        if pattern:
            return "%s "%(pattern%(promot, key, value))
        return "%s%s '%s' "%(promot, key, value)

    orders = params.keys()
    if sequences:
        orders = __generateCommandSequences(params.keys(), sequences)
    parts = []
    for key in orders:
        value = params[key]
        if isinstance(value, (tuple, list)):
            parts.extend(fragment(key, item) for item in value)
        else:
            parts.append(fragment(key, value))
    return ''.join(parts)
def delRequireParams(params, requires):
    # Strip every key listed in ``requires`` from ``params`` in place and
    # return the same dict for call chaining.
    for name in requires:
        params.pop(name, None)
    return params
# the sqoop class, used to generate the command
class Sqoop:
    """Builds and (optionally) runs ``sqoop import`` / ``sqoop export`` commands.

    All methods are classmethods returning the result dict produced by
    ``runCommand``.  NOTE(review): the classmethods name their first
    parameter ``self`` although it actually receives the class.
    """
    # CONNECT is a JDBC template filled from a (host, port, SID) 3-tuple.
    CONFIG = {
        'CONNECT': "jdbc:oracle:thin:@%s:%s:%s",
        'COMMAND': '/yjcom/app/sqoop-1.4.4-cdh5.1.0/bin/sqoop',
        'DELIMITER': {
            'IMPORT': '\\t',
            'EXPORT': '\\t',
        },
        'MAPS': {
            'IMPORT': 1,
            'EXPORT': 1,
        },
    }
    def __init__(self):
        pass
    @classmethod
    def Export(self, database, username, password, table,
               targetDir, execute = False, **params):
        """Export HDFS data under ``targetDir`` into the Oracle ``table``.

        ``database`` fills the CONNECT template (presumably host, port,
        SID -- confirm with callers).  Extra ``params`` become ``--name
        value`` options; the special key 'D' is rendered as ``-D
        name=value`` pairs.  Returns the runCommand result dict.
        """
        requires = ('connect', 'username', 'password', 'export-dir', 'table')
        command = "%s export "%(self.CONFIG['COMMAND'])
        # consider the -D opt
        if 'D' in params:
            command += Dstring(params['D'])
            del params['D'] # delete the 'D'
        command += "--connect %s "%(self.CONFIG['CONNECT']%(database))
        # add the basic information, username, password, targetDir
        command += "--username %s "%(username)
        command += "--password %s "%(password)
        command += "--export-dir %s "%(targetDir)
        # export data into table
        command += "--table %s "%(table)
        # del some parameters, connect, username, etc
        params = delRequireParams(params, requires)
        # num-mappers in the params
        if 'num-mappers' not in params:
            command += "--num-mappers %s "%(self.CONFIG['MAPS']['EXPORT'])
        if 'input-fields-terminated-by' not in params:
            command += "--input-fields-terminated-by '%s' "%(self.CONFIG['DELIMITER']['EXPORT'])
        # not consider the -D value
        command += paramsString(params, '--')
        return runCommand(command, execute = execute, printResults = True)
    # import data from oracle
    # database, username, password, targetDir is must parameters
    # the dict parameters format is:
    # 'NAME': (value1, value2) is transformat into -NAME 'value1' -NAME 'value2'
    # 'NAME': 'value' is tranformated into -NAME 'value'
    # 'D' :{'NAME': value} is transformat into -D NAME=value
    @classmethod
    def Import(self, database, username, password,
               targetDir, execute = False, remove = False, **params):
        """Import an Oracle query/table into HDFS at ``targetDir``.

        ``remove`` deletes a pre-existing target directory first.  A
        ``query`` param gets a ``$CONDITIONS`` clause appended when
        missing (sqoop requires it for free-form queries).
        """
        command = "%s import "%(self.CONFIG['COMMAND'])
        # consider the -D opt
        if 'D' in params:
            command += Dstring(params['D'])
            del params['D'] # delete the 'D'
        command += "--connect %s "%(self.CONFIG['CONNECT']%(database))
        # add the basic information, username, password, targetDir
        command += "--username %s "%(username)
        command += "--password %s "%(password)
        command += "--target-dir %s "%(targetDir)
        if remove and Hadoop.testsHadoopDir(targetDir, tp = 'e'):
            Hadoop.removeHadoopFile(targetDir, execute = execute)
        # num-mappers in the params
        if 'num-mappers' not in params:
            command += "--num-mappers %s "%(self.CONFIG['MAPS']['IMPORT'])
        # the backslash keeps $CONDITIONS literal for the shell
        if 'query' in params and '$CONDITIONS' not in params['query']:
            if 'where' in params['query']:
                params['query'] += ' and \$CONDITIONS'
            else:
                params['query'] += ' where \$CONDITIONS'
        if 'fields-terminated-by' not in params:
            command += "--fields-terminated-by '%s' "%(self.CONFIG['DELIMITER']['IMPORT'])
        # not consider the -D value
        command += paramsString(params, '--', """%s%s "%s" """)
        return runCommand(command, execute = execute, printResults = True)
# upper class for Sqoop, like Streaming class
class Sqooping:
    """Stateful, template-driven wrapper around :class:`Sqoop`.

    ``configs`` must contain TARGET, DB, USER and PASSWD; the presence of
    TABLE switches the instance from import mode to export mode.  An
    optional PARAMS dict is forwarded to Sqoop.Import/Sqoop.Export.
    """
    def __init__(self, configs, **params):
        if not self.__checkRequires(('TARGET', 'DB', 'USER', 'PASSWD'),
                                    configs):
            # NOTE(review): a bare ``raise`` outside an except block
            # surfaces as RuntimeError; a descriptive exception would be
            # clearer, but the type is preserved for existing callers.
            raise
        self.__target = configs['TARGET']
        self.__db = configs['DB']
        self.__user, self.__passwd = configs['USER'],\
                                     configs['PASSWD']
        # judge whether the job is an import or an export:
        # a TABLE entry marks an export and the table name is recorded
        self.__type = 'IMPORT'
        if 'TABLE' in configs:
            self.__type = 'EXPORT'
            self.__table = configs['TABLE']
        # define the other params
        # detail information please reference
        # Sqoop.Import or Sqoop.Export
        if 'PARAMS' in configs and isinstance(configs['PARAMS'], dict):
            self.__params = configs['PARAMS']
        else: self.__params = {}
        # back up the configs
        self.__configs = configs
    def __checkRequires(self, requires, configs):
        # True only when every required key is present in configs.
        for r in requires:
            if r not in configs: return False
        return True
    def setTarget(self, target):
        self.__target = target
    def getTarget(self):
        return self.__target
    def getConfigs(self):
        return self.__configs.copy()
    @classmethod
    def __execOrTemplate(self):
        # Name-mangling workaround: a direct reference would be mangled
        # to _Sqooping__execOrTemplate, so fetch the module-level helper.
        return globals()['__execOrTemplate']
    @classmethod
    def __makeParams(self):
        return globals()['__makeParams']
    # the struct of the params is same as the default params
    # the default params use the add and update the params
    def updateParams(self, **params):
        # update the params, if 'D' in params
        # NOTE(review): this updates with the whole ``params`` twice and
        # replaces (rather than merges) the nested 'D' dict -- confirm.
        if 'D' in params and 'D' in self.__params:
            self.__params.update(params)
            del params['D']
        # update the other options
        self.__params.update(params)
    def prepareSqoop(self, tParams = (), pParams = {},
                     tbParams = ()):
        """Render the target/table templates and parameter templates in place."""
        self.__target = self.__execOrTemplate()(self.__target, tParams)
        # if type is 'export', update the table
        if self.__type == 'EXPORT':
            self.__table = self.__execOrTemplate()(self.__table, tbParams)
        # update the params
        if pParams:
            self.__params = self.__makeParams()(self.__params, pParams)
    def importing(self, execute = False, remove = False):
        results = Sqoop.Import(self.__db, self.__user, self.__passwd,
                               self.__target, execute = execute,
                               remove = remove, **self.__params)
        return results
    def exporting(self, execute = False):
        results = Sqoop.Export(self.__db, self.__user, self.__passwd,
                               self.__table, self.__target, execute = execute,
                               **self.__params)
        return results
    def imported(self, tParams = (), pParams = {},
                 execute = False, remove = False):
        self.prepareSqoop(tParams = tParams, pParams = pParams)
        # BUGFIX: forward ``remove`` -- the original accepted it but never
        # passed it on, so the target directory was never cleaned up.
        return self.importing(execute = execute, remove = remove)
    def exported(self, tParams = (), pParams = {},
                 tbParams = (), execute = False):
        self.prepareSqoop(tParams = tParams, pParams = pParams,
                          tbParams = tbParams)
        return self.exporting(execute = execute)
class Sqluldr:
    """Wrapper around the ``sqluldr2`` Oracle bulk-unload utility.

    ADDENV supplies the Oracle instant-client environment merged into the
    process environment by ``runCommand``.
    """
    # some constants
    CONFIG = {
        'ADDENV': {
            'LD_LIBRARY_PATH': '/opt/oracle/instantclient_10_2',
            'ORACLE_HOME': '/opt/oracle/instantclient_10_2',
            'TNS_ADMIN': '/opt/oracle/instantclient_10_2',
            'ORACLE_IC_HOME': '/opt/oracle/instantclient_10_2',
            'PATH': '/usr/java/default/bin:/usr/java/default/bin:'
                    '/usr/kerberos/bin:/usr/local/bin:/bin:/usr/bin:'
                    '/opt/oracle/instantclient_10_2:'
                    '/opt/oracle/instantclient_10_2:/opt/oracle/'
                    ':/opt/oracle/instantclient_10_2:/opt/oracle/'
                    ':/opt/oracle/instantclient_10_2:/opt/oracle/'
                    ':/opt/oracle/instantclient_10_2:/opt/oracle/',
        },
        'COMMAND': 'sqluldr2',
        'FIELD': {
            'IMPORT': '0x09',
        }
    }
    def __init__(self):
        pass
    # Import data from the oracle database
    @classmethod
    def Import(self, basic, query, outFile, execute = False, **params):
        """Dump ``query`` results into ``outFile`` via sqluldr2.

        ``basic`` must be a 3-tuple filling ``%s/%s@%s`` -- presumably
        (user, password, tns); confirm with callers.  Extra ``params``
        become ``key='value'`` options; tuple/list values repeat the key.
        Field delimiter defaults to TAB (0x09).
        """
        command = self.CONFIG['COMMAND']
        command += " %s/%s@%s "%(basic)
        command += """query="%s" """%(query)
        command += "file=%s "%(outFile)
        # set the delimiter of the fields
        if 'field' not in params:
            command += 'field=%s '%(self.CONFIG['FIELD']['IMPORT'])
        for key in params:
            if isinstance(params[key], (tuple, list)):
                for value in params[key]:
                    command += "%s='%s' "%(key, value)
            else:
                command += "%s='%s' "%(key, params[key])
        return runCommand(command, execute = execute,
                          environ = self.CONFIG['ADDENV'],
                          printResults = True)
class SqlPlus:
    """Placeholder for a future SQL*Plus command wrapper (no behaviour yet)."""

    def __init__(self):
        pass
class Tez:
    """Placeholder for a future Tez job wrapper; everything is a no-op."""

    def __init__(self, configs):
        # The configuration argument is currently ignored.
        pass

    def tez(self):
        # Not implemented yet; intentionally returns None.
        pass
class Streaming:
    """Stateful wrapper that templates and launches one Hadoop streaming job.

    Fields (inputs, output, mapper, reducer, params) may be %-templates or
    callables; ``prepareStream`` renders them and ``streaming`` delegates
    to ``Hadoop.MapRed``.
    """
    __CONFIG = {
        'TMP': '/yjtest',
        '': {
        },
    }
    def __init__(self, configs):
        """``configs`` requires INPUT; OUTPUT/MAPPER/REDUCER/PARAMS are optional."""
        if 'INPUT' not in configs:
            # NOTE(review): a bare ``raise`` here surfaces as RuntimeError.
            raise
        # store the current configs
        self.__configs = configs.copy()
        # define the input directory
        self.__inputs = configs['INPUT']
        # define the output directory
        if 'OUTPUT' in configs:
            self.__output = configs['OUTPUT']
        else: # generate the temp directory: TMP/user/date/hour/uuid
            now = Utils.DateTimeUtil.now()
            self.__output = "%s/%s/%s/%s/%s"%(self.__CONFIG['TMP'],
                                              getpass.getuser(),
                                              now.format("%Y-%m-%d"),
                                              now.format("%H"),
                                              UUID.uuid())
        # define the mapper and reducer
        # default mapper is 'cat' if not define the mapper
        # default reducer is 'NONE' if not define the reducer
        self.__mapper, self.__reducer = 'cat', 'NONE'
        if 'MAPPER' in configs: self.__mapper = configs['MAPPER']
        if 'REDUCER' in configs: self.__reducer = configs['REDUCER']
        # define the other params
        # the detail information of the params please reference
        # Hadoop.MapRed define parmas
        if 'PARAMS' in configs and isinstance(configs['PARAMS'], dict):
            self.__params = configs['PARAMS']
        else: self.__params = {}
    # the struct of the params is same as the default params
    # the default params use the add and update the params
    def updateParams(self, **params):
        # update the params, if 'D' in params
        # NOTE(review): this updates with the whole ``params`` twice and
        # replaces (rather than merges) the nested 'D' dict -- confirm.
        if 'D' in params and 'D' in self.__params:
            self.__params.update(params)
            del params['D']
        # update the other options
        self.__params.update(params)
    def setInput(self, inputs):
        self.__inputs = inputs
    def setOutput(self, output):
        self.__output = output
    def setMapper(self, mapper):
        self.__mapper = mapper
    def setReducer(self, reducer):
        self.__reducer = reducer
    def getInput(self):
        return self.__inputs
    def getOutput(self):
        return self.__output
    def getMapper(self):
        return self.__mapper
    def getReducer(self):
        return self.__reducer
    # return a copy of the configs
    def configs(self):
        return self.__configs.copy()
    @classmethod
    def __execOrTemplate(self):
        # Name-mangling workaround: a direct reference would be mangled
        # to _Streaming__execOrTemplate, so fetch the module-level helper.
        return globals()['__execOrTemplate']
    @classmethod
    def __makeParams(self):
        return globals()['__makeParams']
    # render all templated fields (inputs/output/mapper/reducer/params) in place
    def prepareStream(self, iParams = (), oParams = (),
                      mParams = (), rParams = (), pParams = {}):
        if iParams:
            if isinstance(self.__inputs, (tuple, list)):
                self.__inputs = [self.__execOrTemplate()(v, iParams[i])
                                 for (i, v) in enumerate(self.__inputs)]
            else: self.__inputs = self.__execOrTemplate()(self.__inputs,
                                                          iParams)
        # generate output
        self.__output = self.__execOrTemplate()(self.__output, oParams)
        # mapper and reducer
        self.__mapper = self.__execOrTemplate()(self.__mapper, mParams)
        self.__reducer = self.__execOrTemplate()(self.__reducer, rParams)
        # generate the params
        if pParams:
            self.__params = self.__makeParams()(self.__params, pParams)
    # must first call the prepareStream, then call this function
    def streaming(self, remove = False, execute = False, skipper = True):
        results = Hadoop.MapRed(self.__inputs, self.__output, self.__mapper,
                                self.__reducer, remove = remove, execute = execute,
                                skipper = skipper, **self.__params)
        return results
    # convenience wrapper: prepare then launch
    # NOTE(review): despite the original comment claiming otherwise, this
    # DOES mutate the object state via prepareStream.
    def stream(self, iParams = (), oParams = (), mParams = (),
               rParams = (), pParams = {} ,remove = False, execute = False,
               skipper = True):
        self.prepareStream(iParams = iParams, oParams = oParams,
                           mParams = mParams, rParams = rParams, pParams = pParams)
        return self.streaming(remove = remove, execute = execute,
                              skipper = skipper)
        # newJob = Streaming(self.configs())
        # newJob.prepareStream(iParams, oParams, mParams, rParams)
        # return newJob.streaming(remove = remove, execute = execute, skipper = skipper)
# the Hadoop Class, used to generate command
class Hadoop:
    """Builders/runners for Hadoop streaming jobs and ``hadoop fs`` shell commands.

    Every public method builds a command string and delegates to
    ``runCommand``, returning its result dict.
    """
    def __init__(self):
        # BUGFIX: the original ``def __init__():`` was missing ``self``,
        # so ``Hadoop()`` raised TypeError.
        pass
    CONFIG = {
        'PATH': '/yjcom/app/hadoop-2.3.0-cdh5.1.0/share/hadoop/mapreduce1/contrib/streaming/hadoop-streaming.jar',
        'COMMAND': '/yjcom/app/hadoop-2.3.0-cdh5.1.0/bin-mapreduce1/hadoop',
        'ORDERS': ('libjars', 'outputformat', ),
        'UNIT': {
            'B': 1024 ** 0,
            'K': 1024 ** 1,
            'M': 1024 ** 2,
            'G': 1024 ** 3,
            'T': 1024 ** 4,
        },
    }
    @classmethod
    def MR(self):
        # Placeholder for running a plain jar job; not implemented yet.
        pass
    # mainly running a streaming Job, if you wan't to run a jar job
    # just use the MR function
    # inDir, outDir, mapper, and reducer must be as a parameter
    # the dict parameters format is:
    # 'NAME': (value1, value2) is transformat into -NAME 'value1' -NAME 'value2'
    # 'NAME': 'value' is tranformated into -NAME 'value'
    # 'D' :{'NAME': value} is transformat into -D NAME=value
    @classmethod
    def MapRed(self, inDir, outDir, mapper = 'cat',
               reducer = 'cat', path = '',
               skipper = False, remove = False,
               execute = False, **params):
        """Build (and optionally run) a ``hadoop jar ... streaming`` command.

        ``skipper`` drops input dirs that do not exist (only checked when
        executing); ``remove`` deletes a pre-existing output dir first.
        Returns the runCommand result dict, or a CODE=1 error dict when
        every input directory is missing.
        """
        if path == '':
            path = self.CONFIG['PATH']
        cmdString = "%s jar %s "%(self.CONFIG['COMMAND'], path)
        # consider the -D opt
        if 'D' in params:
            cmdString += Dstring(params['D'])
            del params['D'] # delete the 'D'
        # not consider the -D value
        cmdString += paramsString(params, '-', sequences = self.CONFIG['ORDERS'])
        # add the mapper and reducer
        cmdString += '-mapper "%s" -reducer "%s" '%(mapper, reducer)
        cmdString += self.__AddMapRedOutput(outDir, remove, execute = execute)
        inputString = self.__AddMapRedInput(inDir, skipper and execute)
        if not inputString:
            return __initResultsCommand__(code = 1, command = cmdString,
                                          stderr = 'All the Input Direcotry is not exists')
        # add the input
        cmdString += inputString
        return runCommand(cmdString, execute = execute, printResults = True)
    # the function that add the input dirs
    # if skipper = True, then skip the dirs that not exists
    @classmethod
    def __AddMapRedInput(self, inDirs, skipper):
        """Render ``-input`` options; returns '' when every dir is skipped."""
        inputs = ''
        # add the in and out directory
        if isinstance(inDirs, (list, tuple)):
            inputs += ' '.join(["-input '%s'"%(idir) for idir in inDirs \
                                if not skipper or self.testsHadoopDir(idir)])
        else:
            if skipper and not self.testsHadoopDir(inDirs):
                # if all the target dir is not exists, then return the empty string
                return ''
            inputs += "-input '%s' "%(inDirs)
        return inputs
    # the function that deal with the output
    # if define the remove, if the target directory exists
    # then remove the target directory
    @classmethod
    def __AddMapRedOutput(self, outDir, remove, execute = False):
        """Render the ``-output`` option, removing an existing dir when asked."""
        outputs = ''
        if remove and self.testsHadoopDir(outDir):
            self.removeHadoopFile(outDir, execute = execute)
        outputs += " -output '%s' "%(outDir)
        return outputs
    # remove a File or Directory in HDFS
    # skipTrash: remove the file permanently
    # force: remove the file by force
    # recursive: remove directory
    # execute: set True, execute the command, False just print the Command
    @classmethod
    def removeHadoopFile(self, remDir, skipTrash = False, force = True,
                         recursive = True, execute = False):
        """Build/run ``hadoop fs -rm`` with the requested flags."""
        cmdRem = "%s fs -rm "%(self.CONFIG['COMMAND'])
        # remove the directory
        if recursive:
            cmdRem += "-r "
        # remove the directory by force
        if force:
            cmdRem += "-f "
        # remove permanetly
        if skipTrash:
            cmdRem += "-skipTrash "
        cmdRem += "'%s'"%(remDir)
        return runCommand(cmdRem, execute = execute, printResults = True)
    # get the file size of the hadoop
    # if the target is the direcotry, then return the size of the directory
    @classmethod
    def sizeHadoopFile(self, fileName, unit = 'M', human = False):
        """Return the size of an HDFS file/directory via ``hadoop fs -du -s``.

        Falls back to bytes for unknown units; returns -1 when the shell
        command fails.
        """
        # if the unit not is B, K, M, T, P, then return the Bytes
        if unit not in self.CONFIG['UNIT']:
            unit = 'B'
        totalSize = [0]
        command = "%s fs -du -s %s"%(self.CONFIG['COMMAND'], fileName)
        def addSize(line):
            # first whitespace-separated field of ``du -s`` is the size
            totalSize[0] = line.split()[0]
        results = runCommand(command, stdout = addSize, execute = True)
        if results['CODE'] != 0:
            return -1
        # BUGFIX: honour the requested unit -- the original hard-coded
        # unitOut = 'M', silently ignoring the ``unit`` argument.
        return Utils.unitChange(totalSize[0], unitIn = 'B',
                                unitOut = unit, human = human)
    # create a dir
    @classmethod
    def mkdirHadoop(self, dirName, parent = True, execute = True):
        """Build/run ``hadoop fs -mkdir`` (``-p`` when ``parent``)."""
        command = "%s fs -mkdir "%(self.CONFIG['COMMAND'])
        # if set the parent value
        if parent:
            command += "-p "
        command += "'%s'"%(dirName)
        # execute the command
        return runCommand(command, execute = execute)
    # judge a dir is exists in Hadoop File System
    @classmethod
    def testsHadoopDir(self, dirName, tp = 'e'):
        """True when ``hadoop fs -test -<tp>`` succeeds for ``dirName``."""
        command = "%s fs -test -%s "%(self.CONFIG['COMMAND'], tp)
        command += "'%s'"%(dirName)
        results = runCommand(command)
        if results['CODE']:
            return False
        return True
    # put a file into the hdfs
    @classmethod
    def putFileToHadoop(self, fileName, hdfsPath, execute = True, **params):
        """Build/run ``hadoop fs -put``; 'D' params become ``-D`` options."""
        command = "%s fs -put "%(self.CONFIG['COMMAND'])
        if 'D' in params:
            command += Dstring(params['D'])
            del params['D']
        command += "%s %s"%(fileName, hdfsPath)
        return runCommand(command, execute = execute, printResults = True)
    # download a file from the hdfs
    @classmethod
    def getFileFromHadoop(self, hdfsPath, localPath = '', execute = True):
        """Build/run ``hadoop fs -get``."""
        command = "%s fs -get "%(self.CONFIG['COMMAND'])
        command += "%s %s "%(hdfsPath, localPath)
        return runCommand(command, execute)
    # copy a file between the Hdfs
    @classmethod
    def copyHadoopFiles(self, sourcePath, targetPath, execute = True):
        """Build/run ``hadoop fs -cp``."""
        command = "%s fs -cp "%(self.CONFIG['COMMAND'])
        command += "%s %s "%(sourcePath, targetPath)
        return runCommand(command, execute = execute, printResults = True)
    # move a file in hdfs
    @classmethod
    def moveHadoopFiles(self, sourcePath, targetPath, execute = True):
        """Build/run ``hadoop fs -mv``."""
        command = "%s fs -mv "%(self.CONFIG['COMMAND'])
        command += "%s %s "%(sourcePath, targetPath)
        return runCommand(command, execute = execute, printResults = True)
    # list all the files of a dir
    # give the absolute path or relative path
    @classmethod
    def listHadoopDir(self, listDir, path = 'ABSOLUTE', execute = True):
        """List a directory via ``hadoop fs -ls``.

        Returns the paths (basenames when path == 'RELATIVE') or False on
        failure.  NOTE(review): only 8-field output lines are kept --
        presumably the per-entry rows of ``fs -ls``; confirm format.
        """
        command = "%s fs -ls %s"%(self.CONFIG['COMMAND'], listDir)
        files = []
        def addFile(line):
            fields = line.split()
            if len(fields) == 8:
                files.append(fields[-1])
        results = runCommand(command, execute = execute, stdout = addFile)
        if results['CODE']:
            return False
        if path == 'RELATIVE':
            return [os.path.basename(p) for p in files]
        return files
# execute the pig command
class Pig:
    """Wrapper that builds and (optionally) runs ``pig`` command lines."""
    # the class constant
    CONFIG = {
        'COMMAND': 'pig',
    }
    def __init__(self):
        pass
    # execute pig command
    @classmethod
    def pig(self, name, execute = False, **params):
        """Run the pig script ``name``.

        'D' params become ``-D name=value`` pairs; the rest become
        ``-param key=value`` options.  BUGFIX: the runCommand result dict
        is now returned -- the original discarded it, unlike every other
        command wrapper in this module.
        """
        command = "%s "%(self.CONFIG['COMMAND'])
        # consider the -D opt
        if 'D' in params:
            command += Dstring(params['D'])
            del params['D'] # delete the 'D'
        # not consider the -D value
        command += paramsString(params, '', pattern = '-param %s%s=%s')
        command += " %s"%(name)
        return runCommand(command, execute = execute, printResults = True)
# program of sending email
class SMail:
    """Wrapper around the external ``send_mail.py`` helper script."""
    CONFIG = {
        'COMMAND': 'python /data/tools/send_mail.py ',
    }
    def __init__(self):
        pass
    @classmethod
    def sendMail(self, mail_address, topic, content, execute = True):
        """Send an email through the helper script; returns the runCommand dict.

        BUGFIX: the original "escaping" was a no-op -- in Python source
        "\\'" == "'" and '\\"' == '"', so quotes inside ``content`` could
        break (or inject into) the generated shell command.  All three
        user-supplied fields are now quoted with ``shlex.quote``.
        """
        import shlex  # local import keeps the fix self-contained
        command = self.CONFIG['COMMAND']
        # backticks are still blanked out, preserving the original
        # content-sanitising behaviour seen by the mail script
        content = re.sub('`' , ' ', content)
        command += " -m %s "%(shlex.quote(mail_address))
        command += " -t %s "%(shlex.quote(topic))
        command += " -c %s "%(shlex.quote(content))
        return runCommand(command, execute = execute,
                          printString = '%s -m %s -t %s -c ...'%(self.CONFIG['COMMAND'],
                                                                 mail_address,
                                                                 topic))
class Local:
    """Local-filesystem helpers: size accounting and file enumeration."""
    # The constant value of different units
    CONFIG = {
        'UNIT': {
            'B': 1024 ** 0,
            'K': 1024 ** 1,
            'M': 1024 ** 2,
            'G': 1024 ** 3,
            'T': 1024 ** 4,
        },
    }
    # get the size of directory or files
    # default return the bytes
    # The name can be directory or a list of files or a single File
    # the pattern that the names must be statisticed
    # now cann't support the rescurve the directory
    @classmethod
    def getSize(self, names, pattern = '',
                unit = 'K', recursive = False):
        """Total size of the named files/dirs whose names match ``pattern``.

        NOTE(review): under Python 3 the final division returns a float
        (the code reads as if written for Python 2 integer division).
        Also note that here a *matching* pattern keeps an entry, while
        ``files()`` below skips matches -- confirm which is intended.
        """
        size = 0 # the total size
        # get the all the names that needed to be calculated
        totalNames = []
        if isinstance(names, (tuple, list)):
            totalNames.extend(names)
        elif isinstance(names, str):
            totalNames.append(names)
        for name in totalNames:
            if not re.search(pattern, name): # if the name is not statistified the pattern
                continue
            if os.path.isdir(name): # the directory
                size += self.__getDirSize(name, pattern,
                                          recursive = recursive)
            elif os.path.isfile(name): # the regular the file
                size += os.path.getsize(name)
        return size / self.CONFIG['UNIT'][unit[0].upper()] # return the user need unit
    # the input must be a dirName
    @classmethod
    def __getDirSize(self, dirName, pattern = '', recursive = False):
        """Sum the sizes (in bytes) of matching files inside ``dirName``."""
        size = 0
        for name in os.listdir(dirName):
            if not re.search(pattern, name):
                continue
            absName = os.path.join(dirName, name)
            if os.path.isdir(absName): # for now, not support the rescurve
                if recursive: # the rescurve
                    size += self.__getDirSize(absName, pattern, recursive)
                else: # just think all the directory is zero
                    size += 0
            elif os.path.isfile(absName): # the regular file
                size += os.path.getsize(absName)
        return size # the total size, byte is unit
    @classmethod
    def __isPattern(self, pattern, string):
        # True only when a non-empty pattern matches ``string``.
        if not pattern:
            return False
        if re.search(pattern, string):
            return True
        return False
    @classmethod
    def files(self, name, pattern = '',
              recursive = False, directory = False):
        """Collect paths under ``name`` into a set.

        NOTE(review): entries whose basename MATCHES ``pattern`` are
        skipped here -- the inverse of ``getSize`` -- and ``absName`` is
        computed but never used; both look accidental.
        """
        results = set()
        currentPath = os.path.abspath(os.path.curdir)
        absName = os.path.join(currentPath, name)
        # the input file is not exist just return
        if not os.path.exists(name):
            return results
        # if is directory
        if os.path.isdir(name):
            listFiles = os.listdir(name)
            for f in listFiles:
                if self.__isPattern(pattern, os.path.basename(f)):
                    continue
                # recusive for directory
                if recursive:
                    results |= self.files(os.path.join(name, f),
                                          pattern = pattern,
                                          recursive = recursive,
                                          directory = directory)
                # need directory
                elif directory:
                    results.add(os.path.join(name, f))
                # only need file
                elif not os.path.isdir(f):
                    results.add(os.path.join(name, f))
        # just a file
        elif not self.__isPattern(pattern, os.path.abspath(name)):
            results.add(name)
        # print(results)
        return results
    @classmethod
    def mkdir(self, dirName, parent = True):
        """Create ``dirName`` (with parents when ``parent``); no-op when it exists."""
        # file exists, do nothing
        if os.path.exists(dirName):
            return
        if parent:
            os.makedirs(dirName)
        else:
            os.mkdir(dirName)
    # concat some files into one File
    # if not given the outputFile, then generate
    @classmethod
    def concatFiles(self, dirName, pattern = ''):
        pass
# The class of generate the uuid in python
class UUID:
    """Generates pseudo-unique 32-char hex identifiers (time + randomness + host)."""
    def __init__(self):
        pass
    # generate the uuid for python
    @classmethod
    def uuid(self, *args ):
        """
        Generates a universally unique ID.
        Any arguments only create more randomness.
        Returns a 32-character lowercase hexadecimal string.
        """
        # BUGFIX: hashlib replaces the Python-2-only ``md5`` module (removed
        # in Python 3); hashlib requires bytes, hence the explicit encode.
        import hashlib
        t = int( time.time() * 1000 )
        r = int( random.random() * 100000000000000000 )
        try:
            a = socket.gethostbyname( socket.gethostname() )
        except OSError:
            # if we can't get a network address, just imagine one
            a = random.random() * 100000000000000000
        data = str(t) + ' ' + str(r) + ' ' + str(a) + ' ' + str(args)
        return hashlib.md5(data.encode('utf-8')).hexdigest()
class UniqFileName:
    """Placeholder factory for unique file names (stubs, not implemented yet)."""
    def __init__(self):
        # BUGFIX: the original ``def __init__():`` lacked ``self``, so
        # instantiating the class raised TypeError.
        pass
    # generate a time name of fileNames
    @classmethod
    def timeName(cls):
        # BUGFIX: classmethods need a ``cls`` parameter; the original
        # zero-argument signature made every call raise TypeError.
        pass
    # generate a fileName of uuid
    @classmethod
    def uuidName(cls):
        pass
class FTPUtil:
    """Skeleton FTP helper; every operation is an unimplemented stub."""
    __CONFIG = {
        'PORT': '21',
    }
    def __init__(self, host = '', user = '', passwd = '', login = True):
        # BUGFIX: several methods (including this one) were missing
        # ``self``, so the first positional argument silently received
        # the instance instead of the intended value.
        pass
    def login(self, user = '', passwd = ''):
        pass
    def download(self, fileName, target, relogin = True):
        pass
    def __downloadHelp(self, fileName, target):
        pass
    def delete(self, fileName, relogin = True):
        pass
    def list(self, directory):
        pass
|
20,664 | c48d3025eec1d4854a9fb05b6e220a3cbaa66d95 | #!/usr/bin/python3
import base64
import hashlib
import subprocess
import sys
def run_cmd(correction_file, args):
    """Run ``correction_file`` with ``args`` under the right interpreter.

    ``.py`` files run under ``python3``, ``.pl`` under ``perl`` and ``.go``
    via ``go run``; anything else is executed directly.  Returns a tuple of
    (decoded stdout, process return code).

    BUGFIX: the command is assembled in a new list, so the caller's
    ``args`` list is no longer mutated as a side effect.
    """
    cmd = [correction_file] + list(args)
    if correction_file.endswith('.py'):
        cmd = ['python3'] + cmd
    elif correction_file.endswith('.pl'):
        cmd = ['perl'] + cmd
    elif correction_file.endswith('.go'):
        cmd = ['go', 'run'] + cmd
    child = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    streamdata = child.communicate()[0]
    return streamdata.decode(), child.returncode
def exploit(correction_file, secret):
    """
    Check if the corrected source code is still vulnerable.

    Returns True when either a forged token is accepted or the token
    generator looks deterministic, False otherwise.
    NOTE(review): ``secret`` is accepted but never used here.
    """
    # Try to login with the old determinist token generation
    token = hashlib.sha1(base64.b64encode(b"random_user")).hexdigest()
    out, _ = run_cmd(correction_file, ['random_user', '', '/get-token'])
    if token in out:
        print("I'm still able to craft a valid token!")
        return True
    # Try to generate a bunch of tokens to see if the algorithm is deterministic
    # (in the real world a manual review of the algorithm is of course needed)
    outputs = set()
    for i in range(20):
        out, _ = run_cmd(correction_file, ['test', '', '/get-token'])
        outputs.add(out)
        # any duplicate output means the generator repeated itself
        if len(outputs) != i+1:
            print('The algorithm used to generate the token looks too deterministic\n')
            return True
    return False
def main():
    """CLI entry point: ``<script> <secret> <correction_file>``.

    Exits with status 3 when the corrected file is still exploitable,
    0 otherwise.
    """
    secret = sys.argv[1]
    correction_file = sys.argv[2]
    return_code = 3 if exploit(correction_file, secret) else 0
    sys.exit(return_code)

if __name__ == "__main__":
    main()
|
20,665 | 117381d021e957c017696d5e4555f7d87be34b8b | from typing import Any, List, Mapping, Optional
from valohai_yaml.objs.parameter import Parameter
class ParameterMap:
    """Pairs an execution's declared parameters with their assigned values."""

    def __init__(self, *, parameters: Mapping[str, Parameter], values: Mapping[str, Any]) -> None:
        self.parameters = parameters
        self.values = values

    def build_parameters(self) -> List[str]:
        """
        Build the CLI command line from the parameter values.

        :return: list of CLI strings -- not escaped!
        """
        bits: List[str] = []
        for name in self.parameters:
            formatted = self.build_parameter_by_name(name)
            if formatted:
                bits.extend(formatted)
        return bits

    def build_parameter_by_name(self, name: str) -> Optional[List[str]]:
        # Look the parameter up by its map key, but resolve the assigned
        # value via the parameter's own name attribute.
        param = self.parameters[name]
        return param.format_cli(self.values.get(param.name))
|
20,666 | 5838abb0856e4d71d68ba8e5ff3e5aa3d22ccec2 | import os
from tqdm import tqdm
from copy import deepcopy
import torch
import torch.utils.data as data
from . import Constants
from .tree import Tree
class HATEDataset(data.Dataset):
    """Torch dataset pairing tokenised tweets, dependency trees and labels.

    Expects four parallel files in ``path``: data.toks (tokens),
    data.parents (dependency parent indices), label.txt (one float label
    per line) and data.txt (raw tweet text).
    """
    def __init__(self, path, vocab, num_classes):
        super(HATEDataset, self).__init__()
        self.vocab = vocab
        self.num_classes = num_classes
        self.sentences = self.read_sentences(os.path.join(path, 'data.toks'))
        self.trees = self.read_trees(os.path.join(path, 'data.parents'))
        self.labels = self.read_labels(os.path.join(path, 'label.txt'))
        self.tweets=self.read_tweets(os.path.join(path, 'data.txt'))
        self.size = self.labels.size(0)
    def __len__(self):
        return self.size
    def __getitem__(self, index):
        # Deep-copy each element so callers may mutate samples freely.
        tree = deepcopy(self.trees[index])
        sent = deepcopy(self.sentences[index])
        label = deepcopy(self.labels[index])
        tweet= deepcopy(self.tweets[index])
        return (tree, sent, label,tweet)
    def read_sentences(self, filename):
        """Read one tokenised sentence per line, mapped to vocab-id tensors."""
        with open(filename, 'r') as f:
            sentences = [self.read_sentence(line) for line in tqdm(f.readlines())]
        return sentences
    def read_sentence(self, line):
        # Unknown tokens fall back to the UNK word id.
        indices = self.vocab.convertToIdx(line.split(), Constants.UNK_WORD)
        return torch.tensor(indices, dtype=torch.long, device='cpu')
    def read_trees(self, filename):
        """Parse one dependency tree per line of space-separated parent indices."""
        with open(filename, 'r') as f:
            trees = [self.read_tree(line) for line in tqdm(f.readlines())]
        return trees
    def read_tree(self, line):
        """Rebuild a Tree from 1-based parent indices (0 marks the root).

        Walks upward from each unvisited node toward the root, creating
        Tree nodes along the way and attaching each chain to an
        already-built parent.  NOTE(review): nodes whose parent index is
        -1 are skipped entirely (presumably padding) -- confirm against
        the preprocessing that produces data.parents.
        """
        parents = list(map(int, line.split()))
        trees = dict()
        root = None
        for i in range(1, len(parents) + 1):
            if i - 1 not in trees.keys() and parents[i - 1] != -1:
                idx = i
                prev = None
                while True:
                    parent = parents[idx - 1]
                    if parent == -1:
                        break
                    tree = Tree()
                    if prev is not None:
                        tree.add_child(prev)
                    trees[idx - 1] = tree
                    tree.idx = idx - 1
                    if parent - 1 in trees.keys():
                        trees[parent - 1].add_child(tree)
                        break
                    elif parent == 0:
                        root = tree
                        break
                    else:
                        prev = tree
                        idx = parent
        return root
    def read_labels(self, filename):
        """Load one float label per line as a 1-D CPU float tensor."""
        with open(filename, 'r') as f:
            labels = list(map(lambda x: float(x), f.readlines()))
            labels = torch.tensor(labels, dtype=torch.float, device='cpu')
        return labels
    def read_tweets(self, filename):
        """Return the raw tweet lines (trailing newlines preserved)."""
        with open(filename, 'r') as f:
            tweets = list(map(lambda x: str(x), f.readlines()))
        return tweets
20,667 | 3a2871c1caae6f21375aab66d9e85c49f6cf7310 | from enum import Enum
from PyQt5.QtCore import *
from PyQt5.QtGui import QPainter, QPen
from sch.obj.line import LineTool, LineObj, LineEditor
import sch.obj.net
import sch.obj.text
import sch.obj.part
from sch.view import Event
from sch.utils import Layer, LayerType
import sch.document
from copy import copy
import sch.obj.proptext
class ToolType(Enum):
    """Identifiers for the interactive editor tools.

    NOTE(review): the integer values appear to correspond to indices into
    the tuples returned by ``Controller.tools()`` -- confirm when adding
    new tools.
    """
    SelectTool = 0
    LineTool = 1
    NetTool = 2
    TextTool = 3
    PartTool = 4
def getCtrl(doc):
    """Return the controller class matching the exact type of ``doc``."""
    doc_type = type(doc)
    if doc_type is sch.document.DocPage:
        return SchController
    if doc_type is sch.document.SymbolPage:
        return SymController
    # Exact-type dispatch on purpose: any other document type is unsupported.
    raise NotImplementedError()
class Controller(QObject):
    """Mediates between a schematic/symbol document, its view and the active tool.

    Owns the tool lifecycle, grid snapping and the update/inspector
    signals.  Subclasses supply the available tool classes via ``tools``.
    """
    sigUpdate = pyqtSignal()
    sigToolChanged = pyqtSignal(int)
    sigInspectorChanged = pyqtSignal()
    def __init__(self, doc=None, view=None, lib=None):
        super().__init__()
        self._view = None
        self._doc = None
        self._tool = None
        self._toolId = 0
        self._grid = 5000
        self.lib = lib
        # properties (their setters wire up signals and the default tool)
        self.view = view
        self.doc = doc
    def handleEvent(self, event):
        """Offer ``event`` to the active tool first, then handle global keys."""
        if self._tool is not None:
            self._tool.handleEvent(event)
            if event.handled:
                return
        if event.evType == Event.Type.KeyPressed:
            event.handled = True
            if event.key == Qt.Key_Escape:
                # Escape is translated into a synthetic Cancel event
                self.handleEvent(Event(evType=Event.Type.Cancel))
            elif event.key == Qt.Key_Space:
                self.view.recenter()
            elif event.key == Qt.Key_Enter:
                self.handleEvent(Event(evType=Event.Type.Done))
            # elif event.key == Qt.Key_L:
            #     self.changeTool(ToolType.LineTool)
            # elif event.key == Qt.Key_N:
            #     self.changeTool(ToolType.NetTool)
            # elif event.key == Qt.Key_S:
            #     self.changeTool(ToolType.SelectTool)
            # elif event.key == Qt.Key_T:
            #     self.changeTool(ToolType.TextTool)
            else:
                event.handled = False
    def _installTool(self, tool):
        # Tear down the previous tool before wiring the new one; a missing
        # view leaves the controller without any tool installed.
        if self._tool is not None:
            self._tool.finish()
            self._tool = None
        if self.view is None:
            return
        self._tool = tool
        self._tool.sigUpdate.connect(self.sigUpdate)
        self.sigToolChanged.emit(self.toolId)
    def snapPt(self, pt: QPoint):
        """Snap ``pt`` to the nearest grid intersection."""
        g = self.grid
        return QPoint(int(round(float(pt.x())/g))*g, int(round(float(pt.y())/g))*g)
    @property
    def toolId(self):
        return self._toolId
    @property
    def view(self):
        return self._view
    @view.setter
    def view(self, view):
        # Assigning a view also installs the default (select) tool.
        self._view = view
        self.sigUpdate.connect(self._view.slotUpdate)
        self._installTool(SelectTool(self))
    @property
    def grid(self):
        return self._grid
    @grid.setter
    def grid(self, value):
        self._grid = value
        self.sigUpdate.emit()
    @property
    def doc(self):
        return self._doc
    @doc.setter
    def doc(self, doc):
        self._doc = doc
        self._doc.sigChanged.connect(self.sigUpdate)
        self.sigUpdate.emit()
    @property
    def inspector(self):
        return self._tool.inspector
    def getDrawables(self):
        """Return document objects plus a junction overlay and the active tool."""
        class JunctionDrawable:
            @staticmethod
            def draw(painter):
                sch.obj.net.NetObj.drawJunctions(self.doc, painter)
        out = list(self.doc.objects())
        out.append(JunctionDrawable())
        if self._tool is not None:
            out.append(self._tool)
        return out
    @pyqtSlot(int)
    def changeTool(self, tool_id):
        """Switch to the tool at index ``tool_id`` in ``tools()`` (no-op if unchanged)."""
        if self._toolId == tool_id:
            return
        self._toolId = tool_id
        self._installTool(self.tools()[tool_id](self))
        self.sigInspectorChanged.emit()
    @staticmethod
    def tools():
        # Subclasses return the tuple of tool classes they support.
        raise NotImplementedError()
class SchController(Controller):
    """Controller for schematic pages: full tool set including nets and parts."""

    def __init__(self, doc=None, view=None, lib=None):
        super().__init__(doc, view, lib)

    @staticmethod
    def tools():
        # Tuple index order matches the ToolType enum values.
        return SelectTool, LineTool, sch.obj.net.NetTool, sch.obj.text.TextTool, sch.obj.part.PartTool
class SymController(Controller):
    """Controller for symbol pages: no net/part tools, adds property text."""

    def __init__(self, doc=None, view=None, lib=None):
        super().__init__(doc, view, lib)

    @staticmethod
    def tools():
        # NOTE: index order here differs from ToolType (TextTool is index 2).
        return SelectTool, LineTool, sch.obj.text.TextTool, sch.obj.proptext.PropTextTool
class SelectTool(QObject):
    """Tool that selects objects under the cursor and spawns type-specific editors."""

    sigUpdate = pyqtSignal()

    def __init__(self, ctrl):
        QObject.__init__(self)
        self._ctrl = ctrl
        self._selection = []  # currently selected objects
        self._lastFind = []   # last hit-test result, used to cycle overlapping objects
        self._editor = None   # active per-type editor, if any

    @property
    def inspector(self):
        # Inspector widget of the active editor, or None when nothing is selected.
        if self._editor:
            return self._editor.inspector
        return None

    @staticmethod
    def name():
        return "Select"

    def finish(self):
        self.releaseSelection()
        self.sigUpdate.emit()

    def draw(self, painter: QPainter):
        # While an editor is active it owns the selection rendering.
        if self._editor:
            self._editor.draw(painter)
            return
        pen = QPen(Layer.color(LayerType.selection))
        pen.setCapStyle(Qt.RoundCap)
        pen.setJoinStyle(Qt.RoundJoin)
        pen.setWidth(0)
        painter.setBrush(Qt.NoBrush)
        painter.setPen(pen)
        for obj in self._selection:
            painter.drawRect(obj.bbox().marginsAdded(QMargins(500, 500, 500, 500)))

    def handleEvent(self, event: Event):
        if self._editor is not None:
            self._editor.handleEvent(event)
            if event.handled:
                return
        if event.evType == Event.Type.MousePressed:
            objs = list(self._ctrl.doc.findObjsNear(event.pos, self._ctrl.view.hitRadius()))
            if len(objs) > 0:
                # cycle through objects under cursor
                if set(self._lastFind) == set(objs) and len(self._selection) == 1:
                    ind = self._lastFind.index(self._selection[0]) + 1
                    if ind >= len(self._lastFind):
                        ind = 0
                    self._selection = [objs[ind]]
                else:
                    self._lastFind = objs
                    self._selection = [objs[0]]
                self.sigUpdate.emit()
            else:
                if self._selection:
                    self._selection = []
                    self._lastFind = []
                    self.sigUpdate.emit()
            self.selectionChanged(event)

    @pyqtSlot()
    def releaseSelection(self):
        self._selection.clear()
        self.selectionChanged()

    def selectionChanged(self, event=None):
        """Install the editor matching the selected object's exact type.

        Replaces the original chain of five copy-pasted ``elif`` branches
        with an exact-type dispatch table; behavior is unchanged.
        """
        self._editor = None
        if len(self._selection) == 1:
            obj = self._selection[0]
            editors = {
                LineObj: LineEditor,
                sch.obj.net.NetObj: sch.obj.net.NetEditor,
                sch.obj.text.TextObj: sch.obj.text.TextEditor,
                sch.obj.part.PartObj: sch.obj.part.PartEditor,
                sch.obj.proptext.PropTextObj: sch.obj.proptext.PropTextEditor,
            }
            editor_cls = editors.get(type(obj))
            if editor_cls is not None:
                self._editor = editor_cls(self._ctrl, obj)
                self._editor.sigUpdate.connect(self.sigUpdate)
                self._editor.sigDone.connect(self.releaseSelection)
        self._ctrl.sigInspectorChanged.emit()
        if event and self._editor is not None:
            self._editor.handleEvent(event)
    # TODO: property inspector / editor
class EditHandle(QObject):
    """Draggable square handle used by object editors to move control points."""

    # Emitted continuously while dragging, with the snapped position.
    sigDragged = pyqtSignal('QPoint')
    # Emitted once on release when the handle actually moved.
    sigMoved = pyqtSignal('QPoint')

    def __init__(self, ctrl, pos=QPoint()):
        super().__init__()
        self._ctrl = ctrl
        self.pos = QPoint(pos)  # copy — QPoint is mutable
        self._dragging = False
        self._moved = False

    def draw(self, painter: QPainter):
        # Handle size tracks the view's hit radius so it stays clickable at any zoom.
        # NOTE(review): r is a float and PyQt5's QPoint expects ints —
        # confirm this does not raise at runtime.
        r = self._ctrl.view.hitRadius() * 0.7
        x, y = self.pos.x(), self.pos.y()
        painter.drawRect(QRect(QPoint(x-r, y-r), QPoint(x+r, y+r)))

    def testHit(self, pt: QPoint):
        # Hit if the snapped point is within the view's hit radius (Manhattan metric).
        return (self.pos - self._ctrl.snapPt(pt)).manhattanLength() <= self._ctrl.view.hitRadius()

    def handleEvent(self, event: Event):
        if event.evType == Event.Type.MouseMoved:
            if self._dragging:
                # Only emit when the snapped position actually changed.
                if self.pos != self._ctrl.snapPt(event.pos):
                    self.pos = self._ctrl.snapPt(event.pos)
                    self.sigDragged.emit(self.pos)
                    self._moved = True
                event.handled = True
        elif event.evType == Event.Type.MousePressed:
            if self.testHit(event.pos):
                self._dragging = True
                self._moved = False
                event.handled = True
        elif event.evType == Event.Type.MouseReleased:
            if self._dragging:
                self._dragging = False
                if self._moved or self.pos != self._ctrl.snapPt(event.pos):
                    self.pos = self._ctrl.snapPt(event.pos)
                    self.sigMoved.emit(self.pos)
                event.handled = True
class TextHandle(EditHandle):
    """Handle specialised for text objects: hit-tests and draws the text bbox."""

    def __init__(self, ctrl, txt):
        super().__init__(ctrl, txt.posGlobal())
        self._txt = txt
        self._start = QPoint()  # grab offset between handle pos and press point

    def testHit(self, pt: QPoint):
        return self._txt.testHit(pt, 0)

    def draw(self, painter: QPainter):
        painter.drawRect(self._txt.bbox())

    def handleEvent(self, event: Event):
        if event.evType == Event.Type.MouseMoved:
            if self._dragging:
                # Preserve the grab offset so the text does not jump to the cursor.
                if self.pos != self._ctrl.snapPt(self._start + event.pos):
                    self.pos = self._ctrl.snapPt(self._start + event.pos)
                    self.sigDragged.emit(self.pos)
                    self._moved = True
                event.handled = True
        elif event.evType == Event.Type.MousePressed:
            if self.testHit(event.pos):
                self._start = self.pos - event.pos
                self._dragging = True
                self._moved = False
                event.handled = True
        elif event.evType == Event.Type.MouseReleased:
            if self._dragging:
                self._dragging = False
                # self.pos = self._ctrl.snapPt(event.pos)
                if self._moved:
                    self.sigMoved.emit(self.pos)
                event.handled = True
|
20,668 | cee7bd5c3b76c0ac2929014488106c7e73ec8796 |
def alpha(x, y, i):
    """Slope angle (radians) at sample *i* of the profile (x, y).

    Uses atan2 instead of atan(y/x): identical for x[i] > 0, but also
    well-defined at x[i] == 0 (where the original raised ZeroDivisionError).
    """
    return m.atan2(y[i], x[i])
def fric(f, x, y, l, i, mu, masse):
    """Store and return the friction energy mu*cos(alpha)*m*g*l[i] in f[i]."""
    normal_work = mu * m.cos(alpha(x, y, i)) * masse * 9.81
    f[i] = normal_work * (l[i])
    return f[i]
def pot(p, y, i, masse):
    """Store and return the gravitational potential energy m*g*h in p[i]."""
    p[i] = masse * 9.81 * y[i]
    return p[i]
def cin(c, x, y, l, i, masse):
    # Kinetic-energy term: not implemented yet (placeholder, returns None).
    pass
def vol(evol, x, y, l, i, I, omega):
    """Store and return the rotational kinetic energy I*omega^2/2 in evol[i]."""
    # Original author note: "a revoir pcq gros zbeul" -> to revisit, messy.
    evol[i] = I * (omega ** 2) / 2
    return evol[i]
def elas(elas, x, y, l, i, k, dressort):
    """Store and return the spring energy k*d^2/2 in elas[i].

    NOTE(review): the first parameter shadows this function's own name, and
    the module-level call site rebinds `elas` — worth renaming upstream.
    """
    spring_energy = k * (dressort ** 2) / 2
    elas[i] = spring_energy
    return elas[i]
def potint(petitpoids, h):
    """Return the internal potential energy petitpoids * h * g (g = 9.81).

    The original assigned the result to a local named ``potint``, shadowing
    the function itself inside its own body; use a distinct local name.
    """
    energy = petitpoids * h * 9.81
    return energy
def frot(frot, x, y, l, i):
    # Friction term: not implemented yet (placeholder, returns None).
    pass
def etot(E, x, y, l, f, p, c, elas, evol):
    """Fill *E* with the total energy p + c + elas + evol - f per sample.

    Returns E. The original indexed the function object ``etot`` instead of
    the output list ``E`` and used an undefined loop index ``i``, so it
    raised unconditionally; this version loops over all samples.
    """
    for i in range(len(E)):
        E[i] = p[i] + c[i] + elas[i] + evol[i] - f[i]
    return E
# NOTE(review): these top-level statements reference names never defined in
# this file (i, x, y, l, mu, masse, I, omega, k, dressort, E and the result
# lists f, p, c, evol), so they raise NameError as written. The last three
# lines also index-assign into the function objects `elas`, `frot` and
# `etot` themselves, which would raise TypeError even with inputs defined.
f[i]= fric(f,x,y,l,i,mu,masse)
p[i]= pot(p,y,i,masse)
c[i]= cin(c,x,y,l,i,masse)
evol[i]= vol(evol,x,y,l,i,I,omega)
elas[i]= elas(elas,x,y,l,i,k,dressort)
frot[i]= frot(frot,x,y,l,i)
etot[i]= etot(E,x,y,l,f,p,c,elas,evol)
20,669 | 55137abe9cb183626a158ae53355087e24b581a0 | # coding: utf8
import utilities as utl, os
@auth.requires_login()
def index():
    """List system-generated messages for an application's entities or columns.

    Reads the application id and the Entidades/Colunas mode from the request
    (falling back to the session), queries the matching messages, and builds
    both the on-screen list and a cp1252-encoded CSV file that
    downloadmsgs() can stream back later.  (Python 2 / web2py controller.)
    """
    if request.vars:
        idaplicacao = int(request.vars.aplicacao_id or 0)
        identcol = request.vars.sel_ent_col
    else:
        idaplicacao = int(session.aplicacao_id or 0)
        identcol = session.identcol or 'Entidades'
    # Keep the session in sync with the current selection (`<>` is Python 2 `!=`).
    if session.aplicacao_id <> idaplicacao:
        session.aplicacao_id = idaplicacao
    if session.identcol <> identcol:
        session.identcol = identcol
    if identcol == 'Entidades':
        # Entity messages: codigoorigemmsg = 1.
        sqlE = """
            SELECT aplicacoes.aplicacao
                 , mensagensentcol.codigoMensagem
                 , mensagenspadraoprefixo.descricao
                 , entidades.nomeamigavel
                 , mensagenspadraosufixo.descricao
              FROM ((((aplicacoes
            INNER JOIN mensagensentcol
                    ON aplicacoes.id = mensagensentcol.codigoaplicacao)
            INNER JOIN entidades
                    ON mensagensentcol.codigoentcol = entidades.id)
            INNER JOIN mensagenspadrao
                    ON mensagensentcol.codigomsgpadrao = mensagenspadrao.id)
            INNER JOIN mensagenspadraoprefixo
                    ON mensagenspadrao.codigomsgprefixo = mensagenspadraoprefixo.id)
            INNER JOIN mensagenspadraosufixo
                    ON mensagenspadrao.codigomsgsufixo = mensagenspadraosufixo.id
             WHERE (((mensagensentcol.codigoaplicacao)={})
               AND ((mensagensentcol.codigoorigemmsg)=1))
          ORDER BY mensagensentcol.codigomensagem;
              """.format(idaplicacao)
        sqls = db.executesql(sqlE)
    else:
        # Column messages: codigoorigemmsg = 2; also fetch rule arguments below.
        sqlC = """
            SELECT aplicacoes.aplicacao
                 , mensagensentcol.codigoMensagem
                 , mensagenspadraoprefixo.descricao
                 , colunas.attributename
                 , mensagenspadraosufixo.descricao
                 , colunas.id
                 , mensagenspadrao.codigoregra
              FROM ((((aplicacoes
            INNER JOIN mensagensentcol
                    ON aplicacoes.id = mensagensentcol.codigoaplicacao)
            INNER JOIN colunas
                    ON mensagensentcol.codigoentcol = colunas.id)
            INNER JOIN mensagenspadrao
                    ON mensagensentcol.codigomsgpadrao = mensagenspadrao.id)
            INNER JOIN mensagenspadraoprefixo
                    ON mensagenspadrao.codigomsgprefixo = mensagenspadraoprefixo.id)
            INNER JOIN mensagenspadraosufixo
                    ON mensagenspadrao.codigomsgsufixo = mensagenspadraosufixo.id
             WHERE (((mensagensentcol.codigoaplicacao)={})
               AND ((mensagensentcol.codigoorigemmsg)=2))
          ORDER BY mensagensentcol.codigomensagem;
              """.format(idaplicacao)
        sqls = db.executesql(sqlC)
    msgs = []
    csvs = ""
    for sql in sqls:
        # Re-encode the text columns (prefix, name, suffix) for the cp1252 CSV.
        sql2 = sql[2].decode('utf-8').encode('cp1252')
        sql3 = sql[3].decode('utf-8').encode('cp1252')
        sql4 = sql[4].decode('utf-8').encode('cp1252')
        if identcol == 'Entidades':
            msgs.append('{}{:04}:{}'.format(sql[0]
                                           ,sql[1]
                                           ,((sql[2] + ' ' if sql[2].strip() else '') +
                                             (sql[3] + ' ' if sql[3].strip() else '') +
                                             (sql[4] if sql[4].strip() else ''))))
            csvs += "{}{:04};{}\n".format(sql[0]
                                         ,sql[1]
                                         ,(sql2 + ' ' if sql2.strip() else '') +
                                          (sql3 + ' ' if sql3.strip() else '') +
                                          (sql4 if sql4.strip() else ''))
        else:
            # Fetch the rule arguments for this column/rule pair, if any.
            sqlA = """
                SELECT regrascolunas.argumento1
                     , regrascolunas.argumento2
                  FROM regrascolunas
                 WHERE regrascolunas.codigocoluna = {}
                   AND regrascolunas.codigoregra = {};
                  """.format(sql[5], sql[6])
            args = db.executesql(sqlA)
            if args:
                arg0 = args[0][0].decode('utf-8').encode('cp1252')
                arg1 = args[0][1].decode('utf-8').encode('cp1252')
                msgs.append('{}{:04}:{} - {} {}'.format(sql[0]
                                                       ,sql[1]
                                                       ,((sql[2] + ' ' if sql[2].strip() else '') +
                                                         (sql[3] + ' ' if sql[3].strip() else '') +
                                                         (sql[4] if sql[4].strip() else ''))
                                                       ,args[0][0]
                                                       ,args[0][1]))
                csvs += "{}{:04};{} - {} {}\n".format(sql[0]
                                                     ,sql[1]
                                                     ,(sql2 + ' ' if sql2.strip() else '') +
                                                      (sql3 + ' ' if sql3.strip() else '') +
                                                      (sql4 if sql4.strip() else '')
                                                     ,arg0
                                                     ,arg1)
            else:
                msgs.append('{}{:04}:{}'.format(sql[0]
                                               ,sql[1]
                                               ,((sql[2] + ' ' if sql[2].strip() else '') +
                                                 (sql[3] + ' ' if sql[3].strip() else '') +
                                                 (sql[4] if sql[4].strip() else ''))))
                csvs += "{}{:04};{}\n".format(sql[0]
                                             ,sql[1]
                                             ,(sql2 + ' ' if sql2.strip() else '') +
                                              (sql3 + ' ' if sql3.strip() else '') +
                                              (sql4 if sql4.strip() else ''))
    # Write the CSV into the local temp dir via the administrative share so
    # downloadmsgs() can find it at the same fixed path.
    outcsv = os.path.join( '\\\\'
                         , '127.0.0.1'
                         , 'c$'
                         , os.getenv('temp')
                         , 'csvs.csv')
    out = open(outcsv, 'w')
    out.write(csvs)
    out.close()
    popups = []
    if identcol and msgs:
        popups.append(['Download Mensagens', 'downloadmsgs'])
    return dict({'title': ['Aplicacao:',
                           str(utl.Select(db,
                                          name='aplicacao_id',
                                          table='aplicacoes',
                                          fields=['id','descricao'],
                                          filtro=('' if (auth.user) and
                                                       (auth.has_membership(1, auth.user.id,
                                                                            'Administrador'))
                                                  else db['aplicacoes'].empresa==
                                                       auth.user.empresa),
                                          value=session.aplicacao_id or 0)),
                           '<br/>Mensagens referente as {} geradas pelo Sistema'.format(
                               utl.SelectString(name='sel_ent_col',
                                                options=[['Entidades','Entidades'],
                                                         ['Colunas','Colunas']],
                                                default=session.get('identcol',''),
                                                submit=True)),
                           utl.buttonsDownload(popups)],
                 'mensagens': msgs})
@auth.requires_login()
def downloadmsgs():
    """Stream the CSV previously generated by index() back to the browser."""
    # Same fixed temp-dir path (via the admin share) that index() writes to.
    germsg = os.path.join( '\\\\'
                         , '127.0.0.1'
                         , 'c$'
                         , os.getenv('temp')) + os.sep
    filemsg = 'csvs.csv'
    return listdetail.download(request, response, germsg, filemsg)
@auth.requires_login()
def download():
    """Standard web2py action serving files stored in the db upload fields."""
    return response.download(request, db)
|
20,670 | 449c89f4a4550f5f980ee548cc11c319434456c6 | from sympy.ntheory.modular import crt
# Advent of Code 2020, day 13 part 2: find the earliest timestamp t such that
# bus b at list offset i departs at t + i, i.e. t ≡ -i (mod b) for every bus.
# Solved with the Chinese Remainder Theorem (sympy's crt).
# (The dead, commented-out part-1 brute force has been removed.)
with open("day13input.txt") as f:
    lines = f.readlines()

schedule = lines[1]
moduli = []
residues = []
for offset, bus in enumerate(schedule.split(',')):
    if bus != 'x':
        period = int(bus)
        moduli.append(period)
        # t + offset ≡ 0 (mod period)  =>  t ≡ period - offset (mod period)
        residues.append(period - offset)
print(crt(moduli, residues)[0])
20,671 | 76653bd724a6ee3a4a27bfe608e6e4db4daa5d91 | from math import sin, cos, tan, trunc, radians
# Desafio 18: read an angle in degrees and print its sine, cosine and tangent.
print('=' * 10, 'DESAFIO 18', '=' * 10)
angulo = float(input('Digite algum ângulo: '))
graus = trunc(angulo)
rad = radians(angulo)
print('O ângulo digitado foi {}°'.format(graus))
print('O Seno de {}° é {:.2f}'.format(graus, sin(rad)))
print('O Cosseno de {}° é {:.2f}'.format(graus, cos(rad)))
print('A Tangente de {}° é {:.2f}'.format(graus, tan(rad)))
print('=' * 32)
|
20,672 | e7813da87ac2eb3cc19f4408048e55df5ace8820 | import re
import pytest
from tests.data_fixtures import order
from app.models import Order
# type hinting
from flask import Response
from typing import Dict
class TestViewOrderStatus:
    """Integration test for the GET /api/v1/order status-lookup endpoint."""

    def test_check_order(self, flask_client, order: Order):
        res: Response = flask_client.get('/api/v1/order?email={email}&code={code}'.format(
            email=order.email,
            code=order.code
        ))
        # default response check
        assert "application/json" == res.content_type
        assert 200 == res.status_code
        # data check: parse the JSON body. The original read `res.data`
        # (raw bytes, not subscriptable by key) and annotated it `Dict[str]`,
        # which raises TypeError at runtime (Dict needs two parameters).
        data: Dict[str, str] = res.get_json()
        assert order.payee == data["payee"]
        assert order.destination == data["destination"]
        assert order.code == data["code"]
        # status is expected to be a two-digit numeric code, e.g. "01"
        status_regex = re.compile(r"[0-9]{2}")
        assert re.match(status_regex, data["status"])
|
20,673 | 60babe572a17788a74f218528b758b01e905e8f2 | import numpy as np
import torch as th
from h5py import h5r
from numpy.linalg import norm
from tqdm import trange
from util import plot
from h5rw import h5read, h5write
import matplotlib.pyplot as plt
path = '/home/philipp/projects2/tomo/2019-09-09_kate_pd/'
fn = 'kate_bg_pad_align.npy'
d = np.load(path + fn)
d = np.transpose(d, (2, 1, 0))
d = d[:,400:1200,400:1200]
# %%
s = np.sum(d, 1)
f, a = plt.subplots(figsize=(15, 12))
# s = s[[25, 0]]
for si in s:
a.plot(np.arange(len(si)), si)
plt.show()
# %%
d1 = th.from_numpy(d).float()
# %%
from torch.nn.functional import mse_loss
from torch.optim import Adam
refind = 25
ref = th.sum(d1[refind], 1).float()
f, a = plt.subplots(figsize=(15, 12))
a.plot(np.arange(len(si)), ref.numpy())
plt.show()
# %%
d2 = d1
fac = th.ones((d1.shape[0]), requires_grad=True)
last_loss = 0
opt = Adam([fac], lr=1e-2)
ds = th.sum(d2, 1).float()
for i in range(100):
opt.zero_grad()
s = ds * fac.unsqueeze(1).expand_as(ds)
loss = mse_loss(s, ref)
# f, a = plt.subplots(figsize=(15, 12))
# for si in s:
# a.plot(np.arange(len(si)), si.detach().numpy())
# a.plot(np.arange(len(si)), ref.numpy(), linewidth=5)
# plt.show()
loss.backward()
print(f"i: {i} L = {loss.item():3.6g} dL = {last_loss - loss.item():3.3g}")
opt.step()
# print(fac)
last_loss = loss.item()
# fac[j] = fac2.detach().item()
print(fac)
# %%
f, a = plt.subplots(figsize=(15, 12))
s = th.sum(d2 * fac.unsqueeze(1).unsqueeze(1).expand_as(d2), 1).float()
# s *= fac.unsqueeze(1).expand_as(s)
for si in s:
a.plot(np.arange(len(si)), si.detach().numpy())
a.plot(np.arange(len(si)), ref.numpy(), linewidth=5)
plt.show()
# %%
f, a = plt.subplots(figsize=(15, 12))
a.scatter(np.arange(len(fac)), fac.detach().numpy(), linewidth=5)
plt.show()
# %%
for i in np.arange(24, 27):
plot(d[i], f"{i}")
# %%
from numpy.fft import fftfreq, fft2, fftshift, ifft2
d2_corrected = d2 * fac.unsqueeze(1).unsqueeze(1).expand_as(d2)
d2_corrected = d2_corrected.detach().numpy()
for i, d in enumerate(d2_corrected):
plot(d, f"{i}")
# %%
h5write(path + '2020-01-09_kate_intensities_corrected.h5', data=d2_corrected)
# %%
d2s = np.sum(d2_corrected, 0)
plot(d2s)
from scipy.ndimage.measurements import center_of_mass
x = (d2s > 1e5).astype(np.float32)
com = center_of_mass(x)
print(com)
c = np.array(com).astype(np.int)
s = 350
f, a = plt.subplots()
a.imshow(d2s[c[0] - s:c[0] + s, c[1] - s:c[1] + s])
# a.scatter(com[1], com[0])
plt.show()
d2c = d2_corrected[:, c[0] - s:c[0] + s, c[1] - s:c[1] + s]
#%%
i = 2
x = np.log10(np.abs(fftshift(fft2(d2c[i]))))
plot(d2c[i], f"{i}")
plot(x, f"{i}")
# %%
h5write(path + '2019-10-07_cropped.h5', data=d2c)
# %%
def rebin(a, s, mode='sum'):
    """Shrink ndarray *a* by integer bin factor(s) *s*.

    Each output element aggregates one s-sized block of the input; with
    mode='mean' the block sum is divided by the block size. Every dimension
    of *a* must be divisible by the corresponding entry of *s* (a scalar *s*
    is broadcast to all axes).

    >>> rebin(np.arange(4.0), 2).tolist()
    [1.0, 5.0]
    """
    shape = np.asarray(a.shape)
    ndim = len(shape)
    bins = np.ones_like(shape) * np.asarray(s)
    blocks = shape // bins
    # Interleave (n_blocks, bin_size) pairs so each axis is split in two.
    paired = tuple(v for axis in range(ndim) for v in (blocks[axis], bins[axis]))
    out = a.reshape(paired)
    # Sum away every bin axis, innermost first.
    for axis in range(ndim):
        out = out.sum((ndim - axis) * 2 - 1)
    if mode == 'mean':
        out /= np.prod(bins)
    return out
d2c_bin2 = rebin(d2c, (1, 2, 2))
h5write(path + '2019-10-07_cropped_bin2.h5', data=d2c)
#%%
from scipy.io import savemat
savemat(path + '2019-10-07_cropped.mat',{'data':d2c})
savemat(path + '2019-10-07_cropped_bin2.mat',{'data':d2c_bin2})
# %%
angles = np.array([6.403600311279296875 * 10,
6.220299911499023438 * 10,
6.005899810791015625 * 10,
5.808100128173828125 * 10,
5.605500030517578125 * 10,
5.404800033569335938 * 10,
5.205199813842773438 * 10,
5.005799865722656250 * 10,
4.805099868774414062 * 10,
4.607400131225585938 * 10,
4.413700103759765625 * 10,
4.203799819946289062 * 10,
3.703699874877929688 * 10,
3.407500076293945312 * 10,
3.101199913024902344 * 10,
2.804999923706054688 * 10,
2.505400085449218750 * 10,
2.206399917602539062 * 10,
1.902499961853027344 * 10,
1.605599975585937500 * 10,
1.306299972534179688 * 10,
1.006599998474121094 * 10,
7.063000202178955078,
4.046999931335449219,
1.054999947547912598,
-1.904000043869018555,
-4.939000129699707031,
-7.941999912261962891,
-1.093700027465820312 * 10,
-1.393400001525878906 * 10,
-1.691099929809570312 * 10,
-1.995299911499023438 * 10,
-2.291099929809570312 * 10,
-2.691399955749511719 * 10,
-2.993199920654296875 * 10,
-3.291099929809570312 * 10,
-3.590999984741210938 * 10,
-3.890200042724609375 * 10,
-4.190999984741210938 * 10,
-4.389799880981445312 * 10,
-4.594599914550781250 * 10,
-4.793799972534179688 * 10,
-4.991500091552734375 * 10,
-5.191799926757812500 * 10,
-5.391600036621093750 * 10,
-5.591099929809570312 * 10,
-5.791299819946289062 * 10,
-5.988399887084960938 * 10,
-6.190499877929687500 * 10
])
savemat(path + 'angles.mat', {'angles':angles})
# %%
fd = fftshift(np.log10(np.abs(fft2(d2_corrected))), (1, 2))
# %%
c = fd.shape[1] // 2
s = 150
fd1 = fd[:, c - s:c + s, c - s:c + s]
plot(fd1[25])
# %%
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.font_manager as fm
import matplotlib.patches as patches
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
savePath = None
cmap = ['inferno', 'inferno']
title = ['Tilt %02d' % 0, 'FFT if tilt %02d' % 0]
show = True
figsize = (10, 10)
pix = 100
q = 0.1736 * pix
scale = (pix, f'{q / 10} nm')
im1, im2 = d2c[0], fd1[0]
fig = plt.figure(figsize=figsize)
gs1 = gridspec.GridSpec(1, 2)
gs1.update(wspace=0, hspace=0.2) # set the spacing between axes.
ax0 = plt.subplot(gs1[0, 0])
ax1 = plt.subplot(gs1[0, 1])
imax0 = ax0.imshow(im1, interpolation='nearest', cmap=plt.cm.get_cmap(cmap[0]))
imax1 = ax1.imshow(im2, interpolation='nearest', cmap=plt.cm.get_cmap(cmap[0]))
ax0.set_title(title[0])
ax1.set_title(title[1])
for ax in [ax0, ax1]:
ax.set_xticks([])
ax.set_yticks([])
ax.set_yticklabels([])
ax.set_xticklabels([])
if scale is not None:
fontprops = fm.FontProperties(size=18)
scalebar = AnchoredSizeBar(ax0.transData,
scale[0], scale[1], 'lower right',
pad=0.1,
color='white',
frameon=False,
size_vertical=im1.shape[0] / 40,
fontproperties=fontprops)
ax0.add_artist(scalebar)
# patch = patches.Circle((14.5, 14.5), radius=14, transform=ax0.transData, fill=None, color='r')
# ax0.add_patch(patch)
ax0.grid(False)
ax1.grid(False)
plt.tight_layout()
if show:
plt.show()
# %%
def animate(ind):
    # FuncAnimation callback: swap both image panels and titles to frame *ind*.
    imax0.set_data(d2c[ind])
    imax1.set_data(fd1[ind])
    title = ['Tilt %02d' % ind, 'FFT if tilt %02d' % ind]
    ax0.set_title(title[0])
    ax1.set_title(title[1])
lin_ani = animation.FuncAnimation(fig, animate, frames=np.arange(d2_corrected.shape[0]), repeat=True)
FFwriter = animation.FFMpegWriter(fps=25)
lin_ani.save('/home/philipp/projects2/tomo/2019-09-09_kate_pd/02_denoise/cropped.mp4', writer=FFwriter)
|
20,674 | 4ed6a634f4d7295d6e8c15d7c8ae2c24107528d6 | x = input().split()
# Compute a - 2*b from the two integers read above; print it, flooring at 0.
a = int(x[0])
b = int(x[1])
remaining = a - b * 2
print(remaining if remaining > 0 else 0)
|
20,675 | f45347af947a3f4668c1d86d69a9d891d1825df7 | import json
import os
from subprocess import PIPE, run
from aliyunsdkcore import client
from aliyunsdkafs.request.v20180112 import AuthenticateSigRequest
from aliyunsdkcore.profile import region_provider
from flask import Flask, render_template, jsonify, request
region_provider.modify_point('afs', 'cn-hangzhou', 'afs.aliyuncs.com')
env_dist = os.environ
# ACCESS_KEY、ACCESS_SECRET请替换成您的阿里云accesskey id和secret
ACCESS_KEY = env_dist.get('ACCESS_KEY', '')
ACCESS_SECRET = env_dist.get('ACCESS_SECRET', '')
APP_KEY = env_dist.get('APP_KEY', '')
SCENE = env_dist.get('SCENE', 'ic_activity')
NAME = env_dist.get('NAME', 'faucet')
CHAIN_ID = env_dist.get('CHAIN_ID', 'fuxi-develop')
AMOUNT = env_dist.get('AMOUNT', '10iris')
PASSWORD = env_dist.get('PASSWORD', '1234567890')
NODE = env_dist.get('NODE', 'tcp://192.168.150.7:46657')
# clt = client.AcsClient('YOUR ACCESSKEY', 'YOUR ACCESS_SECRET', 'cn-hangzhou')
clt = client.AcsClient(ACCESS_KEY, ACCESS_SECRET, 'cn-hangzhou')
ali_request = AuthenticateSigRequest.AuthenticateSigRequest()
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the faucet landing page."""
    return render_template('index.html')
@app.route('/apply', methods=['POST'])
def apply():
    """Validate the Aliyun captcha, then transfer faucet tokens to *address*.

    Returns err_code 401 for a missing address, 402 when captcha
    verification fails, otherwise echoes the address back.
    NOTE(review): success is reported without checking whether send()
    actually succeeded — confirm this is intended best-effort behavior.
    """
    token = request.values.get("token", "")
    session_id = request.values.get("session_id", "")
    sig = request.values.get("sig", "")
    ip = request.remote_addr
    address = request.values.get("address", "")
    if address.strip() == "":
        return jsonify({"err_code": "401", "err_msg": "address is empty"})
    if verify(token, session_id, sig, ip):
        address = request.values.get("address", "")
        send(address)
        return jsonify({"data": address})
    return jsonify({"err_code": "402", "err_msg": "verify error"})
def verify(token, session_id, sig, ip):
    """Ask the Aliyun AFS service whether the captcha signature is valid.

    Returns True only when the service answers with Code == 100.
    """
    # Required: obtained from the front end, must not be changed
    ali_request.set_SessionId(session_id)
    # Required: obtained from the front end, must not be changed; for
    # Android/iOS only this parameter changes, the others stay as-is
    ali_request.set_Sig(sig)
    # Required: obtained from the front end, must not be changed
    ali_request.set_Token(token)
    # Required: obtained from the front end, must not be changed
    ali_request.set_Scene(SCENE)
    # Required: filled in by the backend
    ali_request.set_AppKey(APP_KEY)
    # Required: filled in by the backend
    ali_request.set_RemoteIp(ip)
    try:
        result = clt.do_action_with_exception(ali_request)  # Code 100 = signature valid, 900 = failed
    except Exception:
        return False
    print(result)
    s = bytes.decode(result)
    j = json.loads(s)
    if j.get('Code', -100) == 100:
        return True
    return False
def send(address):
    """Transfer AMOUNT to *address* via the iriscli CLI, piping in PASSWORD.

    Security fix: the original interpolated the request-supplied *address*
    into a shell=True command string, allowing shell command injection.
    Build an argument list and run it with shell=False instead.
    """
    cmd = [
        "iriscli", "send",
        "--to={0}".format(address),
        "--name={0}".format(NAME),
        "--chain-id={0}".format(CHAIN_ID),
        "--amount={0}".format(AMOUNT),
        "--node={0}".format(NODE),
    ]
    print(cmd)
    p = run(cmd, stdout=PIPE, input=(PASSWORD + "\n").encode())
    print(p.stdout)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=4000)
|
20,676 | 41b16a44a94f12dd9ec6677a2dacc162bfe8b094 | from django.db import models
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
import datetime
__all__ = ['BaseNote', 'BaseTemporalManager', 'BaseTemporalModel',
'CurrencyField']
class BaseNote(models.Model):
    """Base class for notes with optional attachment."""
    # Author is set by the application, never editable through forms.
    author = models.ForeignKey(User, editable=False)
    created = models.DateTimeField(editable=False, auto_now_add=True)
    text = models.TextField()
    attachment = models.FileField(upload_to='attachments',
                                  blank=True, null=True)

    class Meta:
        abstract = True

    def __unicode__(self):
        return self.text
class BaseTemporalManager(models.Manager):
    """Manager for BaseTemporalModel.

    Each filter defaults *when* to the current time. NULL start/end are
    treated as unbounded.
    """
    use_for_related_fields = True

    def filter_active(self, when=None):
        # Active: started at or before *when* (or no start) and not yet ended.
        if when is None:
            when = datetime.datetime.now()
        return self.get_query_set() \
            .filter(models.Q(start__isnull=True) | models.Q(start__lte=when)) \
            .filter(models.Q(end__isnull=True) | models.Q(end__gte=when))

    def filter_archived(self, when=None):
        # Archived: ended strictly before *when*.
        if when is None:
            when = datetime.datetime.now()
        return self.get_query_set().filter(end__lt=when)

    def filter_future(self, when=None):
        # Future: starts strictly after *when*.
        if when is None:
            when = datetime.datetime.now()
        return self.get_query_set().filter(start__gt=when)
class BaseTemporalModel(models.Model):
    """Base class for objects having 'start' and 'end' datetime."""
    start = models.DateTimeField(blank=True, null=True)
    end = models.DateTimeField(blank=True, null=True)

    objects = BaseTemporalManager()

    class Meta:
        abstract = True

    def clean(self):
        if self.start and self.end and self.end < self.start:
            raise ValidationError('negative intervals are not allowed')

    def save(self, *args, **kwargs):
        # Enforce validation on every save, not just on form submission.
        self.clean()
        super(BaseTemporalModel, self).save(*args, **kwargs)

    def is_active_at(self, when=None):
        # Active at *when* (default: now); missing start/end mean unbounded.
        if when is None:
            when = datetime.datetime.now()
        return ((not self.start or self.start <= when) and
                (not self.end or self.end >= when))

    @property
    def is_active(self):
        return self.is_active_at(None)

    @property
    def is_future(self):
        return self.start and self.start > datetime.datetime.now()

    @property
    def is_archived(self):
        return self.end and self.end < datetime.datetime.now()

    def days_range(self, frm=None, to=None, filter=None):
        # return days which are both within [frm,to] and [start,end] ranges,
        # while allowing any param to be None (unbounded); if both frm and
        # start are None, no days are returned; if both to and end are None,
        # current time is used as the end time
        # days are optionally filtered by the filter function (should return
        # True for days which should be included)
        if frm:
            if self.start:
                frm = max(frm, self.start)
        else:
            if self.start:
                frm = self.start
            else:
                return []  # startless unbounded - return [] for sanity
        if to:
            if self.end:
                to = min(to, self.end)
        else:
            if self.end:
                to = self.end
            else:
                return []  # endless unbounded - return [] for sanity
        retval = []
        while frm < to:
            if filter is None or filter(frm.date()):
                retval.append(frm.date())
            frm += datetime.timedelta(days=1)
        return retval

    @property
    def days(self):
        # Number of (unfiltered) days in the object's own [start, end] range.
        return len(self.days_range())
class CurrencyField(models.DecimalField):
    """DecimalField preconfigured for money: 11 digits, 2 decimal places.

    Callers may still override either setting via keyword arguments.
    """

    def __init__(self, *args, **kwargs):
        kwargs.setdefault('max_digits', 11)
        kwargs.setdefault('decimal_places', 2)
        super(CurrencyField, self).__init__(*args, **kwargs)
|
20,677 | 22b51cb6590fc7b63b1473df2427aa9b3f769f76 | from tkinter import *
from tkinter import ttk
import random
from bubbleSort import *
from quickSort import *
from mergeSort import *
from insertionSort import *
from selectionSort import *
from heapSort import *
root = Tk()
root.title('Sorting Algorithm Visualisation')
root.maxsize(1500, 1500)
root.config(bg='black')
arr = []
def drawData(data, colorArray):
    """Render *data* as a bar chart on the canvas, one bar per value.

    colorArray supplies the fill colour for each bar. Value labels are drawn
    only for arrays of up to 100 elements (with wider spacing) to keep the
    display readable. The original duplicated the whole drawing loop in two
    branches that differed only in spacing and labels; merged here.
    """
    canvas.delete("all")
    c_height = 500
    c_width = 1300
    x_width = c_width / (len(data) + 1)
    offset = 5
    show_labels = len(data) <= 100
    spacing = 5 if show_labels else 2
    normalizedData = [i / max(data) for i in data]
    for i, height in enumerate(normalizedData):
        x0 = i * x_width + offset + spacing
        y0 = c_height - height * 480
        x1 = (i + 1) * x_width + offset
        y1 = c_height
        canvas.create_rectangle(x0, y0, x1, y1, fill=colorArray[i])
        if show_labels:
            canvas.create_text(x0 + 2, y0, anchor=SW, text=str(data[i]))
    root.update_idletasks()
def startAlgorithm():
    """Run the sorting algorithm chosen in the combobox on the current array."""
    global arr
    # Nothing to sort until the user has generated an array.
    if not arr:
        return
    if menu.get() == 'Quick Sort':
        quickSort(arr, 0, len(arr)-1, drawData, speed.get())
        # Repaint everything green once sorted.
        drawData(arr, ['green' for x in range(len(arr))])
    elif menu.get() == 'Bubble Sort':
        bubbleSort(arr, drawData, speed.get())
        #drawData(arr, ['green' for x in range(len(arr))])
    elif menu.get() == 'Merge Sort':
        mergeSort(arr,0,len(arr) - 1 ,drawData, speed.get())
        #drawData(arr, ['green' for x in range(len(arr))])
    elif menu.get() == 'Insertion Sort':
        insertionSort(arr,drawData, speed.get())
    elif menu.get() == 'Selection Sort':
        selectionSort(arr,drawData, speed.get())
    elif menu.get() == 'Heap Sort':
        heapSort(arr,drawData, speed.get())
def gen():
    """Generate a random array from the UI slider values and draw it red."""
    global arr
    minval = int(minVal.get())
    maxval = int(maxVal.get())
    size = int(inputSize.get())
    # The sliders may be set in either order; normalise so minval <= maxval.
    if minval > maxval:
        minval,maxval = maxval,minval
    arr = []
    for _ in range(size):
        arr.append( random.randrange(minval , maxval + 1 ) )
    print(arr)
    drawData(arr,['red' for x in range(len(arr))] )
selected_alg = ''
canvas = Canvas(root, width=1310, height=500, bg='light blue')
canvas.grid(row=1, column=0, padx=2, pady=2)
UI_frame = Frame(root, width= 1300, height=300, bg='pink')
UI_frame.grid(row=2, column=0, padx=2, pady=2)
Label(UI_frame, text="Select Algorithm: ", bg='OliveDrab1',width=35,height=2).grid(row=2, column=1, padx=5, pady=5)
menu = ttk.Combobox(UI_frame, textvariable=selected_alg, width=65 , values=['Bubble Sort', 'Merge Sort','Quick Sort','Insertion Sort','Selection Sort','Heap Sort'])
menu.grid(row=2, column=2, padx=5, pady=5)
menu.current(0)
Button(UI_frame, text="Start Sorting", command= startAlgorithm ,width=35,height=2 , bg='orchid2').grid(row=2, column=3, padx=5, pady=5)
speed = Scale(UI_frame, from_ = 1, to =40 , length = 420 , digits = 1 , resolution=1, orient =HORIZONTAL , label = "Select Speed" )
speed.grid(row = 3 , column = 2 , pady = 3 )
inputSize = Scale(UI_frame, from_ = 1, to =300 , length = 420, resolution=1, orient =HORIZONTAL , label = "Array Size" )
inputSize.grid(row=4, column=1, padx=5, pady=5, sticky=W)
maxVal = Scale(UI_frame, from_ = 1, to =400 , length = 420, resolution=1, orient =HORIZONTAL , label = "Maximum Value" )
maxVal.grid(row=4, column=2, padx=5, pady=5, sticky=W)
minVal = Scale(UI_frame, from_ = 1, to =400 , length = 420, resolution=1, orient =HORIZONTAL , label = "Minimum Value" )
minVal.grid(row=4, column=3, padx=5, pady=5, sticky=W)
Button(UI_frame, text="Generate Array",width=35,height=2, command=gen, bg='SeaGreen1').grid(row=5, column=2, padx=5, pady=5)
root.mainloop() |
20,678 | 9b4cfebfd245d4c96f8d657829b8c8ab42c5d7c5 | from django.db import models
class Comment(models.Model):
    """A free-text comment owned by a Signal_User_Profile."""
    # Deleting the owning profile cascades and deletes the comment too.
    owner_user_profile = models.ForeignKey('accounts.Signal_User_Profile', on_delete=models.CASCADE)
    comment_text = models.TextField(max_length=5000)
    def __str__(self):
        # Short preview: first 50 characters of the comment text.
        return str(self.comment_text[:50])
20,679 | e8e262c87137386cbabea902492f09ebd657a53b | print pow(int(raw_input()),int(raw_input()))+pow(int(raw_input()),int(raw_input()))
|
20,680 | ee4e098e02df8667c1a0d264e33013d7dc9fc18e | import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
import glob
import os
import csv
def normData(dataX):
    """Min-max normalise each column of a 2-D array into [0, 1].

    Columns with a spread below 1e-6 (effectively constant) are replaced by
    the negated column maximum, preserving the original behaviour.

    Fix: the previous version aliased the input (``dataX_norm = dataX``) and
    mutated the caller's array in place; we now work on a float copy.
    """
    dataX_norm = np.array(dataX, dtype=float)  # copy: never clobber caller data
    for i in range(dataX_norm.shape[1]):
        col = dataX_norm[:, i]
        Max = col.max()
        Min = col.min()
        if (Max - Min) > 1.0e-6:
            dataX_norm[:, i] = (col - Min) / (Max - Min)
        else:
            dataX_norm[:, i] = -Max
    return dataX_norm
# Containers: flattened feature matrix, per-file 2-D feature blocks,
# feature/column names, and the regression target.
dataX = []
dataX_2d_list = []
dataNames = []
dataY = []
# load data X
# 333 .. 2/28
dataXPath = "data_weather/"
nNum = 0   # number of weather files loaded
fNum = 0   # flattened feature count per file (rows * cols)
for f in glob.glob(os.path.join(dataXPath, "*.txt")):
    # get data
    data_raw = np.loadtxt(f, delimiter=",", dtype=None, skiprows=1)
    data_feature = data_raw[:334,:]  # keep the first 334 rows (see note above)
    dataX_2d_list.append(data_feature)
    fNum = len(data_feature[0,:])*len(data_feature[:,0])
    data = data_feature
    nNum += 1
    for val in data:
        dataX.append(val)
    # get names: re-read the header row via csv
    # NOTE(review): the file handle is never closed -- consider a `with` block.
    file = open(f, mode='r')
    data_reader = csv.reader(file,delimiter=",")
    data_raw = [row for row in data_reader]
    dataNames = data_raw[0]
dataX = np.array(dataX)
dataX = np.reshape(dataX, (nNum,fNum))
# load data Y
dataYName = "data_sakura/sakura_2018_2001.txt"
data_tgt = np.loadtxt(dataYName, delimiter=",")
dataY = data_tgt
## plot 2dlist data
# Plot each file's per-feature time series, coloured red when the file's
# normalised target is in the upper half and blue otherwise.
minY, maxY = min(dataY), max(dataY)
dataY_01 = (dataY - minY) / (maxY - minY)
figAll_plt = plt.figure()
figAll_plt.subplots_adjust(wspace=0.4, hspace=0.6)
numcol = 3
# BUG FIX: use integer division -- the old float row count makes
# Figure.add_subplot fail on Python 3 / modern matplotlib.
numrow = len(dataNames) // numcol + 1
for i in range(len(dataNames)):
    ax = figAll_plt.add_subplot(numrow, numcol, i + 1)
    for j in range(nNum):
        y = dataX_2d_list[j][:, i]
        x = range(len(y))
        cval = "#ee0000" if dataY_01[j] >= 0.5 else "#0000ee"
        ax.plot(x, y, c=cval, alpha=0.7)
    ax.set_title(dataNames[i])
    ax.set_xlabel("x")
    ax.set_ylabel("y")
figAll_plt.show()
plt.show()
## calc 1d st
# Scatter per-file summary statistics (mean/max/min/std) of the selected
# features against the raw target dataY: one row of four subplots per feature.
minY, maxY = min(dataY), max(dataY)
dataY_01 = (dataY - minY) / (maxY - minY)
figStat_plt = plt.figure()
figStat_plt.subplots_adjust(wspace=0.4, hspace=0.6)
ftoShow = [0, 1, 3, 5, 6]
for i in range(len(ftoShow)):
    # BUG FIX: the old loop indexed columns and names with `i`, silently
    # ignoring the feature indices listed in ftoShow.
    fidx = ftoShow[i]
    series = [dataX_2d_list[j][:, fidx] for j in range(nNum)]
    f_mean = np.array([sum(y) / float(len(y)) for y in series])
    f_max = np.array([max(y) for y in series])
    f_min = np.array([min(y) for y in series])
    f_var = np.array([np.std(y) for y in series])
    stats = (("_mean", f_mean), ("_max", f_max), ("_min", f_min), ("_std", f_var))
    for col, (suffix, values) in enumerate(stats, start=1):
        ax = figStat_plt.add_subplot(len(ftoShow), 4, 4 * i + col)
        ax.scatter(values, dataY, color="#222222", alpha=0.7)
        ax.set_title(dataNames[fidx] + suffix)
        ax.set_xlabel("x")
        ax.set_ylabel("y")
figStat_plt.show()
plt.show()
# calc statistics. Use it as feature set.
# Replaces the raw flattened features with 4 summary stats per column.
dataX_stat = []
for j in range(nNum):
    for i in range(len(dataNames)):
        y = dataX_2d_list[j][:,i]
        dataX_stat.append(sum(y)/float(len(y)))
        dataX_stat.append(max(y))
        dataX_stat.append(min(y))
        dataX_stat.append(np.std(y))
dataX_stat = np.array(dataX_stat)
dataX_stat = np.reshape(dataX_stat, (nNum, len(dataNames)*4))
dataX = dataX_stat
# data normalization
dataX = normData(dataX)
# dataXminY, maxY = min(dataY), max(dataY)
# dataY = (dataY - minY) / (maxY - minY)
# Split the data into training/testing sets
numTest = 2
dataX_train = dataX[:-numTest]
dataX_test = dataX[-numTest:]
# Split the targets into training/testing sets
dataY_train = dataY[:-numTest]
dataY_test = dataY[-numTest:]
# data training and get results
regr = linear_model.LinearRegression()
regr.fit(dataX_train, dataY_train)
dataY_pred = regr.predict(dataX_test)
print('Coefficients: \n', regr.coef_)
print("Mean squared error: %.2f"
      % mean_squared_error(dataY_test, dataY_pred))
print('Variance score: %.2f' % r2_score(dataY_test, dataY_pred))
# Manual R^2 cross-check against r2_score; `myr2` is not used afterwards.
fa = dataY_test - dataY_pred
fa *= fa
fb = dataY_test - ( sum(dataY_test) / len(dataY_test))
fb *= fb
myr2 = 1- sum(fa)/ sum(fb)
# Rank features by absolute regression coefficient, largest first.
sortedIdx = np.argsort(abs(regr.coef_))
print("sorted idx: ", sortedIdx[::-1])
numShow = 3
fig = plt.figure()
for i in range(numShow):
    fIdx = sortedIdx[::-1][i]
    ax = fig.add_subplot(1,numShow,i+1)
    ax.scatter(dataX_test[:,fIdx], dataY_test, color="#222222", alpha=0.7)
    # Overlay predictions sorted by the feature value.
    dataIdx = np.argsort(dataX_test[:,fIdx])
    x = dataX_test[:,fIdx][dataIdx]
    y = dataY_pred[dataIdx]
    ax.plot(x,y, color='blue', linewidth=3)
    # ttl = dataNames[fIdx] + " " + "{:.2f}".format(regr.coef_[fIdx])
    ttl = str(fIdx) + " " + "{:.2f}".format(regr.coef_[fIdx])
    ax.set_title(ttl)
    ax.set_xlabel("x")
    ax.set_ylabel("y")
fig.show()
plt.show()
20,681 | 887c25457eb9925385eb18cda4648ea81b06721d | """
---------------------------------------------------
----- Exemplos de uso - Controle de diretório -----
---------------------------------------------------
Script responsável por consolidar um exemplo de
aplicação associado a criação de um report de
controle de diretório considerando todos os arquivos
presentes em uma determinada origem
Sumário
---------------------------------------------------
1. Configuração inicial
1.1 Importando bibliotecas
1.2 Definição de variáveis do projeto
2. Gerando report de controle de diretório
---------------------------------------------------
"""
# Autor: Thiago Panini
# Data: 18/04/2021
"""
---------------------------------------------------
------------ 1. CONFIGURAÇÃO INICIAL --------------
1.1 Importando bibliotecas
---------------------------------------------------
"""
# Bibliotecas padrão
from dotenv import load_dotenv, find_dotenv
import os
# Filescope
from filescope.manager import controle_de_diretorio, generate_visual_report
"""
------------------------------------------------------
-------------- 1. CONFIGURAÇÃO INICIAL ---------------
1.2 Definição de variáveis do projeto
------------------------------------------------------
"""
# Read environment variables from the nearest .env file
load_dotenv(find_dotenv())
# Directory variables: source tree to scan and report destination
SRC_PATH = os.getenv('SRC_PATH')
DST_PATH = os.getenv('DST_PATH')
"""
------------------------------------------------------
----- 3. GERANDO REPORT DE CONTROLE DE DIRETÓRIO -----
------------------------------------------------------
"""
# Build the directory-control DataFrame (save=False: not written to disk here)
df_root = controle_de_diretorio(root=SRC_PATH, save=False, output_path=DST_PATH)
# Render the visual report from the DataFrame into DST_PATH
generate_visual_report(df=df_root, output_path=DST_PATH)
20,682 | 417ffc38f9c05c796b7e921d84878a085dcf762c | from Headers.TwitterCommunicator import *
from Headers.Payoff.PayoffCalculator import *
from Headers.Payoff.DataCruncher import *
from Headers.Payoff.GoogleScraper import *
from Headers.NetworkData import *
import networkx as nx
import math
import matplotlib.pyplot as plt
def allCombinations(items, n):
    """Yield every length-*n* combination of *items*, each as a list.

    Same output and ordering as the previous hand-rolled recursion, but uses
    itertools.combinations (and drops the Python-2-only ``xrange``), so it
    works on both Python 2 and 3.
    """
    from itertools import combinations
    for combo in combinations(items, n):
        yield list(combo)
def findPotentialAdopters(graph, numAdopters):
    """Unimplemented stub: presumably meant to pick seed adopters from *graph*.

    Currently iterates the nodes and does nothing useful.
    TODO: implement or remove.
    """
    for v in graph:
        lol=0
def draw_graph(graph, layout):
    """Draw *graph* with adopters ('A') in red and non-adopters ('B') in blue.

    Uses the pre-2.0 networkx attribute API (``graph.node``) -- confirm the
    pinned networkx version before upgrading.
    """
    plt.figure()
    nodes = graph.nodes()
    colors = ['r' if graph.node[n]['choice'] == 'A' else 'b'
        for n in graph]
    plt.axis('off')
    nx.draw_networkx(graph, nodelist=nodes, with_labels=True,
        width=1, node_color=colors,alpha=.5,
        pos=layout)
def setAdopters(graph, adopters):
    """Mark every node in *adopters* as an 'A' (adopter) node in *graph*."""
    for node in adopters:
        graph.node[node]['choice'] = 'A'
def doCascade(graph, adopters):
    """Run one synchronous adoption pass over *graph*.

    A 'B' node flips to 'A' when the fraction of its neighbours that have
    already adopted (p) meets its threshold q = 1 - payoff.  Nodes flipped
    earlier in the same pass influence later nodes, as before.

    Fix: a 'B' node with no neighbours used to raise ZeroDivisionError;
    isolated nodes are now skipped (no neighbours means no influence).

    Returns the number of nodes converted in this pass.  *adopters* is kept
    for interface compatibility; it is not used (and never was).
    """
    newAdopters = 0
    for v in graph:
        if graph.node[v]['choice'] != 'B':
            continue
        a_neighbors = [w for w in graph.neighbors(v)
                       if graph.node[w]['choice'] == 'A']
        b_neighbors = [w for w in graph.neighbors(v)
                       if graph.node[w]['choice'] == 'B']
        degree = len(a_neighbors) + len(b_neighbors)
        if degree == 0:
            continue  # isolated node: previously divided by zero here
        p = 1. * len(a_neighbors) / degree
        q = 1 - graph.node[v]['payoff']
        if p >= q:
            graph.node[v]['choice'] = 'A'
            newAdopters += 1
    return newAdopters
# --- Script entry (Python 2): build the friend graph and run cascades ---
tags = ['celebrity', 'famous', 'party']
numAdopters = 6
nData = NetworkData()
nData.loadFriendNetworkData('dump_friendNetworks_prev.txt')
pCalc = PayoffCalculator(nData, tags)
pCalc.loadPayoffs()
# Drop friends with missing (NaN) or zero payoff.
# (Python 2: .keys() returns a list, so deleting while iterating is safe here.)
for key in pCalc.payOffs.keys():
    if math.isnan(pCalc.payOffs[key]) or pCalc.payOffs[key] == 0:
        del pCalc.payOffs[key]
sortedPayOffsList = sorted(pCalc.payOffs.iteritems(), key=lambda (k, v): (-v, k))[:]
lol=0
# Undirected graph with an edge between any two of my friends who follow
# each other.
friendGraph = nx.Graph()
for friend in pCalc.payOffs.keys():
    friendsOfFriend_tObjects = list(nData.friendNetworks[friend].get_iterator())
    friendsOfFriend_snames = [k['screen_name'] for k in friendsOfFriend_tObjects]
    myOtherFriends_snames = list(pCalc.payOffs.keys())
    # NOTE(review): "is not" compares identity; "!=" is presumably intended
    # for screen-name strings -- confirm.
    myOtherFriends_snames = [x for x in myOtherFriends_snames if x is not friend]
    for f in friendsOfFriend_snames:
        if f in myOtherFriends_snames:
            friendGraph.add_edge(friend, f)
nx.set_node_attributes(friendGraph, 'payoff', 0.)
nx.set_node_attributes(friendGraph, 'choice', 'B') #Choice 'A' is positive & 'B' is negative.
for n in friendGraph.nodes():
    friendGraph.node[n]['payoff'] = pCalc.payOffs[n]
# Run the cascade separately within each connected component.
clusters = list(nx.connected_component_subgraphs(friendGraph))
cluster_sizes = [len(c.nodes()) for c in clusters]
cluster_sizeMap = zip(clusters, cluster_sizes)
clusterCount = len(clusters)
for cluster in clusters:
    # Seed: the first 15 nodes of the component become initial adopters.
    ad = cluster.nodes()[0:15]
    l = nx.spring_layout(friendGraph)
    setAdopters(cluster, ad)
    #draw_graph(cluster, nx.spring_layout(cluster))
    #plt.show()
    # Iterate until a full pass converts nobody (the cascade has converged).
    while True:
        newAdopters = doCascade(cluster, ad)
        print newAdopters
        if newAdopters == 0:
            break
    #draw_graph(cluster, nx.spring_layout(cluster))
    #plt.show()
    lol=0
lol=0
20,683 | d8aa4ea340ad632d5106a9d058f2c866e9197173 | """
"""
try:
from stage_check import Output
except ImportError:
import Output
try:
from stage_check import OutputDeviceState
except ImportError:
import OutputDeviceState
def create_instance():
    """Factory hook: return a fresh OutputDeviceStateJson instance."""
    return OutputDeviceStateJson()
class OutputDeviceStateJson(OutputDeviceState.Base, Output.Json):
    """Device-state output rendered as JSON.

    Mixes OutputDeviceState.Base with Output.Json; adds no state of its own.
    """
    def __init__(self):
        super().__init__()
|
20,684 | 10ece8ea23cf20f979c20b195b86b9e6c6bba02c | from django.conf.urls import url
from django.urls import reverse
from . import views
from iron2.forms import LoginForm
from django.contrib.auth.views import login,logout,password_reset,password_reset_done,password_reset_confirm,password_reset_complete
# NOTE(review): the function-based auth views imported above (login, logout,
# password_reset, ...) were removed in Django 2.1, so this urlconf targets
# Django < 2.1 -- confirm before upgrading.
urlpatterns=[
    # Authentication
    url(r'^login/$',login,{'template_name':'iron2/login.html','authentication_form':LoginForm},name='login'),
    url(r'^logout/$',logout,{'template_name':'iron2/logout.html'},name='logout'),
    url(r'^register/$',views.register,name='register'),
    # Files and profile management
    url(r'^pdf_files/(?P<id>\d+)/$',views.pdf_view,name='pdf_files'),
    url(r'^upload/$',views.model_form_upload,name='upload'),
    url(r'^profile/$',views.profile,name='profile'),
    url(r'^userprofile/$',views.UserProfileView,name='UserProfileView'),
    url(r'^profile/(?P<pk>\d+)/$',views.profile,name='profile_with_pk'),
    url(r'^edit_form/',views.edit_form,name='edit_form'),
    # Password management (reset flow)
    url(r'^change_password/',views.change_password,name='change_password'),
    url(r'^reset_password/',password_reset,
        {'template_name':'iron2/reset_password.html','post_reset_redirect':'iron2:password_reset_done','email_template_name':'iron2/reset_password_email.html'},
        name='reset_password'),
    url(r'^reset_password_done/',password_reset_done,name='password_reset_done'),
    url(r'^reset_password_confirm/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/',password_reset_confirm,name='password_reset_confirm'),
    url(r'^reset_password_complete/',password_reset_complete,name='password_reset_complete'),
]
20,685 | 666830f704973f9d5fde2297c163fe37bea3b7eb | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 10 19:08:35 2020
@author: Hando
"""
from typing import Optional, List, Callable, Union, Dict
from qiskit import QuantumCircuit
from qiskit.aqua import AquaError
from qiskit.circuit.library.standard_gates import YGate, ZGate, XGate, HGate, IGate
class GateWrapper:
    """Appends a given gate after every single-qubit instruction of a circuit."""
    def __init__(self,
                 circuit)->None:
        """Store the circuit to wrap; raise AquaError when none is given."""
        if circuit is None:
            raise AquaError("No quantum circuit was passed.")
        else:
            self.circuit=circuit
    def wrap(self, gates=None):
        """Build a new circuit where each 1-qubit instruction is followed by *gates*.

        Raises AquaError when *gates* is None.

        NOTE(review): with more than one entry in *gates*, every instruction of
        the source circuit is re-appended once per gate, duplicating the whole
        circuit -- confirm this is intended.
        """
        if gates is None:
            raise AquaError("No gates were passed.")
        wrapped_qc=QuantumCircuit(self.circuit.num_qubits)
        for gate in gates:
            for inst, qargs, cargs in self.circuit.data:
                wrapped_qc.append(inst, qargs, cargs)
                # Barriers/measurements are copied but never wrapped.
                if inst.name in ("barrier", "measure"):
                    continue
                else:
                    # Only single-qubit instructions get the extra gate.
                    if len(qargs)==1:
                        wrapped_qc.append(gate, qargs, cargs)
                """
                else:
                    wrapped_qc.append(HGate(), [qargs[0]], [])
                    wrapped_qc.append(ZGate(), [qargs[1]], [])
                """
        return wrapped_qc
20,686 | 1368034c3f1de2c2cc080bc8639fb049cfdcf672 | #Package import
from flask import Flask, render_template, send_file, make_response, url_for, Response, redirect, request
import io
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
#initialise app
app = Flask(__name__)
#decorator for homepage
@app.route('/' )
def index():
    """Render the landing page (GET)."""
    return render_template('index.html',
        PageTitle = "Landing page")
# NOTE(review): this guard sits *before* the POST route defined below, so
# app.run() blocks and plot_png is only registered after the server stops.
# Conventionally this belongs at the bottom of the module -- confirm and move.
if __name__ == '__main__':
    app.run(debug = True)
#These functions will run when POST method is used.
@app.route('/', methods = ["POST"] )
def plot_png():
    """Read the uploaded text file and respond with a PNG (or reload the page)."""
    #gathering file from form
    uploaded_file = request.files['txt_file']
    #making sure its not empty
    if uploaded_file.filename != '':
        #reading the file
        text = uploaded_file.read()
        #converting to a string.
        # NOTE(review): str() on bytes yields the "b'...'" repr;
        # .decode() is presumably intended -- confirm.
        text = str(text)
        #You can then run any scripts you want on our file.
        #Here we used a text file so any sort of text analysis could be undertaken
        #You could even run machine learning on a csv dataset.
        print(len(text))
        #Here I want to visualise my output for my users - so I return a plot.
        #Plotting
        output = io.BytesIO()
        # NOTE(review): FigureCanvas expects a matplotlib Figure, not a str;
        # this line will raise at runtime -- a Figure built from the text
        # analysis is presumably intended. TODO confirm and fix.
        FigureCanvas(text).print_png(output)
        return Response(output.getvalue(), mimetype = 'image/png')
        #The created image will be opened on a new page
    else:
        return render_template('index.html',
        PageTitle = "Landing page")
    #This just reloads the page if no file is selected and the user tries to POST.
20,687 | 69718a1257bd5f7ce99e495c4c3b57ff5b9a80d8 | from django.shortcuts import *
from django.http import *
from .models import *
from .forms import *
import operator
from django.template.defaultfilters import slugify
from users.models import Usermodel
from movies.models import Review2
def index(request):
    """Movie list page supporting alphabetic/score sorting and title search.

    Fixes: the bare ``except:`` around the profile lookup now only catches the
    expected "no profile" case; descending sorts use ``reverse=True`` instead
    of ``sorted(...)[::-1]``.
    """
    movies = Movie.objects.all()
    searched = False
    search_term = "hi"  # placeholder; only rendered when a search actually ran
    sorted_bool = False
    sorted_by = "critic"
    if 'alphabet' in request.GET:
        movies = sorted(movies, key=operator.attrgetter('title'))
        sorted_bool = True
        sorted_by = 'alphabetically by title'
    if 'critic_score' in request.GET:
        # Highest critic score first.
        movies = sorted(movies, key=operator.attrgetter('critic_score'), reverse=True)
        sorted_bool = True
        sorted_by = 'by critic score'
    if 'user_score' in request.GET:
        movies = sorted(movies, key=operator.attrgetter('user_score'), reverse=True)
        sorted_bool = True
        sorted_by = 'by user score'
    if 'search' in request.GET:
        # A search starts from a fresh queryset, discarding any sort above
        # (pre-existing behaviour, kept).
        search_term = request.GET['search']
        movies = Movie.objects.all().filter(title__icontains=search_term)
        searched = True
    try:
        usermodel = Usermodel.objects.get(user_id=request.user.id)
    except Usermodel.DoesNotExist:  # anonymous user or no profile yet
        usermodel = None
    context = {
        'movies': movies,
        'usermodel' : usermodel,
        'searched' : searched,
        'term' : search_term,
        'sorted' : sorted_bool,
        'sorted_by' : sorted_by,
    }
    return render(request, 'index.html', context)
def search(request, term):
    """Render the index page with all movies.

    Fix: the previous version returned the tuple ``(render, 'index.html',
    context)`` instead of calling render(), which is not a valid HttpResponse.
    """
    movies = Movie.objects.all()
    context = {
        'movies': movies
    }
    return render(request, 'index.html', context)
def details(request, link):
    """Movie detail page with the three most recent user and critic reviews.

    Fix: the bare ``except:`` now only catches the expected missing-profile case.
    """
    movie = Movie.objects.get(link=link)
    # NOTE(review): reviews are filtered by movie *title* here but by movie
    # instance in submitReview/allReviews -- confirm Review2.movie's type.
    user_reviews = list(reversed(Review2.objects.filter(movie=movie.title,review_type = False)))[:3]
    critic_reviews = list(reversed(Review2.objects.filter(movie=movie.title,review_type = True)))[:3]
    try:
        usermodel = Usermodel.objects.get(user_id=request.user.id)
    except Usermodel.DoesNotExist:  # anonymous user or no profile yet
        usermodel = None
    context = {
        'movie': movie,
        'user_reviews' : user_reviews,
        'critic_reviews' : critic_reviews,
        'usermodel' : usermodel
    }
    return render(request, 'details.html', context)
def submitMovie(request):
    """Handle the add-movie form; the url link is the slugified title.

    Fix: the bare ``except:`` now only catches the expected missing-profile case.
    """
    try:
        usermodel = Usermodel.objects.get(user_id=request.user.id)
    except Usermodel.DoesNotExist:  # anonymous user or no profile yet
        usermodel = None
    if request.method == "POST":
        form = MovieForm(request.POST, request.FILES)
        link = slugify(request.POST['title'])
        if form.is_valid():
            obj = form.save(commit = False)
            obj.link = link
            obj.save()
            return redirect('..')
        else:
            # Re-render with validation errors.
            context = {'form': form, 'usermodel' : usermodel}
            return render(request, 'submitMovie.html', context=context)
    else:
        form = MovieForm()
        context = {'form': form, 'usermodel' : usermodel}
        return render(request, 'submitMovie.html', context)
def submitReview(request,link):
    """Create a review for a movie and update its aggregate scores.

    Guards: only authenticated non-superusers, at most one review per movie.
    Critic and user reviews update separate aggregate counters.

    Fixes: bare ``except:`` narrowed; ``len(qs) > 0`` replaced with
    ``.exists()``; stray semicolon removed.
    """
    movie = Movie.objects.get(link=link)
    try:
        usermodel = Usermodel.objects.get(user_id=request.user.id)
    except Usermodel.DoesNotExist:  # anonymous user or no profile yet
        usermodel = None
    if not request.user.is_authenticated or request.user.is_superuser:
        error_message = "You do not have permission to view this page. Unlucky."
        context = {'usermodel' : usermodel, 'error_message' : error_message}
        return render(request, 'pages/errormessage.html', context=context)
    # NOTE(review): an authenticated user without a Usermodel profile would
    # reach the code below with usermodel=None and crash on usermodel.critic --
    # confirm every authenticated user is guaranteed a profile.
    if Review2.objects.filter(movie = movie,user = request.user).exists():
        error_message = "You've already submitted a review for this movie!"
        context = {'usermodel' : usermodel, 'error_message' : error_message}
        return render(request,'pages/errormessage.html',context=context)
    if request.method == "POST":
        form = ReviewForm(request.POST)
        if form.is_valid():
            movie = Movie.objects.get(link=link)
            new_post = form.save(commit=False)
            new_post.movie = movie
            new_post.critic = usermodel.user_name
            new_post.user_id = request.user.id
            new_post.review_type = bool(usermodel.critic)
            new_post.save()
            # Update the aggregate and average score on the movie.
            if not usermodel.critic:
                movie.user_reviews += 1
                movie.user_aggScore += float(request.POST['score'])
                movie.user_score = movie.user_aggScore / movie.user_reviews
                movie.save()
            else:
                movie.critic_reviews += 1
                movie.critic_aggScore += float(request.POST['score'])
                movie.critic_score = movie.critic_aggScore / movie.critic_reviews
                movie.save()
            return redirect('../../details/' + movie.link)
        else:
            return redirect("/")
    else:
        form = ReviewForm()
        context = {'form': form, 'usermodel' : usermodel, 'movie' : movie, 'type' : "Critic" if usermodel.critic else "User"}
        return render(request, 'submitReview.html', context)
def nowplaying(request):
    """List in-theater movies, with the same sort/search options as index().

    Fixes: bare ``except:`` narrowed; descending sorts use ``reverse=True``;
    searching after a sort used to call ``.filter`` on a plain list (crash) --
    the search now filters the base in-theater queryset.
    """
    movies = Movie.objects.all().filter(inTheater = True)
    searched = False
    search_term = "hi"  # placeholder; only rendered when a search actually ran
    sorted_bool = False
    sorted_by = "critic"
    if 'alphabet' in request.GET:
        movies = sorted(movies, key=operator.attrgetter('title'))
        sorted_bool = True
        sorted_by = 'alphabetically by title'
    if 'critic_score' in request.GET:
        movies = sorted(movies, key=operator.attrgetter('critic_score'), reverse=True)
        sorted_bool = True
        sorted_by = 'by critic score'
    if 'user_score' in request.GET:
        movies = sorted(movies, key=operator.attrgetter('user_score'), reverse=True)
        sorted_bool = True
        sorted_by = 'by user score'
    if 'search' in request.GET:
        search_term = request.GET['search']
        # BUG FIX: after a sort `movies` is a list with no .filter();
        # re-filter from the base queryset, still restricted to theaters.
        movies = Movie.objects.filter(inTheater=True, title__icontains=search_term)
        searched = True
    try:
        usermodel = Usermodel.objects.get(user_id=request.user.id)
    except Usermodel.DoesNotExist:  # anonymous user or no profile yet
        usermodel = None
    context = {
        'movies': movies,
        'usermodel' : usermodel,
        'searched' : searched,
        'term' : search_term,
        'sorted' : sorted_bool,
        'sorted_by' : sorted_by,
    }
    return render(request, 'nowplaying.html', context)
def allReviews(request,link,review_type):
    """List every critic ('critic') or user review for a movie, newest first.

    Fixes: bare ``except:`` narrowed; redundant ``True if ... else False``
    removed.
    """
    movie = Movie.objects.get(link=link)
    type_boolean = review_type == "critic"
    reviews = list(reversed(Review2.objects.filter(movie = movie,review_type = type_boolean)))
    try:
        usermodel = Usermodel.objects.get(user_id=request.user.id)
    except Usermodel.DoesNotExist:  # anonymous user or no profile yet
        usermodel = None
    context = {'usermodel' : usermodel, 'reviews' : reviews, 'movie' : movie, 'review_text' : "Critic" if type_boolean else "User", 'review_type' : type_boolean}
    return render(request, 'allReviews.html', context)
|
20,688 | 317d26d3f7215693a774ff16f983689bfd6f446d | import numpy as np
import cv2
import process
import hogfeature
import model
def setData(path,number):
    """Load images 0..number-1 from *path* and return their HOG feature vectors.

    Only images that segment into exactly one object contribute a feature.
    (Python 2 module: uses print statements.)
    """
    j = 0  # count of images that produced a usable feature
    featureData = []
    for i in range(number):
        # File names are zero-padded to two digits: images00.jpg .. images99.jpg.
        if(i < 10):
            img = cv2.imread(path+"/images0"+str(i)+".jpg",0)
        else:
            img = cv2.imread(path+"/images"+str(i)+".jpg",0)
        rects = process.getContours(img)
        objects = process.getObjectData(rects,img)
        if(len(objects) == 1):
            #img 70x50 = 3500 pixel
            feature = np.array(hogfeature.getFeature(objects[0]).reshape(3500,1).astype(np.float16))
            featureData.append(feature)
            print "setImg "+str(j)+":Done"
            j = j + 1
    return featureData
def writeDataTrainning(io_path,n):
    """Estimate per-class Gaussian ML parameters and save them as CSV.

    Loads *n* images per class from ../Data/01..03, writes m{1..3}.csv and
    S{1..3}.csv under *io_path*, and returns the stacked [m, S] arrays.
    (Python 2 module: uses print statements.)
    """
    path = "../Data/01"
    print "setDataClass1"
    featureDataClass1 = setData(path,n)
    featureDataClass1 = np.array(featureDataClass1)
    path = "../Data/02"
    print "setDataClass2"
    featureDataClass2 = setData(path,n)
    featureDataClass2 = np.array(featureDataClass2)
    path = "../Data/03"
    print "setDataClass3"
    featureDataClass3 = setData(path,n)
    featureDataClass3 = np.array(featureDataClass3)
    # Mean vector and covariance per class, persisted for later reuse.
    print "DataClass1"
    #print np.size(featureDataClass1[0,:])
    #print np.size(featureDataClass1[:,0])
    [m1,S1] = model.Gaussian_ML_estimate(featureDataClass1)
    np.savetxt(io_path + "m1.csv",m1,delimiter=",")
    np.savetxt(io_path + "S1.csv",S1,delimiter=",")
    print "Done"
    print "DataClass2"
    #print np.size(featureDataClass2[0,:])
    #print np.size(featureDataClass2[:,0])
    [m2,S2] = model.Gaussian_ML_estimate(featureDataClass2)
    np.savetxt(io_path + "m2.csv",m2,delimiter=",")
    np.savetxt(io_path + "S2.csv",S2,delimiter=",")
    print "Done"
    print "DataClass3"
    #print np.size(featureDataClass3[0,:])
    #print np.size(featureDataClass3[:,0])
    [m3,S3] = model.Gaussian_ML_estimate(featureDataClass3)
    np.savetxt(io_path + "m3.csv",m3,delimiter=",")
    np.savetxt(io_path + "S3.csv",S3,delimiter=",")
    print "Done"
    m = np.array([m1,m2,m3])
    S = np.array([S1,S2,S3])
    """
    print "m:"
    print m
    print "S:"
    print S
    """
    return [m,S]
def readDataTrainning(io_path):
    """Load the per-class mean vectors (m1..m3) saved by writeDataTrainning.

    Returns an array of shape (3, n_features, 1): one column vector per class.
    Covariance matrices stay unloaded (kept disabled, as before).
    """
    mean_vectors = []
    for fname in ("m1.csv", "m2.csv", "m3.csv"):
        vec = np.genfromtxt(io_path + fname, delimiter=",")
        mean_vectors.append(np.array([vec]).T)
    return np.array(mean_vectors)
io_path = "../Data/trainning/"
# The triple-quoted block below is a disabled test-drive kept as a string
# literal; only the final "Done" prints when the module runs.
"""
print "read trainning data"
[m,S] = readDataTrainning(io_path)
print "done"
print "read dataset1"
img1 = cv2.imread("../Data/01/images41.jpg")
rects1 = process.getContours(img1)
objects1 = process.getObjectData(rects1,img1)
if(len(objects1) == 1):
    feature1 = np.array(hogfeature.getFeature(objects1[0]).reshape(1,3500).astype(np.float32))
print "done"
print "read dataset2"
img2 = cv2.imread("../Data/02/images41.jpg")
rects2 = process.getContours(img2)
objects2 = process.getObjectData(rects2,img2)
if(len(objects2) == 1):
    feature2 = np.array(hogfeature.getFeature(objects2[0]).reshape(1,3500).astype(np.float32))
print "done"
print "read dataset3"
img3 = cv2.imread("../Data/03/images27.jpg")
rects3 = process.getContours(img3)
objects3= process.getObjectData(rects3,img3)
if(len(objects3) == 1):
    feature3 = np.array(hogfeature.getFeature(objects3[0]).reshape(1,3500).astype(np.float32))
print "done"
print "euclidean xe dap"
print model.euclidean_classifier(m,feature1)
print "euclidean xe tay ga"
print model.euclidean_classifier(m,feature2)
print "euclidean xe so"
print model.euclidean_classifier(m,feature3)
"""
#[m,S] = writeDataTrainning(io_path,50)
print "Done"
20,689 | fe86abd0cb57d8968418cfa1337e1cc75c48da33 | """ This file contains the views used in the products app """
from django.shortcuts import render, redirect, reverse, get_object_or_404
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .models import Product, Department
from .forms import ProductForm
# Create your views here.
def all_products(request):
    """ This returns all products """
    # Optionally narrow the listing by ?department=a,b,c.
    products = Product.objects.all()
    departments = None
    if request.GET and 'department' in request.GET:
        names = request.GET['department'].split(',')
        products = products.filter(department__name__in=names)
        departments = Department.objects.filter(name__in=names)
    return render(request, 'products/products.html', {
        'products': products,
        'current_departments': departments,
    })
def all_prints(request):
    """ This returns all print objects """
    # Department "1" holds the prints.
    context = {'prints': Product.objects.filter(department="1")}
    return render(request, 'products/prints.html', context)
def all_frames(request):
    """ This returns all frame objects """
    # Department "2" holds the frames.
    context = {'frames': Product.objects.filter(department="2")}
    return render(request, 'products/frames.html', context)
def product_detail(request, product_id):
    """ This displays the details for each specific product """
    # 404 if the product does not exist.
    found = get_object_or_404(Product, pk=product_id)
    return render(request, 'products/product_detail.html', {'product': found})
@login_required
def add_product(request):
    """ This allows an admin to add new products to the website """
    # Superuser-only guard; everyone else is redirected home.
    if not request.user.is_superuser:
        messages.error(request, 'This feature is for Admin only.')
        return redirect(reverse('home'))
    if request.method == 'POST':
        form = ProductForm(request.POST, request.FILES)
        if form.is_valid():
            product = form.save()
            messages.success(request, 'Successfully Added new product.')
            return redirect(reverse('product_detail', args=[product.id]))
        else:
            # Falls through to re-render the bound form with its errors.
            messages.error(request,
                           'Failed to add product, please enter valid form.')
    else:
        form = ProductForm()
    template = 'products/add_product.html'
    context = {
        'form': form,
    }
    return render(request, template, context)
@login_required
def edit_product(request, product_id):
    """ This allows an admin to edit products on the website """
    # Superuser-only guard; everyone else is redirected home.
    if not request.user.is_superuser:
        messages.error(request, 'This feature is for Admin only.')
        return redirect(reverse('home'))
    product = get_object_or_404(Product, pk=product_id)
    if request.method == 'POST':
        form = ProductForm(request.POST, request.FILES, instance=product)
        if form.is_valid():
            form.save()
            messages.success(request, 'Successfully updated product.')
            return redirect(reverse('product_detail', args=[product.id]))
        else:
            # Falls through to re-render the bound form with its errors.
            messages.error(request,
                           'Failed to update, please ensure form is valid.')
    else:
        form = ProductForm(instance=product)
        messages.info(request, f'You are editing { product.name }')
    template = 'products/edit_product.html'
    context = {
        'form': form,
        'product': product,
    }
    return render(request, template, context)
@login_required
def delete_product(request, product_id):
    """ This allows an admin to delete products on the website """
    if not request.user.is_superuser:
        messages.error(request, 'This feature is for Admin only.')
        return redirect(reverse('home'))
    product = get_object_or_404(Product, pk=product_id)
    # NOTE(review): deletion happens on a plain GET with no confirmation or
    # POST/CSRF protection -- consider requiring POST.
    product.delete()
    messages.success(request, 'Product Deleted')
    return redirect(reverse('home'))
|
20,690 | 563a2fe973c08b8c6f36335d7e29e7f2f74493af | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import inspect
import textwrap
from six.moves import zip_longest
import llnl.util.tty as tty
import llnl.util.tty.color as color
from llnl.util.tty.colify import colify
import spack.cmd.common.arguments as arguments
import spack.fetch_strategy as fs
import spack.repo
import spack.spec
from spack.package_base import has_test_method, preferred_version
# Metadata consumed by spack's command framework.
description = "get detailed information on a particular package"
section = "basic"
level = "short"
# Colour markup tokens understood by llnl.util.tty.color.
header_color = "@*b"
plain_format = "@."
def padder(str_list, extra=0):
    """Return a padding function sized to the longest item in *str_list*.

    The returned function right-pads its (stringified) argument with spaces
    up to that longest length plus *extra*; longer inputs pass unchanged.
    """
    width = max(len(str(item)) for item in str_list) + extra

    def pad(value):
        text = str(value)
        return text + " " * max(0, width - len(text))

    return pad
def setup_parser(subparser):
    """Define the command-line options for `spack info`."""
    subparser.add_argument(
        "-a", "--all", action="store_true", default=False, help="output all package information"
    )
    # Each flag reuses the docstring of its print_* helper as its help text,
    # so those docstrings double as user-facing strings.
    options = [
        ("--detectable", print_detectable.__doc__),
        ("--maintainers", print_maintainers.__doc__),
        ("--no-dependencies", "do not " + print_dependencies.__doc__),
        ("--no-variants", "do not " + print_variants.__doc__),
        ("--no-versions", "do not " + print_versions.__doc__),
        ("--phases", print_phases.__doc__),
        ("--tags", print_tags.__doc__),
        ("--tests", print_tests.__doc__),
        ("--virtuals", print_virtuals.__doc__),
    ]
    for opt, help_comment in options:
        subparser.add_argument(opt, action="store_true", help=help_comment)
    arguments.add_common_arguments(subparser, ["package"])
def section_title(s):
    """Wrap *s* in the bold section-header colour markup."""
    return "".join((header_color, s, plain_format))
def version(s):
    """Wrap *s* in the version colour markup."""
    return "".join((spack.spec.version_color, s, plain_format))
def variant(s):
    """Wrap *s* in the enabled-variant colour markup."""
    return "".join((spack.spec.enabled_variant_color, s, plain_format))
class VariantFormatter(object):
    """Tabular formatter for a package's variants.

    Computes column widths from the variant data, capped against the terminal
    width, and yields the formatted table rows via the `lines` property.
    """
    def __init__(self, variants):
        self.variants = variants
        self.headers = ("Name [Default]", "When", "Allowed values", "Description")
        # Formats
        fmt_name = "{0} [{1}]"
        # Initialize column widths with the length of the
        # corresponding headers, as they cannot be shorter
        # than that
        self.column_widths = [len(x) for x in self.headers]
        # Expand columns based on max line lengths
        for k, e in variants.items():
            v, w = e
            candidate_max_widths = (
                len(fmt_name.format(k, self.default(v))),  # Name [Default]
                len(str(w)),
                len(v.allowed_values),  # Allowed values
                len(v.description),  # Description
            )
            self.column_widths = (
                max(self.column_widths[0], candidate_max_widths[0]),
                max(self.column_widths[1], candidate_max_widths[1]),
                max(self.column_widths[2], candidate_max_widths[2]),
                max(self.column_widths[3], candidate_max_widths[3]),
            )
        # Don't let name or possible values be less than max widths
        _, cols = tty.terminal_size()
        max_name = min(self.column_widths[0], 30)
        max_when = min(self.column_widths[1], 30)
        max_vals = min(self.column_widths[2], 20)
        # allow the description column to extend as wide as the terminal.
        max_description = min(
            self.column_widths[3],
            # min width 70 cols, 14 cols of margins and column spacing
            max(cols, 70) - max_name - max_vals - 14,
        )
        self.column_widths = (max_name, max_when, max_vals, max_description)
        # Compute the format
        self.fmt = "%%-%ss%%-%ss%%-%ss%%s" % (
            self.column_widths[0] + 4,
            self.column_widths[1] + 4,
            self.column_widths[2] + 4,
        )
    def default(self, v):
        """Render the variant's default: 'on'/'off' for booleans, as-is otherwise."""
        s = "on" if v.default is True else "off"
        if not isinstance(v.default, bool):
            s = v.default
        return s
    @property
    def lines(self):
        """Yield the formatted table lines: header, underline, then one
        word-wrapped row group per variant (or ' None' when empty)."""
        if not self.variants:
            yield "    None"
        else:
            yield "    " + self.fmt % self.headers
            underline = tuple([w * "=" for w in self.column_widths])
            yield "    " + self.fmt % underline
            yield ""
            for k, e in sorted(self.variants.items()):
                v, w = e
                name = textwrap.wrap(
                    "{0} [{1}]".format(k, self.default(v)), width=self.column_widths[0]
                )
                if all(spec == spack.spec.Spec() for spec in w):
                    w = "--"
                when = textwrap.wrap(str(w), width=self.column_widths[1])
                allowed = v.allowed_values.replace("True, False", "on, off")
                allowed = textwrap.wrap(allowed, width=self.column_widths[2])
                description = []
                for d_line in v.description.split("\n"):
                    description += textwrap.wrap(d_line, width=self.column_widths[3])
                # zip_longest pads the shorter columns with "" so every
                # wrapped line of the longest column gets a full row.
                for t in zip_longest(name, when, allowed, description, fillvalue=""):
                    yield "    " + self.fmt % t
def print_dependencies(pkg):
    """Print the package's dependencies, grouped by dependency type."""
    for kind in ("build", "link", "run"):
        color.cprint("")
        color.cprint(section_title("%s Dependencies:" % kind.capitalize()))
        names = sorted(pkg.dependencies_of_type(kind))
        if not names:
            color.cprint(" None")
        else:
            colify(names, indent=4)
def print_detectable(pkg):
    """Report whether the package can be detected as an external install."""
    color.cprint("")
    color.cprint(section_title("Externally Detectable: "))
    # A package advertising 'executables' or 'libraries' supports
    # external detection; anything else does not.
    if not (hasattr(pkg, "executables") or hasattr(pkg, "libraries")):
        color.cprint(" False")
        return
    find_attributes = []
    if hasattr(pkg, "determine_version"):
        find_attributes.append("version")
    if hasattr(pkg, "determine_variants"):
        find_attributes.append("variants")
    # Without determine_version/determine_variants the package must rely
    # on some custom detection mechanism; just report it as detectable.
    suffix = " (" + ", ".join(find_attributes) + ")" if find_attributes else ""
    color.cprint(" True{0}".format(suffix))
def print_maintainers(pkg):
    """Print the package's maintainer handles, when it has any."""
    if pkg.maintainers:
        # '@@' survives color escaping as a literal '@' handle prefix.
        handles = " ".join("@@" + m for m in pkg.maintainers)
        color.cprint("")
        color.cprint(section_title("Maintainers: ") + handles)
def print_phases(pkg):
    """Print the package's installation phases, when it defines any."""
    if getattr(pkg, "phases", None):
        color.cprint("")
        color.cprint(section_title("Installation Phases:"))
        # All phases go on a single line, each with its own indent.
        color.cprint("".join(" {0}".format(p) for p in pkg.phases))
def print_tags(pkg):
    """Print the package's tags, or 'None' when it defines no tag attribute."""
    color.cprint("")
    color.cprint(section_title("Tags: "))
    if not hasattr(pkg, "tags"):
        color.cprint(" None")
    else:
        colify(sorted(pkg.tags), indent=4)
def print_tests(pkg):
    """output relevant build-time and stand-alone tests"""
    # Some built-in base packages (e.g., Autotools) define callback (e.g.,
    # check) inherited by descendant packages. These checks may not result
    # in build-time testing if the package's build does not implement the
    # expected functionality (e.g., a 'check' or 'test' targets).
    #
    # So the presence of a callback in Spack does not necessarily correspond
    # to the actual presence of built-time tests for a package.
    for callbacks, phase in [
        (pkg.build_time_test_callbacks, "Build"),
        (pkg.install_time_test_callbacks, "Install"),
    ]:
        color.cprint("")
        color.cprint(section_title("Available {0} Phase Test Methods:".format(phase)))
        names = []
        if callbacks:
            for name in callbacks:
                # Only report callbacks the package object actually carries.
                if getattr(pkg, name, False):
                    names.append(name)
        if names:
            colify(sorted(names), indent=4)
        else:
            color.cprint(" None")
    # PackageBase defines an empty install/smoke test but we want to know
    # if it has been overridden and, therefore, assumed to be implemented.
    color.cprint("")
    color.cprint(section_title("Stand-Alone/Smoke Test Methods:"))
    names = []
    pkg_cls = pkg if inspect.isclass(pkg) else pkg.__class__
    if has_test_method(pkg_cls):
        pkg_base = spack.package_base.PackageBase
        # Every class in the MRO that overrides PackageBase.test.
        test_pkgs = [
            str(cls.test)
            for cls in inspect.getmro(pkg_cls)
            if issubclass(cls, pkg_base) and cls.test != pkg_base.test
        ]
        test_pkgs = list(set(test_pkgs))
        # str(cls.test) presumably looks like '<function Pkg.test at 0x..>',
        # so split()[1] extracts the qualified name -- TODO confirm
        names.extend([(test.split()[1]).lower() for test in test_pkgs])
    # TODO Refactor START
    # Use code from package_base.py's test_process IF this functionality is
    # accepted.
    v_names = list(set([vspec.name for vspec in pkg.virtuals_provided]))
    # hack for compilers that are not dependencies (yet)
    # TODO: this all eventually goes away
    c_names = ("gcc", "intel", "intel-parallel-studio", "pgi")
    if pkg.name in c_names:
        v_names.extend(["c", "cxx", "fortran"])
    if pkg.spec.satisfies("llvm+clang"):
        v_names.extend(["c", "cxx"])
    # TODO Refactor END
    # Also report test methods provided via the virtuals this package
    # satisfies; unknown virtual packages are skipped silently.
    v_specs = [spack.spec.Spec(v_name) for v_name in v_names]
    for v_spec in v_specs:
        try:
            pkg_cls = spack.repo.path.get_pkg_class(v_spec.name)
            if has_test_method(pkg_cls):
                names.append("{0}.test".format(pkg_cls.name.lower()))
        except spack.repo.UnknownPackageError:
            pass
    if names:
        colify(sorted(names), indent=4)
    else:
        color.cprint(" None")
def print_variants(pkg):
    """Print the package's variants as a formatted table."""
    color.cprint("")
    color.cprint(section_title("Variants:"))
    for row in VariantFormatter(pkg.variants).lines:
        color.cprint(color.cescape(row))
def print_versions(pkg):
    """output versions"""
    color.cprint("")
    color.cprint(section_title("Preferred version: "))
    if not pkg.versions:
        # No versions known: every section prints 'None'.
        color.cprint(version(" None"))
        color.cprint("")
        color.cprint(section_title("Safe versions: "))
        color.cprint(version(" None"))
        color.cprint("")
        color.cprint(section_title("Deprecated versions: "))
        color.cprint(version(" None"))
    else:
        # Pad version strings so the URLs line up in one column.
        pad = padder(pkg.versions, 4)
        preferred = preferred_version(pkg)
        url = ""
        if pkg.has_code:
            # NOTE(review): presumably the fetch URL for this version
            # via the fetch-strategy module 'fs' -- confirm
            url = fs.for_package_version(pkg, preferred)
        line = version(" {0}".format(pad(preferred))) + color.cescape(url)
        color.cprint(line)
        # Partition the remaining versions, newest first, into safe vs
        # deprecated based on each version's 'deprecated' flag.
        safe = []
        deprecated = []
        for v in reversed(sorted(pkg.versions)):
            if pkg.has_code:
                url = fs.for_package_version(pkg, v)
            if pkg.versions[v].get("deprecated", False):
                deprecated.append((v, url))
            else:
                safe.append((v, url))
        for title, vers in [("Safe", safe), ("Deprecated", deprecated)]:
            color.cprint("")
            color.cprint(section_title("{0} versions: ".format(title)))
            if not vers:
                color.cprint(version(" None"))
                continue
            for v, url in vers:
                line = version(" {0}".format(pad(v))) + color.cescape(url)
                color.cprint(line)
def print_virtuals(pkg):
    """Print the virtual packages this package provides, grouped by the
    condition ('when' spec) under which they are provided."""
    color.cprint("")
    color.cprint(section_title("Virtual Packages: "))
    if not pkg.provided:
        color.cprint(" None")
        return
    # Invert {spec: whens} into {when: set-of-specs}.
    by_when = {}
    for spec, whens in pkg.provided.items():
        for when in whens:
            by_when.setdefault(when, set()).add(spec)
    for when, specs in reversed(sorted(by_when.items())):
        provided = ", ".join(s.colorized() for s in specs)
        print(" %s provides %s" % (when.colorized(), provided))
def info(parser, args):
    """Entry point: print core and requested optional package information."""
    spec = spack.spec.Spec(args.package)
    pkg_cls = spack.repo.path.get_pkg_class(spec.name)
    pkg = pkg_cls(spec)
    # Core information is always shown.
    color.cprint(section_title("{0}: ").format(pkg.build_system_class) + pkg.name)
    color.cprint("")
    color.cprint(section_title("Description:"))
    if pkg.__doc__:
        color.cprint(color.cescape(pkg.format_doc(indent=4)))
    else:
        color.cprint(" None")
    color.cprint(section_title("Homepage: ") + pkg.homepage)
    # Optional sections, in their expected output order; each prints
    # when explicitly requested or when --all is given.
    sections = (
        (args.maintainers, print_maintainers),
        (args.detectable, print_detectable),
        (args.tags, print_tags),
        (not args.no_versions, print_versions),
        (not args.no_variants, print_variants),
        (args.phases, print_phases),
        (not args.no_dependencies, print_dependencies),
        (args.virtuals, print_virtuals),
        (args.tests, print_tests),
    )
    for wanted, emit in sections:
        if args.all or wanted:
            emit(pkg)
    color.cprint("")
|
20,691 | ff414d11762151941f2be34a10edbd1da6b1572c | import numpy as np
import pandas as pd
import random
from sklearn.model_selection import StratifiedKFold
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import classification_report
import seaborn as sns
def delete_outliers(Xtrain, ytrain, variable, factor=1.5, target='Class'):
    '''
    Drop rows whose value of ``variable`` lies outside the IQR fence
    [Q1 - factor*IQR, Q3 + factor*IQR] (Tukey's rule), computed on the
    concatenation of features and target.

    parameters:
    -- Xtrain : pandas DataFrame of features
    -- ytrain : pandas Series of labels; its name must equal ``target``
    -- variable : the column of Xtrain to test for outliers
    -- factor : IQR multiplier for the fence (default 1.5)
    -- target : name of the label column after concatenation (default 'Class')
    returns:
    -- (X, y) : features and labels with the outlier rows removed
    '''
    df = pd.concat([Xtrain, ytrain], axis=1)
    values = df[variable]
    q25, q75 = np.percentile(values, 25), np.percentile(values, 75)
    iqr = q75 - q25
    lower, upper = q25 - iqr * factor, q75 + iqr * factor
    # Keep only rows inside the fence (boundary values are kept).
    kept = df.drop(df[(df[variable] > upper) | (df[variable] < lower)].index)
    y = kept[target]
    X = kept.drop(columns=[target])
    return X, y
def CV_SMOTE(original_Xtrain, original_ytrain, model, params, n_iter, K):
    '''
    Cross-Validation with SMOTE, RandomizedSearchCV.
    For each stratified fold: oversample the training fold with SMOTE,
    tune ``model`` with RandomizedSearchCV, then print the best params
    and a classification report on the untouched test fold.
    Parameters:
    -- original_Xtrain: pandas DataFrame of features
    -- original_ytrain: pandas Series of labels
    -- model: ML model (sklearn estimator)
    -- params: hyper parameters to be fine-tuned
    -- n_iter: numbers of iteration in RandomizedSearchCV
    -- K: number of folds in CV
    '''
    # Bug fix: n_iter was previously ignored (hard-coded to 4).
    cv = RandomizedSearchCV(model, params, n_iter=n_iter)
    # Stratification keeps every class represented in each fold.
    sss = StratifiedKFold(n_splits=K, random_state=None, shuffle=False)
    for CV_train, CV_test in sss.split(original_Xtrain, original_ytrain):
        smt = SMOTE()
        # Oversample only the training fold so no synthetic samples
        # leak into the evaluation fold.
        # NOTE(review): fit_sample was renamed fit_resample in newer
        # imbalanced-learn releases -- confirm the installed version.
        oversampling_Xtrain, oversampling_ytrain = smt.fit_sample(
            original_Xtrain.iloc[CV_train], original_ytrain.iloc[CV_train]
        )
        search = cv.fit(oversampling_Xtrain, oversampling_ytrain)
        best_est = search.best_estimator_
        prediction = best_est.predict(original_Xtrain.values[CV_test])
        print(search.best_params_)
        print(classification_report(original_ytrain.values[CV_test], prediction))
def save_corr(df, path='../data/output/corr.jpg'):
    '''
    Save a heatmap of df's correlation matrix as an image.
    Parameters:
    -- df : pandas DataFrame
    -- path : output image path (default kept for backward compatibility)
    '''
    ax = sns.heatmap(df.corr(), vmin=-1, vmax=1, cmap='coolwarm')
    ax.figure.savefig(path)
def plot_9_violinplot(df):
    '''
    Save a 3x3 grid of violin plots for 9 randomly chosen columns among
    V1..V28. Columns are drawn with random.choices (i.e. with
    replacement), so a column may appear more than once.
    Parameters:
    -- df : pandas DataFrame containing columns named 'V1'..'V28'
    '''
    # Bug fix: 'plt' was used but matplotlib.pyplot was never imported
    # anywhere in this module (NameError at call time).
    import matplotlib.pyplot as plt
    chosen = random.choices([i for i in range(1, 29)], k=9)
    fig, ax = plt.subplots(3, 3, figsize=(10, 10))
    for idx, col_no in enumerate(chosen):
        sns.violinplot(df["V" + str(col_no)], ax=ax[idx // 3][idx % 3])
    fig.savefig('../data/output/9_violinplot.jpg')
20,692 | 4d98c1a9558b47816d487ad2c255068c0a8e2b5b | # script which finds the tilt of a binary tree
#class Node(object):
# def __init__(self, x):
# self.left = None
# self.right = None
# self.val = x
class Solution(object):
    """Compute the tilt of a binary tree: the sum, over every node, of the
    absolute difference between its left and right subtree sums."""

    def find_tilt(self, root):
        """Return the total tilt of the tree rooted at ``root``."""
        self.tilt = 0
        self.traverse(root)
        return self.tilt

    def traverse(self, root):
        """Post-order walk; accumulate tilt and return the subtree sum."""
        if root is None:
            return 0
        subtree_sums = (self.traverse(root.left), self.traverse(root.right))
        self.tilt += abs(subtree_sums[0] - subtree_sums[1])
        return root.val + subtree_sums[0] + subtree_sums[1]
|
20,693 | 0d327583686781ee6df3e2775cd992a54a3194d2 | # from . import summarizer_blueprint
# from summary import summarizeDriver
# from summary import summarizeNews
# @summarizer_blueprint.route('/sum')
# def generateSummary():
# print "in generateSummary"
# summarizeDriver.delay()
# # summarizeNews()
# return 'Summarizing'
# @summarizer_blueprint.route('/scheck')
# def scheck():
# return 's works' |
20,694 | 38e3d46ba5ee06bf02f933a25ff0fc876fbce647 | import fileinput, os, random, math
from bs_utils.utils import *
from bs_align_utils import *
#----------------------------------------------------------------
def extract_mapping(ali_file):
    """Parse paired-end aligner output and split reads into unique and
    non-unique hits.

    Returns (unique_hits, non_unique_hits):
    - unique_hits maps read header -> (no_mismatch, chr, location1,
      cigar1, location2, cigar2) for the single best alignment;
    - non_unique_hits maps header -> best mismatch count when the best
      score is shared by several alignments.
    """
    unique_hits = {}
    non_unique_hits = {}

    def flush(header, family):
        # Record the outcome of one read family. A family whose minimum
        # mismatch count equals its maximum (i.e. no strictly best hit)
        # is ambiguous and goes to non_unique_hits.
        if len(family) == 1:
            unique_hits[header] = family[0]
        elif len(family) > 1:
            min_lst = min(family, key=lambda x: x[0])
            max_lst = max(family, key=lambda x: x[0])
            if min_lst[0] < max_lst[0]:
                unique_hits[header] = min_lst
            else:
                non_unique_hits[header] = min_lst[0]

    header0 = ""
    family = []
    for header, chr, no_mismatch, location1, cigar1, location2, cigar2 in process_aligner_output(ali_file, pair_end = True):
        # A new header means the previous family is complete; flush it.
        # (The very first flush sees an empty family and is a no-op.)
        if header != header0:
            flush(header0, family)
            header0 = header
            family = []
        family.append((no_mismatch, chr, location1, cigar1, location2, cigar2))
    # Flush the final family, which the loop above never reaches.
    flush(header0, family)
    return unique_hits, non_unique_hits
def _extract_mapping(ali_file):
    """Legacy parser for raw (bowtie-style) paired-end aligner output.

    Reads mate lines that alternate per pair and returns (U, R):
    - U maps read header -> [no_mismatch, chr, location1, location2]
      for pairs with a unique best alignment;
    - R maps header -> mismatch count for ambiguous pairs.
    """
    U = {}
    R = {}
    header0 = ""
    family = []
    for line in fileinput.input(ali_file):
        l = line.split()
        # Strip the 2-char mate suffix so both ends of a pair share a
        # key -- presumably '/1'/'/2'; confirm against the aligner output.
        header = l[0][:-2]
        chr = str(l[1])
        location = int(l[2])
        #no_hits=int(l[4])
        #-------- mismatches -----------
        if len(l) == 4:
            # No mismatch field present: perfect alignment.
            no_mismatch = 0
        elif len(l) == 5:
            # The 5th field lists mismatches as colon-delimited entries.
            no_mismatch = l[4].count(":")
        else:
            # Unexpected field count: dump the raw line (Python 2 print).
            print l
        #------------------------
        if header != header0:
            #--------------------
            if header0 != "":
                # --- output ----
                # Flush the completed family: unique best pair goes to U,
                # a mismatch-count tie goes to R.
                if len(family) == 1:
                    U[header0] = family[0]
                else:
                    if family[0][0] < family[1][0]:
                        U[header0] = family[0]
                    elif family[1][0] < family[0][0]:
                        U[header0] = family[1]
                    else:
                        R[header0] = family[0][0]
                family=[]
            # ---------------
            header0 = header
            # Each family entry accumulates the pair's combined mismatch
            # count plus both mate locations.
            family = [[no_mismatch, chr, location]]
            member = 1
        elif header == header0:
            if member == 1:
                # Second mate of the current candidate: merge into it.
                family[-1][0] += no_mismatch
                family[-1].append(location)
                member = 2
            elif member == 2:
                # First mate of the next candidate alignment of the pair.
                family.append([no_mismatch, chr, location])
                member = 1
    #------------------------------
    # NOTE(review): the final family is never flushed here, unlike
    # extract_mapping above -- confirm whether that is intentional.
    fileinput.close()
    return U, R
#----------------------------------------------------------------
def bs_pair_end(main_read_file_1,
main_read_file_2,
asktag,
adapter_file,
cut1,
cut2,
no_small_lines,
indexname,
aligner_command,
db_path,
tmp_path,
outfile):
#----------------------------------------------------------------
adapter=""
adapterA=""
adapterB=""
if adapter_file !="":
adapter_inf=open(adapter_file,"r")
if asktag=="N": #<--- directional library
adapter=adapter_inf.readline()
adapter_inf.close()
adapter=adapter.rstrip("\n")
elif asktag=="Y":#<--- undirectional library
adapterA=adapter_inf.readline()
adapterB=adapter_inf.readline()
adapter_inf.close()
adapterA=adapterA.rstrip("\n")
adapterB=adapterB.rstrip("\n")
#----------------------------------------------------------------
logm("End 1 filename: %s"% main_read_file_1 )
logm("End 2 filename: %s"% main_read_file_2 )
logm("The first base (for mapping): %d"% cut1 )
logm("The last base (for mapping): %d"% cut2 )
logm("-------------------------------- " )
logm("Undirectional library: %s" % asktag )
logm("Bowtie path: %s"% aligner_command + '\n')
logm("Reference genome library path: %s"% db_path )
logm("Number of mismatches allowed: %s"% indexname )
if adapter_file !="":
if asktag=="Y":
logm("Adapters to be removed from 3' of the reads:" )
logm("-- A: %s" % adapterA )
logm("-- B: %s" % adapterB )
elif asktag=="N":
logm("Adapter to be removed from 3' of the reads:" )
logm("-- %s" % adapter )
logm("-------------------------------- " )
#----------------------------------------------------------------
# helper method to join fname with tmp_path
tmp_d = lambda fname: os.path.join(tmp_path, fname)
db_d = lambda fname: os.path.join(db_path, fname)
#----------------------------------------------------------------
# splitting the 2 big read files
input_fname1 = os.path.split(main_read_file_1)[1]
input_fname2 = os.path.split(main_read_file_2)[1]
# TODO: run these in parallel with a subprocess
split_file(main_read_file_1, tmp_d(input_fname1)+'-E1-', no_small_lines)
split_file(main_read_file_2, tmp_d(input_fname2)+'-E2-', no_small_lines)
dirList=os.listdir(tmp_path)
my_files = zip(sorted(filter(lambda fname: fname.startswith("%s-E1-" % input_fname1), dirList)),
sorted(filter(lambda fname: fname.startswith("%s-E2-" % input_fname2), dirList)))
#---- Stats ------------------------------------------------------------
all_raw_reads=0
all_trimed=0
all_mapped=0
all_mapped_passed=0
all_unmapped=0
numbers_premapped_lst=[0,0,0,0]
numbers_mapped_lst=[0,0,0,0]
mC_lst=[0,0,0]
uC_lst=[0,0,0]
no_my_files=0
#----------------------------------------------------------------
print "== Start mapping =="
for read_file_1, read_file_2 in my_files:
no_my_files+=1
random_id=".tmp-"+str(random.randint(1000000,9999999))
original_bs_reads_1 = {}
original_bs_reads_2 = {}
original_bs_reads_lst= [original_bs_reads_1, original_bs_reads_2]
if asktag=="Y":
#----------------------------------------------------------------
outfile_1FCT = tmp_d('Trimed_FCT_1.fa'+random_id)
outfile_1RCT = tmp_d('Trimed_RCT_1.fa'+random_id)
outfile_2FCT = tmp_d('Trimed_FCT_2.fa'+random_id)
outfile_2RCT = tmp_d('Trimed_RCT_2.fa'+random_id)
read_inf = open(tmp_d(read_file_1),"r")
oneline = read_inf.readline()
l = oneline.split()
input_format = ""
#if len(l)==5: # old solexa format
# input_format="old Solexa Seq file"
if oneline[0]=="@": # Illumina GAII FastQ (Lister et al Nature 2009)
input_format="FastQ"
n_fastq=0
elif len(l)==1 and oneline[0]!=">": # pure sequences
input_format="list of sequences"
elif len(l)==11: # Illumina GAII qseq file
input_format="Illumina GAII qseq file"
elif oneline[0]==">": # fasta
input_format="fasta"
n_fasta=0
read_inf.close()
print "Detected data format: %s" % input_format
#----------------------------------------------------------------
read_file_list = [read_file_1, read_file_2]
outfile_FCT_list = [outfile_1FCT, outfile_2FCT]
outfile_RCT_list = [outfile_1RCT, outfile_2RCT]
n_list = [0, 0]
for f in range(2):
read_file = read_file_list[f]
outf_FCT = open(outfile_FCT_list[f], 'w')
outf_RCT = open(outfile_RCT_list[f], 'w')
original_bs_reads = original_bs_reads_lst[f]
n = n_list[f]
id = ""
seq = ""
seq_ready = "N"
for line in fileinput.input(tmp_d(read_file)):
l=line.split()
if input_format=="old Solexa Seq file":
n+=1
id=str(n)
id=id.zfill(12)
seq=l[4]
seq_ready="Y"
elif input_format=="list of sequences":
n+=1
id=str(n)
id=id.zfill(12)
seq=l[0]
seq_ready="Y"
elif input_format=="FastQ":
m_fastq=math.fmod(n_fastq,4)
n_fastq+=1
seq_ready="N"
if m_fastq==0:
n+=1
id=str(n)
id=id.zfill(12)
seq=""
elif m_fastq==1:
seq=l[0]
seq_ready="Y"
else:
seq=""
elif input_format=="Illumina GAII qseq file":
n+=1
id=str(n)
id=id.zfill(12)
seq=l[8]
seq_ready="Y"
elif input_format=="fasta":
m_fasta=math.fmod(n_fasta,2)
n_fasta+=1
seq_ready="N"
if m_fasta==0:
n+=1
id=l[0][1:]
id=id.zfill(17)
seq=""
elif m_fasta==1:
seq=l[0]
seq_ready="Y"
else:
seq=""
#----------------------------------------------------------------
if seq_ready=="Y":
seq=seq[cut1-1:cut2] #<----------------------selecting 0..52 from 1..72 -e 52
seq=seq.upper()
seq=seq.replace(".","N")
#--striping BS adapter from 3' read --------------------------------------------------------------
if (adapterA !="") and (adapterB !=""):
signature=adapterA[:6]
if signature in seq:
signature_pos=seq.index(signature)
if seq[signature_pos:] in adapterA:
seq=seq[:signature_pos]#+"".join(["N" for x in range(len(seq)-len(signature_pos))])
all_trimed+=1
else:
signature=adapterB[:6]
if signature in seq:
#print id,seq,signature;
signature_pos=seq.index(signature)
if seq[signature_pos:] in adapterB:
seq=seq[:signature_pos]#+"".join(["N" for x in range(len(seq)-len(signature_pos))])
all_trimed+=1
if len(seq) <= 4:
seq = "N" * (cut2-cut1+1)
#--------- trimmed_raw_BS_read ------------------
original_bs_reads[id] = seq
#--------- FW_C2T ------------------
outf_FCT.write('>%s\n%s\n' % (id, seq.replace("C","T")))
#--------- RC_G2A ------------------
outf_RCT.write('>%s\n%s\n' % (id, seq.replace("G","A")))
n_list[f]=n
outf_FCT.close()
outf_RCT.close()
fileinput.close()
#print "All input end 1: %d , end 2: %d "%(n_list[0],n_list[1]);
all_raw_reads+=n
#--------------------------------------------------------------------------------
# Bowtie mapping
#--------------------------------------------------------------------------------
WC2T_fr=tmp_d("W_C2T_fr_m"+indexname+".mapping"+random_id)
WC2T_rf=tmp_d("W_C2T_rf_m"+indexname+".mapping"+random_id)
CC2T_fr=tmp_d("C_C2T_fr_m"+indexname+".mapping"+random_id)
CC2T_rf=tmp_d("C_C2T_rf_m"+indexname+".mapping"+random_id)
run_in_parallel([aligner_command % {'reference_genome' : os.path.join(db_path,'W_C2T'),
'input_file_1' : outfile_1FCT,
'input_file_2' : outfile_2RCT,
'output_file' : WC2T_fr},
aligner_command % {'reference_genome' : os.path.join(db_path,'C_C2T'),
'input_file_1' : outfile_1FCT,
'input_file_2' : outfile_2RCT,
'output_file' : CC2T_fr},
aligner_command % {'reference_genome' : os.path.join(db_path,'W_C2T'),
'input_file_1' : outfile_2FCT,
'input_file_2' : outfile_1RCT,
'output_file' : WC2T_rf},
aligner_command % {'reference_genome' : os.path.join(db_path,'C_C2T'),
'input_file_1' : outfile_2FCT,
'input_file_2' : outfile_1RCT,
'output_file' : CC2T_rf}])
delete_files(outfile_1FCT, outfile_2FCT, outfile_1RCT, outfile_2RCT)
#--------------------------------------------------------------------------------
# Post processing
#--------------------------------------------------------------------------------
FW_C2T_fr_U, FW_C2T_fr_R = extract_mapping(WC2T_fr)
FW_C2T_rf_U, FW_C2T_rf_R = extract_mapping(WC2T_rf)
RC_C2T_fr_U, RC_C2T_fr_R = extract_mapping(CC2T_fr)
RC_C2T_rf_U, RC_C2T_rf_R = extract_mapping(CC2T_rf)
delete_files(WC2T_fr, WC2T_rf, CC2T_fr, CC2T_rf)
#----------------------------------------------------------------
# get uniq-hit reads
#----------------------------------------------------------------
Union_set=set(FW_C2T_fr_U.iterkeys()) | set(FW_C2T_rf_U.iterkeys()) | set(RC_C2T_fr_U.iterkeys()) | set(RC_C2T_rf_U.iterkeys())
Unique_FW_fr_C2T=set() # +
Unique_FW_rf_C2T=set() # +
Unique_RC_fr_C2T=set() # -
Unique_RC_rf_C2T=set() # -
for x in Union_set:
list=[]
for d in [FW_C2T_fr_U, FW_C2T_rf_U, RC_C2T_fr_U, RC_C2T_rf_U]:
mis_lst=d.get(x,[99])
mis=int(mis_lst[0])
list.append(mis)
for d in [FW_C2T_fr_R, FW_C2T_rf_R, RC_C2T_fr_R, RC_C2T_rf_R]:
mis=d.get(x,99)
list.append(mis)
mini=min(list)
if list.count(mini)==1:
mini_index=list.index(mini)
if mini_index==0:
Unique_FW_fr_C2T.add(x)
elif mini_index==1:
Unique_FW_rf_C2T.add(x)
elif mini_index==2:
Unique_RC_fr_C2T.add(x)
elif mini_index==3:
Unique_RC_rf_C2T.add(x)
del Union_set
del FW_C2T_fr_R
del FW_C2T_rf_R
del RC_C2T_fr_R
del RC_C2T_rf_R
FW_C2T_fr_uniq_lst=[[FW_C2T_fr_U[u][1],u] for u in Unique_FW_fr_C2T]
FW_C2T_rf_uniq_lst=[[FW_C2T_rf_U[u][1],u] for u in Unique_FW_rf_C2T]
RC_C2T_fr_uniq_lst=[[RC_C2T_fr_U[u][1],u] for u in Unique_RC_fr_C2T]
RC_C2T_rf_uniq_lst=[[RC_C2T_rf_U[u][1],u] for u in Unique_RC_rf_C2T]
FW_C2T_fr_uniq_lst.sort()
FW_C2T_rf_uniq_lst.sort()
RC_C2T_fr_uniq_lst.sort()
RC_C2T_rf_uniq_lst.sort()
FW_C2T_fr_uniq_lst=[x[1] for x in FW_C2T_fr_uniq_lst]
FW_C2T_rf_uniq_lst=[x[1] for x in FW_C2T_rf_uniq_lst]
RC_C2T_fr_uniq_lst=[x[1] for x in RC_C2T_fr_uniq_lst]
RC_C2T_rf_uniq_lst=[x[1] for x in RC_C2T_rf_uniq_lst]
#----------------------------------------------------------------
numbers_premapped_lst[0]+=len(Unique_FW_fr_C2T)
numbers_premapped_lst[1]+=len(Unique_FW_rf_C2T)
numbers_premapped_lst[2]+=len(Unique_RC_fr_C2T)
numbers_premapped_lst[3]+=len(Unique_RC_rf_C2T)
del Unique_FW_fr_C2T
del Unique_FW_rf_C2T
del Unique_RC_fr_C2T
del Unique_RC_rf_C2T
#logm("U -- %d FW-RC strand bs-unique pairs (mapped to Watson)"%(n1) )
#logm("U -- %d RC-FW strand bs-unique pairs (mapped to Crick)"%(n2) )
#logm("U -- %d bs-unique pairs"%(n12) )
#logm("-------------------------------- " )
#print "# %10d FW-RC bs-unique reads (mapped to Watson)"%(n1);
#print "# %10d RC-FW bs-unique reads (mapped to Watson)"%(n2);
#print "# %10d FW-RC bs-unique reads (mapped to Crick)"%(n3);
#print "# %10d RC-FW bs-unique reads (mapped to Crick)"%(n4);
#----------------------------------------------------------------
nn = 0
for ali_unique_lst, ali_dic in [(FW_C2T_fr_uniq_lst,FW_C2T_fr_U),
(FW_C2T_rf_uniq_lst,FW_C2T_rf_U),
(RC_C2T_fr_uniq_lst,RC_C2T_fr_U),
(RC_C2T_rf_uniq_lst,RC_C2T_rf_U)]:
nn += 1
mapped_chr0 = ""
for header in ali_unique_lst:
_, mapped_chr, mapped_location_1, cigar1, mapped_location_2, cigar2 = ali_dic[header]
#-------------------------------------
if mapped_chr != mapped_chr0:
my_gseq=deserialize(db_d(mapped_chr))
chr_length=len(my_gseq)
mapped_chr0=mapped_chr
#-------------------------------------
if nn == 1 or nn == 3:
original_BS_1 = original_bs_reads_1[header]
original_BS_2 = reverse_compl_seq(original_bs_reads_2[header])
else:
original_BS_1 = original_bs_reads_2[header]
original_BS_2 = reverse_compl_seq(original_bs_reads_1[header])
r_start_1, r_end_1, g_len_1 = get_read_start_end_and_genome_length(cigar1)
r_start_2, r_end_2, g_len_2 = get_read_start_end_and_genome_length(cigar2)
all_mapped += 1
if nn == 1: # FW-RC mapped to + strand:
FR = "+FR"
# mapped_location_1 += 1
# origin_genome_long_1 = my_gseq[mapped_location_1 - 2 - 1 : mapped_location_1 + g_len_1 + 2 - 1]
# origin_genome_1 = origin_genome_long_1[2:-2]
mapped_strand_1 = "+"
# mapped_location_2 += 1
# origin_genome_long_2 = my_gseq[mapped_location_2 - 2 - 1 : mapped_location_2 + g_len_2 + 2 - 1]
# origin_genome_2 = origin_genome_long_2[2:-2]
mapped_strand_2 = "+"
elif nn==2: # RC-FW mapped to + strand:
# original_BS_1 = original_bs_reads_2[header]
# original_BS_2 = reverse_compl_seq(original_bs_reads_1[header])
FR = "+RF"
# mapped_location_1 += 1
# origin_genome_long_1 = my_gseq[mapped_location_1 - 2 - 1 : mapped_location_1 + g_len_1 + 2 - 1]
# origin_genome_1 = origin_genome_long_1[2:-2]
mapped_strand_1 = "+"
# mapped_location_2 += 1
# origin_genome_long_2 = my_gseq[mapped_location_2 - 2 - 1 : mapped_location_2 + g_len_2 + 2 - 1]
# origin_genome_2 = origin_genome_long_2[2:-2]
mapped_strand_2 = "+"
elif nn==3: # FW-RC mapped to - strand:
# original_BS_1=original_bs_reads_1[header]
# original_BS_2=reverse_compl_seq(original_bs_reads_2[header])
FR = "-FR"
mapped_location_1 = chr_length - mapped_location_1 - g_len_1
# origin_genome_long_1 = my_gseq[mapped_location_1 - 2 - 1 : mapped_location_1 + g_len_1 + 2 - 1]
# origin_genome_long_1 = reverse_compl_seq(origin_genome_long_1)
# origin_genome_1 = origin_genome_long_1[2:-2]
mapped_strand_1 = "-"
mapped_location_2 = chr_length - mapped_location_2 - g_len_2
# origin_genome_long_2 = reverse_compl_seq(my_gseq[mapped_location_2 - 2 - 1 : mapped_location_2 + g_len_2 + 2 - 1 ])
# origin_genome_2 = origin_genome_long_2[2:-2]
mapped_strand_2 = "-"
elif nn==4: # RC-FW mapped to - strand:
# original_BS_1=original_bs_reads_2[header]
# original_BS_2=reverse_compl_seq(original_bs_reads_1[header])
FR = "-RF"
mapped_location_1 = chr_length - mapped_location_1 - g_len_1
# origin_genome_long_1 = my_gseq[mapped_location_1 - 2 - 1 : mapped_location_1 + g_len_1 + 2 - 1]
# origin_genome_long_1 = reverse_compl_seq(origin_genome_long_1)
# origin_genome_1 = origin_genome_long_1[2:-2]
mapped_strand_1 = "-"
mapped_location_2 = chr_length - mapped_location_2 - g_len_2
# origin_genome_long_2 = reverse_compl_seq(my_gseq[mapped_location_2 - 2 - 1 : mapped_location_2 + g_len_2 + 2 - 1])
# origin_genome_2 = origin_genome_long_2[2:-2]
mapped_strand_2 = "-"
origin_genome_1, next_1, output_genome_1 = get_genomic_sequence(my_gseq, mapped_location_1, mapped_location_1 + g_len_1, mapped_strand_1)
origin_genome_2, next_2, output_genome_2 = get_genomic_sequence(my_gseq, mapped_location_2, mapped_location_2 + g_len_2, mapped_strand_2)
r_aln_1, g_aln_1 = cigar_to_alignment(cigar1, original_BS_1, origin_genome_1)
r_aln_2, g_aln_2 = cigar_to_alignment(cigar2, original_BS_2, origin_genome_2)
N_mismatch_1 = N_MIS(r_aln_1, g_aln_1) #+ original_BS_length_1 - (r_end_1 - r_start_1) # mismatches in the alignment + soft clipped nucleotides
N_mismatch_2 = N_MIS(r_aln_2, g_aln_2) #+ original_BS_length_2 - (r_end_2 - r_start_2) # mismatches in the alignment + soft clipped nucleotides
if max(N_mismatch_1, N_mismatch_2) <= int(indexname) :
all_mapped_passed += 1
numbers_mapped_lst[nn-1] += 1
#---- unmapped -------------------------
del original_bs_reads_1[header]
del original_bs_reads_2[header]
#---------------------------------------
# output_genome_1 = origin_genome_long_1[0:2] + "_" + origin_genome_1 + "_" + origin_genome_long_1[-2:]
# output_genome_2 = origin_genome_long_2[0:2] + "_" + origin_genome_2 + "_" + origin_genome_long_2[-2:]
methy_1=methy_seq(r_aln_1, g_aln_1 + next_1)
methy_2=methy_seq(r_aln_2, g_aln_2 + next_2)
mC_lst, uC_lst = mcounts(methy_1, mC_lst, uC_lst)
mC_lst, uC_lst = mcounts(methy_2, mC_lst, uC_lst)
#---STEVE FILTER----------------
condense_seq_1 = methy_1.replace('-','')
STEVE_1 = 0
if "ZZZ" in condense_seq_1:
STEVE_1=1
condense_seq_2 = methy_2.replace('-','')
STEVE_2 = 0
if "ZZZ" in condense_seq_2:
STEVE_2=1
outfile.store(header, N_mismatch_1, FR, mapped_chr, mapped_strand_1, mapped_location_1, cigar1, original_BS_1, methy_1, STEVE_1,
output_genome = output_genome_1, rnext = mapped_chr, pnext = mapped_location_2)
outfile.store(header, N_mismatch_2, FR, mapped_chr, mapped_strand_2, mapped_location_2, cigar2, original_BS_2, methy_2, STEVE_2,
output_genome = output_genome_2, rnext = mapped_chr, pnext = mapped_location_1)
print "--> %s %s (%d/%d) " % (read_file_1, read_file_2, no_my_files, len(my_files))
#----------------------------------------------------------------
# output unmapped pairs
#----------------------------------------------------------------
unmapped_lst=original_bs_reads_1.keys()
unmapped_lst.sort()
# for u in unmapped_lst:
# outf_u1.write("%s\n"%original_bs_reads_1[u])
# outf_u2.write("%s\n"%original_bs_reads_2[u])
all_unmapped += len(unmapped_lst)
if asktag=="N":
#----------------------------------------------------------------
outfile_1FCT= tmp_d('Trimed_FCT_1.fa'+random_id)
outfile_2FCT= tmp_d('Trimed_FCT_2.fa'+random_id)
read_inf=open(tmp_d(read_file_1),"r")
oneline=read_inf.readline()
l=oneline.split()
input_format=""
#if len(l)==5: # old solexa format
# input_format="old Solexa Seq file"
if oneline[0]=="@": # Illumina GAII FastQ (Lister et al Nature 2009)
input_format="FastQ"
n_fastq=0
elif len(l)==1 and oneline[0]!=">": # pure sequences
input_format="list of sequences"
elif len(l)==11: # Illumina GAII qseq file
input_format="Illumina GAII qseq file"
elif oneline[0]==">": # fasta
input_format="fasta"
n_fasta=0
read_inf.close()
print "Detected data format: %s" % input_format
#----------------------------------------------------------------
read_file_list=[read_file_1,read_file_2]
outfile_FCT_list=[outfile_1FCT,outfile_2FCT]
n_list=[0,0]
for f in range(2):
read_file=read_file_list[f]
outf_FCT=open(outfile_FCT_list[f],'w')
original_bs_reads = original_bs_reads_lst[f]
n=n_list[f]
id=""
seq=""
seq_ready="N"
for line in fileinput.input(tmp_d(read_file)):
l=line.split()
if input_format=="old Solexa Seq file":
n+=1
id=str(n)
id=id.zfill(12)
seq=l[4]
seq_ready="Y"
elif input_format=="list of sequences":
n+=1
id=str(n)
id=id.zfill(12)
seq=l[0]
seq_ready="Y"
elif input_format=="FastQ":
m_fastq=math.fmod(n_fastq,4)
n_fastq+=1
seq_ready="N"
if m_fastq==0:
n+=1
id=str(n)
id=id.zfill(12)
seq=""
elif m_fastq==1:
seq=l[0]
seq_ready="Y"
else:
seq=""
elif input_format=="Illumina GAII qseq file":
n+=1
id=str(n)
id=id.zfill(12)
seq=l[8]
seq_ready="Y"
elif input_format=="fasta":
m_fasta=math.fmod(n_fasta,2)
n_fasta+=1
seq_ready="N"
if m_fasta==0:
n+=1
id=l[0][1:]
id=id.zfill(17)
seq=""
elif m_fasta==1:
seq=l[0]
seq_ready="Y"
else:
seq=""
#----------------------------------------------------------------
if seq_ready=="Y":
seq=seq[cut1-1:cut2] #<----------------------selecting 0..52 from 1..72 -e 52
seq=seq.upper()
seq=seq.replace(".","N")
#--striping BS adapter from 3' read --------------------------------------------------------------
if (adapterA !="") and (adapterB !=""):
signature=adapterA[:6]
if signature in seq:
signature_pos=seq.index(signature)
if seq[signature_pos:] in adapterA:
seq=seq[:signature_pos]#+"".join(["N" for x in range(len(seq)-len(signature_pos))])
all_trimed+=1
else:
signature=adapterB[:6]
if signature in seq:
#print id,seq,signature;
signature_pos=seq.index(signature)
if seq[signature_pos:] in adapterB:
seq=seq[:signature_pos]#+"".join(["N" for x in range(len(seq)-len(signature_pos))])
all_trimed+=1
if len(seq) <= 4:
seq = "N" * (cut2-cut1+1)
#--------- trimmed_raw_BS_read ------------------
original_bs_reads[id] = seq
#--------- FW_C2T ------------------
if f==0:
outf_FCT.write('>%s\n%s\n'% (id, seq.replace("C","T")))
elif f==1:
outf_FCT.write('>%s\n%s\n'% (id, reverse_compl_seq(seq).replace("C","T")))
n_list[f]=n
outf_FCT.close()
fileinput.close()
#print "All input end 1: %d , end 2: %d "%(n_list[0],n_list[1]);
all_raw_reads+=n
#--------------------------------------------------------------------------------
# Bowtie mapping
#--------------------------------------------------------------------------------
WC2T_fr=tmp_d("W_C2T_fr_m"+indexname+".mapping"+random_id)
CC2T_fr=tmp_d("C_C2T_fr_m"+indexname+".mapping"+random_id)
run_in_parallel([ aligner_command % {'reference_genome' : os.path.join(db_path,'W_C2T'),
'input_file_1' : outfile_1FCT,
'input_file_2' : outfile_2FCT,
'output_file' : WC2T_fr},
aligner_command % {'reference_genome' : os.path.join(db_path,'C_C2T'),
'input_file_1' : outfile_1FCT,
'input_file_2' : outfile_2FCT,
'output_file' : CC2T_fr} ])
delete_files(outfile_1FCT, outfile_2FCT)
#--------------------------------------------------------------------------------
# Post processing
#--------------------------------------------------------------------------------
FW_C2T_fr_U, FW_C2T_fr_R = extract_mapping(WC2T_fr)
RC_C2T_fr_U, RC_C2T_fr_R = extract_mapping(CC2T_fr)
#----------------------------------------------------------------
# get uniq-hit reads
#----------------------------------------------------------------
Union_set = set(FW_C2T_fr_U.iterkeys()) | set(RC_C2T_fr_U.iterkeys())
Unique_FW_fr_C2T = set() # +
Unique_RC_fr_C2T = set() # -
for x in Union_set:
list = []
for d in [FW_C2T_fr_U, RC_C2T_fr_U]:
mis_lst = d.get(x,[99])
mis = int(mis_lst[0])
list.append(mis)
for d in [FW_C2T_fr_R, RC_C2T_fr_R]:
mis = d.get(x,99)
list.append(mis)
mini = min(list)
if list.count(mini) == 1:
mini_index = list.index(mini)
if mini_index == 0:
Unique_FW_fr_C2T.add(x)
elif mini_index == 1:
Unique_RC_fr_C2T.add(x)
FW_C2T_fr_uniq_lst=[[FW_C2T_fr_U[u][1],u] for u in Unique_FW_fr_C2T]
RC_C2T_fr_uniq_lst=[[RC_C2T_fr_U[u][1],u] for u in Unique_RC_fr_C2T]
FW_C2T_fr_uniq_lst.sort()
RC_C2T_fr_uniq_lst.sort()
FW_C2T_fr_uniq_lst=[x[1] for x in FW_C2T_fr_uniq_lst]
RC_C2T_fr_uniq_lst=[x[1] for x in RC_C2T_fr_uniq_lst]
#----------------------------------------------------------------
numbers_premapped_lst[0]+=len(Unique_FW_fr_C2T)
numbers_premapped_lst[1]+=len(Unique_RC_fr_C2T)
#logm("U -- %d FW-RC strand bs-unique pairs (mapped to Watson)"%(n1) )
#logm("U -- %d RC-FW strand bs-unique pairs (mapped to Crick)"%(n2)Z)
#logm("U -- %d bs-unique pairs"%(n12) )
#logm("-------------------------------- " )
#print "# %10d FW-RC bs-unique reads (mapped to Watson)"%(n1);
#print "# %10d RC-FW bs-unique reads (mapped to Watson)"%(n2);
#print "# %10d FW-RC bs-unique reads (mapped to Crick)"%(n3);
#print "# %10d RC-FW bs-unique reads (mapped to Crick)"%(n4);
#----------------------------------------------------------------
nn = 0
for ali_unique_lst, ali_dic in [(FW_C2T_fr_uniq_lst,FW_C2T_fr_U), (RC_C2T_fr_uniq_lst,RC_C2T_fr_U)]:
nn += 1
mapped_chr0 = ""
for header in ali_unique_lst:
_, mapped_chr, mapped_location_1, cigar1, mapped_location_2, cigar2 = ali_dic[header]
#-------------------------------------
if mapped_chr != mapped_chr0:
my_gseq = deserialize(db_d(mapped_chr))
chr_length = len(my_gseq)
mapped_chr0 = mapped_chr
#-------------------------------------
original_BS_1 = original_bs_reads_1[header]
original_BS_2 = reverse_compl_seq(original_bs_reads_2[header])
r_start_1, r_end_1, g_len_1 = get_read_start_end_and_genome_length(cigar1)
r_start_2, r_end_2, g_len_2 = get_read_start_end_and_genome_length(cigar2)
all_mapped += 1
if nn == 1: # FW-RC mapped to + strand:
FR = "+FR"
# mapped_location_1 += 1
# origin_genome_long_1 = my_gseq[mapped_location_1 - 2 - 1 : mapped_location_1 + g_len_1 + 2 - 1]
# origin_genome_1 = origin_genome_long_1[2:-2]
mapped_strand_1 = "+"
# mapped_location_2 += 1
# origin_genome_long_2 = my_gseq[mapped_location_2 - 2 - 1 : mapped_location_2 + g_len_2 + 2 - 1]
# origin_genome_2 = origin_genome_long_2[2:-2]
mapped_strand_2 = "+"
elif nn == 2: # FW-RC mapped to - strand:
FR="-FR"
mapped_location_1 = chr_length - mapped_location_1 - g_len_1
# origin_genome_long_1 = my_gseq[mapped_location_1 - 2 - 1 : mapped_location_1 + g_len_1 + 2 - 1]
# origin_genome_long_1 = reverse_compl_seq(origin_genome_long_1)
# origin_genome_1 = origin_genome_long_1[2:-2]
mapped_strand_1 = "-"
mapped_location_2 = chr_length - mapped_location_2 - g_len_2
# origin_genome_long_2 = reverse_compl_seq(my_gseq[mapped_location_2 - 2 - 1 : mapped_location_2 + g_len_2 + 2 - 1])
# origin_genome_2 = origin_genome_long_2[2:-2]
mapped_strand_2 = "-"
origin_genome_1, next_1, output_genome_1 = get_genomic_sequence(my_gseq, mapped_location_1, mapped_location_1 + g_len_1, mapped_strand_1)
origin_genome_2, next_2, output_genome_2 = get_genomic_sequence(my_gseq, mapped_location_2, mapped_location_2 + g_len_2, mapped_strand_2)
r_aln_1, g_aln_1 = cigar_to_alignment(cigar1, original_BS_1, origin_genome_1)
r_aln_2, g_aln_2 = cigar_to_alignment(cigar2, original_BS_2, origin_genome_2)
N_mismatch_1 = N_MIS(r_aln_1, g_aln_1) #+ original_BS_length_1 - (r_end_1 - r_start_1) # mismatches in the alignment + soft clipped nucleotides
N_mismatch_2 = N_MIS(r_aln_2, g_aln_2) #+ original_BS_length_2 - (r_end_2 - r_start_2) # mismatches in the alignment + soft clipped nucleotides
if max(N_mismatch_1, N_mismatch_2) <= int(indexname):
numbers_mapped_lst[nn-1] += 1
all_mapped_passed += 1
#---- unmapped -------------------------
del original_bs_reads_1[header]
del original_bs_reads_2[header]
#---------------------------------------
# output_genome_1 = origin_genome_long_1[0:2] + "_" + origin_genome_1 + "_" + origin_genome_long_1[-2:]
# output_genome_2 = origin_genome_long_2[0:2] + "_" + origin_genome_2 + "_" + origin_genome_long_2[-2:]
methy_1=methy_seq(r_aln_1, g_aln_1 + next_1)
methy_2=methy_seq(r_aln_2, g_aln_2 + next_2)
mC_lst,uC_lst = mcounts(methy_1, mC_lst, uC_lst)
mC_lst,uC_lst = mcounts(methy_2, mC_lst, uC_lst)
#---STEVE FILTER----------------
condense_seq_1 = methy_1.replace('-','')
STEVE_1=0
if "ZZZ" in condense_seq_1:
STEVE_1=1
condense_seq_2 = methy_2.replace('-','')
STEVE_2=0
if "ZZZ" in condense_seq_2:
STEVE_2=1
outfile.store(header, N_mismatch_1, FR, mapped_chr, mapped_strand_1, mapped_location_1, cigar1, original_BS_1, methy_1, STEVE_1,
output_genome = output_genome_1, rnext = mapped_chr, pnext = mapped_location_2)
outfile.store(header, N_mismatch_2, FR, mapped_chr, mapped_strand_2, mapped_location_2, cigar2, original_BS_2, methy_2, STEVE_2,
output_genome = output_genome_2, rnext = mapped_chr, pnext = mapped_location_1)
print "--> %s %s (%d/%d) " % (read_file_1, read_file_2, no_my_files, len(my_files))
#----------------------------------------------------------------
# output unmapped pairs
#----------------------------------------------------------------
unmapped_lst=original_bs_reads_1.keys()
unmapped_lst.sort()
# for u in unmapped_lst:
# outf_u1.write("%s\n"%(original_bs_reads_1[u]))
# outf_u2.write("%s\n"%(original_bs_reads_2[u]) )
all_unmapped+=len(unmapped_lst)
#==================================================================================================
# outf.close()
#
# outf_u1.close()
# outf_u2.close()
delete_files(tmp_path)
logm("-------------------------------- " )
logm("O Number of raw BS-read pairs: %d ( %d bp)"%(all_raw_reads,cut2-cut1+1) )
logm("O Number of ends trimmed for adapter: %d"% all_trimed+"\n")
if all_raw_reads >0:
logm("O Number of unique-hits read pairs for post-filtering: %d" % all_mapped + "\n")
if asktag=="Y":
logm("O -- %7d FW-RC pairs mapped to Watson strand (before post-filtering)"%(numbers_premapped_lst[0]) )
logm("O -- %7d RC-FW pairs mapped to Watson strand (before post-filtering)"%(numbers_premapped_lst[1]) )
logm("O -- %7d FW-RC pairs mapped to Crick strand (before post-filtering)"%(numbers_premapped_lst[2]) )
logm("O -- %7d RC-FW pairs mapped to Crick strand (before post-filtering)"%(numbers_premapped_lst[3]) )
elif asktag=="N":
logm("O -- %7d FW-RC pairs mapped to Watson strand (before post-filtering)"%(numbers_premapped_lst[0]) )
logm("O -- %7d FW-RC pairs mapped to Crick strand (before post-filtering)"%(numbers_premapped_lst[1]) )
logm("O --- %d uniqlely aligned pairs, where each end has mismatches <= %s"%(all_mapped_passed, indexname) )
if asktag=="Y":
logm("O ----- %7d FW-RC pairs mapped to Watson strand"%(numbers_mapped_lst[0]) )
logm("O ----- %7d RC-FW pairs mapped to Watson strand"%(numbers_mapped_lst[1]) )
logm("O ----- %7d FW-RC pairs mapped to Crick strand"%(numbers_mapped_lst[2]) )
logm("O ----- %7d RC-FW pairs mapped to Crick strand"%(numbers_mapped_lst[3]) )
elif asktag=="N":
logm("O ----- %7d FW-RC pairs mapped to Watson strand"%(numbers_mapped_lst[0]) )
logm("O ----- %7d FW-RC pairs mapped to Crick strand"%(numbers_mapped_lst[1]) )
logm("O Mapability= %1.4f%%"%(100*float(all_mapped_passed)/all_raw_reads) )
logm("O Unmapped read pairs: %d"% all_unmapped+"\n")
n_CG=mC_lst[0]+uC_lst[0]
n_CHG=mC_lst[1]+uC_lst[1]
n_CHH=mC_lst[2]+uC_lst[2]
logm("-------------------------------- " )
logm("Methylated C in mapped reads " )
logm(" mCG %1.3f%%"%((100*float(mC_lst[0])/n_CG) if n_CG != 0 else 0) )
logm(" mCHG %1.3f%%"%((100*float(mC_lst[1])/n_CHG) if n_CHG != 0 else 0) )
logm(" mCHH %1.3f%%"%((100*float(mC_lst[2])/n_CHH) if n_CHH != 0 else 0) )
logm("----------------------------------------------" )
logm("------------------- END ----------------------" )
elapsed("=== END %s %s ===" % (main_read_file_1, main_read_file_2))
|
20,695 | 2d8ef48a21276878e8c2f47daa86b2fc2598096f | from classes.profesor import Profesor
from classes.profesor_curso import Profesor_curso
from classes.curso import Curso
from classes.periodo import Periodo
from classes.salon import Salon
from classes.malla import Malla
from helpers.helper import input_data, print_table, pregunta
from helpers.menu import Menu
class Habilitar_controller:
    """Interactive console controller for enabling courses per classroom
    and school period.

    User-facing prompts are intentionally kept in Spanish to match the
    rest of the application.
    """

    def __init__(self):
        # One data-access object per entity involved in the workflow.
        self.profesor = Profesor()
        self.curso = Curso()
        self.profesor_curso = Profesor_curso()
        self.periodo = Periodo()
        self.salon = Salon()
        self.malla = Malla()
        self.salir = False  # read by callers to detect that the user exited

    def menu(self):
        """Run the main menu loop until the user picks "Salir"."""
        while True:
            try:
                print('''
        =======================
            Habilitar Cursos
        =======================
            ''')
                menu = ["Habilitar salon", "Salir"]
                respuesta = Menu(menu).show()
                if respuesta == 1:
                    self.habilitar_salones()
                else:
                    self.salir = True
                    break
            except Exception as e:
                # Keep the menu alive on any error; just show the message.
                print(f'{str(e)}')

    def habilitar_salones(self):
        """Enable one course for a chosen classroom and school period.

        Steps: pick a period, pick a classroom, list the professor/course
        pairs not yet enabled for that (period, classroom), then persist
        the chosen one, rejecting duplicates of the same course.
        """
        print('''
        =============================================
            Habilitar curso por salones y periodo
        =============================================
        ''')
        # --- choose the school period ---------------------------------
        print('De la siguiente lista elija el periodo que se habilitara >>\n')
        periodos = self.periodo.obtener_periodos('id_periodo')
        print(print_table(periodos, ['ID', 'Nombre', 'Desde', 'Hasta']))
        id_periodo_elegido = input_data("\nEscriba el ID del periodo escolar >> ", "int")
        buscar_periodo = self.periodo.obtener_periodo({'id_periodo': id_periodo_elegido})
        if not buscar_periodo:
            print('\nEl periodo elegido no existe !')
            return
        # --- choose the classroom -------------------------------------
        print(f'\nElija el salon para el periodo: {buscar_periodo[1]} >> ')
        salones = self.salon.obtener_salones('id_salon')
        print(print_table(salones, ['ID', 'Nombre']))
        id_salon_elegido = input_data("\nEscriba el ID del salon >> ", "int")
        buscar_salon = self.salon.obtener_salon({'id_salon': id_salon_elegido})
        if not buscar_salon:
            print('\nEl salon elegido no existe !')
            return
        # --- build the list of professor/course pairs still available --
        print(f'\nElija el curso a habilitar para el salon: {buscar_salon[1]} >> ')
        cursos_profesores = self.profesor_curso.obtener_profesor_cursos('id_profesor_curso')
        cursos_disponibles = []
        if cursos_profesores:
            for pro_cur in cursos_profesores:
                curso_id = pro_cur[2]
                profesor_id = pro_cur[1]
                id_profesor_curso = pro_cur[0]
                curso = self.curso.obtener_curso({'curso_id': curso_id})
                curso_nombre = curso[1]
                profesor = self.profesor.obtener_profesor({'profesor_id': profesor_id})
                profesor_nombre = profesor[1]
                mallas = self.malla.buscar_mallas({
                    'id_periodo': id_periodo_elegido,
                    'id_salon': id_salon_elegido,
                    'id_profesor_curso': id_profesor_curso
                })
                if not mallas:
                    cursos_disponibles.append({
                        'codigo': id_profesor_curso,
                        'curso': curso_nombre,
                        'profesor_asignado': profesor_nombre
                    })
                else:
                    # BUGFIX: the original called list.remove() while
                    # iterating the same list, which can skip entries.
                    # Rebuild the list without the already-enabled course.
                    cursos_disponibles = [
                        curso_disponible for curso_disponible in cursos_disponibles
                        if curso_disponible['curso'] != curso_nombre
                    ]
        print(print_table(cursos_disponibles))
        # --- choose and validate one of the available pairs ------------
        id_profesor_curso = input_data('\nEscriba el codigo del curso disponible >> ', 'int')
        buscar_profesor_curso = self.profesor_curso.obtener_profesor_curso({'id_profesor_curso': id_profesor_curso})
        if not buscar_profesor_curso:
            print('\nEl codigo escogido no existe como curso disponible')
            return
        # Reject the choice if the same course is already enabled for this
        # (period, classroom), even under a different professor.
        mallas = self.malla.buscar_mallas({
            'id_periodo': id_periodo_elegido,
            'id_salon': id_salon_elegido
        })
        if mallas:
            for malla in mallas:
                pro_curso = self.profesor_curso.obtener_profesor_curso({'id_profesor_curso': malla[3]})
                if pro_curso[2] == buscar_profesor_curso[2]:
                    print('\nEste curso ya existe para el salon en el periodo escogido !')
                    return
        # --- persist the new (period, classroom, course) assignment ----
        self.malla.guardar_malla({
            'id_periodo': id_periodo_elegido,
            'id_salon': id_salon_elegido,
            'id_profesor_curso': id_profesor_curso
        })
        print('''
        =============================================================
            Curso habilitado con exito para el salon y el periodo
        =============================================================
        ''')
        input('\nPresione una tecla para continuar...')
|
20,696 | 4046b302c697f0988dc7bc8f73c3cf8c401bea55 | class Solution:
def numSplits(self, s: str) -> int:
# 遍历统计正向到每个坐标的不同字符的数量
nums = [0]
now, lst = 0, set()
for ch in s:
if ch not in lst:
lst.add(ch)
now += 1
nums.append(now)
nums.pop()
# 反向遍历每个坐标的不同字符数量与正向是否相同
ans = 0
now, lst = 0, set()
for ch in s[::-1]:
if ch not in lst:
lst.add(ch)
now += 1
if nums.pop() == now:
ans += 1
return ans
if __name__ == "__main__":
    # Smoke checks; expected outputs are 2, 1, 4, 2.
    for text in ("aacaba", "abcd", "aaaaa", "acbadbaada"):
        print(Solution().numSplits(s=text))
|
20,697 | db3755ba74099bc4139446d0817e9a5aaa74d9dd | ############################
# turning off the warnings #
import warnings
warnings.filterwarnings('ignore')
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow.python.util.deprecation as deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
#####################
# importing modules #
import pickle
import numpy as np
from common.utils import create_directory
from common.callbacks import OutputAEImgCallback
from tensorflow.keras.layers import Input, Conv2D, Flatten, Dense, Conv2DTranspose, Reshape, Activation, LeakyReLU, BatchNormalization, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
######################
# defining the class #
class AE():
    """Configurable convolutional autoencoder.

    The encoder compresses images of shape ``input_dim`` into a ``z_dim``
    latent vector through a stack of Conv2D layers; the decoder mirrors it
    back with Conv2DTranspose layers. The constructor arguments are pickled
    by ``_save`` so the model can be re-instantiated later from disk.
    """

    def __init__(self,
                 input_dim, z_dim,
                 encoder_conv_filters, encoder_conv_kernel_size, encoder_conv_strides, # parameters for encoder
                 decoder_conv_t_filters, decoder_conv_t_kernel_size, decoder_conv_t_strides, # parameters for decoder
                 use_batch_norm, use_dropout, # parameters for regularization
                 export_path
                 ):
        """Store the hyper-parameters, build the Keras models, and persist
        the constructor arguments to ``export_path``.

        The encoder_conv_* lists (and decoder_conv_t_* lists) are parallel:
        entry i configures layer i. use_batch_norm / use_dropout insert the
        corresponding regularization layer after each LeakyReLU activation.
        """
        self.input_dim = input_dim
        self.z_dim = z_dim
        self.encoder_conv_filters = encoder_conv_filters
        self.encoder_conv_kernel_size = encoder_conv_kernel_size
        self.encoder_conv_strides = encoder_conv_strides
        self.decoder_conv_t_filters = decoder_conv_t_filters
        self.decoder_conv_t_kernel_size = decoder_conv_t_kernel_size
        self.decoder_conv_t_strides = decoder_conv_t_strides
        self.use_batch_norm = use_batch_norm
        self.use_dropout = use_dropout
        self.export_path = export_path
        self._build()

    def _build(self):
        """Assemble self.encoder, self.decoder and the combined self.model.

        Layer names ('encoder_conv<i>', 'decoder_conv_t<i>', ...) are fixed
        here; saved weight files depend on them, so they must not change.
        """
        input_dim = self.input_dim
        z_dim = self.z_dim
        encoder_conv_filters = self.encoder_conv_filters
        encoder_conv_kernel_size = self.encoder_conv_kernel_size
        encoder_conv_strides = self.encoder_conv_strides
        decoder_conv_t_filters = self.decoder_conv_t_filters
        decoder_conv_t_kernel_size = self.decoder_conv_t_kernel_size
        decoder_conv_t_strides = self.decoder_conv_t_strides
        use_batch_norm = self.use_batch_norm
        use_dropout = self.use_dropout
        ### encoder
        num_encoder_layers = len(encoder_conv_filters)
        encoder_input_layer = Input(shape=input_dim, name='encoder_input')
        x = encoder_input_layer
        for i in range(num_encoder_layers):
            # Conv -> LeakyReLU -> (BatchNorm) -> (Dropout) per layer.
            x = Conv2D(
                filters = encoder_conv_filters[i],
                kernel_size = encoder_conv_kernel_size[i],
                strides = encoder_conv_strides[i],
                padding = 'same',
                name = 'encoder_conv' + str(i)
            )(x)
            x = LeakyReLU()(x)
            if use_batch_norm:
                x = BatchNormalization()(x)
            if use_dropout:
                x = Dropout(rate=0.3)(x)
        # Remembered so the decoder can reshape the dense output back into
        # the last conv feature map.
        shape_before_flattening = x.shape[1:] # 1: from (None, 7, 7, 64) to (7, 7, 64)
        x = Flatten()(x)
        encoder_output_layer = Dense(z_dim, name='encoder_output')(x) # the latent space
        self.encoder = Model(encoder_input_layer, encoder_output_layer, name='Encoder')
        ### decoder
        num_decoder_layers = len(decoder_conv_t_filters)
        decoder_input_layer = Input(shape=z_dim, name='decoder_input')
        # Project the latent vector up to the flattened conv-map size, then
        # restore the spatial layout before the transposed convolutions.
        x = Dense(np.prod(shape_before_flattening))(decoder_input_layer)
        x = Reshape(shape_before_flattening)(x)
        for i in range(num_decoder_layers):
            x = Conv2DTranspose(
                filters = decoder_conv_t_filters[i],
                kernel_size = decoder_conv_t_kernel_size[i],
                strides = decoder_conv_t_strides[i],
                padding = 'same',
                name = 'decoder_conv_t' + str(i)
            )(x)
            if i < (num_decoder_layers-1):
                x = LeakyReLU()(x)
                if use_batch_norm:
                    x = BatchNormalization()(x)
                if use_dropout:
                    x = Dropout(rate=0.3)(x)
            else:
                # Final layer: sigmoid maps pixels to [0, 1], no regularizers.
                x = Activation('sigmoid')(x)
        decoder_output_layer = x
        self.decoder = Model(decoder_input_layer, decoder_output_layer, name='Decoder')
        # End-to-end model: encoder input -> decoder(latent) output.
        model_input_layer = encoder_input_layer
        model_output_layer = self.decoder(encoder_output_layer)
        self.model = Model(model_input_layer, model_output_layer, name='Autoencoder')
        self._save()

    def compile(self, learning_rate):
        """Compile the autoencoder with Adam and per-pixel MSE loss."""
        # NOTE(review): 'lr' is a deprecated alias of 'learning_rate' in
        # tf.keras Adam -- works on the TF2 versions this file targets, but
        # consider updating the keyword.
        opt = Adam(lr=learning_rate)
        self.model.compile(loss='mean_squared_error', optimizer=opt, metrics=['accuracy'])

    def train(self, x_train, batch_size, epochs, with_generator=False):
        """Fit the autoencoder on x_train (input == target).

        with_generator=True treats x_train as a generator/Sequence and uses
        fit_generator; otherwise a plain in-memory fit is used. Saves weights
        every epoch and sample reconstruction images via OutputAEImgCallback.
        """
        export_path = self.export_path
        # NOTE(review): this is a float (true division), and when x_train is
        # a generator len()/batch_size may not be the intended step count --
        # Keras expects an integer; confirm intended.
        steps_per_epoch = len(x_train) / batch_size
        # callbacks
        save_weights_callback = ModelCheckpoint(os.path.join(export_path, 'weights.h5'), save_weights_only=True, save_freq='epoch', verbose=1)
        output_image_callback = OutputAEImgCallback(export_path, 100, self)
        callback_list = [save_weights_callback, output_image_callback]
        if with_generator:
            # NOTE(review): fit_generator is deprecated in TF >= 2.1 in
            # favor of Model.fit, which accepts generators directly.
            self.model.fit_generator(x_train, steps_per_epoch=steps_per_epoch, epochs=epochs, shuffle=True, verbose=1, callbacks=callback_list)
        else:
            self.model.fit(x_train, x_train, batch_size=batch_size, epochs=epochs, shuffle=True, verbose=1, callbacks=callback_list)

    def _save(self):
        """Pickle the constructor arguments to export_path/params.pkl so the
        model architecture can be rebuilt later (weights are saved separately
        by the training callback)."""
        create_directory(self.export_path)
        # The number and order of constructor arguments must match.
        with open(os.path.join(self.export_path, 'params.pkl'), 'wb') as f:
            pickle.dump([ # the arguments of the class constructor
                self.input_dim, self.z_dim,
                self.encoder_conv_filters, self.encoder_conv_kernel_size, self.encoder_conv_strides,
                self.decoder_conv_t_filters, self.decoder_conv_t_kernel_size, self.decoder_conv_t_strides,
                self.use_batch_norm, self.use_dropout,
                self.export_path
            ], f)

    def load_weights(self, filepath):
        """Load previously saved weights into the combined model."""
        self.model.load_weights(filepath)
20,698 | ae882498ce7de608f1f2f5da62d7034c208c562f | from data_pretreatment.data_orm import *
from data_pretreatment.common_func.deal_dateortime_func import *
import pandas as pd
'''
everyDayDetailRecord:[{'today':20100101,'everyRecord':[],'todayCostSum':11.3}]
everyDayCount:[{'today':20100101,'todayCostSum':11.3,'largerMaxFlag':0/1,'largerMaxRecordId':[111,],'smallerMinFlag':1/0}]
'''
maxMoney=50 # a single purchase above this amount is flagged (largerMaxFlag)
minMoney=1 # a day whose total spending does not exceed this is flagged (smallerMinFlag)
continueDays=1 # consecutive-days threshold; not referenced by the functions in this file
countDays=130 # number of past days aggregated by costCountAll
def costCountAll(stuId):
    """Build the full spending history for one student over the last
    ``countDays`` days.

    Args:
        stuId: student identifier used to filter transaction records.

    Returns:
        dict with keys 'stuID', 'everyDayDetailRecord' (per-day list of
        individual purchase amounts plus daily total), 'everyDayCount'
        (per-day totals and threshold flags) and 'lastTimeCountDate'
        (yesterday's date as a string).

    NOTE(review): assuming getBeforeDateTime(n) returns "n days ago", the
    lists are ordered newest-first (index 0 = yesterday), while
    costCountOneDay deletes index 0 and appends at the end -- confirm the
    intended ordering is consistent between the two functions.
    """

    def _empty_day(count_date):
        # Default per-day records: no purchases, so the "spent no more than
        # minMoney" flag starts raised and the "over maxMoney" flag cleared.
        day = dateTimeChangeToInt(count_date)
        detail = {'today': day, 'everyRecord': [], 'todayCostSum': 0}
        count = {'today': day, 'todayCostSum': 0, 'largerMaxFlag': 0,
                 'largerMaxRecordId': [], 'smallerMinFlag': 1}
        return detail, count

    stuCostRecord = MyBaseModel.returnList(
        stu_transaction_record.select(stu_transaction_record.id, stu_transaction_record.turnover,
                                      stu_transaction_record.tradingTime).where(
            stu_transaction_record.stuID == stuId))
    everyDayDetailRecord = []
    everyDayCount = []
    yesterday = getBeforeDateTime(1)
    if len(stuCostRecord) == 0:  # student has no transaction records at all
        for i in range(countDays):
            oneEveryDayDetailRecord, oneEveryDayCount = _empty_day(getBeforeDateTime(i + 1))
            everyDayDetailRecord.append(oneEveryDayDetailRecord)
            everyDayCount.append(oneEveryDayCount)
    else:
        stuDf = pd.DataFrame(stuCostRecord)
        stuDf['turnover'] = -stuDf['turnover']  # negate so purchases are positive and top-ups negative
        for i in range(countDays):
            nowCountDate = getBeforeDateTime(i + 1)
            nowCountNextDate = getBeforeDateTime(i)
            stuNowDateRecord = stuDf[(stuDf['tradingTime'] >= nowCountDate) & (stuDf['tradingTime'] < nowCountNextDate)]
            oneEveryDayDetailRecord, oneEveryDayCount = _empty_day(nowCountDate)
            if len(stuNowDateRecord) > 0:  # only fill in days that have transactions
                purchases = stuNowDateRecord[stuNowDateRecord['turnover'] > 0]['turnover']
                oneEveryDayDetailRecord['everyRecord'] = list(purchases)
                oneEveryDayCount['todayCostSum'] = purchases.sum()
                oneEveryDayDetailRecord['todayCostSum'] = oneEveryDayCount['todayCostSum']
                if oneEveryDayCount['todayCostSum'] > minMoney:
                    oneEveryDayCount['smallerMinFlag'] = 0  # total spending exceeded minMoney
                largerMaxMoneyRecord = stuNowDateRecord[stuNowDateRecord['turnover'] > maxMoney]['id']
                if len(largerMaxMoneyRecord) > 0:
                    oneEveryDayCount['largerMaxFlag'] = 1
                    oneEveryDayCount['largerMaxRecordId'] = list(largerMaxMoneyRecord)
            everyDayDetailRecord.append(oneEveryDayDetailRecord)
            everyDayCount.append(oneEveryDayCount)
    return {'stuID': stuId, 'everyDayDetailRecord': everyDayDetailRecord, 'everyDayCount': everyDayCount,
            'lastTimeCountDate': str(yesterday.date())}
def costCountOneDay(stuId):
    """Roll one student's persisted spending stats forward by one day.

    Aggregates yesterday's transactions for ``stuId`` and updates the
    stored stu_cost_count row in place: the entry at index 0 is removed
    and the new day's record appended, keeping a fixed-size window.

    NOTE(review): the new record is stamped with today's date while the
    transactions aggregated are yesterday's -- confirm this offset is
    intended.
    """
    oneEveryDayDetailRecord = {'today': dateTimeChangeToInt(datetime.today()), 'everyRecord': [], 'todayCostSum': 0}
    oneEveryDayCount = {'today': dateTimeChangeToInt(datetime.today()), 'todayCostSum': 0, 'largerMaxFlag': 0,
                        'largerMaxRecordId': [], 'smallerMinFlag': 1}
    yesterday = getBeforeDateTime(1)
    nowDate = getBeforeDateTime(0)
    stuYesterdayRecord = MyBaseModel.returnList(
        stu_transaction_record.select(stu_transaction_record.id, stu_transaction_record.turnover,
                                      stu_transaction_record.tradingTime).where(
            stu_transaction_record.stuID == stuId, stu_transaction_record.tradingTime >= yesterday,
            stu_transaction_record.tradingTime < nowDate))
    if len(stuYesterdayRecord) == 0:  # no transactions yesterday: keep the default (empty) record
        pass
    else:
        stuYesterdayRecord = pd.DataFrame(stuYesterdayRecord)
        # BUGFIX: negate turnover exactly as costCountAll does (its comment
        # states purchases are stored negative); without this the sums and
        # flags below counted top-ups instead of spending.
        stuYesterdayRecord['turnover'] = -stuYesterdayRecord['turnover']
        oneEveryDayDetailRecord['everyRecord'] = list(stuYesterdayRecord[stuYesterdayRecord['turnover'] > 0]['turnover'])
        oneEveryDayCount['todayCostSum'] = stuYesterdayRecord[stuYesterdayRecord['turnover'] > 0]['turnover'].sum()
        oneEveryDayDetailRecord['todayCostSum'] = oneEveryDayCount['todayCostSum']
        if oneEveryDayCount['todayCostSum'] > minMoney:
            oneEveryDayCount['smallerMinFlag'] = 0  # total spending exceeded minMoney
        largerMaxMoneyRecord = stuYesterdayRecord[stuYesterdayRecord['turnover'] > maxMoney]['id']
        if len(largerMaxMoneyRecord) > 0:
            oneEveryDayCount['largerMaxFlag'] = 1
            oneEveryDayCount['largerMaxRecordId'] = list(largerMaxMoneyRecord)
    with db_data.execution_context():
        stu = stu_cost_count.select().where(stu_cost_count.stuID == stuId).get()
        # NOTE(review): eval() on DB-stored text is dangerous if the column
        # can be tampered with; ast.literal_eval would be a safer drop-in
        # for these repr'd lists -- confirm the stored format first.
        everyDayDetailRecord = eval(stu.everyDayDetailRecord)
        everyDayCount = eval(stu.everyDayCount)
        # Slide the window: drop index 0, append yesterday's record.
        # NOTE(review): costCountAll stores days newest-first (index 0 =
        # yesterday), so this removes the most recent day rather than the
        # oldest -- confirm the intended ordering.
        del everyDayCount[0]
        del everyDayDetailRecord[0]
        everyDayCount.append(oneEveryDayCount)
        everyDayDetailRecord.append(oneEveryDayDetailRecord)
        stu.everyDayDetailRecord = everyDayDetailRecord
        stu.everyDayCount = everyDayCount
        stu.lastTimeCountDate = str(yesterday.date())
        stu.save()
20,699 | 18446ce21dda85aaac39d9f2be2fb0a10a6e43f6 | from django.core.management import BaseCommand
from showcase.models import Post, Category
class Command(BaseCommand):
    """Management command that seeds the Category table with demo rows."""

    def handle(self, *args, **options):
        # One bulk INSERT for all three demo categories.
        category_names = ("Funny cats", "Very funny cats", "More very funny cats")
        Category.objects.bulk_create([Category(name=name) for name in category_names])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.