text stringlengths 38 1.54M |
|---|
"""
bbox检验器,用于检验 Pascal VOC 数据集是否合规。
当发现不合规,图片名输出至文件。
使用样例:
python vocbbox_checker.py -a ./Annotations \
-i ./ImageSets/Main/trainval.txt \
-o ./out.txt \
-w 2 \
-hm 2 \
-e 1
"""
import os
import xml.etree.ElementTree as ET
import numpy as np
import argparse
class bbox_checker():
    """Validator for Pascal VOC bounding-box annotations.

    Iterates over every annotation id listed in ``txt_list``, checks each
    bounding box for a minimum width/height and a minimum distance to the
    image border, and yields the xml paths whose boxes all pass.
    ``start()`` writes the passing image ids to ``out_txt``.
    """
    def __init__(self):
        self.annopath = ""   # directory holding the VOC xml annotation files
        self.txt_list = ""   # imageset txt file, one image id per line
        self.out_txt = ""    # output txt that receives the passing image ids
        self.wmin = 0        # minimum accepted bbox width in pixels
        self.hmin = 0        # minimum accepted bbox height in pixels
        self.edge_dist = 0   # minimum accepted distance from bbox to image border

    def width_checker(self, xmin, xmax):
        """Return True when the bbox is at least ``wmin`` pixels wide."""
        return int(xmax) - int(xmin) >= self.wmin

    def height_checker(self, ymin, ymax):
        """Return True when the bbox is at least ``hmin`` pixels tall."""
        return int(ymax) - int(ymin) >= self.hmin

    def point_checker(self, x, y, width, height):
        """Return True when point (x, y) keeps ``edge_dist`` pixels from every border.

        ``width``/``height`` are the full image dimensions; the ``- 1`` maps
        them to the largest valid pixel index.
        """
        x = int(x)
        y = int(y)
        width = int(width)
        height = int(height)
        return (x >= self.edge_dist
                and width - x - 1 >= self.edge_dist
                and y >= self.edge_dist
                and height - y - 1 >= self.edge_dist)

    # Generator over the xml paths of the imageset.
    def dataset_gen(self):
        """Yield the annotation xml path for every id in the imageset txt."""
        # Read the whole list first so the file handle is not held open
        # while callers consume the generator lazily.
        with open(self.txt_list) as f:
            lines = f.readlines()
        for name in lines:
            yield os.path.join(self.annopath, name.strip() + ".xml")

    # Bounding-box generator.
    def bbox_gen(self):
        """Yield (boxes, xml_path); boxes is an (N, 4) [x1, y1, x2, y2] array."""
        for xml_url in self.dataset_gen():
            tree = ET.parse(xml_url)
            objs = tree.findall('object')
            # uint16 is sufficient for VOC-sized images; float coordinates
            # are truncated to integers on assignment.
            boxes = np.zeros((len(objs), 4), dtype=np.uint16)
            for ix, obj in enumerate(objs):
                bbox = obj.find('bndbox')
                x1 = float(bbox.find('xmin').text)
                y1 = float(bbox.find('ymin').text)
                x2 = float(bbox.find('xmax').text)
                y2 = float(bbox.find('ymax').text)
                boxes[ix, :] = [x1, y1, x2, y2]
            yield boxes, xml_url

    # Image-info generator.
    def picinfo_gen(self):
        """Yield (width, height, depth, xml_path) from each annotation's <size>."""
        for xml_url in self.dataset_gen():
            tree = ET.parse(xml_url)
            sz = tree.find('size')
            width = int(sz.find('width').text)
            height = int(sz.find('height').text)
            depth = int(sz.find('depth').text)
            yield width, height, depth, xml_url

    # Bounding-box validator.
    def bbox_checker(self):
        """Yield the xml path of every image whose boxes all pass the checks.

        Abnormal boxes are reported on stdout together with the failing rule.
        """
        for bbox, picinfo in zip(self.bbox_gen(), self.picinfo_gen()):
            boxes, xml_url = bbox
            width, height, depth, xml_url_2 = picinfo
            assert xml_url == xml_url_2
            isChosen = True
            for b in boxes:
                xmin = b[0]
                ymin = b[1]
                xmax = b[2]
                ymax = b[3]
                isNorm = True
                if not self.point_checker(xmin, ymin, width, height):
                    isNorm = False
                    err = "point checker abn (min point)"
                elif not self.point_checker(xmax, ymax, width, height):
                    isNorm = False
                    err = "point checker abn (max point)"
                elif not self.width_checker(xmin, xmax):
                    isNorm = False
                    err = "width checker abn"
                elif not self.height_checker(ymin, ymax):
                    isNorm = False
                    err = "height checker abn"
                isChosen = isChosen and isNorm
                if not isNorm:
                    print("{:20} is abnormal, pic size({:4},{:4}); with bbox {}, bbox size:({},{}), {}".format(
                        os.path.splitext(os.path.split(xml_url)[1])[0],
                        width,
                        height,
                        b,
                        xmax - xmin,
                        ymax - ymin,
                        err)
                    )
            if isChosen:
                yield xml_url

    def start(self):
        """Run the checks and write each passing image id to ``out_txt``."""
        with open(self.out_txt, "w") as f:
            for path in self.bbox_checker():
                basename = os.path.split(path)[1]
                f.write(os.path.splitext(basename)[0] + "\n")
if __name__ == "__main__":
    # Command-line front end: collect paths and thresholds, then run the checker.
    arg_parser = argparse.ArgumentParser(description="bbox checker for Pascal VOC dataset")
    arg_parser.add_argument('-a', '--anno', type=str, help="annotation path")
    arg_parser.add_argument('-i', '--input', type=str, help="path of imageset txt")
    arg_parser.add_argument('-o', '--output', type=str, default="./out.txt", help="path of output txt")
    arg_parser.add_argument('-w', '--wmin', type=int, default=0, help="min of bbox width")
    arg_parser.add_argument('-hm', '--hmin', type=int, default=0, help="min of bbox height")
    arg_parser.add_argument('-e', '--edgedst', type=int, default=0, help="min distance between bbox and border")
    cli_args = arg_parser.parse_args()
    print(cli_args)

    checker = bbox_checker()
    checker.annopath = cli_args.anno
    checker.txt_list = cli_args.input
    checker.out_txt = cli_args.output
    checker.wmin = cli_args.wmin
    checker.hmin = cli_args.hmin
    checker.edge_dist = cli_args.edgedst
    checker.start()
|
from django.contrib import admin
from .models import ProductSubCategory, ProductCategory, Product

# Expose the shop models on the default admin site.
for _model in (ProductSubCategory, ProductCategory, Product):
    admin.site.register(_model)
|
"""
WSGI config for backend project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from django.db.backends.signals import connection_created
from django.dispatch import receiver
# Point Django at the project settings before creating the application.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "backend.settings")
# Module-level WSGI callable that the application server imports.
application = get_wsgi_application()
@receiver(connection_created)
def setup_postgres(connection, **kwargs):
    """On every new PostgreSQL connection, cap statement runtime at 30 s."""
    if connection.vendor != "postgresql":
        return
    with connection.cursor() as cursor:
        # Timeout statements after 30 seconds.
        cursor.execute(
            """
            SET statement_timeout TO 30000;
            """
        )
|
"""
============================
Author:柠檬班-木森
Time:2020/2/3 21:46
E-mail:3247119728@qq.com
Company:湖南零檬信息技术有限公司
============================
"""
import unittest
from HTMLTestRunnerNew import HTMLTestRunner

# Step 1: create the test suite.
suite = unittest.TestSuite()
# Step 2: load the cases from the testcases module into the suite.
from py26_13day import testcases
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromModule(testcases))
# Step 3: run the suite. The original left report.html open for the life of
# the process; 'with' guarantees the report file is flushed and closed.
with open("report.html", "wb") as report_file:
    runner = HTMLTestRunner(stream=report_file,
                            title="py26期第一份报告",
                            description="上课生成的测试报告",
                            tester="musen")
    runner.run(suite)
|
from functools import wraps
def cache(func):
    """Memoize *func* by its positional arguments; echoes each result."""
    memo = {}

    @wraps(func)
    def inner(*args):
        try:
            result = memo[args]
        except KeyError:
            result = memo[args] = func(*args)
        # The decorator deliberately prints the value on every call.
        print(result)
        return result

    return inner
@cache
def fib(i):
    """Return the i-th Fibonacci number (fib(1) == fib(2) == 1)."""
    return 1 if i <= 2 else fib(i - 1) + fib(i - 2)


fib(5)
from datetime import datetime, timedelta
import airflow
from airflow import DAG
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
import json
import os
# Task arguments shared by every task in the DAG.
task_args = {
    "depends_on_past": False,
    "email_on_failure": True,
    "owner": "mandarinduck",
    "email": ["adam.booker@digital.justice.gov.uk","anvil@noms.gsi.gov.uk"],
}
# NOTE(review): start_date=datetime.now() is generally discouraged in Airflow
# (the start date moves on every parse) -- the commented fixed date below
# suggests this was a temporary switch; confirm before relying on scheduling.
dag = DAG(
    "nomis-transformations",
    default_args= task_args,
    description= "Process and curate NOMIS data for Anvil replacement",
    start_date= datetime.now(),
    schedule_interval= None,
    #start_date= datetime(2019, 2, 15),
    #schedule_interval= '0 2 * * *',
    catchup= False,
)
# Task definitions live in a JSON config next to this DAG file.
json_path = os.path.dirname(__file__) + "/dag_configs/nomis_transform_tasks.json"
with open(json_path) as f:
    airflow_tasks = json.load(f)
def assign_task_list_to_dag(target_dag, dag_config):
    """Create one KubernetesPodOperator per task entry in *dag_config*.

    Returns a dict mapping task_id -> operator, attached to *target_dag*.
    Each pod runs the nomis-transform image with its work described entirely
    through environment variables.
    """
    # Define docker image and the AWS role (based on the airflow-repo)
    repo_name = "airflow-nomis-transform"
    repo_release_tag = "v2.0.18"
    IMAGE = f"593291632749.dkr.ecr.eu-west-1.amazonaws.com/{repo_name}:{repo_release_tag}"
    ROLE = "airflow_nomis_transform"
    process_source = "mojap-raw-hist/hmpps/nomis_t62"
    destination = "alpha-anvil/curated"
    curate_source = "alpha-anvil/curated"
    athena_database = "anvil_beta"
    db_ver = "v1"
    gluejob_bucket = "alpha-nomis-discovery"
    gluejob_role = ROLE
    entry_py_script = "run.py"
    work_capacity = "4"
    # Define the set of tasks using the dag_config dictionary
    task_dic = dict()
    for tsk in dag_config["tasks"]:
        # Pod/task names may not contain underscores.
        nom = f'nomis-{tsk["operation"]}-{tsk["task_id"]}'.replace("_","-")
        table_set_string = ','.join(t for t in tsk["table_set"])
        # Denormalisation tasks read from the already-curated data.
        if "tsk_denorm" in tsk["task_id"]:
            s3_source = curate_source
        else:
            s3_source = process_source
        task_dic[tsk["task_id"]] = KubernetesPodOperator(
            dag= target_dag,
            namespace= "airflow",
            image= IMAGE,
            env_vars= {
                "TABLES": table_set_string,
                "NOMIS_TRANSFORM": tsk["operation"],
                "SOURCE": s3_source,
                "DESTINATION": destination,
                "ATHENA_DB": athena_database,
                "DB_VERSION": db_ver,
                "PYTHON_SCRIPT_NAME": entry_py_script,
                "GLUE_JOB_BUCKET": gluejob_bucket,
                "GLUE_JOB_ROLE": gluejob_role,
                "ALLOCATED_CAPACITY": work_capacity,
                "AWS_METADATA_SERVICE_TIMEOUT": "60",
                "AWS_METADATA_SERVICE_NUM_ATTEMPTS": "5"
            },
            labels= {"anvil": target_dag.dag_id},
            name= nom,
            in_cluster= True,
            task_id= nom,
            get_logs= True,
            annotations= {"iam.amazonaws.com/role": ROLE},
        )
    return task_dic
def set_task_dependencies(task_dic, dag_config):
    """Wire upstream >> downstream links between operators from the config."""
    for task_conf in dag_config["tasks"]:
        downstream = task_dic[task_conf["task_id"]]
        for upstream_id in task_conf["task_dependency_ids"]:
            task_dic[upstream_id] >> downstream
    return task_dic
# Build the operators from the JSON config, then wire their dependencies.
task_dic = assign_task_list_to_dag(dag, airflow_tasks)
task_dic = set_task_dependencies(task_dic, airflow_tasks)
|
from django.db import models
# Create your models here.
from taggit.managers import TaggableManager
class Category(models.Model):
    """A product category, displayed by its title."""
    category_title=models.CharField(max_length=30)
    def __str__(self):
        return self.category_title
class Products(models.Model):
    """A product with optional image, taggit tags and a nullable category."""
    product_name=models.CharField(max_length=30)
    # SET_NULL keeps products alive when their category is deleted.
    category=models.ForeignKey(Category,on_delete=models.SET_NULL,null=True)
    tags = TaggableManager()
    image=models.ImageField(upload_to='./images/',blank=True)
    description=models.TextField()
    def __str__(self):
        return self.product_name
|
import datetime
import time
from scujkbapp.jkb import jkbSession, jkbException, single_check_in
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect
# Create your views here.
from scujkbapp.forms import LoginForm, RegForm
from scujkbapp.models import UserProfile, Invitation, Record
from ratelimit.decorators import ratelimit
def page_not_found(request, exception):
    """Render the custom 404 page."""
    response = render(request, '404.html', status=404)
    return response
def page_error(request):
    """Render the custom 500 page."""
    response = render(request, '500.html', status=500)
    return response
def index(request):
    """Home page; anonymous visitors are redirected to the login view."""
    if not request.user.is_authenticated:
        return redirect('login')
    user = request.user
    username = user.username
    # locals() passes user/username through to the template context.
    return render(request, 'index.html', locals())
@ratelimit(key='ip', rate='5/m', block=True)
def logIn(request):
    """Login view, rate-limited to 5 attempts per minute per IP.

    GET renders the form and remembers the referer; POST validates the
    captcha (case-insensitively, matching register()) and the credentials.
    """
    if request.user.is_authenticated:
        return redirect(request.META.get('HTTP_REFERER', '/'))
    if request.method == 'GET':
        form = LoginForm()
        request.session['login_from'] = request.META.get('HTTP_REFERER',
                                                         '/')
        return render(request, 'login.html', locals())
    elif request.method == 'POST':
        form = LoginForm(request.POST)
        errormsg = ''
        if form.is_valid():
            username = str(form.cleaned_data['userName'])
            password = form.cleaned_data['password']
            captcha = form.cleaned_data['captcha'].strip()
            # Bug fix: the stored code is lowercased before comparison, so the
            # user's input must be lowercased too (register() already does this);
            # otherwise a correctly-typed uppercase captcha was rejected.
            if username != '' and password != '' and captcha.lower() == request.session['CheckCode'].lower():
                user = authenticate(username=username, password=password)
                if user is not None:
                    UserQ = User.objects.get(username=username)
                    # if UserQ.UserProfile.is_active:
                    login(request, user)  # log the account in
                    return redirect(request.session['login_from'])
                # else:
                #     errormsg = "用户未激活!"
                else:
                    errormsg = "用户名或密码错误"
            elif captcha.lower() != request.session['CheckCode'].lower():
                errormsg = "验证码错误"
            elif username == '' or password == '':
                errormsg = "用户名或密码不能为空"
            else:
                errormsg = "其他错误"
        return render(request, 'login.html', locals())
def check_captcha(request):
    """Generate a captcha image, store its text in the session, return the PNG."""
    import io
    # NOTE(review): imports a sibling module that shares this view's name.
    from . import check_captcha as CheckCode
    stream = io.BytesIO()
    # img: the image object; code: the text drawn into the image.
    img, code = CheckCode.create_validate_code()
    img.save(stream, "png")
    # As soon as the image is served, the session's CheckCode becomes the
    # current random string.
    request.session["CheckCode"] = code
    return HttpResponse(stream.getvalue())
def check_bind(request):
    """Return "1" when the current user's profile has a WeChat uid bound, else "0"."""
    profile = UserProfile.objects.get(stu_id=request.user.username)
    if profile.wx_uid == '':
        return HttpResponse("0")
    return HttpResponse("1")
def register(request):
    """Registration view.

    POST flow: validate the form and captcha, verify the student credentials
    against the jkb service, consume a single-use invitation code, then
    create the User + UserProfile and log the new account in.
    """
    if request.method == 'GET':
        form = RegForm()
        request.session['login_from'] = request.META.get('HTTP_REFERER',
                                                         '/')
        return render(request, 'register.html', locals())
    elif request.method == 'POST':
        if not request.user.is_authenticated:
            form = RegForm(request.POST)
            errormsg = ''
            if form.is_valid():
                username = form.cleaned_data['userName']
                password = form.cleaned_data['password']
                # SCKey = form.cleaned_data['SCKey']
                invitation = form.cleaned_data['invitation_key']
                captcha = form.cleaned_data['captcha']
                if username != '' and password != '' and captcha.lower() == request.session['CheckCode'].lower():
                    if User.objects.filter(username=username).exists():
                        errormsg = '学号已存在,如果您忘记了密码,请联系管理员'
                    else:
                        # Verify the credentials against the unified platform.
                        try:
                            auth = jkbSession(username, password)
                            auth.login()
                        except jkbException as e:
                            errormsg = str(e)
                            return render(request, 'register.html', locals())
                        try:
                            invitation_list = Invitation.objects.get(code=invitation)
                        except Exception: # 0 or more than 1
                            errormsg = '邀请码有误'
                            return render(request, 'register.html', locals())
                            # return render(request, 'register.html', locals(), {'form': form})
                        # Invitation codes are single-use.
                        if invitation_list.usedBy != '':
                            errormsg = '邀请码已被使用'
                            return render(request, 'register.html', locals())
                        invitation_list.usedBy = str(username)
                        invitation_list.usedTime = time.strftime("%Y-%m-%d %H:%M:%S")
                        invitation_list.save()
                        user = User.objects.create_user(username=username, password=password)
                        # NOTE(review): the plain-text password is persisted on
                        # the profile (stu_pass) -- presumably needed for the jkb
                        # check-in automation; confirm this is intentional.
                        userProfile = UserProfile(user=user, stu_pass=password, stu_id=username)
                        userProfile.save()
                        user.backend = 'django.contrib.auth.backends.ModelBackend'
                        login(request, user)
                        return redirect('index')
                # NOTE(review): unlike the check above, this comparison does not
                # lowercase the submitted captcha -- confirm whether that is
                # deliberate.
                elif captcha != request.session['CheckCode'].lower():
                    errormsg = '验证码错误'
                    return render(request, 'register.html', locals())
            else:
                return render(request, 'register.html', {'form': form})
        else:
            return redirect('index')
@login_required
def logOut(request):
    """Log the user out and send them back to the page they came from.

    Bug fix: request.META['HTTP_REFERER'] raised KeyError when the header was
    absent (direct navigation); fall back to '/' like the other views do.
    """
    try:
        logout(request)
    except Exception as e:
        print(e)
    return redirect(request.META.get('HTTP_REFERER', '/'))
def checkUsername(request):
    """AJAX helper: respond 1 when the username is already taken, else 0."""
    candidate = request.GET.get('userName')
    taken = User.objects.filter(username=candidate).exists()
    return HttpResponse(1 if taken else 0)
def inner_index(request):
    """Dashboard page; anonymous visitors are redirected to login."""
    if not request.user.is_authenticated:
        return redirect('login')
    userprofile = UserProfile.objects.get(stu_id=request.user.username)
    # locals() hands userprofile to the template context.
    return render(request, 'index_v1.html', locals())
@login_required
def getRecordList(request):
    """Paged JSON listing of the current user's check-in records.

    Expects GET params ``page`` (1-based) and ``rows`` (page size) and
    returns ``{'total': N, 'rows': [...]}`` for the requested slice.
    """
    def utc2local(utc_st):
        # Convert a UTC datetime to local time by adding the current
        # local-vs-UTC offset (+8:00 in the original deployment).
        now_stamp = time.time()
        local_time = datetime.datetime.fromtimestamp(now_stamp)
        utc_time = datetime.datetime.utcfromtimestamp(now_stamp)
        offset = local_time - utc_time
        local_st = utc_st + offset
        return local_st
    userprofile = UserProfile.objects.get(stu_id=request.user.username)
    if request.method == 'GET':
        try:
            page_record = []
            page = request.GET.get('page')
            num = request.GET.get('rows')
            right_boundary = int(page) * int(num)
            recordSet = Record.objects.filter(user=userprofile)
            # Build the full list first, then slice the requested page below.
            for i, record in enumerate(recordSet):
                local_time = utc2local(record.addTime)
                LOCAL_FORMAT = "%Y-%m-%d %H:%M:%S"
                create_time_str = local_time.strftime(LOCAL_FORMAT)
                single_code = {'id': i + 1, 'title': record.title, 'content': record.content,
                               'createTime': create_time_str}
                page_record.append(single_code)
            total = len(recordSet)
            page_record = page_record[int(num) * (int(page) - 1):right_boundary]
            # Shape matches what the async front-end grid expects.
            return JsonResponse({'total': total, 'rows': page_record})
        except Exception as e:
            # NOTE(review): re-raising here just propagates the error to the
            # 500 handler; the try/except adds nothing -- confirm intent.
            raise e
@login_required
def test(request):
    """Manually trigger a single check-in for the logged-in user (debug endpoint)."""
    profile = UserProfile.objects.get(stu_id=request.user.username)
    title, submit_info = single_check_in(profile)
    payload = {
        'status': 200,
        'title': title,
        'submit_info': str(submit_info)
    }
    return JsonResponse(payload)
@login_required
def delete(request):
    """Delete the current account (profile + user), log out, go to index."""
    profile = UserProfile.objects.get(stu_id=request.user.username)
    account = User.objects.get(username=request.user.username)
    profile.delete()
    account.delete()
    logout(request)
    return redirect('index')
@login_required
def adjust(request):
    """Toggle the user's auto-check-in flag; requires a bound WeChat uid."""
    profile = UserProfile.objects.get(stu_id=request.user.username)
    if profile.wx_uid == '':
        return HttpResponse("未绑定微信,无法调整账号状态")
    profile.valid = not profile.valid
    profile.save()
    return HttpResponse("1")
def key_login(request, userkey):
    """Passwordless login via a per-user login_key in the URL.

    Anything other than exactly one matching profile gets the 500 page.
    """
    if request.user.is_authenticated:
        return redirect('index')
    matches = UserProfile.objects.filter(login_key=userkey)
    if len(matches) == 1:
        login(request, matches[0].user)
        return redirect('index')
    return render(request, '500.html', status=500)
|
import numpy as np
import numpy.polynomial.polynomial as P
from pyx import color, deco, graph, style

# Fixed seed so the noisy sample (and hence the plot) is reproducible.
np.random.seed(987)
# Noisy sine samples on [0, pi].
x = np.pi*np.linspace(0, 1, 100)
y = np.sin(x)+0.1*np.random.rand(100)
# Least-squares quadratic fit to the samples.
fit = P.Polynomial(P.polyfit(x, y, 2))
# x axis labelled in rational multiples of pi; y axis with fixed tick spacing.
g = graph.graphxy(width=8,
                  x=graph.axis.lin(title=r'\Large $x$', divisor=np.pi,
                                   texter=graph.axis.texter.rational(suffix=r'\pi')),
                  y=graph.axis.lin(min=0, max=1.1, title=r'\Large $y$',
                                   parter=graph.axis.parter.lin(tickdists=[0.2])))
# Scatter the raw data as filled blue circles.
origdata = list(zip(x, y))
symbolattrs = [deco.filled, color.hsb(0.6, 1, 0.7)]
g.plot(graph.data.points(origdata, x=1, y=2),
       [graph.style.symbol(graph.style.symbol.circle, 0.07,
                           symbolattrs=symbolattrs)])
# Overlay the fitted polynomial as a thick red line.
fitdata = list(zip(x, fit(x)))
lineattrs = [color.hsb(0.05, 1, 0.7), style.linewidth.THick]
g.plot(graph.data.points(fitdata, x=1, y=2),
       [graph.style.line(lineattrs=lineattrs)])
g.writePDFfile()
|
from __future__ import division
import pickle
import csv
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
import warnings
warnings.filterwarnings("ignore")
# Prompt for the news text to verify and echo it back.
var = input("Please enter the news text you want to verify: ")
print("You entered: " + str(var))
# Score components: model probability (max 0.2) and headline-match (max 0.8).
prob_score,cont_score = 0, 0
#function to run for prediction
def detecting_fake_news(var):
#retrieving the best model for prediction call
load_model = pickle.load(open('final_model.sav', 'rb'))
prediction = load_model.predict([var])
prob = load_model.predict_proba([var])
prob_score = prob[0][1]*20/100
return prob_score
#(print("The given statement is ",prediction[0]),
#print("The truth probability score is ",prob[0][1]))
#function to perform search
#function to perform search
def news_search(var):
    """Score how well *var* overlaps stored headlines; returns 0-0.8."""
    ps = PorterStemmer()
    stop_words = set(stopwords.words('english'))
    # NOTE(review): set.update() iterates each argument, so these single-char
    # strings are added one by one; a multi-char token here would be split
    # into characters -- fragile, confirm intent.
    stop_words.update(':','&','|','!','?','-','$',',','@','#','%','^','*','"',"'",';',':','.','',' ')
    word_tokens = []
    word_tokens.append(word_tokenize(var))
    inputted=[]
    headlines=[]
    # Stem every non-stop-word token, dropping non-ASCII characters.
    for j in word_tokens:
        for w in j:
            if w not in stop_words:
                inputted.append(ps.stem(w).encode('ascii', 'ignore').decode('ascii'))
    maxcount, normalize = 0, 0
    with open('newstitles.csv') as File:
        reader = csv.reader(File)
        # Track the headline row with the most stemmed-word hits; normalize
        # by that row's length.
        for row in reader:
            total=len(row)
            count=0
            for k in inputted:
                if k in row:
                    count=count+1
                else:
                    pass
            #print(count)
            if(count>maxcount):
                maxcount=count
                normalize=float(maxcount/total)
            else:
                pass
    # The headline match contributes at most 80% of the combined score.
    cont_score = normalize*80/100
    return cont_score
if __name__ == '__main__':
    # Combine the model probability (<= 0.2) and headline match (<= 0.8)
    # into a single truthfulness score.
    a = detecting_fake_news(var)
    b = news_search(var)
    print(a)
    print(b)
    final_score = a+b
    print(final_score)
    if(final_score>0.75):
        print('The given news is true')
    elif(final_score>0.40):
        print('The given news may or maynot be true')
    else:
        print('The given news is false')
|
# Read two integers from the user, then print their sum and difference.
a = int (input('Enter the first number : '))
b = int (input('Enter the second number : '))
print('The sum of two numbers is ',a+b)
print('The difference of two number is ',a-b)
from django.db import models
# Create your models here.
class SignUp(models.Model):
    """Account record with credentials, contact details and role flags."""
    username= models.CharField( max_length=200, blank=False, null=True)
    useremail= models.EmailField(max_length=250,blank=False, null=True)
    password= models.CharField(max_length=250,blank=False, null=True)
    contact= models.IntegerField(blank=True, null=True)
    designation= models.CharField(max_length=250,blank=True, null=True)
    address= models.TextField(blank=True, null=True)
    created_date= models.DateTimeField(auto_now_add=True)
    # Role flags -- exactly which roles apply is decided by the views.
    isAdmin= models.BooleanField(default=False)
    isSupportEngineer= models.BooleanField(default=False)
    isClient= models.BooleanField(default=False)

    def __str__(self):
        # Python 3 / modern Django calls __str__ for display; __unicode__
        # alone (below) is never invoked on Python 3.
        return str(self.useremail)

    def __unicode__(self):
        # Kept for legacy Python 2 compatibility.
        return str(self.useremail)
|
# -*- coding:utf-8 -*-
import tornado.web
from torcms.model.info_model import MInfor
from torcms.model.info_relation_model import MInforRel
from torcms.model.usage_model import MUsage
from torcms.core.base_handler import BaseHandler
from torcms.model.collect_model import MCollect
import json
class CollectHandler(BaseHandler):
    """Tornado handler for a user's collected (favourited) items."""
    def initialize(self):
        self.init()
        self.mequa = MInfor()
        self.musage = MUsage()
        self.mrel = MInforRel()
        self.mcollect = MCollect()

    def get(self, url_str=''):
        """Route GET requests: 'list' renders the collection; a 4-char id toggles it."""
        if len(url_str) > 0:
            url_arr = self.parse_url(url_str)
        else:
            return False
        if url_str == 'list':
            self.list()
        elif len(url_arr) == 1 and len(url_str) == 4:
            if self.get_current_user():
                self.add_or_update(url_str)
            else:
                # Bug fix: RequestHandler.set_status() takes an int status
                # code; passing the string '403' is invalid.
                self.set_status(403)
                return False

    @tornado.web.authenticated
    def add_or_update(self, app_id):
        """Record (or refresh) the collect action and answer a JSON payload."""
        self.mcollect.add_or_update(self.userinfo.uid, app_id)
        out_dic = {'success': True}
        # json.dumps + self.write is the conventional Tornado pattern; the
        # original json.dump(out_dic, self) relied on self.write duck-typing.
        return self.write(json.dumps(out_dic))

    @tornado.web.authenticated
    def list(self):
        """Render the 20 most recently collected items for the current user."""
        self.render('user/collect/list.html',
                    recs_collect=self.mcollect.query_recent(self.userinfo.uid, 20),
                    userinfo=self.userinfo,
                    )
|
import pymysql

connection = pymysql.connect(host='localhost',
                             user='root',
                             password='root',
                             db='imdb',
                             charset='utf8',
                             cursorclass=pymysql.cursors.DictCursor,
                             )
actors_list = ['Jason statham', 'David Schwimmer', 'Jennifer Aniston']
print(actors_list)
try:
    with connection.cursor() as c:
        for actor in actors_list:
            # Parameterized query: the driver quotes the value, which avoids
            # SQL injection and breakage on names containing quotes.
            c.execute('Select full_name from actors where full_name= %s', (actor,))
            result = c.fetchone()
            print(actor + ' exists in the database' if result is not None else actor +
                  ' does not exists in the database')
finally:
    connection.close()
|
from django.apps import AppConfig
class MainLibraryConfig(AppConfig):
    """Django app configuration for the main_library app."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'main_library'
|
from turtle import Turtle
import random
COLORS = ['red', 'green', 'blue', 'orange', 'yellow', 'grey', 'gold', 'pink']
FONT = ('courier', 20, 'normal')
class Car(Turtle):
    """Manages the fleet of obstacle cars and the on-screen level banner."""
    def __init__(self):
        super().__init__()
        self.cars = []
        self.level = 0
        self.penup()
        self.hideturtle()
        self.goto(0, 240)
        self.write(f"Level = {self.level}", align='center', font=FONT)
        self.car_speed = 0.1
        self.starting_cars(10)

    def starting_cars(self, n):
        """Redraw the level banner and spawn n cars on each side of the screen.

        Right-side cars face left (heading 180); left-side cars keep the
        default heading 0. The two near-identical loops of the original are
        folded into one parameterized loop.
        """
        self.clear()
        self.write(f"Level = {self.level}", align='center', font=FONT)
        for start_x, heading in ((280, 180), (-280, 0)):
            for _ in range(0, n):
                y = random.randint(-200, 200)
                car = Turtle('square')
                car.shapesize(1, 2)
                car.color(random.choice(COLORS))
                car.setheading(heading)
                car.penup()
                car.goto(start_x, y)
                self.cars.append(car)

    def move_cars(self):
        """Advance every car by a random 0-10 step along its heading."""
        for car in self.cars:
            step = random.randint(0, 10)
            car.penup()
            car.forward(step)

    def starting_point(self):
        """Start the next level: clear cars, speed up, respawn the fleet."""
        for car in self.cars:
            car.hideturtle()
        self.cars = []
        self.car_speed *= 0.9
        self.level += 1
        self.starting_cars(10)
# Collect the second whitespace-separated field of each line as a float.
with open("sem_dev_loop.txt", "r") as file:
    res = [float(line.split()[1]) for line in file]
print(res)
|
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import os
import sys
current_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append('../model')
sys.path.append('../mt5api')
sys.path.append('../db')
sys.path.append('../utillity')
sys.path.append('../setting')
import glob
import logging
import time
import pandas as pd
from MT5Bind import MT5Bind
from PriceDatabase import PriceDatabase, ManageTable, CandleTable, TickTable
from TimeUtility import TimeUtility
from Timeframe import Timeframe, HOUR, MINUTE, DAY
from Setting import Setting
from threading import Thread
from Schedular import Schedular
logging.basicConfig(level=logging.DEBUG, filename="debug.log", format="%(asctime)s %(levelname)-7s %(message)s")
logger = logging.getLogger("logger")
class XMHandler:
    """Facade over PriceDatabase for creating and updating candle/tick tables.

    The 'manage' table tracks, per (stock, timeframe-or-'tick'), the time
    range currently stored, so callers know where to resume downloads.
    """
    def __init__(self):
        pass
    def buildManageTable(self):
        """Create the management table that tracks stored time ranges."""
        manage = ManageTable()
        db = PriceDatabase()
        return db.create(manage)
    def buildCandleTable(self, stock, timeframe: Timeframe):
        """Create the candle table for one (stock, timeframe) pair."""
        table = CandleTable(stock, timeframe)
        db = PriceDatabase()
        return db.create(table)
    def buildTickTable(self, stock):
        """Create the tick table for one stock."""
        table = TickTable(stock)
        db = PriceDatabase()
        return db.create(table)
    def update(self, stock, timeframe: Timeframe, data):
        """Insert candles and refresh the managed range; return rows inserted or -1.

        The last element of *data* is dropped -- presumably the still-forming
        current candle (TODO confirm against MT5Bind).
        """
        # remove last data
        data = data[:len(data) - 1]
        for d in data:
            logger.debug('update() ... ' + stock + '-' + timeframe.symbol + ': ' + str(d[0]))
        table = CandleTable(stock, timeframe)
        db = PriceDatabase()
        ret = db.insert(table, data)
        if ret == False:
            print('DB Insert error1')
            return -1
        # Recompute the stored range from the table itself, then upsert it
        # into the manage table.
        stored = db.fetchAllItem(table, 'time')
        times = stored['time']
        tbegin = times[0]
        tend = times[-1]
        (begin, end) = self.rangeOfTime(stock, timeframe)
        manage = ManageTable()
        db = PriceDatabase()
        if begin is None:
            ret = db.insert(manage, [[stock, timeframe.symbol, tbegin, tend]])
        else:
            ret = db.update(manage, [stock, timeframe.symbol, tbegin, tend])
        return len(data)
    def updateTicks(self, stock, data):
        """Insert ticks and refresh the managed range; return rows inserted or -1."""
        # remove last data
        data = data[:len(data) - 1]
        table = TickTable(stock)
        db = PriceDatabase()
        ret = db.insert(table, data)
        if ret == False:
            print('DB Insert error1')
            return -1
        stored = db.fetchAllItem(table, 'time')
        times = stored['time']
        if len(times) == 0:
            tbegin = None
            tend = None
        else:
            tbegin = times[0]
            tend = times[-1]
        # Ticks are tracked in the manage table under the pseudo-timeframe 'tick'.
        (begin, end) = self.rangeOfTicks(stock)
        manage = ManageTable()
        db = PriceDatabase()
        if begin is None:
            ret = db.insert(manage, [[stock, 'tick', tbegin, tend]])
        else:
            ret = db.update(manage, [stock, 'tick', tbegin, tend])
        return len(data)
    def rangeOfTime(self, stock, timeframe:Timeframe):
        """Return (begin, end) of stored candles, or (None, None) if untracked."""
        db = PriceDatabase()
        manage = ManageTable()
        item = db.fetchItem(manage, {'stock':stock, 'timeframe':timeframe.symbol})
        if len(item) == 0:
            print('Management DB update Error!')
            return (None, None)
        begin = item['tbegin']
        end = item['tend']
        return (begin, end)
    def rangeOfTicks(self, stock):
        """Return (begin, end) of stored ticks, or (None, None) if untracked."""
        db = PriceDatabase()
        manage = ManageTable()
        item = db.fetchItem(manage, {'stock':stock, 'timeframe':'tick'})
        if len(item) == 0:
            print('Management DB update Error!')
            return (None, None)
        begin = item['tbegin']
        end = item['tend']
        return (begin, end)
# -----------------
# Singleton handler shared by every module-level function below; 'loop'
# is the run flag polled by start().
handler = XMHandler()
loop = True
# -----------------
# -----------------
def keyOfData(stock, timeframe):
    """Scheduler task key for a (stock, timeframe) pair: '<stock>-<symbol>'."""
    return '{}-{}'.format(stock, timeframe.symbol)
def start():
    """Poll MT5 and append new candles for every configured stock/timeframe.

    Runs until the module-level 'loop' flag goes False. The first pass
    downloads unconditionally; later passes obey the scheduler.
    """
    stocks = Setting.xm_index() + Setting.xm_fx()
    schedular = Schedular()
    for stock in stocks:
        for timeframe in Timeframe.timeframes():
            schedular.addTask(keyOfData(stock, timeframe), timeframe)
    is_initial = True
    while loop:
        for stock in stocks:
            server = MT5Bind(stock)
            for timeframe in Timeframe.timeframes():
                if is_initial or schedular.shouldDoNow(keyOfData(stock, timeframe)):
                    # Resume from the end of the stored range up to "now" (JST).
                    (tbegin, tend) = handler.rangeOfTime(stock, timeframe)
                    data = server.acquireRange(timeframe, tend, TimeUtility.nowJst())
                    logger.debug(stock + ' ' + timeframe.symbol + 'Download Length: ' + str(len(data)) )
                    # A single row is only the forming candle, which update() drops.
                    if len(data) > 1:
                        handler.update(stock, timeframe, data)
                        print(stock, timeframe.symbol, 'Download done ', len(data))
        is_initial = False
def stop():
    """Signal start() to exit after finishing its current pass.

    Bug fix: without the 'global' declaration the assignment only created a
    local variable, so the module-level flag never changed and start()
    could not be stopped.
    """
    global loop
    loop = False
# --------------------
def build(stocks):
    """Create the manage table once, then tick and candle tables per stock."""
    for index, stock in enumerate(stocks):
        if index == 0:
            handler.buildManageTable()
            print('Manage Table build')
        handler.buildTickTable(stock)
        print(stock + ': Tick Table build')
        for timeframe in Timeframe.timeframes():
            handler.buildCandleTable(stock, timeframe)
            print(stock + ': ' + timeframe.symbol + ' Table build')
def buildTest():
    """Smoke-test table creation for a single stock and the M1 timeframe."""
    timeframes = [Timeframe('M1')]
    for index, stock in enumerate(['US30Cash']):
        if index == 0:
            handler.buildManageTable()
            print('Manage Table build')
        handler.buildTickTable(stock)
        print(stock + ': Tick Table build')
        for timeframe in timeframes:
            handler.buildCandleTable(stock, timeframe)
            print(stock + ': ' + timeframe.symbol + ' Table build')
def firstUpdate(stocks, size=99999):
    """Seed the candle tables with up to *size* historical bars per pair."""
    for stock in stocks:
        server = MT5Bind(stock)
        for timeframe in Timeframe.timeframes():
            # NOTE(review): this first fetch is unused except for the error
            # message rangeOfTime() prints when the pair is untracked.
            (begin, end) = handler.rangeOfTime(stock, timeframe)
            data = server.acquire(timeframe, size=size)
            if len(data) <= 1:
                print('Error No Data', stock, timeframe.symbol)
                continue
            handler.update(stock, timeframe, data)
            begin, end = handler.rangeOfTime(stock, timeframe)
            print('Done... legth: ', len(data), stock, timeframe.symbol, begin, end)
            logger.debug('firstUpdate() ... ' + stock + '-' + timeframe.symbol + ' begin: ' + str(begin) + ' end: ' + str(end))
def updateTicks(stock, repeat=100000):
    """Download tick batches for *stock*, resuming from the stored range.

    When a window yields no ticks the cursor advances one hour; after 240
    consecutive empty windows (10 days) the loop gives up.
    """
    server = MT5Bind(stock)
    tbegin, tend = handler.rangeOfTicks(stock)
    if tend is None:
        # Nothing stored yet: start from 2018-01-01 JST.
        t = TimeUtility.jstTime(2018, 1, 1, 0, 0)
    else:
        t = tend
    nothing = 0
    for i in range(repeat):
        data = server.acquireTicks(t, size=20000)
        if len(data) > 1:
            handler.updateTicks(stock, data)
            print(stock, str(TimeUtility.nowJst()), 'Tick Download done ', i, len(data), data[0], data[-1])
            logger.debug('updateTicks() ... ' + stock + ': ' + str(i) + ' Length:' + str(len(data)) + '...' + str(data[0]) + '-' + str(data[-1]))
            tbegin, tend = handler.rangeOfTicks(stock)
            t = tend
            nothing = 0
        else:
            t += TimeUtility.deltaHour(1)
            nothing += 1
            if nothing > 10 * 24:
                break
def downloadTickData(save_dir, stock, year, month, day):
    """Save one day of ticks to '<save_dir><stock>_Tick_YYYY-MM-DD.csv'.

    Skips the download when the CSV already exists; writes nothing when the
    day has no ticks.
    """
    filepath = save_dir + stock + '_Tick_' + str(year).zfill(4) + '-' + str(month).zfill(2) + '-' + str(day).zfill(2) + '.csv'
    if os.path.isfile(filepath):
        return
    server = MT5Bind(stock)
    # Full JST day: [00:00 of the day, 00:00 of the next day).
    t_from = TimeUtility.jstTime(year, month, day, 0, 0)
    t_to = t_from + TimeUtility.deltaDay(1) #TimeUtility.jstTime(year, month, day, 23, 59)
    data = server.acquireTicksRange(t_from, t_to)
    if len(data) > 0:
        df = pd.DataFrame(data=data, columns=['Time', 'Bid', 'Ask', 'Mid', 'Volume'])
        df.to_csv(filepath, index=False)
def deleteLastFile(dir_path):
    """Delete the alphabetically last file matching the *dir_path* glob.

    Returns the deleted path, or None when nothing matches. Bug fix: glob()
    returns files in filesystem-dependent order, so "last" was
    nondeterministic; sorting makes it the lexicographically last file,
    which for the zero-padded date names used here is the newest one.
    """
    matches = sorted(glob.glob(dir_path))
    if not matches:
        return None
    last = matches[-1]
    os.remove(last)
    print('Delete File ...' + last)
    return last
def taskDownloadTick(stock):
    """Download per-day tick CSVs for *stock* into d://tick_data/<stock>/<year>/.

    Bug fix: the bare 'except:' clauses also swallowed SystemExit and
    KeyboardInterrupt; they are narrowed to the exceptions actually expected.
    """
    root = 'd://tick_data/' + stock + '/'
    try:
        os.mkdir(root)
    except OSError:
        # Directory already exists (or cannot be created).
        print('!')
    for year in range(2016, 2021):
        dir_path = root + str(year).zfill(4) + '/'
        try:
            os.mkdir(dir_path)
        except OSError:
            # Resuming into an existing year: drop its possibly-partial
            # last file so it gets re-downloaded.
            deleteLastFile(dir_path + '/*.csv')
        for month in range(1, 13):
            for day in range(1, 32):
                try:
                    t = TimeUtility.jstTime(year, month, day, 0, 0)
                    downloadTickData(dir_path, stock, year, month, day)
                except Exception:
                    # Invalid calendar dates (e.g. Feb 30) and download
                    # failures are skipped silently, as before.
                    continue
        print('Done ' + stock + '...' + str(year))
def test1():
    """Manual check: bulk-acquire 500 M1 candles for US30Cash and store them."""
    stock = 'US30Cash'
    timeframe = Timeframe('M1')
    (begin, end) = handler.rangeOfTime(stock, timeframe)
    #t0 = end + timeframe.deltaTime
    #t1 = TimeUtility.nowJst() - TimeUtility.deltaMinute(1)
    server = MT5Bind(stock)
    data = server.acquire(timeframe, size=500)
    if len(data) <= 1:
        # 0 -> -1, 1 -> 0: signal that no complete candle was received.
        return len(data) - 1
    handler.update(stock, timeframe, data)
    begin, end = handler.rangeOfTime(stock, timeframe)
    print('Done...', stock, timeframe, begin, end)
def test2():
    """Manual check: incremental candle update from the stored range to now."""
    stock = 'US30Cash'
    timeframe = Timeframe('M1')
    (tbegin, tend) = handler.rangeOfTime(stock, timeframe)
    server = MT5Bind(stock)
    now = TimeUtility.nowJst()
    data = server.acquireRange(timeframe, tend, now)
    if len(data) == 0:
        return -1
    if len(data) == 1:
        # Only the still-forming candle arrived; nothing to store.
        return 0
    handler.update(stock, timeframe, data)
    begin, end = handler.rangeOfTime(stock, timeframe)
    print('Done...', stock, timeframe, begin, end)
def test3():
    """Manual check: acquire 1000 ticks starting from now and store them."""
    stock = 'US30Cash'
    (tbegin, tend) = handler.rangeOfTicks(stock)
    tfrom = TimeUtility.nowJst()
    server = MT5Bind(stock)
    data = server.acquireTicks(tfrom, size=1000)
    if len(data) <= 1:
        return len(data) - 1
    handler.updateTicks(stock, data)
    begin, end = handler.rangeOfTicks(stock)
    print('Done...', stock, begin, end)
def test4():
    """Manual check: acquire a tiny tick batch (10) from now and store it."""
    stock = 'US30Cash'
    (tbegin, tend) = handler.rangeOfTicks(stock)
    now = TimeUtility.nowJst()
    server = MT5Bind(stock)
    data = server.acquireTicks(now, size = 10)
    if len(data) <= 1:
        return len(data) - 1
    handler.updateTicks(stock, data)
    begin, end = handler.rangeOfTicks(stock)
    print('Done...', stock, begin, end)
def save(stock, timeframe):
    """Scrape OHLC candles for (stock, timeframe) and dump them to a CSV.

    NOTE(review): *timeframe* is used in the filename via string
    concatenation, so here it is expected to be a plain string, unlike the
    Timeframe objects used elsewhere -- confirm at call sites.
    """
    server = MT5Bind(stock)
    dic = server.scrapeWithDic(timeframe)
    values = dic['data']
    d = []
    for value in values:
        d.append([value['time'], value['open'], value['high'], value['low'], value['close']])
    df = pd.DataFrame(data=d, columns=['Time', 'Open', 'High', 'Low', 'Close'])
    df.to_csv('./' + stock + '_' + timeframe + '.csv', index=False)
def ticksThread(stock='US30Cash', repeat=100000):
    """Run updateTicks on a worker thread and block until it finishes.

    Bug fix: Thread(target=updateTicks) started updateTicks without its
    required 'stock' argument, so the thread died with a TypeError
    immediately. The stock (and repeat budget) are now passed through;
    the defaults keep the old zero-argument call sites working.
    """
    thread1 = Thread(target=updateTicks, args=(stock, repeat))
    thread1.start()
    thread1.join()
if __name__ == '__main__':
    # The commented lines are the one-off setup steps (table creation and
    # the initial bulk download); the live entry point downloads per-day
    # tick CSVs for the FX pairs below.
    #stocks = Setting.xm_index() + Setting.xm_fx()
    #build(stocks) # Build Tables
    #firstUpdate(stocks) # Initial Data save to table
    #ticksThread()
    #"['US500Cash', 'CHI50Cash', 'GER30Cash', 'USDJPY', 'AUDJPYmicro', 'EURUSD', 'GBPUSD']
    for stock in ['USDJPY', 'AUDJPYmicro', 'EURUSD', 'GBPUSD']:
        taskDownloadTick(stock)
|
# Shuffle an Array
# Shuffle a set of numbers without duplicates.
class Solution(object):
    """Shuffle an Array (LeetCode 384).

    Keeps the original order for reset() and returns an unbiased,
    uniformly random permutation from shuffle().
    """
    def __init__(self, nums):
        """
        :type nums: List[int]
        """
        self.nums = nums
    def reset(self):
        """
        Resets the array to its original configuration and return it.
        :rtype: List[int]
        """
        return self.nums
    def shuffle(self):
        """
        Returns a random shuffling of the array.
        :rtype: List[int]
        """
        n = len(self.nums)
        shuffled = list(self.nums)
        # Fisher-Yates (modern Knuth) shuffle: element i swaps with a
        # uniformly chosen index in [i, n-1]. The original drew j from
        # [0, n-1] for every i, which is NOT uniform: n^n equally likely
        # outcomes cannot map evenly onto n! permutations.
        for i in range(n):
            j = random.randint(i, n - 1)
            shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
        return shuffled
# haven't found a good proof yet, but to generate a random permutation,
# each num has a probability of 1 / n to be at any index
|
import pandas
data = pandas.read_csv('countries_by_area.txt')
# Derive population density (people per square km) and rank by it.
data['density'] = data['population_2013'] / data['area_sqkm']
data = data.sort_values(by='density', ascending=False)
print(data)
# Print the names of the five densest countries.
for _, record in data.head(5).iterrows():
    print(record['country'])
|
import tweepy
from tweepy import OAuthHandler
import time
import pandas as pd
def getTeams(path):
    """Build the Twitter stream track list from an Excel sheet.

    Each row contributes '<Hashtag><Team>'; a few generic March Madness
    qualifier terms are appended at the end.
    """
    teams = pd.read_excel(path)
    teams["SearchTerm"] = teams["Hashtag"] + teams["Teams"]
    search_terms = list(teams["SearchTerm"].values)
    search_terms = search_terms + ['March Madness', 'NCAA', 'Basketball']
    return search_terms
class MyStreamListener(tweepy.StreamListener):
    """Stream listener that appends every raw tweet payload to a text file."""
    def on_data(self, data):
        """Append the raw JSON payload to twitterStream-out.txt.

        Returns True to keep the stream alive; on a write error, logs and
        backs off briefly instead of crashing the stream.
        """
        try:
            # `with` guarantees the handle is closed even if a write fails
            # (the original leaked the handle on error).
            with open("twitterStream-out.txt", "a") as output:
                output.write(data)
                output.write('\n')
            return True
        except Exception:  # was a bare except:, which also swallowed KeyboardInterrupt
            print("Data error")
            time.sleep(2)
    def on_error(self, status_code):
        # Returning False from on_error disconnects the stream; 420 means
        # we are being rate limited, so bail out rather than hammer the API.
        if status_code == 420:
            return False
# Twitter API credentials (placeholders -- fill in real values before running).
consumer_key = 'KEY'
consumer_secret = 'Secret'
access_token = 'Token'
access_secret = 'Secret'
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
myStreamListener = MyStreamListener()
# NOTE(review): a fresh MyStreamListener() is passed to the Stream below,
# so the `myStreamListener` instance above is unused -- confirm which
# instance was intended.
myStream = tweepy.Stream(auth=api.auth, listener=MyStreamListener())
print('Running stream...')
myStream.filter(track=getTeams("Path"))
#!/usr/bin/env python
# ionconfigmerge.py
# Copyright (Unpublished--all rights reserved under the copyright laws of the
# United States), U.S. Government as represented by the Administrator of the
# National Aeronautics and Space Administration. No copyright is claimed in the
# United States under Title 17, U.S. Code.
# Permission to freely use, copy, modify, and distribute this software and its
# documentation without fee is hereby granted, provided that this copyright
# notice and disclaimer of warranty appears in all copies.
# DISCLAIMER:
# THE SOFTWARE IS PROVIDED 'AS IS' WITHOUT ANY WARRANTY OF ANY KIND, EITHER
# EXPRESSED, IMPLIED, OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTY
# THAT THE SOFTWARE WILL CONFORM TO SPECIFICATIONS, ANY IMPLIED WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND FREEDOM FROM
# INFRINGEMENT, AND ANY WARRANTY THAT THE DOCUMENTATION WILL CONFORM TO THE
# SOFTWARE, OR ANY WARRANTY THAT THE SOFTWARE WILL BE ERROR FREE. IN NO EVENT
# SHALL NASA BE LIABLE FOR ANY DAMAGES, INCLUDING, BUT NOT LIMITED TO, DIRECT,
# INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES, ARISING OUT OF, RESULTING FROM, OR
# IN ANY WAY CONNECTED WITH THIS SOFTWARE, WHETHER OR NOT BASED UPON WARRANTY,
# CONTRACT, TORT , OR OTHERWISE, WHETHER OR NOT INJURY WAS SUSTAINED BY PERSONS
# OR PROPERTY OR OTHERWISE, AND WHETHER OR NOT LOSS WAS SUSTAINED FROM, OR AROSE
# OUT OF THE RESULTS OF, OR USE OF, THE SOFTWARE OR SERVICES PROVIDED HEREUNDER.
# This script makes merging ION config files with ionscript a bit easier.
# Provide this script with the location of ionscript, the "master" name of the *.rc files,
# and the location of those files, and it will merge all available files into a single .rc file.
# NOTE: This script can only *merge* files, not split an existing .rc file.
# Example ./ionconfigmerge ~/ion-open-source/ionscript host1 ~/ion-open-source/tests/host1test/
import sys
import os.path
import os
import subprocess
# NOTE: uses print() calls and dict.items() so this script runs under both
# Python 2 and Python 3 (the original used the Python-2-only `print`
# statement and iteritems(), which fail on Python 3).
if len(sys.argv) < 4:
    sys.exit('Usage: {0} </path/to/ionscript> <rc filename> </path/to/rc files/>'.format(sys.argv[0]))
# Map each .rc file extension to the ionscript command-line switch for it.
rcTypes = {'acsrc':'-a','bprc':'-b','bssrc':'-B','dtn2rc':'-d','dtpcrc':'-D','ionrc':'-i','ltprc':'-l','imcrc':'-m','ipnrc':'-p','ionsecrc':'-s','cfdprc':'-c'}
ionScriptPath = sys.argv[1]
rcName = sys.argv[2]
rcPath = sys.argv[3]
# The user could potentially pass three types of strings for ionscript:
# (1) a path directly to ionscript (ends with "ionscript") or
# a directory path (2) with or (3) without a trailing slash.
# The code below is meant to handle all three cases (hopefully)
# If the path ends with "ionscript", assume it's correct
if ionScriptPath.endswith('ionscript'):
    pass
# Else path given is a directory that may need fixing
elif os.path.isdir(ionScriptPath):
    if not ionScriptPath.endswith('/'):
        ionScriptPath += '/'
    ionScriptPath += 'ionscript'
# Sanity check: the resolved path must exist and be executable.
if (os.path.isfile(ionScriptPath)) and (os.access(ionScriptPath, os.X_OK)):
    print('Found ionscript at {0}'.format(ionScriptPath))
else:
    sys.exit('ERROR: ionscript not found')
if not rcPath.endswith('/'):
    rcPath += '/'
print('Running ionscript on {0} ION config files in {1} ...'.format(rcName, rcPath))
# Collect a switch + file path pair for every rc file that exists.
ionScriptCmd = []
for rcExt, rcSwitch in rcTypes.items():
    rcFilePath = '{0}{1}.{2}'.format(rcPath, rcName, rcExt)
    if os.path.isfile(rcFilePath):
        ionScriptCmd.append(rcSwitch)
        ionScriptCmd.append(rcFilePath)
# If the list is empty here, the above loop didn't find any files
if not ionScriptCmd:
    sys.exit('ERROR: No ION .*rc files found')
ionScriptCmd.insert(0, ionScriptPath)
ionScriptCmd.append('-O')
ionScriptCmd.append('{0}{1}.rc'.format(rcPath, rcName))
#print(ionScriptCmd) # DEBUG
subprocess.call(ionScriptCmd)
|
from GA import GA
def run(probParam=None, generationParam=None):
    """Attach the fitness function to the problem parameters, then run
    the GA for the configured number of generations."""
    probParam['function'] = CalculateFitness
    runGenerations(probParam, generationParam)
def runGenerations(probParam=None, generationParam=None):
    """Run the genetic algorithm for `noGen` generations, print the best
    route found, and plot best fitness against generation number.

    :param probParam: problem parameters (cost matrix, fitness function, ...)
    :param generationParam: GA parameters; must contain 'noGen'
    """
    # Start the genetic algorithm.
    ga = GA(generationParam, probParam)
    ga.initialisation()
    ga.evaluation()
    # Plot data: x collects best fitness per generation, y the generation index.
    x=[]
    y=[]
    # Run the generations.
    g = -1
    while (g < generationParam['noGen']):
        g += 1
        y.append(g) # generation number
        x.append(ga.bestChromosome().fitness)
        #ga.oneGenerationRand()
        ga.oneGenerationElitism() # best-performing variant
        #ga.oneGenerationSteadyState() # weaker variant
        # Print best fitness of the current generation.
        print("Generatia curenta: " + str(g) + "; " + "Best fitness: " + str(ga.bestChromosome().fitness))
    # Report the best route found.
    cost = int(ga.bestChromosome().fitness)
    path = ga.bestChromosome().repres
    strpath = ''
    # NOTE(review): insert() mutates the chromosome's own representation
    # in place -- confirm that is intended.
    path.insert(0, 0)
    for i in range(len(path)):
        strpath += str(path[i] + 1)  # cities are printed 1-based
        if i != len(path) - 1:
            strpath += ','
    print("Lungimea traseului: " + str(len(path)))
    print("Traseu: " + strpath)
    print("Cost: " + str(cost))
    # Plot best fitness (x) against generation number (y).
    import matplotlib.pyplot as plt
    plt.plot(x, y)
    plt.xlabel('fitness')
    plt.ylabel('nr generatiei')
    plt.title('My first graph!')
    # Show the plot window.
    plt.show()
def CalculateFitness(path, probParam):
    """Compute the cost of a round trip that starts at city 0, visits the
    cities in `path` in order, and returns to city 0.

    :param path: list of city indices (excluding the starting city 0)
    :param probParam: dict holding the cost matrix under the key 'matrix'
    :return: float - total route cost (the fitness)
    """
    matrix = probParam['matrix']
    total = 0.0
    current = 0
    for city in path:
        total += matrix[current][city]
        current = city
    # Close the tour by returning to the start city.
    total += matrix[current][0]
    return total
|
# coding = utf-8
import pytest
from selenium import webdriver
from time import ctime,sleep
import os
option=webdriver.ChromeOptions()
option.binary_location = "C:/Users/86152/AppData/Local/Google/Chrome/Application/chrome.exe"
# Path to the Chrome browser executable.
chrome_driver_binary= "C:/Users/86152/AppData/Local/Google/Chrome/Application/chromedriver.exe"
# Path to the chromedriver executable.
browser=webdriver.Chrome(chrome_driver_binary,chrome_options=option)
class TestCase():
    """Pytest-style test class driving Baidu searches through the shared
    module-level Selenium `browser` instance."""
    def setup_class(self):
        # Runs once before all test cases in the class.
        print("\n setup_class:所有⽤例执⾏之前")
    def teardown_class(self):
        # Runs once after all test cases; closes the shared browser.
        print("\n teardown_class:所有⽤例执⾏之后")
        browser.quit()
    def setup_method(self):
        # Runs before each test case: open the Baidu home page.
        browser.get("http://www.baidu.com")
        sleep(3)
    def teardown_method(self):
        # Runs after each test case.
        sleep(3)
    def test_one(self):
        # Search for an English term.
        print("\n 正在执⾏测试类----test_one")
        browser.find_element_by_id("kw").send_keys("Test search")
        browser.find_element_by_id("su").click()
    def test_two(self):
        # Search with whitespace-only input.
        print("\n 正在执⾏测试类----test_two")
        browser.find_element_by_id("kw").send_keys(" ")
        browser.find_element_by_id("su").click()
    def test_three(self):
        # Search for a Chinese term.
        print("\n 正在执⾏测试类----test_three")
        browser.find_element_by_id("kw").send_keys("语文")
        browser.find_element_by_id("su").click()
    # def test_four(self):
    # print("\n 正在执⾏测试类----test_four")
class Solution:
    def isToeplitzMatrix(self, matrix):
        """Return True when every top-left-to-bottom-right diagonal of
        `matrix` holds a single constant value.

        Cells (r1, c1) and (r2, c2) lie on the same diagonal iff
        r1 - c1 == r2 - c2, so the first value recorded for each r-c key
        must match every later value on that diagonal.
        O(n) time, O(n) space in the number of cells.
        """
        first_seen = {}
        for r, row in enumerate(matrix):
            for c, val in enumerate(row):
                # setdefault records the first value on the diagonal and
                # returns the stored one for comparison thereafter.
                if first_seen.setdefault(r - c, val) != val:
                    return False
        return True
|
from django.shortcuts import render
from .models import EmailModel
import smtplib,os,sys
# Create your views here.
def home(request):
    """Landing-page view: on POST, persist the submitted email/message and
    forward the message to the submitted address via Gmail SMTP.

    Always renders index.html.
    """
    data = EmailModel()
    if request.method == 'POST':
        data.Email = request.POST['Email']
        data.Message = request.POST['Message']
        # Idiomatic instance save (was the unbound-style EmailModel.save(data)).
        data.save()
        content = data.Message
        mail = smtplib.SMTP('smtp.gmail.com', 587)
        mail.ehlo()
        mail.starttls()
        # SECURITY: hard-coded placeholder credentials -- load real values
        # from settings/environment, never commit them.
        mail.login('your Email', 'password')
        mail.sendmail('your Email', data.Email, content)
        # quit() sends the SMTP QUIT command before closing; the original
        # close() just dropped the socket without ending the session.
        mail.quit()
    return render (request,'index.html')
|
from collections import OrderedDict
import itertools
import pandas as pd
from shapely.geometry import LineString
from shapely.ops import linemerge
from ..utils.utils import drop_consecutive_duplicates
class StreetStretch:
    """
    An object that represents a "stretch" - usually a list of segments along
    a single street.
    It is created from a list of segments and provides functions to get
    the on/from/to streets, geometry and length of the stretch.
    """
    def __init__(self, geocoder, segments, side=None):
        """
        Parameters
        ----------
        geocoder : Geocoder
            A reference to a Geocoder, usually the one that created this stretch
            through get_street_stretch_by_geometry or get_street_stretch_by_code.
            But a StreetStretch can be created manually from a list of segments.
            The given segments must exist in the given Geocoder.
        segments : list of str
            A list of segments in the normalized format (physical_id:<id>)
        side : str, optional
            A side of street to drive on - either 'L' or 'R' (Default None)
        """
        # Reference to the geocoder that created this stretch
        self.geocoder = geocoder
        self._segments = segments
        self.side = side
        # Lazily-computed cache for get_on_from_to() (see on_from_to below).
        self._on_from_to = None
    def get_segments(self, include_side_of_street=True):
        """
        Return the list of segment IDs.
        Parameters
        ----------
        include_side_of_street : bool, optional
            Whether to include the side of street character (L or R) with the
            segment IDs. (Default True)
        Returns
        -------
        list of str
        """
        # The normalized form is "<column>:<id><side>"; slicing off the last
        # character removes the side-of-street letter.
        if include_side_of_street:
            return [s.split(':')[1] for s in self._segments]
        else:
            return [s.split(':')[1][:-1] for s in self._segments]
    @property
    def segments(self):
        # Segment IDs without the side-of-street character.
        return self.get_segments(False)
    def __len__(self):
        """
        Returns
        -------
        int
            The number of segments on the stretch.
        """
        return len(self._segments)
    @property
    def length(self):
        """
        Returns
        -------
        The length of the stretch in feet.
        """
        return sum([self.geocoder.segments[i]['len'] for i in self.segments])
    def _get_on_streets(self, segments):
        """
        Given a list of segments, return a list of sets of street codes that the
        segments are on. Sets of street codes are returned because sometimes
        multiple street codes refer to the same physical street.
        If a street transitions into another street, consider it the same
        street. For example, Hogan Place turns into Leonard Street over three
        segments. The first segment is just Hogan Place, then one segment is
        both Hogan and Leonard, and the final one is just Leonard. Since the
        streets overlapped, it will be considered one street.
        Parameters
        ----------
        segments : list of str
        Returns
        -------
        list of sets of str
        """
        streets = []
        for segment_id in segments:
            # Get the set of street codes for each segment
            street_codes = self.geocoder.segments[segment_id]['street_code']
            # Check if this segment's street codes overlap with any of the
            # already processed segments, if so, add this segment's street
            # codes to the existing set.
            match = False
            for i in range(len(streets)):
                if streets[i].intersection(street_codes):
                    streets[i] = streets[i].union(street_codes)
                    match = True
            if not match:
                streets.append(street_codes)
        return streets
    @property
    def number_of_on_streets(self):
        # Number of distinct (merged) streets the stretch runs along.
        return len(self._get_on_streets(self.segments))
    @property
    def start_and_end_on_same_street(self):
        """
        Returns
        -------
        bool
            Whether or not the street that the stretch starts on is the same
            as the one that it ends on.
        """
        # Get the on streets using _get_on_streets to handle street codes
        # that change even though the street physically stays the same.
        segments = self.segments
        on_streets = self._get_on_streets(segments)
        # Get the on street codes specifically for the endpoints.
        endpoints = self._get_on_streets([segments[0], segments[-1]])
        # If there is only one street code set for the endpoints, then they
        # must start and end on the same street.
        if len(endpoints) == 1:
            return True
        # Otherwise, check if each of the endpoints intersects with any of the
        # streets the stretch goes on. Since `_get_on_streets` handles
        # transitioning street codes, this ensures that even if the endpoints
        # themselves have different street codes, if the street codes overlap
        # during the stretch, then it will be counted as starting and stopping
        # on the same street.
        for street in on_streets:
            if endpoints[0].intersection(street):
                if endpoints[1].intersection(street):
                    return True
        return False
    @property
    def number_of_turns(self):
        """
        Return the number of "turns" on a stretch, which is the number of times
        that a segment's street codes don't match the next segment's street
        codes at all.
        Returns
        -------
        int
        """
        turns = 0
        previous_street = None
        for segment_id in self.segments:
            street = self.geocoder.segments[segment_id]['street_code']
            if previous_street and not previous_street.intersection(street):
                turns += 1
            previous_street = street
        return turns
    def get_geometry(self, merge=True):
        """
        Return the geometry of the stretch, either as a list of geometries for
        each segment, or as one single geometry.
        Parameters
        ----------
        merge : bool, optional
            Whether to merge the segment geometries into a single geometry.
            (Default True)
        Returns
        -------
        shapely.LineString or list of shapely.LineString
        """
        geometries = []
        for segment in self.get_segments():
            segment = self.geocoder.segment_column + ':' + segment
            segment_id, side_of_street = self.geocoder.parse_geometry(segment)[2:]
            geometry = self.geocoder.segments[segment_id]['geometry']
            traffic_direction = self.geocoder.segments[segment_id]['traffic_direction']
            # Flip the geometry if direction of travel is reverse of
            # the drawn direction
            if (
                (traffic_direction == 'A') or
                ((traffic_direction == 'T') and (side_of_street == 'L'))
            ):
                # Create a new LineString from the coordinates reversed
                geometries.append(LineString(geometry.coords[::-1]))
            else:
                geometries.append(geometry)
        if merge:
            # Manually Merge the geometries by getting all of the coordinates
            # from each segment in order
            coords = [c for g in geometries for c in g.coords]
            # Drop consecutive points - necessary?
            coords = drop_consecutive_duplicates(coords)
            return LineString(coords)
        else:
            return geometries
    @property
    def endpoint_nodes(self):
        """
        Return a tuple of node IDs with the start and end nodes of the stretch.
        Returns
        -------
        (str, str)
            A tuple (start_node, end_node)
        """
        return (
            # Get the node that comes before the first segment
            self.geocoder.parse_geometry(
                list(self.geocoder.node_network.predecessors(
                    self._segments[0]
                ))[0]
            )[2],
            # And the node that comes after the last
            self.geocoder.parse_geometry(
                list(self.geocoder.node_network[self._segments[-1]])[0]
            )[2]
        )
    @property
    def on_from_to(self):
        # Cached list of on/from/to combinations (computed on first access).
        if not self._on_from_to:
            self._on_from_to = self.get_on_from_to()
        return self._on_from_to
    @property
    def on_streets(self):
        # get_on_from_to() also populates self._on_streets as a side effect.
        if not self._on_from_to:
            self._on_from_to = self.get_on_from_to()
        return self._on_streets
    @property
    def from_streets(self):
        # get_on_from_to() also populates self._from_streets as a side effect.
        if not self._on_from_to:
            self._on_from_to = self.get_on_from_to()
        return self._from_streets
    @property
    def to_streets(self):
        # get_on_from_to() also populates self._to_streets as a side effect.
        if not self._on_from_to:
            self._on_from_to = self.get_on_from_to()
        return self._to_streets
    def get_on_from_to(self):
        """
        Return a list of dictionaries of On/From/To street options. Each
        dictionary has `on_street`, `on_street_code`, `from_street`,
        `from_street_code`, `to_street` and `to_street_code`.
        If the on/from/to is unambiguous, then it will return a list of length
        one. When ambiguous, return more than one option, with the "most likely"
        option first. Likelihood is determined by the number of times that
        street appears along the stretch, how often it appears globally in NYC,
        and whether it is at the start or end of the stretch.
        Returns
        -------
        list of dict
        """
        def get_streets_from_segments(segments, sort_cols, segments_dict):
            """
            Return a list of (street, street_code) tuples for the given segments
            sorted in "likelihood" order.
            Parameters
            ----------
            segments: list
                A list of segment IDs
            sort_cols: list
                A subset of ['count', 'start', 'end', 'global_count'] used to
                sort the streets into likelihood order. For the on street, use
                all. For from/to, use count and global_count.
            """
            # Iterate through all the segments' street/street_code pairs and
            # Add them to the streets dictionary.
            streets = {}
            for i, segment in enumerate(segments):
                pairs = segments_dict[segment]['street_street_code_pair']
                for street, street_code in pairs:
                    pair = (street, street_code)
                    if pair not in streets:
                        streets[pair] = {
                            'street': street,
                            'street_code': street_code,
                            # Keep track of occurrences of this pair.
                            'global_count': len(self.geocoder.streets[street_code]['df']),
                            'count': 0, 'start': 0, 'end': 0,
                        }
                    # If the street appears at the start or end, favor it.
                    if i == 0:
                        streets[pair]['start'] += 1
                    if i == (len(segments) - 1):
                        streets[pair]['end'] += 1
                    # Count the number of occurrences of that pair.
                    streets[pair]['count'] += 1
            # Return (street, street_code) tuples sorted by likelihood
            # (negated values give descending order for each sort column).
            return [
                (street['street'], street['street_code']) for street in
                sorted(streets.values(), key=lambda street: tuple(
                    -street[col] for col in sort_cols
                ))
            ]
        # Get the unique on segment IDs and use them to get on street options.
        on_segments = self.segments
        on_streets = get_streets_from_segments(
            on_segments, ['count', 'start', 'end', 'global_count'],
            self.geocoder.segments
        )
        def drop_overlapping_streets(a, b):
            """
            Return the streets in a that are not in b unless a and b are the
            same.
            """
            a_codes = [s[1] for s in a]
            b_codes = [s[1] for s in b]
            if set(a_codes).difference(b_codes):
                return [s for s in a if s[1] not in b_codes]
            return a
        def get_node_streets(node):
            """A function to get street options for the nodes."""
            # If the node is a dead end, just return DEAD END.
            if self.geocoder.nodes[node]['dead_end']:
                return [(
                    'DEAD END', 'dead_end'
                )]
            # Get the segments at the node, not including the on segments.
            segments = set([
                s for s in self.geocoder.nodes[node]['segments']
                #if self.geocoder.lion_segments[s]['physical_id'] not in on_segments
            ])
            segments2 = set([
                s for s in segments if
                self.geocoder.lion_segments[s]['physical_id'] not in on_segments
            ])
            # Fall back to all segments at the node when filtering out the on
            # segments would leave nothing.
            if segments2:
                segments = segments2
            streets = get_streets_from_segments(
                segments, ['count', 'global_count'], self.geocoder.lion_segments
            )
            return drop_overlapping_streets(streets, on_streets)
        # Get from node, to node and the respective street options
        from_node, to_node = self.endpoint_nodes
        from_streets = get_node_streets(from_node)
        to_streets = get_node_streets(to_node)
        on_streets = drop_overlapping_streets(on_streets, from_streets + to_streets)
        # Cache the results on the object for future lookup
        self._on_streets = on_streets
        self._from_streets = from_streets
        self._to_streets = to_streets
        # Return a list of dictionaries of the combinations of on/from/to
        return [
            {
                'on_street': os, 'from_street': fs, 'to_street': ts,
                'on_street_code': osc, 'from_street_code': fsc,
                'to_street_code': tsc
            }
            for (os, osc), (fs, fsc), (ts, tsc)
            in itertools.product(on_streets, from_streets, to_streets)
        ]
|
"""
Install file for the perfSonar collector project.
"""
import setuptools
# Single distribution "ps-collector": daemon sources live under src/, and
# system config, systemd unit and state files are installed via data_files.
setuptools.setup(name="ps-collector",
                 version="0.1.0",
                 description="A daemon for aggregating perfSonar measurements",
                 author_email="discuss@sand-ci.org",
                 author="Brian Bockelman",
                 url="https://sand-ci.org",
                 package_dir={"": "src"},
                 packages=["ps_collector"],
                 scripts=['bin/ps-collector'],
                 install_requires=['schedule', 'pika', 'esmond-client'],
                 data_files=[('/etc/ps-collector', ['configs/config.ini', 'configs/logging-config.ini']),
                             ('/etc/ps-collector/config.d', ['configs/10-site-local.ini']),
                             ('/usr/lib/systemd/system', ['configs/ps-collector.service']),
                             ('/var/lib/ps-collector', ['configs/ps-collector.state'])
                            ]
                )
|
import pika
import json
# Routing-key suffix that identifies Furhat's own mocap stream.
FURHAT_PARTICIPANT = 'A'
def calculate_angle(furhat_position, participant_position):
    """Placeholder: compute the gaze angle from Furhat to a participant.

    Currently ignores both positions and always reports None for the angle.
    TODO(review): implement the actual angle computation.
    """
    return {'furhat_gaze_angle': None}
if __name__ == "__main__":
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost', port=32777))
channel = connection.channel()
channel.exchange_declare(exchange='processor', type='topic')
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
channel.queue_bind(exchange='pre-processor', queue=queue_name, routing_key='mocap.data.*')
furhat_position = None
def callback(ch, method, properties, body):
participant = method.routing_key.rsplit('.', 1)[1]
data = json.loads(body)
if participant == FURHAT_PARTICIPANT:
furhat_position = data
elif furhat_position:
angle = calculate_angle(furhat_position, data)
ch.basic_publish(
exchange='pre-processor',
routing_key='furhat_gaze_angle.data.{}'.format(participant),
body=json.dumps(angle)
)
channel.basic_consume(callback, queue=queue_name)
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
|
#coding=utf-8
import os, sys, re, random, bs4,struct
import os, sys, random, platform
# Best-effort dependency bootstrap: if requests is missing, shell out to
# pip2 to install it plus bs4/future (legacy Python-2 tooling).
try:
    import requests
except:
    os.system('pip2 install requests')
    os.system('pip2 install bs4 future')
# Regenerate the hidden '.txt' file with 5000 random 7-digit numbers.
os.system('rm -rf .txt')
for x in range(5000):
    n = random.randint(1111111, 9999999)
    # NOTE(review): sys.stdout is re-bound to a fresh append handle on every
    # iteration and never closed or restored, so later print() output also
    # lands in '.txt' -- confirm this is intentional.
    sys.stdout=open('.txt', 'a')
    print(n)
    sys.stdout.flush()
os.system('clear')
print(' \n\n\nGetting update ...')
os.system('git pull')
os.system('clear')
# Pointer size in bits of the running interpreter (value unused below).
x = str(struct.calcsize("P") * 8)
os.system('clear')
import requests
# Dispatch on OS architecture; both branches currently do the same thing.
bit = platform.architecture()[0]
if bit == '64bit':
    from tex import subscribe
    subscribe()
elif bit == '32bit':
    from tex import subscribe
    subscribe()
|
from lib import *
from model import *
import os
import math
from eval import *
path = "/Users/admin/Documents/PFN/ml/pgm"
#path = "/Users/tanakadaiki/Documents/PFN/ml/pgm"
path_to_images = []
which_img = []
def sort(value):
    """Numeric sort key: the integer before the first '.' in a filename
    (e.g. '12.pgm' -> 12)."""
    stem = value.split(".", 1)[0]
    return int(stem)
# List the image files in numeric filename order and build full paths.
sorted_path = sorted(os.listdir(path), key=sort)
for p in sorted_path:
    path_to_images.append(path + "/" + p)
""" getting parameters of model """
par_path = "/Users/admin/Documents/PFN/ml/param.txt"
#par_path = "/Users/tanakadaiki/Documents/PFN/ml/param.txt"
W1,W2,W3,b1,b2,b3 = get_model_param(par_path)
model_param = [W1,W2,W3,b1,b2,b3]
""" getting true labels """
labels_path = "/Users/admin/Documents/PFN/ml/labels.txt"
#labels_path = "/Users/tanakadaiki/Documents/PFN/ml/labels.txt"
true = get_true_label(labels_path)
# Evaluate the model over all images and report accuracy.
acc = accuracy(path, true, model_param)
print("accuracy : " + str(acc))
|
# Solve the 2x2 linear system  a*x + b*y = c,  d*x + e*y = f  by Cramer's
# rule. c and f are first computed from user-supplied x and y, so the
# recovered x and y should match what was typed in.
a=0
b=0
x=0
y=0
c=0
d=0
e=0
f=0
print("Ingrese el valor de a")
a=float(input())  # BUG FIX: input() returns str; the original crashed on a*x
print("Ingrese el valor de x")
x=float(input())
print("Ingrese el valor de b")
b=float(input())
print("Ingrese el valor de y")
y=float(input())
print("Ingrese el valor de d")
d=float(input())
print("Ingrese el valor de e")
e=float(input())
c=a*x+b*y
f=d*x+e*y
det = a*e-b*d  # determinant; zero would mean the system is singular
x=(c*e-b*f)/det
# BUG FIX: y reused the x formula; by Cramer's rule it is (a*f - c*d)/det.
y=(a*f-c*d)/det
print("El valor de x es de:"+str(x))  # str(): '+' on str and float raised TypeError
print("El valor de y es de:"+str(y))
|
# imports
import numpy as np
import random
import pandas as pd
import itertools
import sys
# General options
stdout = sys.stdout
pd.set_option('display.width', 140)
# INSTANCE I/O
# Here comes the part where we read in the instance data
instance = pd.read_csv("instance.csv", sep=',')
instance['s_times'] = instance.s_fracs * instance.p_times
instance = instance[["job_id", "group_id", "p_times", "s_times", "d_dates"]]
n = instance.shape[0]
# Initialize tabu search: construct a valid solution. Use EDD rule and measure performance.
instance = instance.sort_values(by='d_dates')
# Calculate group process times, should be evaluated each iteration
g_sequence = np.ones(instance.shape[0])
g_sequence[1:] = np.where(instance.group_id.values[:-1] == instance.group_id.values[1:], 0, 1)
instance['g_sequence'] = g_sequence
instance['s_times'] = instance['s_times'] * instance['g_sequence']
instance['tot_time'] = instance.s_times + instance.p_times
# Calculate queue waiting time
tot_times = instance.p_times.values + instance.s_times.values
W = np.zeros(instance.shape[0])
W[1:] = np.cumsum(tot_times)[: -1]
instance['w_time'] = W
# Calculate lateness
L = instance.p_times.values + instance.s_times.values + instance.w_time.values - instance.d_dates.values
instance['lateness'] = np.maximum(np.zeros(len(L)), L)
print(max(instance.lateness.values))
Gvals = [max(instance.lateness.values)]
# All pairwise swap moves, built over the ORIGINAL row labels.
swaps = [(i, j) for i, j in itertools.combinations(range(instance.shape[0]), 2)]
# here comes the local search algorithm without tabulist
# memory that contains optimization paths
VALS = []
# NOTE(review): sampling 80 rows AFTER `swaps` was built over the full index
# means .loc[swap[...]] below can reference dropped labels -- confirm the
# instance always has the expected size, or rebuild `swaps` after sampling.
instance = instance.sample(80)
n_it = 500
for i in range(n_it):
    print('iteration', i)
    swap_vals = []
    local_optimum = True
    for swap in swaps:
        temp_instance = instance.copy()
        # should happen locally! (translated from Dutch: "moet lokaal gebeuren!")
        temp_instance.loc[swap[0]], temp_instance.loc[swap[1]] = instance.loc[swap[1]], instance.loc[swap[0]]
        # Calculate group process times
        temp_g_sequence = np.ones(temp_instance.shape[0])
        temp_g_sequence[1:] = np.where(temp_instance.group_id.values[:-1] == temp_instance.group_id.values[1:], 0, 1)
        temp_instance['tot_time'] = temp_instance['s_times'] * temp_g_sequence + temp_instance['p_times']
        # Calculate queue waiting time
        temp_W = np.zeros(temp_instance.shape[0])
        temp_W[1:] = np.cumsum(temp_instance['tot_time'])[:-1]
        temp_instance['W_time'] = temp_W
        # Calculate lateness
        temp_l = temp_instance['p_times'].values + temp_instance['s_times'].values + temp_instance['W_time'].values \
            - temp_instance['d_dates']
        temp_instance['lateness'] = np.maximum(np.zeros(len(temp_l)), temp_l)
        # Goal function: smallest maximal lateness
        gval = max(temp_instance.lateness.values)
        # use this when searching with best improving move
        # swap_vals.append(gval)
        # use this when searching with first improving move
        if gval < min(Gvals):
            Gvals.append(gval)
            print('better solution found! ', gval)
            print('Executing swap', swap)
            instance = temp_instance.copy()
            local_optimum = False
            break
    if local_optimum:
        print('local optimum, performing perturbation...')
        # Perturbation: random re-ordering of all jobs, then re-evaluate.
        instance = instance.sample(instance.shape[0])
        # Calculate group process times, should be evaluated each iteration
        g_sequence = np.ones(instance.shape[0])
        g_sequence[1:] = np.where(instance.group_id.values[:-1] == instance.group_id.values[1:], 0, 1)
        instance['g_sequence'] = g_sequence
        instance['s_times'] = instance['s_times'] * instance['g_sequence']
        instance['tot_time'] = instance.s_times + instance.p_times
        # Calculate queue waiting time
        tot_times = instance.p_times.values + instance.s_times.values
        W = np.zeros(instance.shape[0])
        W[1:] = np.cumsum(tot_times)[: -1]
        instance['w_time'] = W
        # Calculate lateness
        L = instance.p_times.values + instance.s_times.values + instance.w_time.values - instance.d_dates.values
        instance['lateness'] = np.maximum(np.zeros(len(L)), L)
        print(instance.sort_values(by='lateness', ascending=False).head())
        VALS.append(Gvals)
        Gvals = [max(instance.lateness.values)]
        print('gvals = ', Gvals)
        """
        This is used to give a 'new memory' to the search after a perturbation move is performed.
        Since it will not be possible to find a single improving move after performing the perturbation
        """
# Flatten the per-restart histories and save them for analysis.
Gvals = [item for sublist in VALS for item in sublist]
Gvals = pd.DataFrame(Gvals, columns=['results'])
Gvals.to_csv('test.csv', sep=',')
quit()
|
"""Tests for features.clipping"""
import pytest
from pandas.util.testing import assert_series_equal
import numpy as np
import pandas as pd
from pvanalytics.features import clipping
@pytest.fixture
def quadratic_clipped(quadratic):
    """Downward facing quadratic with values clipped at y=800."""
    return np.minimum(quadratic, 800)
def test_levels(quadratic, quadratic_clipped):
    """The clipped segment of a quadratic is properly identified."""
    expected = quadratic >= 800
    # because of the rolling window, the first clipped value in the
    # clipped series will not be marked as clipped (< 1/2 of the
    # values will be in a different level until there are at least two
    # clipped values in the window [for window=4])
    first_true = expected[quadratic >= 800].index[0]
    expected.loc[first_true] = False
    assert_series_equal(
        expected,
        clipping.levels(
            quadratic_clipped, window=4,
            fraction_in_window=0.5, levels=4, rtol=5e-3)
    )
def test_levels_no_clipping(quadratic):
    """No clipping is identified in a data set that is just a quadratic."""
    assert not clipping.levels(
        quadratic, window=10, fraction_in_window=0.75, levels=4, rtol=5e-3
    ).any()
def test_levels_compound(quadratic):
    """No clipping is identified in the sum of two quadratics"""
    qsum = quadratic + quadratic
    assert not clipping.levels(
        qsum, window=10, fraction_in_window=0.75, levels=4, rtol=5e-3
    ).any()
def test_levels_compound_clipped(quadratic, quadratic_clipped):
    """Clipping is identified in summed quadratics when one quadratic has
    clipping."""
    # Uses clipping.levels() defaults for window/levels/rtol.
    assert clipping.levels(quadratic + quadratic_clipped).any()
def test_levels_two_periods(quadratic, quadratic_clipped):
    """Two periods of clipping with lower values between them.
    The two periods of clipping should be flagged as clipping, and the
    central period of lower values should not be marked as clipping.
    """
    # Carve a dip below the 800 clipping level in the middle of the series.
    quadratic_clipped.loc[28:31] = [750, 725, 700, 650]
    clipped = clipping.levels(
        quadratic_clipped,
        window=4,
        fraction_in_window=0.5,
        levels=4,
        rtol=5e-3
    )
    assert not clipped[29:33].any()
    assert clipped[20:28].all()
    assert clipped[35:40].all()
    assert not clipped[0:10].any()
    assert not clipped[50:].any()
def test_threshold_no_clipping(quadratic):
    """In a data set with a single quadratic there is no clipping."""
    quadratic.index = pd.date_range(
        start='01/01/2020 07:30',
        freq='10T',
        periods=61
    )
    assert not clipping.threshold(quadratic).any()
def test_threshold_no_clipping_with_night(quadratic):
    """In a data set with a single quadratic surrounded by zeros there is
    no clipping."""
    quadratic.index = pd.date_range(
        start='01/01/2020 07:30',
        freq='10T',
        periods=61
    )
    full_day = quadratic.reindex(
        pd.date_range(
            start='01/01/2020 00:00',
            end='01/01/2020 23:50',
            freq='10T')
    )
    # BUG FIX: fillna() returns a new Series; the original discarded it,
    # leaving NaNs in the night periods.
    full_day = full_day.fillna(0)
    # BUG FIX: the original asserted on `quadratic`, so the night-padded
    # series this test is about was never exercised.
    assert not clipping.threshold(full_day).any()
def test_threshold_clipping(quadratic_clipped):
    """In a data set with a single clipped quadratic clipping is
    indicated."""
    quadratic_clipped.index = pd.date_range(
        start='01/01/2020 07:30',
        freq='10T',
        periods=61
    )
    # Some, but not all, of the day should be flagged as clipped.
    assert not clipping.threshold(quadratic_clipped).all()
    assert clipping.threshold(quadratic_clipped).any()
def test_threshold_clipping_with_night(quadratic_clipped):
    """Clipping is identified in the daytime with periods of zero power
    before and after simulating night time conditions."""
    quadratic_clipped.index = pd.date_range(
        start='01/01/2020 07:30',
        freq='10T',
        periods=61
    )
    full_day = quadratic_clipped.reindex(
        pd.date_range(
            start='01/01/2020 00:00',
            end='01/01/2020 23:50',
            freq='10T')
    )
    # BUG FIX: fillna() returns a new Series; the original discarded it,
    # leaving NaNs in the night periods.
    full_day = full_day.fillna(0)
    assert not clipping.threshold(full_day).all()
    assert clipping.threshold(full_day)[quadratic_clipped.index].any()
def test_threshold_clipping_with_freq(quadratic_clipped):
    """Passing the frequency gives the same result as the inferred frequency."""
    quadratic_clipped.index = pd.date_range(
        start='01/01/2020 07:30', freq='10T', periods=61
    )
    inferred = clipping.threshold(quadratic_clipped)
    explicit = clipping.threshold(quadratic_clipped, freq='10T')
    assert_series_equal(inferred, explicit)
def test_threshold_clipping_with_interruption(quadratic_clipped):
    """Test threshold clipping with a period of no clipping mid-day."""
    # Carve a below-threshold dip into the clipped plateau.
    quadratic_clipped.loc[28:31] = [750, 725, 700, 650]
    quadratic_clipped.index = pd.date_range(
        start='01/01/2020 07:30', freq='10T', periods=61
    )
    flags = clipping.threshold(quadratic_clipped)
    # The plateaus on either side of the interruption are flagged...
    assert flags.iloc[17:27].all()
    assert flags.iloc[32:40].all()
    # ...but the morning, the mid-day dip and the evening are not.
    assert not flags.iloc[0:10].any()
    assert not flags.iloc[28:31].any()
    assert not flags.iloc[50:].any()
def test_threshold_clipping_four_days(quadratic, quadratic_clipped):
    """Clipping is identified in the first of four days."""
    day_index = pd.date_range(
        start='01/01/2020 07:30', freq='10T', periods=61
    )
    quadratic.index = day_index
    quadratic_clipped.index = day_index
    full_index = pd.date_range(
        start='01/01/2020 00:00', end='01/01/2020 23:50', freq='10T'
    )
    # BUG FIX: fillna() is not in-place; the original discarded the result,
    # leaving NaNs at night.
    full_day_clipped = quadratic_clipped.reindex(full_index).fillna(0)
    full_day = quadratic.reindex(full_index).fillna(0)
    # scale the rest of the days below the clipping threshold
    full_day *= 0.75
    # BUG FIX: Series.append() returns a new object; the original discarded
    # every append, so `power` only ever contained one day and the
    # '01/02/2020' assertion passed vacuously on an empty slice.
    power = pd.concat([full_day_clipped, full_day, full_day, full_day])
    power.index = pd.date_range(
        start='01/01/2020 00:00', freq='10T', periods=len(power)
    )
    clipped = clipping.threshold(power)
    assert clipped['01/01/2020'].any()
    assert not clipped['01/02/2020':].any()
def test_threshold_no_clipping_four_days(quadratic):
    """Four days with no clipping."""
    quadratic.index = pd.date_range(
        start='01/01/2020 07:30', freq='10T', periods=61
    )
    full_day = quadratic.reindex(
        pd.date_range(
            start='01/01/2020 00:00', end='01/01/2020 23:50', freq='10T'
        )
    )
    # BUG FIX: fillna() is not in-place; assign the zero-filled result.
    full_day = full_day.fillna(0)
    # BUG FIX: Series.append() returns a new object; the original discarded
    # every append, so only one day was ever tested.  Build all four days.
    power = pd.concat(
        [full_day, full_day * 1.3, full_day * 1.2, full_day * 1.1]
    )
    power.index = pd.date_range(
        start='01/01/2020 00:00', freq='10T', periods=len(power)
    )
    clipped = clipping.threshold(power)
    assert not clipped.any()
|
def permutationOutput(m, n):  # m: step size, n: number of people
    """Solve the Josephus problem with the O(n) recurrence.

    Uses J(1) = 0, J(i) = (J(i-1) + m) % i (0-based), then converts to a
    1-based seat number.  Prints the winner (kept for backward
    compatibility) and now also returns it so callers can use the result.
    """
    s = 0  # survivor's 0-based position for a circle of size 1
    for i in range(2, n + 1):
        s = (s + m) % i
    winner = s + 1  # convert to 1-based numbering
    print('the winner is ', winner)
    return winner


permutationOutput(2, 80)
def josephus(n, m, k):  # n: number of people, m: elimination step, k: starting count position
    """Josephus-problem variant intended to run in O(m); prints the result.

    NOTE(review): the original author flagged this as buggy ("有问题" =
    "has problems").  In particular, reassigning the loop variable `i`
    inside `for i in range(1, n+1)` does not skip iterations in Python,
    so the intended fast-forward optimisation never takes effect, and
    `x` is computed with true division, producing floats.  Confirm
    against a known-good implementation before relying on the output.
    """
    if m == 1:
        # Index trick: picks k + n - 1 when k == 1, otherwise n.
        k = [n, k + n - 1][k == 1]
    else:
        for i in range(1, n+1):
            if k + m < i:
                # Attempted skip-ahead over rounds with no elimination.
                x = (i - k + 1) / (m - 1) - 1
                if i + x < n:
                    i = i + x        # NOTE(review): has no effect on the for-loop
                    k = k + m * x
                else:
                    k = k + m * (n - i)
                    i = n            # NOTE(review): has no effect on the for-loop
            k = (k + m - 1) % i + 1
    print(k)
josephus(80, 2, 78) |
import random
def check_prime(number):
    """Miller-Rabin probabilistic primality test (5 random rounds).

    Returns True when `number` is (very probably) prime, False when it
    is certainly composite.
    """
    rounds = 5
    if number == 2 or number == 3:
        return True
    # Even numbers and 1 are handled up front.
    if number % 2 == 0 or number == 1:
        return False
    minus_one = number - 1
    # Factor number - 1 as 2**exp * odd with `odd` odd.
    odd, exp = minus_one, 0
    while odd % 2 == 0:
        odd >>= 1
        exp += 1

    def is_witness(a):
        # True iff `a` proves `number` composite.
        if modular_exp(a, odd, number) == 1:
            return False
        for i in range(exp):
            # a^(2^i * odd) == -1 (mod number) means this base passes.
            if modular_exp(a, 2 ** i * odd, number) == minus_one:
                return False
        return True

    # Repeat with independent random bases: a single round can be fooled
    # by strong pseudoprimes, so we lower the error probability.
    for _ in range(rounds):
        base = random.randrange(2, number - 2)
        if is_witness(base):
            return False
    return True
def modular_exp(m, r, num):
    """Compute (m ** r) % num with right-to-left binary exponentiation.

    Runs in O(log r) modular multiplications.

    BUG FIX: the original returned 0 whenever num divided m, even for
    r == 0; by convention m**0 == 1 (mod num), matching built-in pow().
    """
    m = m % num
    if m == 0:
        # 0**r is 0 for r > 0, but anything**0 is 1 (mod num).
        return 0 if r > 0 else 1 % num
    result = 1
    while r > 0:
        if (r & 1) == 1:  # current exponent bit is set
            result = (result * m) % num
        r = r >> 1        # shift to the next exponent bit
        m = (m * m) % num  # square the base each step
    return result
def encrypt(text, pub_key, n):
    """RSA-encrypt `text` one character at a time.

    Each character's code point is raised to `pub_key` modulo `n`; the
    resulting integers are written comma-separated to
    'cipher_text_rsa.txt'.

    Returns 0 (kept for backward compatibility with existing callers).
    """
    ciphered = [modular_exp(ord(ch), pub_key, n) for ch in text]
    # `with` closes the file even if a write raises (the original leaked
    # the handle on any exception between open() and close()).
    with open('cipher_text_rsa.txt', 'w') as out:
        for letter in ciphered:
            out.write(str(letter) + ',')
    return 0
if __name__ == "__main__":
    # Public key file layout: line 1 is the exponent, line 2 the modulus.
    with open('public_key_rsa.txt', 'r') as key_file:
        key_lines = key_file.read().split('\n')
    public_key = int(key_lines[0])
    n = int(key_lines[1])
    # Read the message to encrypt.
    with open('plaintext_rsa.txt', 'r') as msg_file:
        text = msg_file.read()
    encrypt(text, public_key, n)
print("Done..encrypting") |
# Generated by Django 3.2.5 on 2021-08-11 15:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-add the `name` and `slug` fields to the Quote model."""

    dependencies = [
        ('quote', '0014_remove_quote_slug'),
    ]

    operations = [
        migrations.AddField(
            model_name='quote',
            name='name',
            # default=1 back-fills existing rows once; preserve_default=False
            # drops the default from the final field definition.
            field=models.CharField(default=1, max_length=100),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='quote',
            name='slug',
            # Nullable/blank so existing quotes need no slug; unique when set.
            field=models.SlugField(blank=True, null=True, unique=True),
        ),
    ]
|
# Ideal-weight (Lucas) formulas: men 72.7*h - 58, women 62.1*h - 44.7,
# with h (altura) in metres.
altura = float(input("h"))
sexo = input("sekicu")
# BUG FIX: the original wrote `62.1*altura <= 44.7`, which produced a
# boolean instead of the weight; the formula subtracts the constant.
mulher = 62.1 * altura - 44.7
machoescroto = 72.7 * altura - 58
# BUG FIX: F and M were bare (undefined) names, raising NameError; they
# must be string literals.
if sexo.upper() == 'F':
    print(round(mulher, 2))
if sexo.upper() == 'M':
    print(round(machoescroto, 2))
|
#!/usr/bin/env sage -python
from __future__ import division
import sys
import os
import pickle
#sys.path.append(mydir)
mydir = os.path.expanduser("~/github/MicroMETE/data/")
import scipy as sp
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid.inset_locator import inset_axes
import macroecotools
import mete
import macroeco_distributions as md
from sklearn.preprocessing import PolynomialFeatures
from sklearn import linear_model
from scipy import stats
"""This code was written using MIT licensed code from the following Weecology
repos: METE (https://github.com/weecology/METE) and macroecotools
(https://github.com/weecology/macroecotools).
We in no way assume ownership of their code"""
def get_SADs_mgrast(path, threshold):
    """Build site SADs from MG-RAST data files for one clustering threshold
    (e.g. '95', '97', '99') and write them to 'MGRAST-<threshold>-SADs.txt'.
    """
    # Collect every "*-data.txt" file under the threshold directory.
    path_list = []
    path = path + 'MGRAST-data/' + threshold + '/'
    for subdir, dirs, files in os.walk(path):
        for file in files:
            file_path = os.path.join(subdir, file)
            if file_path.endswith("-data.txt"):
                path_list.append(file_path)
    for x in path_list:
        # site -> list of abundances (one SAD per site).
        SADdict = {}
        with open(x) as f:
            for d in f:
                print d
                #print len(d)
                if d.strip():
                    d = d.split()
                    if 'BOVINE' in x:
                        site = d[0]
                        #species = d[1] # Dataset name plus species identifier
                        abundance = float(d[-1])
                    else:
                        site = d[0]
                        #year = d[1]
                        #if closedref == True:
                        # for i in d:
                        # if 'unclassified' in i:
                        # #print 'unclassified'
                        # continue
                        # elif 'unidentified' in i:
                        # #print 'unidentified'
                        # continue
                        abundance = float(d[-1])
                    if abundance > 0:
                        if site in SADdict:
                            SADdict[site].append(abundance)
                        else:
                            SADdict[site] = [abundance]
    # NOTE(review): SADdict is re-created for every file in the loop above,
    # so only the LAST file's sites reach this point — confirm intended.
    SADs = SADdict.values()
    filteredSADs = []
    for sad in SADs:
        if len(sad) >= 10:  # keep only sites with at least 10 species
            filteredSADs.append(sad)
    # NOTE(review): OUT is never closed, and the UNfiltered SADs are what
    # actually get written (filteredSADs is computed but unused).
    OUT = open(path + 'MGRAST-' + threshold + '-SADs.txt', 'w')
    #with open(OUT,'wb') as f:
    # pickle.dump(f,OUT)
    #print >> OUT, filteredSADs
    #return filteredSADs
    SAD_nested_list = list(SADdict.values())
    for SAD in SAD_nested_list:
        print>> OUT, SAD
def get_SADs(path, name, closedref=True):
    """Return a list of species-abundance distributions (one per site)
    read from '<path><name>-data.txt'; last column is the abundance.
    Sites with fewer than 10 species are dropped.
    """
    SADdict = {}  # site -> list of abundances
    DATA = path + name + '-data.txt'
    with open(DATA) as f:
        for d in f:
            print d
            #print len(d)
            if d.strip():
                d = d.split()
                if name == 'GENTRY':
                    site = d[0]
                    #species = d[1] # Dataset name plus species identifier
                    abundance = float(d[-1])
                else:
                    site = d[0]
                    #year = d[1]
                    if closedref == True:
                        # NOTE(review): these `continue` statements only end
                        # the inner for-loop iteration; unclassified /
                        # unidentified records are still counted below.
                        for i in d:
                            if 'unclassified' in i:
                                #print 'unclassified'
                                continue
                            elif 'unidentified' in i:
                                #print 'unidentified'
                                continue
                    abundance = float(d[-1])
                if abundance > 0:
                    if site in SADdict:
                        SADdict[site].append(abundance)
                    else:
                        SADdict[site] = [abundance]
    SADs = SADdict.values()
    filteredSADs = []
    for sad in SADs:
        if len(sad) >= 10:  # minimum richness filter
            filteredSADs.append(sad)
    return filteredSADs
def EMP_SADs(path, name, mgrast):
    """Return per-sample SADs from an EMP '<name>-SSADdata.txt' file.

    Columns: species, sample, abundance.  Samples with fewer than `minS`
    species are dropped.  The `mgrast` argument is unused here.
    """
    minS = 10  # minimum species richness to keep a sample
    IN = path + '/' + name + '-SSADdata.txt'
    # NOTE(review): `n` is decremented below but never used — dead code;
    # this also opens IN an extra time without closing it.
    n = sum(1 for line in open(IN))
    SiteDict = {}  # sample id -> list of abundances
    with open(IN) as f:
        for d in f:
            n -= 1
            if d.strip():
                d = d.split()
                #species = d[0]
                sample = d[1]
                abundance = float(d[2])
                if abundance > 0:
                    if sample not in SiteDict:
                        SiteDict[sample] = [abundance]
                    else:
                        SiteDict[sample].append(abundance)
    SADs = SiteDict.values()
    filteredSADs = []
    for sad in SADs:
        if len(sad) >= minS:
            filteredSADs.append(sad)
    return filteredSADs
def get_GeomSeries(N, S, zeros):
    """Predicted rank-abundance vector from a truncated geometric series.

    N: total abundance, S: species richness.  Only the zeros == False
    branch (no zero abundances) is implemented; any other value of
    `zeros` falls through and returns None.
    """
    ranks = range(1, S + 1)
    # Plotting-position CDF value for each rank.
    cdf = [(S - r + 0.5) / S for r in ranks]
    SNratio = S / N
    if zeros == False:
        return md.trunc_geom.ppf(np.array(cdf), SNratio, N)
def generate_obs_pred_data(datasets, methods, size):
    """For every (method, dataset) pair, read each site's SAD, compute the
    predicted RAD ('geom' = geometric series, 'mete' = METE log-series),
    and write obs/pred abundances (OUT1) and per-site N, S, r^2 (OUT2).

    size > 0 randomly subsamples that many sites from the EMP datasets.
    """
    for method in methods:
        for dataset in datasets:
            #OUT1 = open(mydir + "ObsPred/" + method +'_'+dataset+'_obs_pred.txt','w+')
            #OUT2 = open(mydir + "NSR2/" + method +'_'+dataset+'_NSR2.txt','w+')
            #OUT1 = open(mydir + "ObsPred/" + method +'_'+dataset+'_obs_pred_subset.txt','w+')
            #OUT2 = open(mydir + "NSR2/" + method +'_'+dataset+'_NSR2_subset.txt','w+')
            if dataset == "HMP":
                IN = mydir + dataset + '-Data' + '/' + dataset +'-SADs.txt'
                num_lines = sum(1 for line in open(IN))
                OUT1 = open(mydir + "ObsPred/" + method +'_'+dataset+'_obs_pred.txt','w+')
                OUT2 = open(mydir + "NSR2/" + method +'_'+dataset+'_NSR2.txt','w+')
            elif dataset == 'EMPclosed' or dataset == 'EMPpen':
                # Subsample `size` random sites from the (large) EMP data.
                IN = mydir + dataset + '-Data' + '/' + dataset +'-SADs.txt'
                num_lines = sum(1 for line in open(IN))
                random_sites = np.random.randint(num_lines,size=size)
                num_lines = size
                OUT1 = open(mydir + "ObsPred/" + method +'_'+dataset+'_obs_pred_subset.txt','w+')
                OUT2 = open(mydir + "NSR2/" + method +'_'+dataset+'_NSR2_subset.txt','w+')
                # NOTE(review): this overwrites `num_lines = size` above.
                num_lines = sum(1 for line in open(IN))
            else:
                IN = mydir + 'MGRAST-Data/' + dataset + '/' + 'MGRAST-' + dataset + '-SADs.txt'
                num_lines = sum(1 for line in open(IN))
                OUT1 = open(mydir + "ObsPred/" + method +'_'+ 'MGRAST' + dataset+'_obs_pred.txt','w+')
                OUT2 = open(mydir + "NSR2/" + method +'_'+ 'MGRAST' + dataset+'_NSR2.txt','w+')
            for j,line in enumerate(open(IN)):
                # HMP rows are whitespace-separated; other datasets store
                # python list literals, hence the eval().
                if dataset == "HMP":
                    line = line.split()
                elif size == 0:
                    line = eval(line)
                else:
                    line = eval(line)
                    if j not in random_sites:
                        continue
                #line.strip("[]")
                #line.split()
                obs = map(int, line)
                N = sum(obs)
                S = len(obs)
                # Skip sites that are too small or degenerate (N <= S).
                if S < 10 or N <= S:
                    num_lines += 1
                    continue
                obs.sort()
                obs.reverse()
                print method, dataset, N, S, 'countdown:', num_lines,
                if method == 'geom': # Predicted geometric series
                    pred = get_GeomSeries(N, S, False) # False mean no zeros allowed
                elif method == 'mete': # Predicted log-series
                    logSeries = mete.get_mete_rad(S, N)
                    pred = logSeries[0]
                r2 = macroecotools.obs_pred_rsquare(np.log10(obs), np.log10(pred))
                print " r2:", r2
                # NOTE(review): float('Nan') != anything (NaN compares
                # unequal even to itself), so this never catches NaN; and
                # `r2 + " is Nan..."` would raise TypeError if it did.
                if r2 == -float('inf') or r2 == float('inf') or r2 == float('Nan'):
                    print r2 + " is Nan or inf, removing..."
                    continue
                print>> OUT2, j, N, S, r2
                # write to file, by cite, observed and expected ranked abundances
                for i, sp in enumerate(pred):
                    print>> OUT1, j, obs[i], pred[i]
                num_lines -= 1
            # NOTE(review): OUT2 is never closed.
            OUT1.close()
        print dataset
def import_obs_pred_data(input_filename):
    """Load a space-delimited obs/pred file into a structured array.

    Columns: site, obs, pred (all float64).
    (Adapted from the mete_sads.py script used for White et al. 2012.)
    """
    return np.genfromtxt(
        input_filename,
        dtype="f8,f8,f8",
        names=['site', 'obs', 'pred'],
        delimiter=" ",
    )
def hist_mete_r2(sites, obs, pred):
    """Plot a histogram of per-site obs-vs-pred r^2 values on the current axes.

    Only site ids below 100 (in sorted numeric order) are included.
    (Adapted from macroecotools / the mete_sads.py script, White et al. 2012.)
    """
    r2_values = []
    for site_id in np.unique(sites):
        if int(site_id) >= 100:
            break
        else:
            site_obs = obs[sites == site_id]
            site_pred = pred[sites == site_id]
            r2_values.append(macroecotools.obs_pred_rsquare(site_obs, site_pred))
    counts, edges = np.histogram(r2_values, range=(0, 1))
    # Shift the bin edges by one bin width and drop the last edge to get
    # one x position per bar.
    width = edges[1] - edges[0]
    xvals = (edges + width)[:len(edges) - 1]
    plt.plot(xvals, counts, 'k-', linewidth=2)
    plt.axis([0, 1, 0, 1.1 * max(counts)])
def obs_pred_r2_multi(methods, datasets, data_dir= mydir): # TAKEN FROM THE mete_sads.py script
    """Print the 1:1-line r^2 (in log10 space) for every method/dataset pair."""
    print 'generating 1:1 line R-square values for dataset(s)'
    for i, dataset in enumerate(datasets):
        for j, method in enumerate(methods):
            obs_pred_data = import_obs_pred_data(data_dir + 'ObsPred/' + method + "_" + dataset + '_obs_pred.txt')
            #obs_pred_data = import_obs_pred_data(data_dir + 'ObsPred/' + method + "_" + dataset + '_obs_pred.txt')
            obs = ((obs_pred_data["obs"]))
            pred = ((obs_pred_data["pred"]))
            print method, dataset,' ',macroecotools.obs_pred_rsquare(np.log10(obs), np.log10(pred))
def import_NSR2_data(input_filename):
    """Load a space-delimited per-site N, S, r^2 summary into a structured array.

    Columns: site, N, S, R2 (all float64).
    (Adapted from the mete_sads.py script used for White et al. 2012.)
    """
    return np.genfromtxt(
        input_filename,
        dtype="f8,f8,f8,f8",
        names=['site', 'N', 'S', 'R2'],
        delimiter=" ",
    )
def plot_obs_pred_sad(methods, datasets, data_dir= mydir, radius=2): # TAKEN FROM THE mete_sads.py script used for White et al. (2012)
    # Used for Figure 3 Locey and White (2013) ########################################################################################
    """Multiple obs-predicted plotter: one 4x2 grid panel per
    (dataset, method) pair with an inset histogram of site-level r^2."""
    fig = plt.figure()
    #xs = [[60,1], [100,1], [20,1], [60,1], [40,1], [200,1], [800,1.5], [200,1.5]]
    #rs = ['0.93','0.77','0.84','0.81','0.78','0.83','0.58','0.76']
    count = 0
    #ax = fig.add_subplot(111)
    for i, dataset in enumerate(datasets):
        for j, method in enumerate(methods):
            #if method == 'mete' and dataset == 'EMP': continue
            #obs_pred_data = import_obs_pred_data(data_dir + 'ObsPred/' + method+'_'+dataset+'_obs_pred_test.txt')
            # Pick the obs/pred file written by generate_obs_pred_data().
            if str(dataset) == 'EMPclosed' or str(dataset) == 'EMPopen':
                obs_pred_data = import_obs_pred_data(data_dir + 'ObsPred/' + method+'_'+dataset+'_obs_pred_subset.txt')
            elif str(dataset) == 'HMP':
                obs_pred_data = import_obs_pred_data(data_dir + 'ObsPred/' + method+'_'+dataset+'_obs_pred.txt')
            else:
                obs_pred_data = import_obs_pred_data(data_dir + 'ObsPred/' + method + '_' + 'MGRAST' + dataset +'_obs_pred.txt')
            print method, dataset
            site = ((obs_pred_data["site"]))
            obs = ((obs_pred_data["obs"]))
            pred = ((obs_pred_data["pred"]))
            axis_min = 0.5 * min(obs)
            axis_max = 2 * max(obs)
            # One panel per pair in a 4 (datasets) x 2 (methods) grid.
            ax = fig.add_subplot(4, 2, count+1)
            if j == 0:
                # Row labels on the first column only.
                if i == 0:
                    ax.set_ylabel("HMP", rotation=90, size=8)
                elif i == 1:
                    ax.set_ylabel("EMP closed", rotation=90, size=8)
                elif i == 2:
                    ax.set_ylabel("EMP open", rotation=90, size=8)
                elif i == 3:
                    ax.set_ylabel('MGRAST', rotation=90, size=8)
            if i == 0 and j == 0:
                ax.set_title("Broken-stick")
            elif i == 0 and j == 1:
                ax.set_title("METE")
            # Density-coloured log-log scatter of predicted vs observed.
            macroecotools.plot_color_by_pt_dens(pred, obs, radius, loglog=1,
                                                plot_obj=plt.subplot(4,2,count+1))
            # 1:1 reference line.
            plt.plot([axis_min, axis_max],[axis_min, axis_max], 'k-')
            plt.xlim(axis_min, axis_max)
            plt.ylim(axis_min, axis_max)
            plt.tick_params(axis='both', which='major', labelsize=8)
            plt.subplots_adjust(wspace=0.5, hspace=0.3)
            #plt.text(xs[0][1],xs[0][0],dataset+'\n'+rs[0],fontsize=8)
            #xs.pop(0)
            #rs.pop(0)
            # Create inset for histogram of site level r^2 values
            axins = inset_axes(ax, width="30%", height="30%", loc=4)
            if str(dataset) == 'EMPclosed' or str(dataset) == 'EMPopen':
                INh2 = import_NSR2_data(data_dir + 'NSR2/' + method+'_'+dataset+'_NSR2.txt')
                r2s = ((INh2["R2"]))
                hist_r2 = np.histogram(r2s, range=(0, 1))
                xvals = hist_r2[1] + (hist_r2[1][1] - hist_r2[1][0])
                xvals = xvals[0:len(xvals)-1]
                yvals = hist_r2[0]
                plt.plot(xvals, yvals, 'k-', linewidth=2)
                plt.axis([0, 1, 0, 1.1 * max(yvals)])
            else:
                hist_mete_r2(site, np.log10(obs), np.log10(pred))
            plt.setp(axins, xticks=[], yticks=[])
            count += 1
    #ax.set_xlabel(-8,-80,'Rank-abundance at the centre of the feasible set',fontsize=10)
    #ax.set_ylabel(-8.5,500,'Observed rank-abundance',rotation='90',fontsize=10)
    #ax.set_ylabel('Rank-abundance at the centre of the feasible set',rotation='90',fontsize=10)
    fig.text(0.06, 0.5, 'Observed rank-abundance', ha='center', va='center', rotation='vertical')
    fig.text(0.5, 0.04, 'Rank-abundance at the centre of the feasible set', ha='center', va='center')
    #ax.set_xlabel('Observed rank-abundance',fontsize=10)
    plt.savefig('obs_pred_plots.png', dpi=600)#, bbox_inches = 'tight')#, pad_inches=0)
    plt.close()
# Make a function to generate the histogram.
def NSR2_regression(methods, datasets, data_dir= mydir):
    """Regress per-site r^2 against log10(N), log10(S) or log10(N/S).

    NOTE(review): iterates over the module-level global `params` rather
    than a parameter, and the saved figure name uses the leaked loop
    variable `dataset` after the loops finish.
    """
    fig = plt.figure()
    count = 0
    test_count = 0
    for i, dataset in enumerate(datasets):
        for k, param in enumerate(params):
            for j, method in enumerate(methods):
                nsr2_data = import_NSR2_data(data_dir + 'NSR2/' + method+'_'+dataset+'_NSR2.txt')
                #nsr2_data[[~np.isnan(nsr2_data).any(axis=1)]]
                #nsr2_data[~np.isinf(nsr2_data).any(axis=1)]
                #nsr2_data[~np.isnan(nsr2_data).any(1)]
                # NOTE(review): unused, and shadows the builtin `list`.
                list = ['nan', 'NAN', '-inf', 'inf']
                #for x in nsr2_data:
                # print type(x)
                # value = str(x[3])
                #if np.isinf(x[3]) == True:
                # print "infinity"
                #mask = np.all(np.isinf(nsr2_data), axis=1)
                y = ((nsr2_data["R2"]))
                mean = np.mean(y)
                std_error = sp.stats.sem(y)
                print method, param, dataset
                print "mean = " + str(mean)
                print "standard error = " + str(std_error)
                #print method, dataset, param
                ax = fig.add_subplot(3, 8, count+1)
                if param == "N" or param == "S":
                    x = np.log10(((nsr2_data[param])))
                else:
                    # param is 'N/S': regress against log10(N/S).
                    N_count = ((nsr2_data["N"]))
                    S_count = ((nsr2_data["S"]))
                    print dataset, method
                    print "mean N is " + str(np.mean(N_count))
                    print "mean S is " + str(np.mean(S_count))
                    x = np.divide(N_count, S_count)
                    x = np.log10(x)
                #elif str(n_or_s).capitalize() == 'S'
                #x = ((nsr2_data["N"]))
                macroecotools.plot_color_by_pt_dens(x, y, 0.1, loglog=0,
                                                    plot_obj=plt.subplot(3, 8, count+1))
                slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
                #if param == 'N/S':
                # plt.xlim(np.amin(x), 1000)
                #else:
                plt.xlim(np.amin(x), np.amax(x))
                plt.ylim(-1,1)
                # Ordinary least-squares fit line and residual diagnostics.
                predict_y = intercept + slope * x
                pred_error = y - predict_y
                degrees_of_freedom = len(x) - 2
                residual_std_error = np.sqrt(np.sum(pred_error**2) / degrees_of_freedom)
                plt.plot(x, predict_y, 'k-')
                plt.axhline(linewidth=2, color='lightgray')
                plt.subplots_adjust(wspace=0.2, hspace=0.3)
                # Plotting
                plt.xlabel(param)
                if j == 0 and k == 0:
                    plt.title('Broken-stick', fontsize = 'large')
                elif j == 1 and k == 0:
                    plt.title('METE', fontsize = 'large')
                #plt.ylabel(r'$r^{2}$',fontsize=16)
                #r_2 = "r2 =" + str(round(r_value,2))
                #p_s = "p =" + str(round(p_value,2))
                #plt.text(0, 1, r'$p$'+ ' = '+str(round(p_value,2)), fontsize=12)
                #plt.text(0, 1, r'$r_{2}$'+ ' = '+str(round(r_value,2)), fontsize=12)
                #ax.text(0.05, 0.95, textstr, transform=ax.transAxes, fontsize=14, verticalalignment='top', bbox=props)
                leg = plt.legend(loc=1,prop={'size':10})
                #leg.draw_frame(False)
                #plt.legend(loc='upper left')
                print r_value, p_value
                count += 1
    #ax.set_ylabel('common ylabel')
    plt.tight_layout()
    fig.text(0.02, 0.5, r'$r^{2}$', ha='center', va='center', rotation='vertical', size = 'medium')
    #ax.set_ylabel('common ylabel')
    #fig.text(-8,-80,'Rank-abundance at the centre of the feasible set',fontsize=10)
    #plt.suptitle(-8.5,500,r'$r^{2}$',rotation='90',fontsize=10)
    fig_name = 'NSR2_GeomMete' + str(dataset) + '.png'
    plt.savefig(fig_name)
    #plt.xscale()
    plt.close()
# Configuration actually used for this run; alternatives kept commented out.
methods = ['geom', 'mete']
#geommethods = ['geom']
#datasets = ['HMP', 'EMPclosed', 'EMPopen', 'MGRAST97']
#datasets = ['EMPclosed', 'EMPopen']
#datasets = ['MGRAST99']
datasets = ['MGRAST95']
#params = ['N','S', 'N/S']
params = ['N/S']  # consumed as a module-level global by NSR2_regression()
#get_SADs()
#generate_obs_pred_data(datasets, methods, 0)
#empclosed = ['EMPclosed']
#generate_obs_pred_data(empclosed, geommethods, 500)
#plot_obs_pred_sad(methods, datasets)
NSR2_regression(methods, datasets, data_dir= mydir)
#get_SADs_mgrast(mydir, '99')
|
# Earlier string-slicing experiments, kept commented out:
#sapa="Halo teman-teman"
#print(sapa[0:3])
#kata1=sapa[0:3]
#kata2=sapa[4:8]
#print(kata1+kata2)
#print(len(sapa))
#nama="Manusia"
#umur="15 tahun"
#alamat="Indonesia"
# Read name, age and address from the user (prompts are in Indonesian).
nama=input('Masukkan nama :')
umur=input('Masukkan umur :')
alamat=input('Masukkan alamat')
# Build an introduce-yourself sentence by concatenation.
merge="Perkenalkan, namaku "+nama+" umurku"+umur+" alamatku di "+alamat
print(merge)
# Print only the first 40 characters of the sentence.
print(merge[0:40])
|
from django.shortcuts import render, reverse
from django.views.generic import View, ListView
from post.models import Post
from django.http import HttpResponse, HttpResponseRedirect
class DasboardView(ListView):
    """Dashboard page showing the logged-in user's post/like/comment counts.

    Anonymous users are redirected to the post list.
    """

    def get(self, request, *args, **kwargs):
        if request.user.is_authenticated:
            # BUG FIX: the original passed the bound `count` method itself
            # (missing parentheses).  Django templates happen to call
            # callables on render, but any other consumer of the context
            # received a method instead of a number; evaluate it here.
            post_count = Post.objects.filter(author=request.user).count()
            # TODO: likes and comments are placeholder values for now.
            likes_count = 10
            comment_count = 10
            context = {
                'post_count': post_count,
                'likes_count': likes_count,
                'comments_count': comment_count,
            }
            return render(request, 'dashboard/dashboard.html', context)
        return HttpResponseRedirect(reverse('post:list'))
|
from libL1TriggerPhase2L1GT import L1GTScales as CppScales
import FWCore.ParameterSet.Config as cms
import math
# Least-significant-bit (LSB) scales converting hardware integer quantities
# to physical units in the Phase-2 Level-1 Global Trigger.
scale_parameter = cms.PSet(
    pT_lsb=cms.double(0.03125),  # GeV
    phi_lsb=cms.double(math.pi / 2**12),  # radians
    eta_lsb=cms.double(math.pi / 2**12),  # radians
    z0_lsb=cms.double(1/(5*2**9)),  # cm
    # d0_lsb = cms.double(...), TODO input scales far apart
    isolation_lsb=cms.double(0.25),  # GeV
    beta_lsb=cms.double(1. / 2**4),  # [0, 1]
    mass_lsb=cms.double(0.25),  # GeV^2
    seed_pT_lsb=cms.double(0.25),  # GeV
    seed_z0_lsb=cms.double(30. / 2**9),  # ? cm
    sca_sum_lsb=cms.double(0.03125),  # GeV
    sum_pT_pv_lsb=cms.double(0.25),  # GeV
    pos_chg=cms.int32(1),  # encoding of positive charge
    neg_chg=cms.int32(0)   # encoding of negative charge
)

# Instantiate the C++ L1GTScales object from the PSet values, passed
# positionally in declaration order.
l1tGTScales = CppScales(*[param.value() for param in scale_parameter.parameters_().values()])
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
'''
Bae Client contains main apis for BAE
@Author : zhangguanxing01@baidu.com
@Copyright : 2013 Baidu Inc.
@Date : 2013-06-26 11:09:00
'''
import os
import sys
from bae.config.constants import VERSION
try:
from setuptools import *
except ImportError:
from distutils.core import *
def read(fname):
    """Return the contents of `fname`, resolved relative to this file.

    Uses a context manager so the file handle is closed promptly; the
    original leaked the handle until garbage collection.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# BAE_OFFLINE toggles offline mode; default to "yes" when unset.
# FIX: dict.has_key() was removed in Python 3 — use .get() with a default,
# which behaves identically on Python 2.
offline = os.environ.get("BAE_OFFLINE", "yes")

if sys.argv[-1] == "publish":
    # Clean build artefacts and abort (nothing is actually uploaded).
    os.system("rm -rf bae.egg-info")
    os.system("rm -rf build")
    os.system("rm -rf dist")
    os.system("find . -name '*.pyc' | xargs rm -rf")
    sys.exit(-1)
# Runtime dependencies installed alongside the client.
requires = ['requests', 'colorama', 'pycrypto', 'PyYAML', 'prettytable>=0.7.0']

setup(
    name = "bae",
    version = VERSION,
    author = "Zhang Guanxing",
    author_email = "zhangguanxing01@baidu.com",
    description = ("A BAE Client Tool"),
    keywords = "bae client tool",
    url = "http://developer.baidu.com",
    # Ship only library packages; exclude tests, packaging and vendored dirs.
    packages = find_packages(exclude=["debian", "Makefile", "*.tests", "*.tests.*", "tests.*", "tests", "third"]),
    scripts = ["bin/bae"],
    install_requires = requires,
    zip_safe = False,
    long_description=read('README.txt'),
    classifiers=[
        "Development Status :: 4 - Beta",
        "Topic :: Utilities",
        "Programming Language :: Python :: 2 :: Only",
    ],
)
|
# Elastic search mapping definition for the Tissue entity
from glados.es.ws2es.es_util import DefaultMappings
# Shards size - can be overridden from the default calculated value here
# shards = 3,
# Number of index replicas.
replicas = 0
# Shared text-analysis settings reused across indices.
analysis = DefaultMappings.COMMON_ANALYSIS

# Field mappings: tissue cross-references (BTO/CALOHA/EFO/UBERON), the
# preferred name and the ChEMBL id, plus completion-suggester metadata.
mappings = \
    {
        'properties':
        {
            '_metadata':
            {
                'properties':
                {
                    'es_completion': DefaultMappings.COMPLETION_TYPE
                }
            },
            'bto_id': DefaultMappings.ID_REF,
            # EXAMPLES:
            # 'BTO:0001073' , 'BTO:0000784' , 'BTO:0000648' , 'BTO:0000068' , 'BTO:0000089' , 'BTO:0000503' , 'BTO:0
            # 000928' , 'BTO:0001167' , 'BTO:0001376' , 'BTO:0001363'
            'caloha_id': DefaultMappings.ID_REF,
            # EXAMPLES:
            # 'TS-0798' , 'TS-0579' , 'TS-0490' , 'TS-0034' , 'TS-0079' , 'TS-1307' , 'TS-1157' , 'TS-2039' , 'TS-10
            # 30' , 'TS-0980'
            'efo_id': DefaultMappings.ID_REF,
            # EXAMPLES:
            # 'UBERON:0001897' , 'UBERON:0001969' , 'UBERON:0002106' , 'UBERON:0002107' , 'UBERON:0000956' , 'UBERON
            # :0000007' , 'UBERON:0000014' , 'UBERON:0000029' , 'UBERON:0000160' , 'UBERON:0000178'
            'pref_name': DefaultMappings.PREF_NAME,
            # EXAMPLES:
            # 'Thalamus' , 'Plasma' , 'Spleen' , 'Liver' , 'Cerebral cortex' , 'Pituitary gland' , 'Zone of skin' ,
            # 'Lymph node' , 'Intestine' , 'Amniotic fluid'
            'tissue_chembl_id': DefaultMappings.CHEMBL_ID,
            # EXAMPLES:
            # 'CHEMBL3638280' , 'CHEMBL3559721' , 'CHEMBL3559722' , 'CHEMBL3559723' , 'CHEMBL3559724' , 'CHEMBL36381
            # 73' , 'CHEMBL3638174' , 'CHEMBL3638175' , 'CHEMBL3638176' , 'CHEMBL3638177'
            'uberon_id': DefaultMappings.ID_REF,
            # EXAMPLES:
            # 'UBERON:0001969' , 'UBERON:0002106' , 'UBERON:0002107' , 'UBERON:0000956' , 'UBERON:0000007' , 'UBERON
            # :0000014' , 'UBERON:0000029' , 'UBERON:0000160' , 'UBERON:0000173' , 'UBERON:0000178'
        }
    }

# Per-field weights for the autocomplete (completion suggester) index.
autocomplete_settings = {
    'bto_id': 10,
    'caloha_id': 10,
    'efo_id': 10,
    'pref_name': 100,
    'tissue_chembl_id': 10,
    'uberon_id': 10
}
|
mx = 0  # best (largest) rectangle area found so far (module global)
pt = 0  # index of the bar where the best area was found


def maxArea(ar):
    """Largest rectangle in a histogram via an O(n^2) scan around each bar.

    Keeps updating the module-level ``mx``/``pt`` globals (as the original
    did, so the printing script below still works) and now also returns
    the pair ``(mx, pt)`` so callers can use the result directly.
    """
    global mx, pt  # the original declared `global mx` twice; once suffices
    for i in range(len(ar)):
        height = ar[i]
        # Expand left while bars are at least as tall as ar[i].
        left = i
        for j in range(i - 1, -1, -1):
            if ar[j] >= height:
                left = j
            else:
                break
        # Expand right likewise.
        right = i
        for j in range(i + 1, len(ar)):
            if ar[j] >= height:
                right = j
            else:
                break
        area = height * (right - left + 1)
        if area > mx:
            mx = area
            pt = i
    return mx, pt
# Read bar heights as space-separated integers, e.g. "2 1 5 6 2 3".
ar=input('Enter the height of respective histograms').strip()
ar=ar.split(' ')
ar=list(map(int,ar))
maxArea(ar)
# maxArea() stores its result in the module globals mx and pt.
print("Maximum area is ",mx," around the point ",pt)
|
"""
Test the get_security() and put_security() functions of the security id lookup web service.
"""
import unittest2
from webservice_client.id_lookup import get_security, put_security
from webservice_client.utility import get_server_url
class TestLookup(unittest2.TestCase):
    """Integration tests for the security id-lookup web service.

    These tests hit the live server returned by get_server_url().
    """

    def __init__(self, *args, **kwargs):
        super(TestLookup, self).__init__(*args, **kwargs)

    def test_get_security(self):
        # Fetch a known JPM security and spot-check every returned field.
        data = get_security(get_server_url(), 'JPM', '0D5402S')
        self.assertEqual(len(data), 8)
        self.assertEqual(data['security_id_type'], 'JPM')
        self.assertEqual(data['security_id'], '0D5402S')
        self.assertEqual(data['currency'], 'HKD')
        self.assertEqual(data['isin'], '')
        self.assertEqual(data['bloomberg_figi'], 'BBG00D2SB834')
        self.assertEqual(data['geneva_investment_id'], '')

    def test_put_security(self):
        """
        Create a new security
        """
        data = put_security(get_server_url(), get_security_data())
        self.assertEqual(data, 'OK')

    def test_put_security2(self):
        """
        Update an existing security
        """
        # TODO: not implemented yet.
        pass
def get_security_data():
return {
'security_id_type':'CMU',
'security_id':'WLHKFN09007',
'name':'WING LUNG BANK LTD 5.7 28DEC2021',
'isin':'',
'bloomberg_figi':'BBG00000WLY9',
'geneva_investment_id':'BBG00000WLY9 HTM',
'currency':'',
'comments':'This one has no ISIN, as of 2016-11-29'
}
|
def intr(p,r,t):
    """Print an interest-style calculation for principal p, rate r, time t.

    NOTE(review): the formula looks suspect — n = r/t divides the rate by
    the time and d = t*t squares the time, which matches no standard
    compound-interest formula (one would expect something like
    i = p*(1 + r/100)**t - p).  Confirm the intended maths before
    relying on the printed values.
    """
    n=r/t
    d=t*t
    i=(p*((1+n)**(d)))-p
    print(n)
    print(d)
    print(i)

# Prompt for principal, rate (percent) and time in years, then compute.
a=float(input("Principle="))
b=float(input("Rate="))
c=int(input("Time(in year)="))
intr(a,b,c)
|
import json
import osmium as osm
import networkx as nx
class OSMHandler(osm.SimpleHandler):
    """Collect OSM nodes and ways into an undirected NetworkX graph."""

    def __init__(self):
        super(OSMHandler, self).__init__()
        self.G = nx.Graph()

    def node(self, n):
        # n.location stringifies as "lon/lat"; keep both as a pos attribute.
        lon, lat = str(n.location).split('/')
        self.G.add_node(n.id, pos=(lon, lat))

    def way(self, w):
        # Link every consecutive pair of node refs along the way.
        for i, n in enumerate(w.nodes):
            if i != len(w.nodes) - 1:
                a, b = n.ref, w.nodes[i+1].ref
                # The graph is undirected, so the second add_edge is a no-op.
                self.G.add_edge(a, b)
                self.G.add_edge(b, a)

    # Relations are not handled for now.
    def relation(self, r):
        pass
def write_to_file(osmhandler):
    """Dump the graph as out/nodes.json (coordinates) and out/roads.txt (DIMACS)."""
    # Map original OSM node ids to consecutive ids, sorted numerically.
    g = {Id: i for i, Id in enumerate(sorted(osmhandler.G.nodes, key=int))}
    # Store the node coordinates in nodes.json.
    with open('out/nodes.json', 'w') as f:
        nodes = {i: osmhandler.G.nodes[Id]['pos']
                 for i, Id in enumerate(sorted(osmhandler.G.nodes, key=int))}
        json.dump(nodes, f, ensure_ascii=False,
                  indent=4, separators=(',', ': '))
    # Save the edge list in DIMACS format (roads.txt).
    with open('out/roads.txt', 'w', encoding='utf-8') as f:
        f.write('c XiaMen graph\n')
        f.write(f'p edge {len(osmhandler.G.nodes)} {len(osmhandler.G.edges)}\n')
        # One "e" record per edge, using the remapped ids.
        for edge in osmhandler.G.edges:
            e1, e2 = edge
            f.write(f'e {g[e1]} {g[e2]}\n')
def main():
    """Parse data/map.osm and export the graph files under out/."""
    osmhandler = OSMHandler()
    osmhandler.apply_file('data/map.osm')
    write_to_file(osmhandler)

if __name__ == '__main__':
    main()
    print('Generate XiaMen Graph done!')
|
import pytest
import linkpred.util as u
def test_all_pairs():
    """all_pairs yields every unordered pair exactly once."""
    items = [1, 2, 3, 4]
    pairs = sorted(u.all_pairs(items))
    assert pairs == [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]
def test_load_function():
    """A dotted path resolves to the named callable."""
    import os

    loaded = u.load_function("os.path.join")
    assert loaded == os.path.join
def test_load_function_no_modulename():
    """A bare name without a module part is rejected."""
    with pytest.raises(ValueError):
        u.load_function("join")
def test_interpolate():
    """interpolate() replaces each value with the running max from the right."""
    values = [10, 8, 9, 6, 6, 7, 3, 5, 6, 2, 1, 2]
    expected = [10, 9, 9, 7, 7, 7, 6, 6, 6, 2, 2, 2]
    assert u.interpolate(values) == expected
    # A strictly increasing sequence collapses to its maximum everywhere.
    assert u.interpolate(list(range(5))) == [4] * 5
def test_itersubclasses():
    """Subclasses are yielded depth-first in definition order."""

    class A:
        pass

    class Aa(A):
        pass

    class Ab(A):
        pass

    class Aaa(Aa):
        pass

    names = [cls.__name__ for cls in u.itersubclasses(A)]
    assert names == ["Aa", "Aaa", "Ab"]
# This is silly but hey... 100% test coverage for this file :-)
def test_itersubclasses_from_type():
    """Merely iterating the subclasses of `type` must not raise."""
    for _ in u.itersubclasses(type):
        pass
|
"""
Sample answers
--------------
askcli::
<?xml version="1.0" encoding="UTF-8"?>
<aptcfg:APTData xmlns:aptcfg="http://www.telecomitalia.it/apt-config_version-1"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><aptcfg:ErrorStatus><aptcfg:ErrorCode>3</aptcfg:ErrorCode>
</aptcfg:ErrorStatus>
<aptcfg:CLIList><aptcfg:CLI aptcfg:Status="Registered">0212345678</aptcfg:CLI>
</aptcfg:CLIList>
</aptcfg:APTData>
setcli with wrong CLI::
<?xml version="1.0" encoding="UTF-8"?>
<aptcfg:APTData xmlns:aptcfg="http://www.telecomitalia.it/apt-config_version-1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<aptcfg:ErrorStatus><aptcfg:ErrorCode aptcfg:CLI="0200000000">2</aptcfg:ErrorCode>
</aptcfg:ErrorStatus>
</aptcfg:APTData>
setcli with registered CLI::
<?xml version="1.0" encoding="UTF-8"?>
<aptcfg:APTData xmlns:aptcfg="http://www.telecomitalia.it/apt-config_version-1"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><aptcfg:ErrorStatus>
<aptcfg:ErrorCode aptcfg:CLI="0212345678">3</aptcfg:ErrorCode>
</aptcfg:ErrorStatus>
</aptcfg:APTData>
successful setCLI::
<?xml version="1.0" encoding="UTF-8"?>
<aptcfg:APTData xmlns:aptcfg="http://www.telecomitalia.it/apt-config_version-1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><aptcfg:ErrorStatus><aptcfg:ErrorCode aptcfg:CLI="0212345678">0</aptcfg:ErrorCode>
</aptcfg:ErrorStatus>
<aptcfg:SIPTerminalParameters><aptcfg:OutboundProxyIP>172.16.1.1</aptcfg:OutboundProxyIP>
<aptcfg:LineIdentification>0212345678</aptcfg:LineIdentification>
<aptcfg:SIPkey>deadbeef</aptcfg:SIPkey>
<aptcfg:SIPDomain></aptcfg:SIPDomain>
</aptcfg:SIPTerminalParameters>
</aptcfg:APTData>
delallcli::
<?xml version="1.0" encoding="UTF-8"?>
<aptcfg:APTData xmlns:aptcfg="http://www.telecomitalia.it/apt-config_version-1"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<aptcfg:ErrorStatus><aptcfg:ErrorCode>0</aptcfg:ErrorCode>
</aptcfg:ErrorStatus>
</aptcfg:APTData>
"""
from pivelli.options import o as opts
from pivelli.http import xmlrequest
def delallcli():
    """Ask the gateway to delete all registered CLIs.

    Raises Exception on any non-zero ErrorCode in the XML answer.
    """
    ns, res = xmlrequest('/aptconfig.xml?WINDWEB_URL=%2Fdoc2%2Fdelallcli')
    if res[ns.ErrorStatus][ns.ErrorCode][0] != '0':
        raise Exception, 'Incorrect answer from gateway'
def askcli():
    """
    Returns a dictionary of boolean values, where the key is the CLI
    and the value is True if the CLI is available for registration,
    False otherwise.
    """
    ns, res = xmlrequest('/aptconfig.xml?WINDWEB_URL=%2Fdoc2%2Faskcli')
    cli_dict = {}
    # Walk every <CLI> element inside the <CLIList> answer.
    for cli in res[ns.CLIList][ns.CLI:]:
        number = str(cli)
        status = cli(ns.Status)  # the aptcfg:Status attribute
        if status not in ('Registered', 'Available'):
            raise Exception, 'Unknown status "%s" for CLI %s' % (status, number)
        cli_dict[number] = (status == 'Available')
    return cli_dict
def setcli(cli_list):
    """
    Registers the given CLIs and returns a dict keyed by CLI.  Each value
    holds 'error' and 'status', and — for successfully registered lines —
    the VoIP parameters 'username', 'proxy', 'key' and 'domain'.
    """
    # Build parameter list: first CLI is 'CLI', the rest 'CLI2', 'CLI3', ...
    setcli_parameters = []
    for i, cli in enumerate(cli_list):
        if i == 0:
            parm_name = 'CLI'
        else:
            parm_name = 'CLI%s' % (i+1)
        setcli_parameters.append(parm_name + '=' + cli)
    # Call router
    ns, res = xmlrequest('/aptconfig.xml?WINDWEB_URL=/doc2/setcli&'
                         + '&'.join(setcli_parameters))
    # Prepare return dictionary
    cli_data = {}
    # Parse error codes
    for status_node in res[ns.ErrorStatus:]:
        code = status_node[ns.ErrorCode]
        cli = code(ns.CLI)  # the aptcfg:CLI attribute of the error element
        error = str(code)
        if error == '0':
            status = 'Success'
        elif error == '2':
            status = 'CLI not available to this gateway'
        elif error == '3':
            status = 'CLI already assigned to another device'
        else:
            status = 'Unknown error code'
        cli_data[cli] = {'error': error, 'status': status}
    # Retrieve parameters for successful lines
    for data in res[ns.SIPTerminalParameters:]:
        # Choose sub-dictionary
        cli_dict = cli_data[str(data[ns.LineIdentification])]
        # Fill sub-dictionary
        cli_dict['username'] = str(data[ns.LineIdentification])
        cli_dict['proxy'] = str(data[ns.OutboundProxyIP])
        cli_dict['key'] = str(data[ns.SIPkey])
        cli_dict['domain'] = str(data[ns.SIPDomain])
    return cli_data
|
# https://atcoder.jp/contests/arc156/tasks/arc156_a
import sys
# input = sys.stdin.buffer.readline
# Shadow the builtin input() with a faster, newline-stripped stdin reader.
def input(): return sys.stdin.readline().rstrip()
# sys.setrecursionlimit(10 ** 7)
def main():
    # Solve one test case of ARC156 A: binary string S of length N;
    # print the minimum number of operations, or -1 if impossible.
    N = int(input())
    S = input()
    cnt = 0
    index_list = []
    # Count the '1' characters and record their positions.
    for i, c in enumerate(S):
        if c=='1':
            cnt += 1
            index_list.append(i)
    flg = True
    # An odd count of '1's is unsolvable.
    if cnt%2:
        flg = False
    elif N==3 and S[1]=='1':
        # Special small case: length 3 with the middle cell set is unsolvable.
        flg = False
    if flg==False:
        print(-1)
    elif N==4 and cnt==2 and index_list[0]==1 and index_list[1]==2:
        # The adjacent middle pair on a length-4 string costs 3 operations.
        print(3)
    elif cnt==2 and index_list[0]+1==index_list[1]:
        # Any other adjacent pair costs 2 operations.
        print(2)
    else:
        # General case: each operation clears a pair of '1's.
        print(cnt//2)
    return
# Read the number of test cases and solve each one independently.
T = int(input())
for ti in range(T):
    main()
# Input-parsing templates kept from the author's snippet library:
# S = input()
# N = int(input())
# N, K = map(int, input().split())
# A = list(map(int, (input().split())))
# A = [[int(i) for i in input().split()] for _ in range(N)]
|
# Unary and Binary Operators
from ex129 import tokenizing
def unary(tokens):
    """Mark unary '+'/'-' tokens in-place by prefixing them with 'U'.

    A sign token is unary when it is the first token, or when the token
    immediately before it is an operator or an opening parenthesis.
    """
    preceding = ['*', '/', '-', '+', '(']
    for idx in range(len(tokens)):
        tok = tokens[idx]
        if tok not in ('-', '+'):
            continue
        if idx == 0 or tokens[idx - 1] in preceding:
            tokens[idx] = 'U' + tok
def main():
    """Prompt for an expression, tokenize it, and show the token list
    before and after unary-operator tagging."""
    expression = input('enter a mathematical expression: ')
    token_list = tokenizing(expression)
    print(token_list)
    unary(token_list)
    print(token_list)


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 06 09:36:44 2018
@author: c.massari
"""
import numpy as np
from scipy.stats import norm
def SPIcal(df_PP, acc_per):
    """Compute a Standardized Precipitation Index per pixel and month.

    df_PP   : pandas DataFrame — precipitation, index = time, one column
              per pixel (as set up by the example usage below).
    acc_per : int — accumulation period (rolling window, in months).
    Returns a DataFrame of the same shape with standard-normal z-scores.
    """
    # Aggregate to monthly totals and fill gaps before accumulating.
    month_values = df_PP.resample('M').sum()
    month_values = month_values.interpolate()
    accum_period = month_values.rolling(acc_per).mean()
    SPI = accum_period.copy()
    mesi = np.arange(1,13,1)   # calendar months 1..12
    #npixel=np.arange(0,len(SSI.columns))
    npixel = np.arange(0,len(SPI.columns))   # one entry per pixel column
    for kk in npixel:
        for jj in mesi:
            # All accumulated values belonging to calendar month jj.
            dfM = accum_period[accum_period.index.month == jj]
            series = dfM.values[:,kk]
            series = series[~np.isnan(series)]   # drop missing months
            n = len(series)
            bp = np.zeros(len(series))
            # Empirical rank of each value within its month-of-year sample.
            for ii in range(len(series)):
                bp[ii] = np.sum(series<=series[ii])
            # Plotting position formula Gringorten
            y = (bp-0.44)/(n+0.12);
            # Map empirical probabilities to standard-normal quantiles.
            z = norm.ppf(y)
            # NOTE(review): z is computed on the NaN-filtered sample, but the
            # boolean mask on the left selects ALL rows of month jj — if any
            # NaNs were dropped, the lengths differ. Confirm inputs are
            # gap-free after interpolate() (the leading rolling-window NaNs
            # remain!).
            SPI.iloc[accum_period.index.month == jj,kk] = z
    return SPI
"""
mat = scipy.io.loadmat('PP_Esp.mat')
rain=mat['PP_Esp']
rain[rain<0]='0';
rain_m=np.reshape(rain,(4018,7))
date= pd.date_range(start='1/1/2007', end='31/12/2017', freq='D')
df_PP=pd.DataFrame(rain_m,index=date)
# df_SM must be a pandas dataframe where the index is time and the columns are the pixel contained
# in the sh#df_SM1=....
#df_SM1=df1 = df_SM.iloc[:,0:2]
acc_per=1
SPIad=SPIcal(df_PP,acc_per)
df=SPIad.iloc[:,1]
df.plot(x='Date', y='Result')
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn.bricks.wrappers import NewEmptyTensorOp, obsolete_torch_version
# Parrots (an alternative backend) reports a non-numeric version string;
# keep it verbatim so downstream checks can special-case it.
if torch.__version__ == 'parrots':
    TORCH_VERSION = torch.__version__
else:
    # torch.__version__ could be 1.3.1+cu92, we only need the first two
    # for comparison
    TORCH_VERSION = tuple(int(x) for x in torch.__version__.split('.')[:2])
def adaptive_avg_pool2d(input, output_size):
    """Handle empty batch dimension to adaptive_avg_pool2d.

    Args:
        input (tensor): 4D tensor.
        output_size (int, tuple[int,int]): the target output size.
    """
    # Only old torch (< 1.9) needs the manual empty-tensor workaround;
    # everything else goes straight to the functional op.
    if input.numel() != 0 or not obsolete_torch_version(TORCH_VERSION, (1, 9)):
        return F.adaptive_avg_pool2d(input, output_size)
    if isinstance(output_size, int):
        hw = [output_size, output_size]
    else:
        hw = list(output_size)
    # Produce a correctly-shaped empty tensor without running the kernel.
    return NewEmptyTensorOp.apply(input, [*input.shape[:2], *hw])
class AdaptiveAvgPool2d(nn.AdaptiveAvgPool2d):
    """Handle empty batch dimension to AdaptiveAvgPool2d."""

    def forward(self, x):
        # PyTorch 1.9 does not support empty tensor inference yet, so only
        # the (empty input, old torch) combination needs the workaround.
        if x.numel() != 0 or not obsolete_torch_version(TORCH_VERSION, (1, 9)):
            return super().forward(x)
        size = self.output_size
        if isinstance(size, int):
            hw = [size, size]
        else:
            # A `None` entry means "keep the corresponding input dimension".
            hw = [target if target is not None else actual
                  for target, actual in zip(size, x.size()[-2:])]
        # Produce a correctly-shaped empty tensor without running the kernel.
        return NewEmptyTensorOp.apply(x, [*x.shape[:2], *hw])
|
'''
Analysis
Bidirectional BFS: expand forward from the start state and, at the same time,
backward from states that can reach the goal; as soon as the two frontiers
share a state, the answer is found. Notes:
1. Lists were originally used to record the cards (sets are unordered), but a
   list cannot be cross-checked quickly through a set, so each state is
   stored as a string instead.
2. Walking forward (1234-style) puts the middle cards on top; walking
   backward (4321-style) must put the top cards back into the middle, NOT
   the middle cards to the bottom.
3. Card order and step count were originally queued together in one list,
   which prevented fast set lookups of the card order alone, so they are
   kept in two parallel queues.
4. Pitfall: when expanding a state, finish expanding EVERY state with the
   same step count before checking for an intersection; otherwise the queues
   mix states whose step counts differ by one and the comparison can fail.
'''
from itertools import product
from time import clock  # NOTE(review): time.clock was removed in Python 3.8
from queue import Queue
import itertools as it
def bfs2Way(firstcards, cnt):
    """Bidirectional BFS over card-shuffle states.

    q1/n1 hold the forward frontier (states / step counts); q2/n2 hold the
    backward frontier starting from the reversed goal. Returns the total
    number of moves when the frontiers meet. Relies on the module globals
    `half` and `check`.
    """
    q1,q2,n1,n2 = Queue(),Queue(),Queue(),Queue()
    q1.put(firstcards)
    q2.put(firstcards[::-1])   # backward search starts from the reversed goal
    n1.put(cnt)
    n2.put(cnt)
    while 1:
        # Expand the ENTIRE forward frontier of the current depth before
        # intersecting (see note 4 in the module docstring).
        while 1:
            cards1,cnt1 = q1.get(),n1.get()
            for i in range(1,half+1):
                # Forward move: slide i of the middle cards on top.
                newCards1 = cards1[i:i+half]+cards1[:i]+cards1[i+half:]
                q1.put(newCards1)
                n1.put(cnt1+1)
            if cnt1!=n1.queue[0]:
                break
        if check(q1,q2):
            # Frontiers met after a forward step: odd total move count.
            return cnt1*2+1
        # Expand the entire backward frontier of the current depth.
        while 1:
            cards2,cnt2 = q2.get(),n2.get()
            for i in range(1,half+1):
                # Backward move: put i top cards back into the middle (note 2).
                newCards2 = cards2[half:half+i]+cards2[:half]+cards2[i+half:]
                q2.put(newCards2)
                n2.put(cnt2+1)
            if cnt2!=n2.queue[0]:
                break
        if check(q1,q2):
            # Frontiers met after a backward step: even total move count.
            return cnt2*2+2
def check(q1, q2):
    """Return 1 when the two BFS frontiers share a state, None otherwise.

    Peeks at the queues' internal deques without consuming any items.
    """
    common = set(q1.queue) & set(q2.queue)
    if common:
        print('-'*22, common)
        return 1
cardNumber = 10
half = int(cardNumber/2)                 # cards moved per operation
willFormatedString = '{}'*cardNumber
# Initial sorted state, e.g. "0123456789" for 10 cards.
sortedCards = willFormatedString.format(*range(cardNumber))
print(bfs2Way(sortedCards,0)) #12
|
# Demonstrate using a plain list as a stack: push ten values, pop two.
ishwar_stack = []
for value in range(10):
    ishwar_stack.append(value)  # push onto the stack
print(ishwar_stack)
for _ in range(2):
    ishwar_stack.pop()          # pop from the top
print(ishwar_stack)
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 17 15:16:45 2019
"""
import math as mat

# Read base and exponent from stdin and print base**exponent.
# math.pow always returns a float, even for integer inputs.
base = int(input("Base: "))
expo = int(input("Exponente: "))
acum = mat.pow(base, expo)
print(acum)
import service.constant as constant
import json
import os
from service.ManagementService import ManagementService
class LookupService:
    """Builds and serves team/user lookup data backed by a JSON file.

    `config` must provide API_KEY, API_USERNAME, MANAGEMENT_SERVICE_HOST and
    USER_MANAGEMENT_SERVICE_ROUTE entries for the management-service calls.
    """

    def __init__(self, config):
        self.config = config

    def update_teams_and_users(self, teams, update_to_file):
        """Fetch the users of each team and dump the lookup JSON to a file.

        teams          : iterable of dicts; entries with a 'teamcode' key are used.
        update_to_file : file name, written relative to the working directory.
        """
        teams_lookup = []
        users_lookup = []
        headers = {'apikey': str(self.config['API_KEY']), 'username': str(self.config['API_USERNAME'])}
        management_service = ManagementService(str(self.config['MANAGEMENT_SERVICE_HOST']), (str(self.config['USER_MANAGEMENT_SERVICE_ROUTE'])), headers)
        for team in teams:
            try:
                # `in` replaces the Python-2-only dict.has_key().
                if 'teamcode' in team:
                    teams_lookup.append(str(team['teamcode']))
                    team_users = self.geta_users_for_team(management_service, str(team['teamcode']))
                    users_lookup = users_lookup + team_users
            except UnicodeEncodeError:
                # Best-effort: skip records whose team code cannot be encoded.
                print('UnicodeEncodeError for Team, Skipping the record .... ')
        actuals = {}
        actuals[constant.TEAM_ATTRIBUTE] = teams_lookup
        actuals[constant.USER_ATTRIBUTE] = users_lookup
        # NOTE(review): realpath('__file__') resolves the literal string
        # against the CWD, not this module's directory — preserved for
        # compatibility; confirm whether __file__ (no quotes) was intended.
        file_directory = os.path.dirname(os.path.realpath('__file__'))
        filename = os.path.join(file_directory, update_to_file)
        with open(filename, 'w') as outfile:
            json.dump(actuals, outfile)

    def geta_users_for_team(self, service, team_name):
        """Return the user ids of `team_name` via the management service."""
        team_users = []
        users = service.get_users_for_team(team_name)
        if users is not None:
            for user in users:
                try:
                    # `in` replaces has_key(); TypeError is also caught so
                    # non-mapping entries are still skipped with a message,
                    # mirroring the old AttributeError-on-has_key behaviour.
                    if 'userid' in user:
                        team_users.append(str(user['userid']))
                except (AttributeError, TypeError):
                    print('User Attribute Error for User:' + user)
        return team_users

    def get_teams_and_users(self):
        """Load and return (teams, users) from the default lookup file."""
        file_directory = os.path.dirname(os.path.realpath('__file__'))
        filename = os.path.join(file_directory, constant.LOOKUP_FILE_NAME)
        with open(filename) as json_file:
            data = json.load(json_file)
        return data[constant.TEAM_ATTRIBUTE], data[constant.USER_ATTRIBUTE]

    def get_content(self, filename, property_key):
        """Return `property_key` from the JSON file `filename` (CWD-relative)."""
        file_directory = os.path.dirname(os.path.realpath('__file__'))
        filename = os.path.join(file_directory, filename)
        with open(filename) as json_file:
            data = json.load(json_file)
        return data[property_key]
|
"""For fun I will try to write a player with no ability to search ahead, only an eval function"""
import David_AI_v8 as ai
from copy import copy
# Move offsets on a 0x88-style board (16 squares per rank): +1 is one file,
# +16 is one rank, so diagonal/knight offsets combine the two.
PIECE_MOVE_DIRECTION = {
    'R': (1, 16, -1, -16),
    'B': (1+16, 1-16, 16-1, -1-16),
    'N': (1+2*16, 1-2*16, -1+2*16, -1-2*16, 2+16, 2-16, -2+16, -2-16)}
# King and queen combine rook and bishop directions.
PIECE_MOVE_DIRECTION['K'] = PIECE_MOVE_DIRECTION['R'] + PIECE_MOVE_DIRECTION['B']
PIECE_MOVE_DIRECTION['Q'] = PIECE_MOVE_DIRECTION['K']
# Black pieces (lowercase) move with the same offsets as white.
for _piece in copy(PIECE_MOVE_DIRECTION):
    PIECE_MOVE_DIRECTION[_piece.lower()] = PIECE_MOVE_DIRECTION[_piece]
# Material values in centipawns; negative for black, 0 for an empty square.
PIECE_VALUE = {
    '.': 0,
    'K': 20000, 'Q': 975, 'R': 500, 'B': 335, 'N': 325, 'P': 100,
    'k': -20000, 'q': -975, 'r': -500, 'b': -335, 'n': -325, 'p': -100
}
# Piece-square bonus tables from white's perspective, row 0 = rank 8.
# These human-readable 8x8 grids are flattened into POSITION_VALUE below.
POSITION_VALUE_READABLE = {
    'P': [
        [0, 0, 0, 0, 0, 0, 0, 0],
        [50, 50, 50, 50, 50, 50, 50, 50],
        [10, 10, 20, 30, 30, 20, 10, 10],
        [5, 5, 10, 25, 25, 10, 5, 5],
        [0, 0, 0, 2, 2, 0, 0, 0],
        [5, -5,-10, 0, 0,-10, -5, 5],
        [5, 10, 10,-20,-20, 10, 10, 5],
        [0, 0, 0, 0, 0, 0, 0, 0]],
    # [[5*(x - (x * x / 7))+(0.02 * (y+2)**4)-10 for x in range(8)] for y in range(7, -1, -1)],
    # print('\n'.join(' '.join('{}'.format(int(PAWN_POSITION_VALUE[y][x]))
    #                          for x in range(8))for y in range(8))+'\n')
    'N': [
        [-8, -8, -8, -8, -8, -8, -8, -8],
        [-8, 0, 0, 0, 0, 0, 0, -8],
        [-8, 0, 4, 6, 6, 4, 0, -8],
        [-8, 0, 6, 8, 8, 6, 0, -8],
        [-8, 0, 6, 8, 8, 6, 0, -8],
        [-8, 0, 4, 6, 6, 4, 0, -8],
        [-8, 0, 1, 2, 2, 1, 0, -8],
        [-16,-12, -8, -8, -8, -8, -12, -16]],
    'B': [
        [-4, -4, -4, -4, -4, -4, -4, -4],
        [-4, 0, 0, 0, 0, 0, 0, -4],
        [-4, 0, 2, 4, 4, 2, 0, -4],
        [-4, 0, 4, 6, 6, 4, 0, -4],
        [-4, 0, 4, 6, 6, 4, 0, -4],
        [-4, 1, 2, 4, 4, 2, 1, -4],
        [-4, 2, 1, 1, 1, 1, 2, -4],
        [-4, -4, -12, -4, -4, -12, -4, -4]],
    'R': [
        [5, 5, 5, 5, 5, 5, 5, 5],
        [-5, 0, 0, 0, 0, 0, 0, -5],
        [-5, 0, 0, 0, 0, 0, 0, -5],
        [-5, 0, 0, 0, 0, 0, 0, -5],
        [-5, 0, 0, 0, 0, 0, 0, -5],
        [-5, 0, 0, 0, 0, 0, 0, -5],
        [-5, 0, 0, 0, 0, 0, 0, -5],
        [0, 0, 0, 2, 2, 0, 0, 0]],
    'Q': [
        [0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, 1, 1, 1, 0, 0],
        [0, 0, 1, 2, 2, 1, 0, 0],
        [0, 0, 2, 3, 3, 2, 0, 0],
        [0, 0, 2, 3, 3, 2, 0, 0],
        [0, 0, 1, 2, 2, 1, 0, 0],
        [0, 0, 1, 1, 1, 1, 0, 0],
        [-5, -5, -5, -5, -5, -5, -5, -5]],
    # this should change with the game's phase
    'K': [
        [-40, -30, -50, -70, -70, -50, -30, -40],
        [-30, -20, -40, -60, -60, -40, -20, -30],
        [-20, -10, -30, -50, -50, -30, -10, -20],
        [-10, 0, -20, -40, -40, -20, 0, -10],
        [0, 10, -10, -30, -30, -10, 10, 0],
        [10, 20, 0, -20, -20, 0, 20, 10],
        [30, 40, 20, 0, 0, 20, 40, 30],
        [40, 50, 30, 10, 10, 30, 50, 40]],
    # Empty squares contribute nothing.
    '.': [[0 for _ in range(8)] for _ in range(8)]
}
# The last 4 chars of the board contain the castling rights.
# If the char is true then castling is allowed
BOTTOM_LEFT_CASTLING = 124
BOTTOM_RIGHT_CASTLING = 125
TOP_LEFT_CASTLING = 126
TOP_RIGHT_CASTLING = 127
# Flatten the readable 8x8 tables into 128-entry 0x88 lookup lists:
# each rank contributes 8 real squares plus 8 None padding entries.
POSITION_VALUE = dict()
for piece_ in POSITION_VALUE_READABLE:
    POSITION_VALUE[piece_] = []
    POSITION_VALUE[piece_.lower()] = []
    # White: tables are written from rank 8 down, so reverse to rank order.
    # (reversed(...) replaces the unidiomatic row.__reversed__() calls.)
    for row in reversed(POSITION_VALUE_READABLE[piece_]):
        POSITION_VALUE[piece_].extend(
            [PIECE_VALUE[piece_]+value for value in row]+[None]*8)
    # Black: mirror the table vertically and horizontally, negate the values.
    for row in POSITION_VALUE_READABLE[piece_]:
        POSITION_VALUE[piece_.lower()].extend(
            [-PIECE_VALUE[piece_]-value for value in reversed(row)]+[None]*8)
assert len(POSITION_VALUE['K']) == 128
def evaluate(board) -> float:
    """Static material + piece-square score of `board` (0x88 layout).

    Positive values favour white; black entries in POSITION_VALUE are
    already negated.
    """
    total = 0
    for rank in range(8):
        for file in range(8):
            square = file + 16 * rank
            total += POSITION_VALUE[board[square]][square]
    return total
def main(given_history, white_time, black_time):
    """Pick the move with the best static evaluation (depth-1 'search').

    The time arguments are accepted for interface compatibility but unused.
    NOTE(review): if `ai.moves` yields nothing (no legal moves), `best_move`
    is never bound and the final line raises — confirm callers never reach
    that state.
    """
    history = ai.to_array(given_history)
    player_is_white = len(history) % 2 == 1
    current_board = history[-1]
    best_score = -10**10
    for candidate, diff in ai.moves(current_board, player_is_white):
        candidate_score = evaluate(candidate)
        # Scores are from white's perspective; flip for black.
        if not player_is_white:
            candidate_score = -candidate_score
        if candidate_score > best_score:
            best_score = candidate_score
            best_move = candidate
    print(f'search depth: 1')
    print(f'expected score: {best_score}')
    return ai.from_array(best_move)
|
import sensorMessage as sm
import random
# Message emission periods, in timestamp ticks (one loop pass per tick).
ACCELERATION_RATE = 10
GYROSCOPE_RATE = 20
MAGNETOMETER_RATE = 50  # NOTE(review): defined but never used below — confirm.
PRESSURE_RATE = 10
CPU_TEMP_RATE = 1000

#maxTime = 30*60*1000+1
maxTime = 1001  # shortened run for generating test data

random.seed()
# Write a binary log of randomly-valued sensor messages.
with open("data.log", 'wb') as testFile:
    for timestamp in range(0, maxTime):
        if not (timestamp % ACCELERATION_RATE):
            data = (random.randint(0, 32767), random.randint(0, 32767), random.randint(0, 32767))
            testFile.write(sm.packMessage(sm.accelerationRawDataID, timestamp, data))
        if not (timestamp % GYROSCOPE_RATE):
            data = (random.randint(0, 32767), random.randint(0, 32767), random.randint(0, 32767))
            testFile.write(sm.packMessage(sm.gyroscopeRawDataID, timestamp, data))
        if not (timestamp % PRESSURE_RATE):
            data = (random.randint(0, 32767), random.randint(0, 32767), random.randint(0, 32767))
            testFile.write(sm.packMessage(sm.pressureRawDataID, timestamp, data))
        if not (timestamp % CPU_TEMP_RATE):
            # NOTE(review): the parentheses do NOT make a tuple — `data` is a
            # plain int here, unlike the 3-tuples above. The commented template
            # below does the same, so this may be intentional; confirm what
            # packMessage expects for cpuTemperatureRawDataID.
            data = (random.randint(0, 32767))
            testFile.write(sm.packMessage(sm.cpuTemperatureRawDataID, timestamp, data))
# j = 1
# for i in range(0, maxTime, 10):
#     testFile.write(sm.packMessage(sm.accelerationRawDataID, i, (j, j+1, j+2)))
#     j = j + 3
#     j = j % 32760
# j = 1
# for i in range(0, maxTime, 20):
#     testFile.write(sm.packMessage(sm.gyroscopeRawDataID, i, (j, j+1, j+2)))
#     j = j + 3
#     j = j % 32760
#
# j = 1
# for i in range(0, maxTime, 20):
#     testFile.write(sm.packMessage(sm.pressureRawDataID, i, (j, j+1, j+2)))
#     j = j + 3
#     j = j % 32760
#
# j = 1
# for i in range(0, maxTime, 100):
#     testFile.write(sm.packMessage(sm.cpuTemperatureRawDataID, i, (j)))
#     j = j + 1
#     j = j % 32760
|
from flask import Flask, render_template, request, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# BUGFIX: the database URI must be in app.config BEFORE SQLAlchemy is bound
# to the app — the old order created the extension first, so it initialised
# against the default URI instead of sqlite:///test.db.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
db = SQLAlchemy(app)
class kw1(db.Model):
    """Keyword submitted through the /form route; duplicates are rejected."""
    id = db.Column(db.Integer, primary_key = True )
    keyword = db.Column(db.String(20), unique = True)


class kw2(db.Model):
    """User record with a keyword counter (unused by the routes below)."""
    id = db.Column(db.Integer, primary_key = True)
    username = db.Column(db.String, unique = True)
    email = db.Column(db.String, unique = True)
    password = db.Column(db.String)
    key_number = db.Column(db.Integer, )
class users1(db.Model):
    """Demo model exercising one column of every common SQLAlchemy type."""
    id = db.Column( db.Integer, primary_key=True)
    username = db.Column( db.String(80), unique = True)
    # NOTE(review): unique=True on password is unusual — two users cannot
    # share a password; confirm this is intentional for the demo.
    password = db.Column( db.String(60), unique = True)
    email = db.Column( db.String(80), nullable = False, unique = True)
    # One column per remaining SQLAlchemy type, named after the type.
    integer21 = db.Column( db.Integer, )
    text21 = db.Column( db.Text)
    datetime21 = db.Column( db.DateTime, )
    float21 = db.Column( db.Float )
    boolean21 = db.Column( db.Boolean )
    pickleType21 = db.Column( db.PickleType )
    largeBinary21 = db.Column( db.LargeBinary, )
@app.route('/form', methods=['POST','GET'])
def form():
    """GET: render the keyword form. POST: persist the submitted keyword."""
    if request.method == 'POST':
        keyword1 = request.form['keyword']
        k1_obj = kw1(keyword=keyword1)
        try:
            db.session.add(k1_obj)
            db.session.commit()
            return redirect('/')
        except Exception:
            # Narrowed from a bare `except:` and rollback added so the
            # session stays usable after a failed commit (e.g. duplicate
            # keyword hitting the unique constraint).
            db.session.rollback()
            return 'There was an error, try again!!!'
    else:
        return render_template('form.html')
@app.route('/')
def index():
    # Minimal hand-written landing page linking to the other routes.
    return '<html><a href="/form">/form</a><br><a href="/records">/records</a></html>'


@app.route('/jumbotron')
def jumbotron():
    return render_template('jumbotron.html')


@app.route('/records')
def records():
    # List every stored keyword, oldest first.
    records_db = kw1.query.order_by(kw1.id).all()
    return render_template('records.html', records_html = records_db)


@app.route('/blog')
def blog():
    return render_template('blog.html')


if __name__=='__main__':
    app.run(debug=True)  # debug=True: development server only
|
import gffutils
import sys
import json
import csv
import os
import numpy as np
'''
#gencodegtf=sys.argv[1]
#expressionrows=sys.argv[2]
#db=gffutils.create_db(gencodegtf, "gencodev32.db")
db=gffutils.FeatureDB("gencodev32.db",keep_order=True)
features = db.all_features()
id_dict={}
for feat in features:
chr_num=feat[0]
type=feat[2]
start=feat[3]
end=feat[4]
gene_id=(feat[8]["gene_id"][0]).split(".")[0]
gene_name=feat[8]["gene_name"][0]
gene_type=feat[8]["gene_type"][0]
if type=="gene" and gene_type=="protein_coding":
if chr_num not in id_dict:
id_dict[chr_num]={gene_id:{"start":start,
"end":end,
"gene_name":gene_name}}
else:
already=id_dict[chr_num]
already[gene_id]={"start":start,
"end":end,
"gene_name":gene_name}
id_dict[chr_num]=already
with open("gencodev32.json", "w") as write_file:
json.dump(id_dict, write_file)
'''
# Annotate each bedgraph interval with the protein-coding gene containing it.
input_dir = "/Users/asmaimran/MBRU/AbdulRahman_study/early_development"
gen_path = "gencodev32.json"
# chr -> {gene_id: {start, end, gene_name}}, produced by the commented-out
# gencode parsing block above.
with open(gen_path,"r") as g:
    id_dict = json.load(g)
with open(os.path.join(input_dir,"RT_BG01_NPC_AvgZ_hg38.bedgraph"),"r") as rr:
    mutations = csv.reader(rr,delimiter="\t")
    next(mutations, None)   # skip the header row
    l = []
    for mut in mutations:
        chr = mut[0]              # NOTE: shadows the builtin chr()
        start_mut = int(mut[1])
        end_mut = int(mut[2])
        rep_time = float(mut[3])
        all_genes = id_dict[chr]
        i = 0                     # flag: set to 1 once a containing gene is found
        for gene in all_genes:
            gene_range = range(all_genes[gene]["start"],all_genes[gene]["end"]+1)
            # `x in range(...)` is an O(1) bounds check in Python 3.
            if start_mut in gene_range and end_mut in gene_range:
                l.append([chr,start_mut,end_mut,rep_time,all_genes[gene]["gene_name"]])
                i = 1
                break
        if i==0:
            # No containing gene: keep the interval with a NaN gene name.
            l.append([chr,start_mut,end_mut,rep_time,np.nan])
# Write the annotated intervals as a tab-separated file.
with open(os.path.join(input_dir,"RT_BG01_NPC_AvgZ_hg38.ann.csv"),"w") as ww:
    writer = csv.writer(ww,delimiter="\t")
    writer.writerows(l)
|
import ui
import chat
import app
import fgGHGjjFHJghjfFG1545gGG
import snd
import item
import GFHhg54GHGhh45GHGH
import uiToolTip
import wndMgr
import time
import grp
import mouseModule
import constInfo
import event
import localeInfo
# Sort-tab identifiers for the bonus board window.
SORT_ALL = 0
SORT_PVP = 1
SORT_PVM = 2
SORT_OFFENSIVE = 3
SORT_DEFENSIVE = 4
SORT_OTHERS = 5
SORT_COUNT = 6

# Equipment-slot categories a bonus can originate from.
ATTR_WEAPON = 0
ATTR_BODY = 1
ATTR_WRIST = 2
ATTR_FOOTS = 3
ATTR_NECK = 4
ATTR_HEAD = 5
ATTR_SHIELD = 6
ATTR_EAR = 7

# Column indices into a BonusBoard row.
BONUS_NAME = 0
BONUS_VALUE = 1
BONUS_MAX_VALUE = 2
BONUS_SORT_LIST = 3

# Number of rows visible at once in the board window.
BOARD_COUNT = 16
class BonusBoardWindow(ui.ScriptWindow):
    def SetDefaultBonusPage(self):
        """Build the default bonus table (42 rows).

        Each row is [name, apply-type id, max value, sort categories, slots].
        The original hand-written ~300-line literal contained only two row
        variants (HALFHUMAN vs CRITICAL label) plus a special apply-type on
        row 0, so the table is generated instead of spelled out.
        """
        # Rows that carry the "half-human" label; every other row is "critical".
        halfhuman_rows = (0, 3, 7, 10, 16, 21, 26, 27, 33, 34, 35, 39, 41)
        self.BonusBoard = []
        for row in xrange(42):
            if row in halfhuman_rows:
                name = localeInfo.BONUS_BOARD_BONUS_HALFHUMAN
            else:
                name = localeInfo.BONUS_BOARD_BONUS_CRITICAL
            # Only the first row uses the anti-human apply-type.
            if row == 0:
                apply_type = item.APPLY_ATTBONUS_HUMAN
            else:
                apply_type = item.APPLY_CRITICAL_PCT
            self.BonusBoard.append([
                name,
                apply_type,
                0,
                [SORT_ALL, SORT_PVP, SORT_OFFENSIVE],
                [ATTR_WEAPON],
            ])
sortPage = 0
updateTimer = 0
updateWait = 1
curPageContent = []
    def __init__(self):
        ui.ScriptWindow.__init__(self)
        self.LoadWindow()

    def __del__(self):
        #constInfo.CALOPEN = 1
        ui.ScriptWindow.__del__(self)

    def LoadWindow(self):
        """Load the board UI script, wire up tabs, rows and the scrollbar,
        then show the default (SORT_ALL) page."""
        try:
            pyScrLoader = ui.PythonScriptLoader()
            pyScrLoader.LoadScriptFile(self, "exscript/bonusboard.py")
        except:
            import exception
            exception.Abort("CalenderWindow.LoadWindow.LoadObject")
        self.GetChild("TitleBar").SetCloseEvent(self.Close)
        # One button per sort tab; list index matches the SORT_* constants.
        self.sortButtonList = []
        self.sortButtonList.append(self.GetChild("nav_button_0"))
        self.sortButtonList.append(self.GetChild("nav_button_1"))
        self.sortButtonList.append(self.GetChild("nav_button_2"))
        self.sortButtonList.append(self.GetChild("nav_button_3"))
        self.sortButtonList.append(self.GetChild("nav_button_4"))
        self.sortButtonList.append(self.GetChild("nav_button_5"))
        # Default-argument lambdas bind each tab index at definition time.
        self.sortButtonList[SORT_ALL].SetEvent(lambda arg=SORT_ALL: self.LoadPage(arg))
        self.sortButtonList[SORT_PVP].SetEvent(lambda arg=SORT_PVP: self.LoadPage(arg))
        self.sortButtonList[SORT_PVM].SetEvent(lambda arg=SORT_PVM: self.LoadPage(arg))
        self.sortButtonList[SORT_OFFENSIVE].SetEvent(lambda arg=SORT_OFFENSIVE: self.LoadPage(arg))
        self.sortButtonList[SORT_DEFENSIVE].SetEvent(lambda arg=SORT_DEFENSIVE: self.LoadPage(arg))
        self.sortButtonList[SORT_OTHERS].SetEvent(lambda arg=SORT_OTHERS: self.LoadPage(arg))
        # Per-row widget handles, index 0..BOARD_COUNT-1.
        self.bonusWindow = []
        self.bonusNameTextLine = []
        self.bonusValueTextLine = []
        self.bonusMaxValueTextLine = []
        self.bonusEquipedTextLine = []
        for i in xrange(BOARD_COUNT):
            self.bonusWindow.append(self.GetChild("bonus_window_" + str(i)))
            self.bonusNameTextLine.append(self.GetChild("bonus_name_textline_" + str(i)))
            self.bonusValueTextLine.append(self.GetChild("bonus_value_textline_" + str(i)))
            self.bonusMaxValueTextLine.append(self.GetChild("bonus_max_value_textline_" + str(i)))
            self.bonusEquipedTextLine.append(self.GetChild("bonus_equip_textline_" + str(i)))
        self.scrollBar = self.GetChild("bonusScrollBar")
        self.scrollBar.SetScrollEvent(self.OnScroll)
        self.SetDefaultBonusPage()
        self.LoadPage(0)
        # self.Open()
        # self.eventDayImage[12].Show()
def LoadPage(self,idx):
self.sortPage = idx
for i in xrange(SORT_COUNT):
if i == idx:
self.sortButtonList[i].Disable()
else:
self.sortButtonList[i].Enable()
self.Clear()
self.SortBonusList()
self.InitBonusList()
def SortBonusList(self):
for i in xrange(len(self.BonusBoard)):
if self.sortPage in self.BonusBoard[i][BONUS_SORT_LIST]:
self.curPageContent.append(i)
def InitBonusList(self):
count = min(BOARD_COUNT,len(self.curPageContent))
for i in xrange(count):
bonusIndex = self.curPageContent[i]
self.bonusNameTextLine[i].SetText(self.BonusBoard[bonusIndex][BONUS_NAME])
self.bonusValueTextLine[i].SetText(self.CheckBonusValueByID(self.BonusBoard[bonusIndex][BONUS_VALUE]))
self.bonusWindow[i].Show()
if len(self.curPageContent) > BOARD_COUNT:
self.scrollBar.SetPos(0)
self.scrollBar.SetMiddleBarSize(float(BOARD_COUNT) / float(len(self.curPageContent)))
self.scrollBar.Show()
else:
self.scrollBar.Hide()
def OnScroll(self):
pos = int(self.scrollBar.GetPos() * (len(self.curPageContent) - BOARD_COUNT)) ##Aktuelle Position der Scrollbar
#self.Board.SetTitleName("Achievement-Statistik (Pos: " + str(pos) + ")")
for i in xrange(BOARD_COUNT):
realPos = i + pos
if realPos < len(self.curPageContent):
bonusIndex = self.curPageContent[realPos]
self.bonusNameTextLine[i].SetText(self.BonusBoard[realPos][BONUS_NAME])
self.bonusValueTextLine[i].SetText(self.CheckBonusValueByID(self.BonusBoard[realPos][BONUS_VALUE]))
def CheckBonusValueByID(self,id):
value = 0
for slot in xrange(90, 101):
for attr in xrange(0, 7):
attr, val = fgGHGjjFHJghjfFG1545gGG.GetItemAttribute(slot, attr)
if int(attr) == id:
value += int(val)
return str(value)
    def Clear(self):
        # Reset scroll state and hide every row before a page rebuild.
        self.scrollBar.SetPos(0)
        self.curPageContent = []
        self.scrollBar.Hide()
        for i in xrange(BOARD_COUNT):
            self.bonusWindow[i].Hide()

    def OnPressEscapeKey(self):
        # ESC closes the window; returning True marks the key as handled.
        self.Close()
        return True

    def Open(self):
        # Toggle behaviour: calling Open() while visible closes the window.
        if self.IsShow():
            self.Close()
        else:
            self.Show()

    def Close(self):
        self.Hide()
|
from extruder_turtle import ExtruderTurtle
import math
N = 60                  # segments per full turn
RADIUS = 12             # outer radius of the tower
dtheta = 2*math.pi/N    # turn angle per segment
dx = RADIUS*dtheta      # arc length per segment at the outer radius
dr = -5/N               # radius change per segment (negative = inward)
MAX_HEIGHT = 10

t = ExtruderTurtle()
t.name("spiral-tower.gcode")
t.setup(x=100, y=100)
t.set_density(0.05)
t.rate(1000)

# 40 layers; each layer spirals inward to the centre while rising, lifts,
# then spirals back out while descending, and lifts again.
for l in range(40):
    radius = RADIUS
    prop = l/40         # layer's height fraction (0..1)
    # Inward pass: advance along the arc, rising proportionally to prop.
    while radius > 0:
        t.move_lift(dx, prop*MAX_HEIGHT*(-dr)/RADIUS)
        t.right(dtheta)
        radius += dr
        dx = radius * dtheta   # arc length shrinks with the radius
    t.lift(0.2)
    # Outward pass: mirror of the inward pass, moving backwards and down.
    while radius < RADIUS:
        radius += -dr
        dx = radius * dtheta
        t.left(dtheta)
        t.move_lift(-dx, prop*MAX_HEIGHT*dr/RADIUS)
    t.lift(0.2)

t.finish()
|
from output_scores_and_acc import read_labels, get_articles_by_id, compute_article_sentiment
import json
if __name__ == "__main__":
    # Load gold labels, score every article, then emit one JSON record per
    # non-neutral paragraph with a binary sentiment label.
    ids, gold_labels = read_labels('../labeled_news/news_labeled.xlsx')
    valid_ids, valid_labels, articles = get_articles_by_id(ids, gold_labels)
    scores_objects = compute_article_sentiment(articles)
    output = []
    for obj in scores_objects:
        paragraphs = obj.get_paragraphs(obj.article)
        for index in range(len(obj.scores_by_paragraph)):
            # Zero-scored paragraphs are treated as neutral and skipped.
            if obj.scores_by_paragraph[index] != 0:
                # Positive score -> label 1, negative -> label 0.
                if obj.scores_by_paragraph[index] >= 0:
                    output.append({'text': paragraphs[index], 'score': obj.scores_by_paragraph[index], 'label': 1})
                else:
                    output.append({'text': paragraphs[index], 'score': obj.scores_by_paragraph[index], 'label': 0})
    with open('../results/paragraph_scores.json', 'w') as outfile:
        json.dump(output, outfile)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ZolozIdentificationCustomerCertifyInitializeModel(object):
    """Request model for zoloz.identification.customer.certify.initialize."""

    def __init__(self):
        # All fields default to None; callers set them via the properties below.
        self._biz_id = None
        self._biz_type = None
        self._cert_name = None
        self._cert_no = None
        self._cert_type = None
        self._has_welcome_page = None
        self._metainfo = None
        self._terminal_pos = None
        self._user_id = None
    # Plain pass-through accessors for each request field.
    @property
    def biz_id(self):
        return self._biz_id

    @biz_id.setter
    def biz_id(self, value):
        self._biz_id = value

    @property
    def biz_type(self):
        return self._biz_type

    @biz_type.setter
    def biz_type(self, value):
        self._biz_type = value

    @property
    def cert_name(self):
        return self._cert_name

    @cert_name.setter
    def cert_name(self, value):
        self._cert_name = value

    @property
    def cert_no(self):
        return self._cert_no

    @cert_no.setter
    def cert_no(self, value):
        self._cert_no = value

    @property
    def cert_type(self):
        return self._cert_type

    @cert_type.setter
    def cert_type(self, value):
        self._cert_type = value

    @property
    def has_welcome_page(self):
        return self._has_welcome_page

    @has_welcome_page.setter
    def has_welcome_page(self, value):
        self._has_welcome_page = value

    @property
    def metainfo(self):
        return self._metainfo

    @metainfo.setter
    def metainfo(self, value):
        self._metainfo = value

    @property
    def terminal_pos(self):
        return self._terminal_pos

    @terminal_pos.setter
    def terminal_pos(self, value):
        self._terminal_pos = value

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.biz_id:
if hasattr(self.biz_id, 'to_alipay_dict'):
params['biz_id'] = self.biz_id.to_alipay_dict()
else:
params['biz_id'] = self.biz_id
if self.biz_type:
if hasattr(self.biz_type, 'to_alipay_dict'):
params['biz_type'] = self.biz_type.to_alipay_dict()
else:
params['biz_type'] = self.biz_type
if self.cert_name:
if hasattr(self.cert_name, 'to_alipay_dict'):
params['cert_name'] = self.cert_name.to_alipay_dict()
else:
params['cert_name'] = self.cert_name
if self.cert_no:
if hasattr(self.cert_no, 'to_alipay_dict'):
params['cert_no'] = self.cert_no.to_alipay_dict()
else:
params['cert_no'] = self.cert_no
if self.cert_type:
if hasattr(self.cert_type, 'to_alipay_dict'):
params['cert_type'] = self.cert_type.to_alipay_dict()
else:
params['cert_type'] = self.cert_type
if self.has_welcome_page:
if hasattr(self.has_welcome_page, 'to_alipay_dict'):
params['has_welcome_page'] = self.has_welcome_page.to_alipay_dict()
else:
params['has_welcome_page'] = self.has_welcome_page
if self.metainfo:
if hasattr(self.metainfo, 'to_alipay_dict'):
params['metainfo'] = self.metainfo.to_alipay_dict()
else:
params['metainfo'] = self.metainfo
if self.terminal_pos:
if hasattr(self.terminal_pos, 'to_alipay_dict'):
params['terminal_pos'] = self.terminal_pos.to_alipay_dict()
else:
params['terminal_pos'] = self.terminal_pos
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ZolozIdentificationCustomerCertifyInitializeModel()
if 'biz_id' in d:
o.biz_id = d['biz_id']
if 'biz_type' in d:
o.biz_type = d['biz_type']
if 'cert_name' in d:
o.cert_name = d['cert_name']
if 'cert_no' in d:
o.cert_no = d['cert_no']
if 'cert_type' in d:
o.cert_type = d['cert_type']
if 'has_welcome_page' in d:
o.has_welcome_page = d['has_welcome_page']
if 'metainfo' in d:
o.metainfo = d['metainfo']
if 'terminal_pos' in d:
o.terminal_pos = d['terminal_pos']
if 'user_id' in d:
o.user_id = d['user_id']
return o
|
import sys
input = sys.stdin.readline   # fast stdin reads (newline kept, harmless to int())
from pprint import pprint

print(ord('A'))  # NOTE(review): prints 65 — looks like leftover debug output; confirm.
while 1:
    n = int(input())
    if n == 0:   # sentinel: zero dominoes terminates the input
        break
    # 9x9 grid addressed as board[row][col]; row letters 'A'.. map to 0..
    board = [
        [0 for _ in range(9)] for _ in range(9)
    ]
    # dominos: each line gives two values with their cell coordinates.
    for _ in range(n):
        u, lu, v, lv = input().split()
        board[ord(lu[0])-65][int(lu[1])-1] = int(u)
        board[ord(lv[0])-65][int(lv[1])-1] = int(v)
    # numbers: remaining cells filled with their 1-based input order.
    numbers = input().split()
    for i, num in enumerate(numbers, 1):
        board[ord(num[0])-65][int(num[1])-1] = i
    pprint(board)
|
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import Imputer
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import numpy as np
import pandas as pd
from io import StringIO
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
    """Plot the decision surface of a fitted 2-feature classifier.

    X: (n_samples, 2) feature array; y: class labels; classifier: any object
    with a predict() method.  test_idx optionally marks a slice of X/y as the
    test set.  resolution is the mesh step for the decision surface.
    """
    # Set up marker generator and color palette (one entry per class).
    markers = ('s', 'x', '^', 'o', 'v')
    colors = ('red', 'blue', 'green', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # Plot the decision surface: predict on a dense mesh covering the data.
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    # NOTE(review): contour draws only the boundary lines; contourf would
    # shade the regions -- confirm which rendering is intended.
    plt.contour(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    # Scatter the samples of each class with its own marker/color.
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.8, c=cmap(idx),
                    marker=markers[idx], label=cl)
    # Highlight the test samples, if an index slice was given.
    if test_idx:
        X_test, y_test = X[test_idx, :], y[test_idx]
        plt.scatter(X_test[:, 0], X_test[:, 1], c='yellow',
                    alpha=1.0, linewidths=1, marker='o',
                    s=55, label='тестовый набор')
# # Создаем набор данных
# X_xor = np.random.randn(200, 2)
# y_xor = np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] > 0)
# y_xor = np.where(y_xor, 1, -1)
#
# plt.scatter(X_xor[y_xor==1, 0], X_xor[y_xor==1, 1],
# c='b', marker='x', label='1')
# plt.scatter(X_xor[y_xor==-1, 0], X_xor[y_xor==-1, 1],
# c='r', marker='s', label='-1')
# plt.ylim(-3.0)
# plt.legend()
# plt.show()
#
# # Создание НС на основе ядра из функций радиального базиса
# svm = SVC(kernel='rbf', gamma=0.2, C=1.0, random_state=0)
# svm.fit(X_xor, y_xor)
#
# plot_decision_regions(X=X_xor, y=y_xor, classifier=svm)
# plt.title('НС на основе ядра из функций радиального базиса)')
# plt.legend(loc='upper left')
# plt.show()
# Demo: mean-imputation of missing CSV values.
# NOTE(review): sklearn.preprocessing.Imputer was deprecated in scikit-learn
# 0.20 and removed in 0.22; modern code uses sklearn.impute.SimpleImputer
# (which has no axis= parameter -- axis=1 row-wise imputation would need a
# transpose).  This block only runs on old scikit-learn versions.
csv_data = '''A, B, C, D
1.0, 2.0, 3.0, 4.0
5.0, 6.0,, 8.0
10.0, 11.0, 12.0,'''
df = pd.read_csv(StringIO(csv_data))
print(df)
print('')
# print(df.isnull().sum())
# print(df.dropna())
# axis=1: replace a missing cell with the mean of its ROW.
imr = Imputer(missing_values='NaN', strategy='mean', axis=1)
imr = imr.fit(df)
impured_date = imr.transform(df.values)
print(impured_date)
print('')
# Demo: ordinal encoding of a categorical "size" column and decoding it back.
dv = pd.DataFrame(
    [
        ['зеленый', 'M', 10.1, 'класс1'],
        ['красный', 'L', 13.5, 'класс2'],
        ['синий', 'XL', 15.3, 'класс1'],
    ],
    columns=['Цвет', 'Размер', 'Цена', 'Метка'],
)
# Ordinal mapping for the clothing sizes.
size_mapping = {'XL': 3, 'L': 2, 'M': 1}
print(dv)
print('')
dv['Размер'] = dv['Размер'].map(size_mapping)
print(dv)
print('')
# Invert the mapping to restore the original labels.
inv_size_mapping = {code: label for label, code in size_mapping.items()}
dv['Размер'] = dv['Размер'].map(inv_size_mapping)
print(dv)
print('')
# Generated by Django 4.1.7 on 2023-05-19 10:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds an optional free-text ``notes``
    field to the Bookmark model."""

    dependencies = [
        ('bookmarks', '0021_userprofile_display_url'),
    ]

    operations = [
        migrations.AddField(
            model_name='bookmark',
            name='notes',
            field=models.TextField(blank=True),
        ),
    ]
|
#coding=utf-8
__author__ = 'lixiaojian'
# from server import app
# from flask_sqlalchemy import SQLAlchemy
#
# db = SQLAlchemy(app)
# db_session=db.session
#
#
# def init_db():
# # 在这里导入所有的可能与定义模型有关的模块,这样他们才会合适地
# # 在 metadata 中注册。否则,您将不得不在第一次执行 init_db() 时
# # 先导入他们。
#
# from orm.model import *
# db.create_all()
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# __author__ = 'TesterCC'
# __time__ = '17/10/4 20:33'
"""
05-15-拆包,元组、字典
"""
def test(a, b, c=33, *args, **kwargs):
print(a)
print(b)
print(c)
print(args)
print(kwargs)
# test(11, 22, 33, 44, 55, 66, 77, task=99, done=89)
A = (44, 55, 66)
B = {"name": "Alice", "age": 18}
test(11, 22, 33, *A, **B) |
from .logger import Logger
from .loop_tracker import LoopTracker
from .show_variable_logger import ShowVariableLogger
from .numpy_extension import *
|
from data_structures.linked_lists.linked_list import LinkedList, Node
# Todo: Add tests for the merge subroutine
def partition(linked_list: LinkedList, value: int) -> LinkedList:
    """Partition *linked_list* around *value*.

    Walks the list once, routing each node's value into one of three
    sublists (smaller-than / equal-to / greater-than *value*), then splices
    them back together as before -> equal -> after.

    :param linked_list: the list to partition (its nodes are copied by value)
    :param value: the pivot value
    :return: a new LinkedList with all values < value first, then == value,
        then > value
    """
    before_list = LinkedList()
    after_list = LinkedList()
    equal_list = LinkedList()
    node = linked_list.head  # type: Node
    while node:
        append_correct_list(after_list, before_list, equal_list, node, value)
        node = node.next
    merge(after_list, before_list, equal_list)
    return before_list
def append_correct_list(after_list, before_list, equal_list, ptr, value):
    """Append ptr.value to whichever sublist matches its ordering vs *value*."""
    if ptr.value < value:
        target = before_list
    elif ptr.value > value:
        target = after_list
    else:
        target = equal_list
    target.append(ptr.value)
def merge(after_list: "LinkedList", before_list: "LinkedList", equal_list: "LinkedList") -> None:
    """Splice the three partition sublists together, in place, into before_list.

    Final order: before -> equal -> after.  Both head AND tail pointers of
    the spliced lists are kept consistent.  (The original version left tails
    pointing at the *head* of the appended list, or stale, after merging.)

    :param after_list: list with values bigger than the partition value
    :param before_list: list with values smaller than the partition value
    :param equal_list: list with values equal to the partition value
    :return: None; before_list is mutated to hold the merged chain
    """
    # Append after_list onto equal_list.
    if not equal_list.head:
        equal_list.head = after_list.head
        equal_list.tail = after_list.tail
    else:
        equal_list.tail.next = after_list.head
        if after_list.head:
            # Bug fix: advance the tail to the END of after_list, not its head.
            equal_list.tail = after_list.tail
    # Append the (now combined) equal_list onto before_list.
    if not before_list.head:
        before_list.head = equal_list.head
        before_list.tail = equal_list.tail
    else:
        before_list.tail.next = equal_list.head
        if equal_list.head:
            # Bug fix: keep before_list.tail pointing at the end of the chain.
            before_list.tail = equal_list.tail
|
from anode_data_loader import mnist
from base import *
from mnist.mnist_train import train
# CLI options for the MNIST HeavyBall-NODE experiment.
parser = ArgumentParser()
parser.add_argument('--tol', type=float, default=1e-3)
# NOTE(review): type=eval executes the CLI string -- works for "True"/"False"
# but evaluates arbitrary input; consider a proper bool parser.
parser.add_argument('--adjoint', type=eval, default=False)
parser.add_argument('--visualize', type=eval, default=True)
parser.add_argument('--niters', type=int, default=40)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--gpu', type=int, default=0)
args = parser.parse_args()
# shape: [time, batch, derivatives, channel, x, y]
dim = 5
hidden = 49
# Training and test MNIST loaders.
trdat, tsdat = mnist(batch_size=256)
class initial_velocity(nn.Module):
    """Lift a plain input image into the 2-slot (position/velocity) state.

    forward() maps x0 of shape (batch, in_channels, H, W) to a tensor of
    shape (batch, 2, out_channels, H, W): the first in_channels channels of
    the flattened state are the raw input, the conv stack generates the
    remaining 2*out_channels - in_channels channels.
    """

    def __init__(self, in_channels, out_channels, nhidden):
        super(initial_velocity, self).__init__()
        # Channel-count sanity check.  NOTE(review): the cat in forward()
        # needs 2*out_channels >= in_channels; the 3* factor here looks
        # looser than required -- confirm.
        assert (3 * out_channels >= in_channels)
        self.actv = nn.LeakyReLU(0.3)
        self.fc1 = nn.Conv2d(in_channels, nhidden, kernel_size=1, padding=0)
        self.fc2 = nn.Conv2d(nhidden, nhidden, kernel_size=3, padding=1)
        # Produces exactly the channels missing to reach 2*out_channels total.
        self.fc3 = nn.Conv2d(nhidden, 2 * out_channels - in_channels, kernel_size=1, padding=0)
        self.out_channels = out_channels
        self.in_channels = in_channels

    def forward(self, x0):
        x0 = x0.float()
        out = self.fc1(x0)
        out = self.actv(out)
        out = self.fc2(out)
        out = self.actv(out)
        out = self.fc3(out)
        # Prepend the raw input channels, then split the channel axis into
        # (derivative=2, channel) slots.
        out = torch.cat([x0, out], dim=1)
        out = rearrange(out, 'b (d c) ... -> b d c ...', d=2)
        return out
class DF(nn.Module):
    """Convolutional vector field f(t, x) for the ODE layer.

    The state carries a singleton derivative axis: (batch, 1, C, H, W).
    The scalar time t is broadcast to an extra image channel and re-appended
    before every conv, so each layer is time-conditioned (hence the "+ 1"
    input channels below).
    """

    def __init__(self, in_channels, nhidden):
        super(DF, self).__init__()
        self.activation = nn.LeakyReLU(0.01)
        # "+ 1" makes room for the broadcast time channel at each stage.
        self.fc1 = nn.Conv2d(in_channels + 1, nhidden, kernel_size=1, padding=0)
        self.fc2 = nn.Conv2d(nhidden + 1, nhidden, kernel_size=3, padding=1)
        self.fc3 = nn.Conv2d(nhidden + 1, in_channels, kernel_size=1, padding=0)

    def forward(self, t, x0):
        # Drop the singleton derivative axis: (b, 1, c, x, y) -> (b, c, x, y).
        out = rearrange(x0, 'b 1 c x y -> b c x y')
        # Constant image channel holding the scalar time t.
        t_img = torch.ones_like(out[:, :1, :, :]).to(device=args.gpu) * t
        out = torch.cat([out, t_img], dim=1)
        out = self.fc1(out)
        out = self.activation(out)
        out = torch.cat([out, t_img], dim=1)
        out = self.fc2(out)
        out = self.activation(out)
        out = torch.cat([out, t_img], dim=1)
        out = self.fc3(out)
        # Restore the derivative axis for the ODE solver.
        out = rearrange(out, 'b c x y -> b 1 c x y')
        return out
class predictionlayer(nn.Module):
    """Linear classifier head: flattens derivative slot 0 of the state
    (in_channels x 28 x 28) into 10 class logits."""

    def __init__(self, in_channels):
        super(predictionlayer, self).__init__()
        self.dense = nn.Linear(in_channels * 28 * 28, 10)

    def forward(self, x):
        # Take the position component (derivative index 0) and flatten it.
        flat = rearrange(x[:, 0], 'b c x y -> b (c x y)')
        return self.dense(flat)
# NOTE(review): this handle is opened in append mode but never written to or
# closed anywhere in this view (and shadows the builtin `file` on Py2) --
# confirm whether train() needs it; otherwise remove.
file = open('./data/0.txt', 'a')
# Run 5 independent training trials, each with a freshly initialized model:
# initial-velocity lift -> HeavyBall NODE layer -> linear classifier head.
for tries in range(5):
    hblayer = NODElayer(HeavyBallNODE(DF(dim, hidden), None))
    model = nn.Sequential(initial_velocity(1, dim, hidden), hblayer, predictionlayer(dim)).to(device=args.gpu)
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=0.000)
    train(model, optimizer, trdat, tsdat, args, 1, gamma=model[1].df.gamma)
|
# Write a function that takes string as input,
# and returns a dictionary object,
# where the keys are the characters in the string,
# and the values are the number of times each letter occurs.
def letter_count(text):
    """Return a dict mapping each character of *text* to its occurrence count."""
    counts = {}
    for ch in text:
        counts[ch] = counts.get(ch, 0) + 1
    return counts


result = letter_count("Now is the time for all good people to come to the aid of their planet.")
print(result)
assert result == {'a': 3, ' ': 15, 'c': 1, 'e': 8, 'd': 2, 'g': 1, 'f': 2, 'i': 4, 'h': 3, 'm': 2, 'l': 4, 'o': 9,
                  'N': 1, 'p': 3, 's': 1, 'r': 2, 't': 7, 'w': 1, '.': 1, 'n': 1}

# EXTRA CREDIT:
# How could you ignore spaces?
# How could you get the largest letter?
# How could you find the number of occurrences of whole words instead of letters?
# How could you return them in order of frequency?
# How else could this be improved or re-purposed?
|
import gym
import numpy as np
import matplotlib.pyplot as plt
import torch
from collections import deque
from lunarlander_agent import Lunarlander
def trival_landing(env, agent, render = True, plot = True):
    '''
    Run 10 landing attempts of up to 200 steps each with the agent's current
    policy; optionally render each step in gym and plot per-attempt scores.
    (Function name kept as-is for compatibility; "trivial" appears intended.)
    '''
    TRY_SIZE = 10
    scores = np.zeros(TRY_SIZE)
    for i in range(TRY_SIZE):
        state = env.reset()
        score = 0
        for j in range(200):
            # Greedy action (no epsilon passed -> evaluation behaviour).
            action = agent.act(state)
            if render:
                env.render()
            state, reward, done, _ = env.step(action)
            score += reward
            if done:
                break
        scores[i] = score
    if plot:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        plt.plot(np.arange(len(scores)), scores)
        plt.ylabel('Score')
        plt.xlabel('Try #')
        plt.show()
def train_model(env, agent, n_episodes = 1000, max_t = 1000, eps_start = 1.0, eps_end = 0.01, eps_decay = 0.995, render = True, plot = True):
    """Train the agent with an epsilon-greedy policy over n_episodes.

    Epsilon decays multiplicatively per episode from eps_start down to
    eps_end.  Prints a rolling 100-episode average, saves the Q-network
    state at the end, and returns the list of per-episode scores.
    (The render/plot parameters are currently unused in this function.)
    """
    # agent.set_agent_details(gamma = 0.99, tau = 1e-3, learning_rate = 5e-4, update_frequency = 4, memory_buffer_size = 100000, batch_size = 64):
    scores = []                        # list containing scores from each episode
    scores_window = deque(maxlen = 100)     # last 100 scores
    eps = eps_start                    # initialize epsilon
    for i_episode in range(1, n_episodes + 1):
        state = env.reset()
        score = 0
        for t in range(max_t):
            action = agent.act(state, eps)
            next_state, reward, done, _ = env.step(action)
            # Store the transition and (periodically) learn from replay.
            agent.step(state, action, reward, next_state, done)
            state = next_state
            score += reward
            if done:
                break
        scores_window.append(score)       # save most recent score
        scores.append(score)              # save most recent score
        eps = max(eps_end, eps_decay * eps) # decrease epsilon
        print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="")
        if i_episode % 100 == 0:
            print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
    # if np.mean(scores_window)>=200.0:
    #     print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))
    print("saving trained results..")
    agent.save_q_network_state('ll_checkpoint.pth')
    # torch.save(agent.qnetwork_local.state_dict(), 'll_checkpoint.pth')
    # break
    return scores
if __name__ == "__main__":
# Initialize environment and Lunarlander agent
env = gym.make('LunarLander-v2')
env.seed(0)
env_state_size = env.observation_space.shape[0]
env_action_size = env.action_space.n
agent = Lunarlander(state_size = env_state_size, action_size = env_action_size,
random_seed = 0)
# # random landing for 10 times, basic understanding of gym env
# trival_landing(env, agent, render = False, plot = False)
# train
train_model(env, agent)
env.close() |
# Read two integers from the user and print their sum.
numero1 = int(input('Insira um número'))
numero2 = int(input('Insira outro número'))
print(numero1 + numero2)
from django.db import models
class Kafedra(models.Model):
    """Department (kafedra); referenced by Subject."""
    name = models.CharField(max_length=100, null=False, blank=False)

    def __str__(self):
        return self.name
class Subject(models.Model):
    """Taught subject, optionally attached to a department."""
    name = models.CharField(max_length=100, null=False, blank=False)
    # SET_NULL keeps the subject if its department is deleted.
    kafedra = models.ForeignKey(Kafedra, null=True, on_delete=models.SET_NULL)

    def __str__(self):
        return self.name
class Teacher(models.Model):
    """Teacher, optionally attached to the Subject they teach."""
    first_name = models.CharField(max_length=100, null=False, blank=False)
    last_name = models.CharField(max_length=100, null=False, blank=False)
    subject = models.ForeignKey(Subject, null=True, on_delete=models.SET_NULL)

    def __str__(self):
        # Bug fix: the original rendered last_name twice.
        return f"{self.first_name} {self.last_name}"
class Faculty(models.Model):
    """Faculty; referenced by Group and Student."""
    name = models.CharField(max_length=100, null=False, blank=False)

    def __str__(self):
        return self.name
class Group(models.Model):
    """Student group belonging to a faculty."""
    name = models.CharField(max_length=100, null=False, blank=False)
    faculty = models.ForeignKey(Faculty, null=True, on_delete=models.SET_NULL )

    def __str__(self):
        return self.name
class Student(models.Model):
    """Student, linked to a group and (redundantly with Group) a faculty."""
    first_name = models.CharField(max_length=100, null=False, blank=False)
    last_name = models.CharField(max_length=100, null=False, blank=False)
    group = models.ForeignKey(Group,null=True, on_delete=models.SET_NULL)
    faculty = models.ForeignKey(Faculty, null=True, on_delete=models.SET_NULL)

    def __str__(self):
        # Bug fix: the original rendered last_name twice.
        return f"{self.first_name} {self.last_name}"
|
import rpyc
import sys
import os
import time
# Local directory holding the source files to upload.
path = './client/sourcefiles/'
# Hard-coded service addresses for the master and the single chunkserver.
master_ip = '128.6.4.131'
chunk_ip = '128.6.13.131'
# Three Basic Functions:
# Download
# Upload
# Delete
class client:
    """Minimal GFS-style client.

    NOTE(review): the root-directory setup and the master/chunkserver
    connections run at class-definition (import) time, not per instance,
    so all instances share them as class attributes.
    """

    # Local scratch root for the client (~/gfs_root/client).
    # Bug fix: the original concatenated expanduser("~") + "./gfs_root/client/",
    # producing a broken path like "/home/user./gfs_root/client/".
    client_root = os.path.join(os.path.expanduser("~"), "gfs_root", "client")
    if not os.access(client_root, os.W_OK):
        os.makedirs(client_root)
        print("create client root")

    # Connect to the master and to the single configured chunkserver.
    con_master = rpyc.connect(master_ip, port=18861)
    master_sever = con_master.root
    master = master_sever
    chunkservers = {}
    con_chunk = rpyc.connect(chunk_ip, port=8888)
    chunkservers[0] = con_chunk.root

    def __init__(self):
        # Connections are established at class level; nothing to do per instance.
        return

    def write(self, filename, data):
        """Upload the local file `path + data` into GFS as *filename*.

        An existing GFS file of the same name is deleted first (overwrite).
        """
        if self.exists(filename):
            self.delete(filename)
            print('file already exists')
        else:
            print('this is a new data')
        with open(path + str(data), 'rb') as f:
            data = f.read()
        # Single-chunk implementation: everything goes into one chunk.
        num_chunks = 1
        chunkuuids = self.master.alloc_file(filename, num_chunks)
        print("chunk_id ", chunkuuids)
        self.write_chunks(chunkuuids, data)

    def write_chunks(self, chunkuuids, data):
        """Write *data* to each allocated chunk via its chunkserver."""
        chunks = {}
        chunks[0] = data
        chunkservers = self.chunkservers
        for i in range(0, len(chunkuuids)):  # write to each chunkserver
            chunkuuid = chunkuuids[i]
            chunkloc = self.master.get_chunkloc(chunkuuid)
            chunkservers[chunkloc].write(chunkuuid, chunks[i])

    def num_chunks(self, size):
        # Placeholder: chunking is not implemented yet; always one chunk.
        return 1

    def exists(self, filename):
        """Ask the master whether *filename* is registered."""
        return self.master.exists(filename)

    def read(self, filename):  # get metadata, then read chunks direct
        """Download *filename* from GFS into the current directory."""
        if not self.exists(filename):
            raise Exception("read error, file does not exist: "
                            + filename)
        chunks = []
        chunkuuids = self.master.get_chunkuuids(filename)
        chunkservers = self.chunkservers
        for chunkuuid in chunkuuids:
            chunkloc = self.master.get_chunkloc(chunkuuid)
            chunk = chunkservers[chunkloc].read(chunkuuid)
            chunks.append(chunk)
        # NOTE(review): with multiple chunks only the LAST one is written out;
        # harmless today because num_chunks is always 1, but a multi-chunk
        # version must join `chunks`.
        data = chunk
        with open('./' + str(filename), 'wb') as f:
            f.write(data)

    def delete(self, filename):
        """Remove *filename* from the master's namespace."""
        self.master.delete(filename)

    def show_file(self):
        """Return the master's file list.

        Fix: the original fetched the list and silently discarded it.
        """
        filelist = self.master.filelist
        return filelist
# Benchmark: upload 10 source files and report per-file and total wall time.
t1 = time.time()
client1 = client()
print("masterserver IP is ", master_ip)
print("chunkserver IP is ", chunk_ip)
sourcefile = 'source_file_'
for i in range(10):
    start_time = time.time()
    # Local file name doubles as the GFS file name.
    source = sourcefile + str(i)+'.txt'
    print(source)
    client1.write(source, source)
    end_time = time.time()
    print('write time', i+1, ':', (end_time-start_time))
client1.master.dump_metadata()
t2 = time.time()
print('Runtime = ', t2 - t1)
# client1.read('pic.jpg')
# finish_time = time.time()
# print('read time: ',(finish_time-end_time))
# print('end')
|
import math

# Read two integers and print their greatest common divisor.
a = int(input("Enter a Number : "))
b = int(input("Enter a Number : "))
print(math.gcd(a, b))
from functools import wraps
from flask import jsonify, request
from project.admin.models import User
class users_only(object):
    """
    Check that the client has an auth_token for a valid user. If so, run the
    decorated route_function. Optionally pass the user info into the route
    function if pass_user is set to True.
    """

    def __init__(self, pass_user=False):
        # When True, the resolved User object is passed as the route's first arg.
        self.pass_user = pass_user

    def __call__(self, route_function, **kwargs):
        @wraps(route_function)
        def wrapper(**kwargs):
            response_object = {
                'status': 'fail',
                'message': 'Provide a valid auth token.'
            }
            auth_header = request.headers.get('Authorization')
            if auth_header:
                # Expect "Bearer <token>".  Robustness fix: a malformed header
                # (no space-separated token) previously raised IndexError and
                # produced a 500; now it falls through to the 401 below.
                parts = auth_header.split(' ')
                if len(parts) == 2:
                    auth_token = parts[1]
                    decode_response, _ = User.decode_auth_token(auth_token)
                    # If the token is invalid, deny access
                    if not isinstance(decode_response, int):
                        response_object['message'] = 'Auth token invalid.'
                        return jsonify(response_object), 401
                    # If the token is valid, allow access
                    user = User.query.filter_by(id=decode_response).first()
                    if self.pass_user:
                        return route_function(user, **kwargs)
                    else:
                        return route_function(**kwargs)
            # No/malformed auth header: return an error
            return jsonify(response_object), 401
        return wrapper
|
# -*- coding: utf-8 -*-
import responder
from responder import API
import time
import traceback
from alchemydb import Session, engine
from models import Tasks
from sqlutils import alchemytojson, alchemytodict
from todo import add_todo
from todo import delete_todo
from todo import update_todo
from todo import get_todo
from todo import get_todo_list
# Responder app; CORS enabled and all hosts allowed (development settings).
api = responder.API(
    cors=True,
    allowed_hosts=["*"],
)
@api.route("/")
def hello_html(req, resp):
resp.html = api.template('hello.html')
@api.route("/api/task/{id}")
def users_json(req, resp, *, id):
session = Session()
try:
user = session.query(Tasks).filter_by(id=id).first()
resp.headers = {"Content-Type": "application/json; charset=utf-8"}
resp.content = alchemytojson(user)
except Exception:
traceback.print_exc()
resp.media ={"errmessage":"Error occured"}
finally:
session.close()
print(engine.pool.status())
@api.route("/api/tasks")
def users_json(req, resp):
session = Session()
try:
users = session.query(Tasks).all()
resp.headers = {"Content-Type": "application/json; charset=utf-8"}
resp.content = alchemytojson(users)
except Exception:
traceback.print_exc()
resp.media ={"errmessage":"Error occured"}
finally:
session.close()
print(engine.pool.status())
class UpdateGetDeleteTodo:
    """Handlers for /api/todo/{id}: GET one, PUT (update), DELETE.

    PUT and DELETE enqueue the DB work as responder background tasks (which
    sleep 3s first), so the HTTP response returns before the work is done.
    """

    def on_get(self, req, resp, *, id):
        # Fetch a single todo by id.
        todo = get_todo(id)
        resp.media = {
            "status": True,
            "todo": todo
        }

    async def on_put(self, req, resp, *, id):
        @api.background.task
        def process_update_todo(name, text):
            # Runs after the response is sent; `id` is captured by closure.
            time.sleep(3)
            update_todo(id, name, text)
        data = await req.media()
        name = data['name']
        text = data['text']
        process_update_todo(name, text)
        resp.media = {
            'status': True
        }

    async def on_delete(self, req, resp, *, id):
        @api.background.task
        def process_delete_todo():
            # Deferred deletion; `id` is captured by closure.
            time.sleep(3)
            delete_todo(id)
        process_delete_todo()
        resp.media = {
            'status': True
        }
class AddGetTodo:
    """Handlers for /api/todo: GET the list, POST a new todo.

    POST enqueues the insert as a background task (3s delay), so the
    response returns before the todo is actually stored.
    """

    def on_get(self, req, resp):
        todos = get_todo_list()
        resp.media = {
            "status": True,
            "todos": todos
        }

    async def on_post(self, req, resp):
        @api.background.task
        def process_add_todo(name, text):
            # Runs after the response is sent.
            time.sleep(3)
            add_todo(name, text)
        data = await req.media()
        name = data['name']
        text = data['text']
        process_add_todo(name, text)
        resp.media = {
            'status': True
        }
api.add_route("/api/todo", AddGetTodo)
api.add_route("/api/todo/{id}", UpdateGetDeleteTodo)
if __name__ == "__main__":
port = 5042
api.run(port=port) |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import os
import argparse
import pyLikelihood as pyLike
import numpy as np
from astropy.coordinates import SkyCoord
from astropy.table import Table, Column
from fermipy import utils
from fermipy import spectrum
from fermipy import irfs
from fermipy import skymap
def main():
    """Calculate the LAT point-source flux sensitivity.

    Builds a power-law test source at the requested Galactic position,
    combines livetime, PSF, exposure and diffuse+isotropic backgrounds to
    find the per-energy-bin flux normalization reaching --ts_thresh (and at
    least --min_counts), and writes the sensitivity curve as a FITS table.
    """
    usage = "usage: %(prog)s [options]"
    description = "Calculate the LAT point-source flux sensitivity."
    parser = argparse.ArgumentParser(usage=usage, description=description)
    parser.add_argument('--ltcube', default=None,
                        help='Set the path to the livetime cube.')
    parser.add_argument('--galdiff', default=None, required=True,
                        help='Set the path to the galactic diffuse model.')
    parser.add_argument('--isodiff', default=None,
                        help='Set the path to the isotropic model. If none then the '
                        'default model will be used for the given event class.')
    parser.add_argument('--ts_thresh', default=25.0, type=float,
                        help='Set the detection threshold.')
    parser.add_argument('--min_counts', default=3.0, type=float,
                        help='Set the minimum number of counts.')
    # NOTE(review): --joint is parsed but never referenced below -- confirm
    # whether joint-likelihood mode was meant to alter event_types.
    parser.add_argument('--joint', default=False, action='store_true',
                        help='Compute sensitivity using joint-likelihood of all event types.')
    parser.add_argument('--event_class', default='P8R2_SOURCE_V6',
                        help='Set the IRF name.')
    parser.add_argument('--glon', default=0.0, type=float,
                        help='Galactic longitude.')
    parser.add_argument('--glat', default=0.0, type=float,
                        help='Galactic latitude.')
    parser.add_argument('--index', default=2.0, type=float,
                        help='Source power-law index.')
    parser.add_argument('--emin', default=100., type=float,
                        help='Minimum energy in MeV.')
    parser.add_argument('--emax', default=100000., type=float,
                        help='Maximum energy in MeV.')
    parser.add_argument('--nbin', default=12, type=int,
                        help='Number of energy bins for differential flux calculation.')
    parser.add_argument('--output', default='output.fits', type=str,
                        help='Output filename.')
    parser.add_argument('--obs_time_yr', default=None, type=float,
                        help='Rescale the livetime cube to this observation time in years. If none then the '
                        'calculation will use the intrinsic observation time of the livetime cube.')
    args = parser.parse_args()
    # Single event-type group: FRONT+BACK analysed together.
    event_types = [['FRONT','BACK']]
    # Test-source spectrum: power law, pivot energy 1 GeV.
    fn = spectrum.PowerLaw([1E-13,-args.index],scale=1E3)
    # Logarithmic energy bins with geometric bin centers.
    log_ebins = np.linspace(np.log10(args.emin),np.log10(args.emax),args.nbin+1)
    ebins = 10**log_ebins
    ectr = np.exp(utils.edge_to_center(np.log(ebins)))
    c = SkyCoord(args.glon,args.glat,unit='deg',frame='galactic')
    # Livetime cube: either synthesize an isotropic one for the requested
    # observation time, or load and optionally rescale an existing cube.
    if args.ltcube is None:
        if args.obs_time_yr is None:
            raise Exception('No observation time defined.')
        ltc = irfs.LTCube.create_empty(0,args.obs_time_yr*365*24*3600.,
                                       args.obs_time_yr*365*24*3600.)
        ltc._counts *= ltc.domega[:,np.newaxis]/(4.*np.pi)
    else:
        ltc = irfs.LTCube.create(args.ltcube)
        if args.obs_time_yr is not None:
            ltc._counts *= args.obs_time_yr*365*24*3600./(ltc.tstop-ltc.tstart)
    m0 = skymap.Map.create_from_fits(args.galdiff)
    # Isotropic model: explicit path, or the default template for the class.
    if args.isodiff is None:
        isodiff = utils.resolve_file_path('iso_%s_v06.txt'%args.event_class,
                                          search_dirs=[os.path.join('$FERMIPY_ROOT','data'),
                                                       '$FERMI_DIFFUSE_DIR'])
        isodiff = os.path.expandvars(isodiff)
    else:
        isodiff = args.isodiff
    iso = np.loadtxt(isodiff,unpack=True)
    # Signal/background counts per event-type group.
    sig = []
    bkg = []
    for et in event_types:
        psf = irfs.PSFModel(c.icrs,ltc,args.event_class,et,log_ebins)
        exp = irfs.Exposure.create(ltc,args.event_class,et,np.log10(ectr))
        expv = exp.get_map_values(c.icrs.ra.deg,c.icrs.dec.deg)
        # Background = galactic diffuse + (log-log interpolated) isotropic.
        bkgv = m0.interpolate(c.l.deg,c.b.deg,ectr)
        isov = np.exp(np.interp(np.log(ectr),np.log(iso[0]),np.log(iso[1])))
        bkgv += isov
        s, b = irfs.compute_ps_counts(ebins,expv,psf,bkgv,fn)
        sig += [s]
        bkg += [b]
    sig = np.concatenate([np.expand_dims(t,-1) for t in sig])
    bkg = np.concatenate([np.expand_dims(t,-1) for t in bkg])
    # Normalization that reaches the TS threshold in each energy bin.
    norms = irfs.compute_norm(sig,bkg,args.ts_thresh,args.min_counts,sum_axes=[1,2])
    npred = np.squeeze(np.apply_over_axes(np.sum,norms*sig,[1,2]))
    norms = np.squeeze(norms)
    # Convert normalizations into the usual flux quantities.
    flux = norms*fn.flux(ebins[:-1],ebins[1:])
    eflux = norms*fn.eflux(ebins[:-1],ebins[1:])
    dnde = norms*fn.dfde(ectr)
    e2dnde = ectr**2*dnde
    cols = [Column(name='E_MIN', dtype='f8', data=ebins[:-1], unit='MeV'),
            Column(name='E_REF', dtype='f8', data=ectr, unit='MeV'),
            Column(name='E_MAX', dtype='f8', data=ebins[1:], unit='MeV'),
            Column(name='FLUX', dtype='f8', data=flux, unit='ph / (cm2 s)'),
            Column(name='EFLUX', dtype='f8', data=eflux, unit='MeV / (cm2 s)'),
            Column(name='DNDE', dtype='f8', data=dnde, unit='ph / (MeV cm2 s)'),
            Column(name='E2DNDE', dtype='f8', data=e2dnde, unit='MeV / (cm2 s)'),
            Column(name='NPRED', dtype='f8', data=npred, unit='ph')]
    tab = Table(cols)
    tab.write(args.output, format='fits', overwrite=True)


if __name__ == "__main__":
    main()
|
#Coding: utf-8
__author__ = "Bruno Perotti"
class A:
    """Identity demo: id(self) inside __init__ equals id() of the finished
    instance, so both prints below show the same number."""

    def __init__(self):
        print(id(self))


a = A()
print(id(a))
|
#!/usr/bin/python
# A very simple program to remove duplicates in a text file
# How it works: Putt the text file in the same folder then after run script, follow the instruction
# Made by Ali Ahmer aka King Ali
# www.facebook.com/master.king.ali.333
def banner():
    # Print the tool banner (Python 2 print-statement syntax).
    print '====================================================='
    print '|!!| Duplicate Entry Remover |!!|'
    print '====================================================='
banner()
inpt=raw_input('Input Text file name e.g. input.txt :')
oupt=raw_input('Output file name e.g. output.txt :')
if __name__ == '__main__':
f = open(oupt,'w+')
flag = False
print 'Please Wait. File in Process.....'
with open(inpt) as fp:
for line in fp:
for temp in f:
if temp == line:
flag = True
print('Duplicate Found...!')
break
if flag == False:
f.write(line)
elif flag == True:
flag = False
f.seek(0)
f.close()
print '.../done' |
from __future__ import print_function, division
import sys
import glob
import time
import json
from pprint import pprint
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
# Per-batch progress counters, reset every 1600 files:
#   ccN  = files with a usable result for channel N in the current batch
#   bccN = files seen for channel N in the current batch
#   tccN = running totals across the whole run
cc1 = 0
cc2 = 0
tcc1 = 0
tcc2 = 0
bcc1 = 0
bcc2 = 0
cc3 = 0
tcc3 = 0
bcc3 = 0
progress = 0
# Row accumulators for the three per-channel DataFrames built below.
dfdata = []
dfdata2 = []
dfdata3 = []
# NOTE(review): `tocheck` is never used later in this script -- confirm dead.
tocheck = set()
# Walk <argv[1]>/<ch>/<D=...>/<n=...>/<trial>.json for the three experiment
# channels and accumulate one DataFrame-row list per channel.
for ch in ('ch1', 'ch2', 'ch3'):
    for filename in sorted(glob.glob(sys.argv[1] + '/' + ch + '/*/*/*.json')):
        # The directory layout encodes the parameters: .../D=<density>/n=<count>/<trial>.json
        D, n, trial = filename.split('/')[-3:]
        D = float(D.split('=')[-1])
        n = int(n.split('=')[-1])
        trial = trial.split('.')[0]
        # Every 1600 files, report the per-batch counters and reset them.
        if progress % 1600 == 0 and progress > 0:
            print("ch1:", cc1, "/", bcc1, "ch2", cc2, "/", bcc2, "ch3", cc3, "/", bcc3, file=sys.stderr)
            cc1 = 0
            cc2 = 0
            cc3 = 0
            bcc1 = 0
            bcc2 = 0
            bcc3 = 0
        progress += 1
        with open(filename) as f:
            data = json.load(f)
        # ch1: monotonicity results -- one row per file.
        if 'ch1' in filename:
            bcc1 += 1
            if 'is_monotone' in data and data['is_monotone'] is not None:
                cc1 += 1
                tcc1 += 1
                dfdata.append([D, n, trial, data['is_monotone'], data['computation_time']])
        # ch2: perturbability -- one row per object, or a single Timeout row.
        if 'ch2' in filename:
            bcc2 += 1
            if 'is_perturbable' in data:
                cc2 += 1
                tcc2 += 1
                # NOTE(review): plain-bool results are counted but produce no
                # row -- confirm that is intended.
                if type(data['is_perturbable']) == bool:
                    continue
                if data['is_perturbable'] is not None:
                    if data['is_perturbable'] != 'Timeout':
                        for obj, vals in data['is_perturbable'].items():
                            dfdata2.append([D, n, trial + obj, vals, data['computation_time']])
                    else:
                        dfdata2.append([D, n, trial, 'Timeout', data['computation_time']])
        # ch3: buffer validity -- encode Timeout as 2, error as -1, otherwise
        # the fraction of valid buffers.
        if 'ch3' in filename:
            bcc3 += 1
            if 'is_valid_buffer' in data:
                cc3 += 1
                tcc3 += 1
                if data['is_valid_buffer'] == 'Timeout':
                    vals = 2  # Timeout
                elif data['is_valid_buffer'] == 'Bad instance':
                    vals = -1  # Error
                else:
                    if 'Timeout' in data['is_valid_buffer'].values():
                        vals = 2  # Timeout
                    elif 'Error' in data['is_valid_buffer'].values():
                        vals = -1  # Error
                    else:
                        vals = sum(data['is_valid_buffer'].values()) / len(data['is_valid_buffer'])
                dfdata3.append([D, n, trial, vals, data['computation_time']])
print("Total: ch1:", tcc1, "ch2", tcc2, "ch3", tcc3, file=sys.stderr)
columns = ["density", "number", "trial", "monotone", "time"]
df = pd.DataFrame(data=dfdata, columns=columns)
print(
'\nTimeouts: ',
len(df.query('monotone=="Timeout"')),
'\nErrors: ',
len(df.query('monotone=="Error"')),
'\nTotal: ',
len(df),
file=sys.stderr,
)
columns2 = ["density", "number", "trial", "perturbable", "time"]
df2 = pd.DataFrame(data=dfdata2, columns=columns2)
print(
'\nTimeouts: ',
len(df2.query('perturbable=="Timeout"')),
'\nErrors: ',
len(df2.query('perturbable=="Error"')),
'\nTotal: ',
len(df2),
file=sys.stderr,
)
print("Not non-2-tone: ", len(df2.query("perturbable==True")), file=sys.stderr)
columns3 = ["density", "number", "trial", "num_buff", "time"]
df3 = pd.DataFrame(data=dfdata3, columns=columns3)
print(
'\nTimeouts: ',
len(df3.query('num_buff==2')),
'\nErrors: ',
len(df3.query('num_buff==-1')),
'\nTotal: ',
len(df3),
file=sys.stderr,
)
print("2-tone: ", len(df3.query("num_buff>0 and num_buff<2")), file=sys.stderr)
|
from decimal import Decimal
import json
from urllib.parse import parse_qsl, urlparse
import pytest
from booking.exceptions import NoExchangeRateError
from booking.sync.payments import save_transaction
pytestmark = [pytest.mark.usefixtures("database")]
@pytest.fixture
def setup_rates(responses):
    """Mock the EUR-conversion endpoint used by save_transaction.

    Registers a callback on GET http://127.0.0.1:5000/to_eur that converts
    `amount` of `currency` to EUR at a fixed CZK rate of 25.5, and answers
    400 with a detail message for unknown currencies.
    """
    def request_callback(request):
        parsed = urlparse(request.url)
        query_string = dict(parse_qsl(parsed.query))
        amount = Decimal(query_string["amount"])
        currency = query_string["currency"]
        rates = {"CZK": Decimal("25.5")}
        try:
            rate = rates[currency]
            result = {"result": str(amount / rate)}
            status = 200
        except KeyError:
            result = {"detail": "No such rate"}
            status = 400
        return status, {}, json.dumps(result)
    responses.add_callback(responses.GET, "http://127.0.0.1:5000/to_eur", callback=request_callback)
@pytest.mark.usefixtures("setup_rates")
def test_save_transaction():
transaction = save_transaction(1, Decimal(2550), "CZK")
assert transaction.amount_eur == Decimal(100)
@pytest.mark.usefixtures("setup_rates")
def test_save_transaction_no_rates():
with pytest.raises(NoExchangeRateError, message="No such rate"):
save_transaction(1, Decimal(10), "NOK")
|
from pyspark import SparkContext
logFile = "file:////opt/modules/hadoop-2.8.5/README.txt"
sc = SparkContext("local", "first app")
logData = sc.textFile(logFile).cache()
numAs = logData.filter(lambda s: 'a' in s).count()
numBs = logData.filter(lambda s: 'b' in s).count()
print("Line with a:%i,lines with b :%i" % (numAs, numBs)) |
# Generated by Django 3.0.8 on 2020-10-23 01:23
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the ``username`` field from the
    Profile model."""

    dependencies = [
        ('CivicConnect', '0004_profile_username'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='username',
        ),
    ]
|
import re

# Annotate a GEXF graph file: after every line matching one of the keywords
# listed in the pattern file, insert a red <viz:color> element so the matching
# nodes render highlighted.
addr_file = r'/home/wang/Desktop/AndroidMalwareSample/Alsalah.gexf'

# The pattern file holds "|"-separated keywords; build a single alternation
# regex of the form "(.*)(kw1|kw2|...)" for use with re.match below.
patfile = open(r'/home/wang/Desktop/AndroidMalwareSample/1.txt').read()
patsub = patfile.split("|")
# BUG FIX: the original appended "|" after every keyword and then sliced
# patt[:-2], which dropped the trailing "|" AND the last character of the
# final keyword. Joining the parts keeps every keyword intact.
patten = "(.*)" + "(" + "|".join(patsub) + ")"

color = '<viz:color r="255" g="0" b="0"/>\n'

with open(addr_file) as text:
    allLines = text.readlines()

print(patten)

# Build the annotated document in a separate list. The original inserted into
# allLines while iterating it (so freshly inserted lines were scanned again)
# and rewrote the whole file on every single match.
outLines = []
count = 0
for eachline in allLines:
    outLines.append(eachline)
    m = re.match(patten, eachline)
    if m is not None:
        print("m.re:", m.re)
        print("line is :%d\t|" % (count) + eachline)
        outLines.append(color)
    count += 1

# Write the result back once, after every line has been processed.
with open(addr_file, 'w') as fp:
    fp.write(''.join(outLines))
|
# PUNTO 1-3
import hashlib

# Show which hash algorithms this interpreter offers.
print(hashlib.algorithms_guaranteed)  # Guaranteed in all platforms
print(hashlib.algorithms_available)  # Available in the current interpreter

# Hash the same message with several algorithms and print the hex digests.
MESSAGE = b"Cloud Computing time"

## MD5
m1 = hashlib.md5(MESSAGE)
print(m1.hexdigest())
## SHA1
m2 = hashlib.sha1(MESSAGE)
print(m2.hexdigest())
## SHA2 (224)
m3 = hashlib.sha224(MESSAGE)
print(m3.hexdigest())
## SHA2 (256) -- note: m3 is rebound, mirroring the original script.
m3 = hashlib.sha256(MESSAGE)
print(m3.hexdigest())
#!usr/bin/env python
# INSTALL THE FOLLOWING PYTHON MODULES:
# - pip3 install scapy
# - pip3 install scapy_http
import sys
import scapy.all as scapy
from scapy.layers import http
#
def sniff(interface):
    """Capture packets on *interface*, handing each to process_sniffed_packet.

    store=False keeps scapy from accumulating packets in memory; prn is the
    per-packet callback. A BPF filter (e.g. "udp", "arp", "tcp", "port 21")
    could be supplied via filter= to narrow the capture.
    Docs: https://scapy.readthedocs.io/en/latest/extending.html
    """
    scapy.sniff(iface=interface, prn=process_sniffed_packet, store=False)
def process_sniffed_packet(packet):
    """Log HTTP request URLs and flag Raw payloads that look like credentials."""
    # Only HTTP requests are interesting; skip everything else.
    if not packet.haslayer(http.HTTPRequest):
        return
    request = packet[http.HTTPRequest]
    url = (request.Host + request.Path).decode("utf-8")
    print("[+] HTTP Request: " + url)
    # Login data, when present, travels in the load field of the Raw layer
    # nested inside the HTTPRequest layer.
    if packet.haslayer(scapy.Raw):
        payload = packet[scapy.Raw].load
        print(payload)  # show the raw, undecoded payload
        try:
            text = payload.decode("utf-8")
        except UnicodeDecodeError:
            print("\n\nThe packet Raw layer doesn't contain text. The content is:")
            print(payload)
        else:
            credential_markers = ("username", "user", "login", "email",
                                  "e-mail", "mail", "password", "pass", "pswd")
            if any(marker in text for marker in credential_markers):
                print("\n\n[+] Possibile username/password: " + text + "\n\n")
    print("--------------------------------------------------------------------------")


# Script entry point: start capturing on the wired interface.
sniff("eth0")
from os import environ
import traceback
import json
import boto3
import botocore
# Environment variables that must be present for the SQS client to work.
SECRETS = [
    'SQS_QUEUE_ARN',
    'SQS_QUEUE_NAME',
    'SQS_QUEUE_URL',
    'SQS_AWS_ACCESS_KEY_ID',
    'SQS_AWS_SECRET_ACCESS_KEY',
    'SQS_REGION'
]


def check_secrets():
    """Verify that every name in SECRETS exists as an environment variable.

    Raises:
        AssertionError: listing the missing variable names.

    SECURITY FIX: the original message embedded ``str(environ)`` -- the whole
    environment, including the AWS secret key values -- leaking credentials
    into error strings and logs. Only the missing key NAMES are reported now.
    """
    missing = sorted(set(SECRETS) - set(environ))
    assert not missing, (
        "Required secrets are not present in environment variables. "
        "Missing: %s" % ", ".join(missing)
    )
def create_boto3_client():
    """Build an SQS boto3 client from the SQS_* environment credentials."""
    credentials = {
        "aws_access_key_id": environ['SQS_AWS_ACCESS_KEY_ID'],
        "aws_secret_access_key": environ['SQS_AWS_SECRET_ACCESS_KEY'],
        "region_name": environ['SQS_REGION'],
    }
    return boto3.client("sqs", **credentials)
def method_wrapper(method, **kwargs):
    """Run *method* with a fresh SQS client, returning a success/error envelope.

    On success: {"success": True, "response": <return value>}.
    On any exception: {"success": False, "error": <type, message, traceback>}.
    """
    try:
        check_secrets()
        kwargs['client'] = create_boto3_client()
        response = method(**kwargs)
    except Exception as exc:
        trace = traceback.format_exc()
        return {"success": False, "error": "%s %s\n\n%s" % (str(exc.__class__), str(exc), trace)}
    return {"success": True, 'response': response}
def get_list(**kwargs):
    """Return ``[[item_id, receipt_handle], ...]`` for up to 10 visible messages.

    Expects ``kwargs['client']`` to be an SQS client; the queue URL comes from
    the SQS_QUEUE_URL environment variable. Returns ``[]`` when the queue is
    empty.
    """
    messages = kwargs['client'].receive_message(
        QueueUrl=environ['SQS_QUEUE_URL'],
        MaxNumberOfMessages=10,
        VisibilityTimeout=0,  # do not hide the messages from other readers
        WaitTimeSeconds=1,
        MessageAttributeNames=['item_id']
    )
    # SQS omits the 'Messages' key entirely when nothing was received
    # (membership test directly on the dict; .keys() was redundant).
    if 'Messages' not in messages:
        return []
    return [
        [m['MessageAttributes']['item_id']['StringValue'], m['ReceiptHandle']]
        for m in messages['Messages']
    ]
def get_item(item_id, **kwargs):
    """Fetch the next visible message and return its content and item_id.

    NOTE(review): *item_id* is accepted but never used -- the call simply
    returns the first message received, whatever its item_id attribute is;
    confirm whether filtering was intended.
    """
    response = kwargs['client'].receive_message(
        QueueUrl=environ['SQS_QUEUE_URL'],
        MaxNumberOfMessages=1,
        VisibilityTimeout=0,
        WaitTimeSeconds=1,
        MessageAttributeNames=['item_id']
    )
    message = response['Messages'][0]
    return {
        'content': message['Body'],
        'item_id': message['MessageAttributes']['item_id']['StringValue']
    }
def delete_item(item_id, **kwargs):
    """Delete every queued message whose item_id attribute equals *item_id*.

    Receipt handles are resolved via get_list(). Expired handles are ignored;
    any other client error propagates to the caller.
    """
    client = kwargs['client']
    matching = [entry[1] for entry in get_list(client=client) if entry[0] == item_id]
    for handle in matching:
        try:
            client.delete_message(
                QueueUrl=environ['SQS_QUEUE_URL'],
                ReceiptHandle=handle
            )
        except botocore.exceptions.ClientError as e:
            # A handle can expire between listing and deleting; that is fine.
            if not e.response['Error']['Message'].endswith('The receipt handle has expired.'):
                raise
    return
def put_item(item_id, **kwargs):
    """Publish a message whose body is ``data['content']``, tagged with *item_id*.

    Expected kwargs: 'client' (SQS client), 'content_type' (must be
    'application/json'), and 'data' (a JSON string containing a 'content' key).

    Raises:
        AssertionError: wrong content type, empty data, or missing 'content'.
        Exception: when 'data' is not valid JSON.
    """
    assert kwargs['content_type'] == 'application/json', "PUT must have a json contentType"
    # BUG FIX: identity comparison with None must use `is not`, not `!=`.
    assert kwargs['data'] is not None, "PUT data empty"
    try:
        print(kwargs['data'])
        data = json.loads(kwargs['data'])
    except Exception:
        traceback.print_exc()
        raise Exception("put data is not valid json")
    assert 'content' in data, 'missing key "content" in put data'
    kwargs['client'].send_message(
        QueueUrl=environ['SQS_QUEUE_URL'],
        MessageBody=data['content'],
        DelaySeconds=0,
        MessageAttributes={
            'item_id': {
                'StringValue': item_id,
                'DataType': 'String'
            }
        },
        # Group id is required for FIFO queues; hard-coded here.
        MessageGroupId='testgroup'
    )
    return
|
'''
Created on Oct 16, 2014
@author: vbms
'''
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GL.exceptional import glDeleteTextures
from Image import open
import config
textureLoaderTextures = {}
def loadTexture(filename):
    """Load *filename* into a GL texture (cached per path) and return its id."""
    global textureLoaderTextures
    # Serve repeated requests for the same file from the cache.
    cached = textureLoaderTextures.get(filename)
    if cached is not None:
        return cached
    # Decode the image and flatten it to raw RGBX bytes, bottom row first.
    image = open(filename)
    width, height = image.size
    pixels = image.tostring("raw", "RGBX", 0, -1)
    # Allocate and bind a fresh texture object.
    textureId = glGenTextures(1)
    glBindTexture(GL_TEXTURE_2D, textureId)
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
    # Upload with the filtering scheme selected in config.
    if config.textureMode == config.textureMode_nearest:
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels)
    elif config.textureMode == config.textureMode_linear:
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels)
    elif config.textureMode == config.textureMode_mipmap:
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST)
        gluBuild2DMipmaps(GL_TEXTURE_2D, 3, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels)
    # Remember the id so later calls hit the cache.
    textureLoaderTextures[filename] = textureId
    return textureId
def loadSphereTexture(filename):
    """Load *filename* into a GL texture configured for sphere/decal rendering.

    The texture id is cached per path (like loadTexture) and reused on
    subsequent calls.
    """
    global textureLoaderTextures
    # if texture already loaded return its id
    if filename in textureLoaderTextures:
        return textureLoaderTextures[filename]
    # load image and flatten it to raw RGBX bytes, bottom row first
    image = open(filename)
    ix = image.size[0]
    iy = image.size[1]
    image = image.tostring("raw", "RGBX", 0, -1)
    # Create Texture
    textureId = glGenTextures(1)
    glBindTexture(GL_TEXTURE_2D, textureId)
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
    glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, image)
    # FIX: the original set GL_CLAMP wrap modes and then immediately
    # overwrote them with GL_REPEAT; only the effective calls are kept.
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
    # GL_DECAL replaces the fragment colour with the texel colour.
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)
    # save and return texture id
    textureLoaderTextures[filename] = textureId
    return textureId
def destroyTexture(textureId=None):
    """Delete one GL texture by id, or every cached texture when id is None."""
    global textureLoaderTextures
    if textureId is None:
        # BUG FIX: the original called destroyTexture.destroy(...), an
        # attribute that does not exist (AttributeError at runtime);
        # recurse on each cached id instead.
        for textureFile in textureLoaderTextures:
            destroyTexture(textureLoaderTextures[textureFile])
        # Drop the stale ids so loadTexture cannot return a deleted texture.
        textureLoaderTextures = {}
    else:
        glDeleteTextures(1, [textureId])
|
from django import forms
class AddTaskForm(forms.Form):
    """Single-field form for entering a new task."""

    # Free-text task description, rendered with the label "Add Task".
    newTask = forms.CharField(label="Add Task")
|
import unittest
"""
Given an array of integers (positive and negative) find the largest continuous sum.
"""
def large_count_sum(arr):
    """Return the largest sum over all contiguous subarrays of *arr* (Kadane).

    Returns 0 for an empty input. For an all-negative array the result is the
    single largest element.
    """
    if not arr:
        return 0
    current_sum = max_sum = arr[0]
    for nr in arr[1:]:
        # Either extend the running subarray with nr, or restart at nr.
        current_sum = max(current_sum + nr, nr)
        max_sum = max(max_sum, current_sum)
    return max_sum
# class LargeContTest(unittest.TestCase):
# def test(self, sol):
# self.assertEqual(sol([1,2,-1,3,4,10,10,-10,-1]), 29)
# self.assertEqual(sol([0, -1, 1, -3, -5]), 1)
# self.assertEqual(sol([1,2,-1,3,4,-1]), 9)
class LargestSum(unittest.TestCase):
    """Spot-check harness for a largest-contiguous-sum implementation.

    Not runner-compatible: ``test`` takes the function under test as an extra
    argument, so it is invoked manually below rather than via unittest.main().
    """

    def test(self, sol):
        self.assertEqual(sol([1, 2, -1, 3, 4, 10, 10, -10, -1]), 29)


t = LargestSum()
t.test(large_count_sum)
#calling alarm function
from CreateAlarm import *
import boto3

# Parallel per-alarm configuration lists: indices 0-4 are standard EC2
# metrics, indices 5-6 are per-filesystem disk metrics.
ServerName = "fg-ma-r-server"
Description= "customer:Future group, Private ip: 172.31.40.83 ; server configuration: 4 core 30 GiB RAM"
InstanceId="i-0446578ba34454170"
SnsTopic=['arn:aws:sns:us-west-2:770678546179:fg-ec2-al']
Namespace=['AWS/EC2','AWS/EC2','AWS/EC2','AWS/EC2','System/Linux','System/Linux','System/Linux']
Filesystem=['Nil','Nil','Nil','Nil','Nil','/dev/xvda1','/dev/xvdf']
MountPath=['Nil','Nil','Nil','Nil','Nil','/','/opt/commonconfig']

# Standard alarms: indices 0-4.
for i in range(5):
    alarm(ServerName,Description,InstanceId,SnsTopic,i,Namespace)
# Disk alarms: indices 5 to the end. BUG FIX: the original used range(5, 6),
# which silently skipped the last entry (/dev/xvdf at /opt/commonconfig).
for i in range(5, len(Namespace)):
    AlarmDisk(ServerName,Description,InstanceId,SnsTopic,i,Namespace,Filesystem,MountPath)
#Filesystem = /prod/xvda1, InstanceId = i-0853686bc726a04a7 (apoteket-vlc-prod-app01), MountPath = /
|
"""
Create a 'guess the password' game , the user is given 3 attempts
to guess the password. Set the Password as “TechClub!!”
"""
attempt=1
while(attempt<=3):
password=input("Enter the Password : ")
if password=="TechClub!!":
print("You are Authenticated!!")
break
else:
attempt=attempt+1
if attempt > 3:
print("Your Attempts are Over!!") |
# Conversation-state identifiers; each constant equals its own name.
# NOTE(review): these look like dialog states for a chat-bot state machine --
# confirm against the handler module that consumes them.
START_MENU = 'START_MENU'
SETTINGS = 'SETTINGS'
COMMAND_MENU = 'COMMAND_MENU'
SEND_MESSAGE = 'SEND_MESSAGE'
CHOOSE_COMMAND_OPTION = 'CHOOSE_COMMAND_OPTION'
DELETE_CONFIRM = 'DELETE_CONFIRM'
BACK_START = 'BACK_START'
SEND_NOTIFY_MESSAGE = 'SEND_NOTIFY_MESSAGE'
INPUT_CALLER = 'INPUT_CALLER'
INPUT_EDIT_CALLER = 'INPUT_EDIT_CALLER'
SEND_EDIT_MESSAGE = 'SEND_EDIT_MESSAGE'
EXIT_WITH_SAVE_CONFIRM = 'EXIT_WITH_SAVE_CONFIRM'
EXIT_NO_SAVE_CONFIRM = 'EXIT_NO_SAVE_CONFIRM'
|
# Read a string of digits and print them rearranged into descending order
# (i.e. the largest number that can be formed from those digits).
n = input()
digits = sorted((int(ch) for ch in n), reverse=True)
answer = "".join(str(d) for d in digits)
print(answer)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.