#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 23 10:15:50 2019
@author: RoroLiao
"""
import matplotlib.pyplot as plt
import numpy as np
from complexity import Lattice, Ensemble
import scipy.stats as sps
"""task 2a"""
# import data
Llist = [4,8,16,32,64,128,256]
total_time = 1.6e5
h_list = np.loadtxt('h.txt')
tc_s_list = np.loadtxt('tc_s.txt')
# plot h vs t graph
fig = plt.figure()
for i in range(len(Llist)):
plt.plot(np.arange(total_time+1),h_list[i])
plt.vlines(x=tc_s_list[i],ymin=0,ymax=h_list[i][int(tc_s_list[i])],linestyles='dashed')
plt.legend(['L=4','L=8','L=16','L=32','L=64','L=128','L=256'])
plt.xlabel(r'time t')
plt.ylabel(r'height h(t;L)')
#plt.xscale('log')
#plt.yscale('log')
# find slope of log-log graphs
def fit_log(list0, list1):
loglist0 = [np.log(i) for i in list0]
loglist1 = [np.log(i) for i in list1]
fit = np.polyfit(loglist0,loglist1,1,cov=True)
print ('value=',fit[0])
print ('cov=',fit[1])
fit_fn = np.poly1d(fit[0])
fit_output = [np.exp(fit_fn[0])*(i**fit_fn[1]) for i in list0] # y = exp(intercept) * x**slope
return fit_output,fit[0],fit[1]
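# Illustrative sanity check (not part of the original analysis; the *_demo names are
# hypothetical): for synthetic data following y = 2 * x**1.5 the fit is
# log(y) = slope*log(x) + intercept, so fit_log should recover a slope close to 1.5
# and an intercept close to log(2) ~ 0.693.
# x_demo = np.array([1.0, 2.0, 4.0, 8.0, 16.0])
# y_demo = 2.0 * x_demo ** 1.5
# _, coeffs_demo, _ = fit_log(x_demo, y_demo) # coeffs_demo approximately [1.5, 0.693]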
# find the relations between tc and hc
# get hc
hc_s_list = []
for i in range(len(Llist)):
hc_s_list.append(np.mean(h_list[i][int(tc_s_list[i]):int(tc_s_list[i]+100)]))
#plot hc vs tc
fig = plt.figure()
plt.plot(tc_s_list,hc_s_list,'go')
fit1 = fit_log(tc_s_list,hc_s_list)
fit_hc_s_list = np.exp(fit1[1][1])*(tc_s_list**fit1[1][0])
print('hc vs tc, exponent=', fit1[1][0], fit1[2][0][0]**0.5)
plt.plot(tc_s_list,fit_hc_s_list,'limegreen')
plt.legend(['experimental data','linear fit'])
plt.xlabel(r'cross-over time $t_c$')
plt.ylabel(r'cross-over height $h_c$')
plt.xscale('log')
plt.yscale('log')
# evaluate the goodness of fit by chi squared test
chi_squared, p = sps.chisquare(hc_s_list, fit_hc_s_list)
print('chi_squared=', chi_squared, 'p value=',p) |
"""
A temperature value in degrees Celsius is given. Output the temperature in degrees Fahrenheit.
"""
import re
re_float = re.compile(r'^[+-]?\d+(\.)?\d*$')
def validator(pattern, prompt):
text = input(prompt)
while not pattern.match(text):
text = input(prompt)
return text
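# For example (illustrative): with re_float above, validator accepts inputs such as
# '23', '-4.5' or '+0.25' and silently re-prompts on anything else (e.g. 'abc').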
def what_temp():
run = input('Оберіть дію (1-перетворення з C у F, 2-перетворення з F у C): ')
if run == '1':
return celsius()
elif run == '2':
return fahrenheit()
else:
return what_temp()
def celsius():
temp = float(validator(re_float, 'Введіть температуру в градусах Цельсія: '))
return temp * 9 / 5 + 32 # Celsius -> Fahrenheit
def fahrenheit():
temp = float(validator(re_float,'Введіть температуру в Фаренгейтах: '))
return (temp - 32) * 5 / 9 # Fahrenheit -> Celsius
print(what_temp()) |
import numpy as np
import cv2
import matplotlib.pyplot as plt
def draw_labeled_bounding_boxes(img, labeled_frame, num_objects):
"""
Starting from labeled regions, draw enclosing rectangles in the original color frame.
"""
# Iterate through all detected cars
for car_number in range(1, num_objects+1):
# Find pixels with each car_number label value
rows, cols = np.where(labeled_frame == car_number)
# Find minimum enclosing rectangle
x_min, y_min = np.min(cols), np.min(rows)
x_max, y_max = np.max(cols), np.max(rows)
cv2.rectangle(img, (x_min, y_min), (x_max, y_max), color=(255, 0, 0), thickness=6)
return img
def compute_heatmap_from_detections(frame, hot_windows, threshold=5):
"""
Compute heatmaps from windows classified as positive, in order to filter false positives.
"""
h, w, c = frame.shape
heatmap = np.zeros(shape=(h, w), dtype=np.uint8)
for bbox in hot_windows:
# for each bounding box, add heat to the corresponding rectangle in the image
x_min, y_min = bbox[0]
x_max, y_max = bbox[1]
heatmap[y_min:y_max, x_min:x_max] += 1 # add heat
# apply threshold + morphological closure to remove noise
_, heatmap_thresh = cv2.threshold(heatmap, threshold, 255, type=cv2.THRESH_BINARY)
heatmap_thresh = cv2.morphologyEx(heatmap_thresh, op=cv2.MORPH_CLOSE,
kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (13, 13)), iterations=1)
return heatmap, heatmap_thresh
def draw_boxes(img, bbox_list, color=(0, 0, 255), thick=5):
"""
Draw all bounding boxes in `bbox_list` onto a given image.
:param img: input image
:param bbox_list: list of bounding boxes
:param color: color used for drawing boxes
:param thick: thickness of the box line
:return: a new image with the bounding boxes drawn
"""
# Make a copy of the image
img_copy = np.copy(img)
# Iterate through the bounding boxes
for bbox in bbox_list:
tl_corner = tuple(bbox[0])
br_corner = tuple(bbox[1])
cv2.rectangle(img_copy, tl_corner, br_corner, color, thick)
return img_copy
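# A minimal, hypothetical usage sketch of the functions above. It assumes `hot_windows`
# comes from an upstream sliding-window classifier and uses scipy.ndimage.label (not a
# dependency of this module) for connected-component labelling of the thresholded heatmap.
def example_detection_pipeline(frame, hot_windows):
    from scipy.ndimage import label
    # accumulate heat from positive windows and suppress isolated false positives
    heatmap, heatmap_thresh = compute_heatmap_from_detections(frame, hot_windows, threshold=5)
    # each connected region of the thresholded heatmap is treated as one detected object
    labeled_frame, num_objects = label(heatmap_thresh)
    # draw one enclosing rectangle per labelled region on a copy of the input frame
    return draw_labeled_bounding_boxes(np.copy(frame), labeled_frame, num_objects)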
if __name__=='__main__':
pass |
COURSE_NUM = 'CSV13'
ASSIGNMENT_NUM = 'EX04'
ASSIGNMENT_NAME = 'MovieReviews'
GITHUB_ORGANIZATION = 'vcc-csv13-fall2018' |
'''
Used to create files for practice
'''
import random as rd
for i in range(50):
one = str(rd.randrange(1,255))
two = str(rd.randrange(1,255))
three = str(rd.randrange(1,255))
four= str(rd.randrange(1,255))
note = str(rd.randrange(18,32))
with open('testprefixlist.txt','a') as file:
file.write('seq '+str(i)+' '+one+'.'+two+'.'+three+'.'+four+'/'+note+'\n')
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from itertools import filterfalse, groupby, tee
import json
import subprocess
from tempfile import NamedTemporaryFile
from .core import Benchmark
from ..utils.command import Command
from ..utils.maven import Maven
def partition(pred, iterable):
# adapted from python's examples
t1, t2 = tee(iterable)
return list(filter(pred, t1)), list(filterfalse(pred, t2))
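# For example (illustrative): partition(lambda n: n % 2 == 0, range(5)) -> ([0, 2, 4], [1, 3])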
class JavaMicrobenchmarkHarnessCommand(Command):
""" Run a Java Micro Benchmark Harness
This assumes the binary supports the standard command line options,
notably `-Dbenchmark.filter`
"""
def __init__(self, build, benchmark_filter=None):
self.benchmark_filter = benchmark_filter
self.build = build
self.maven = Maven()
""" Extract benchmark names from output between "Benchmarks:" and "[INFO]".
Assume the following output:
...
Benchmarks:
org.apache.arrow.vector.IntBenchmarks.setIntDirectly
...
org.apache.arrow.vector.IntBenchmarks.setWithValueHolder
org.apache.arrow.vector.IntBenchmarks.setWithWriter
...
[INFO]
"""
def list_benchmarks(self):
argv = []
if self.benchmark_filter:
argv.append("-Dbenchmark.filter={}".format(self.benchmark_filter))
result = self.build.list(
*argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
lists = []
benchmarks = False
for line in str.splitlines(result.stdout.decode("utf-8")):
if not benchmarks:
if line.startswith("Benchmarks:"):
benchmarks = True
else:
if line.startswith("org.apache.arrow"):
lists.append(line)
if line.startswith("[INFO]"):
break
return lists
def results(self, repetitions):
with NamedTemporaryFile(suffix=".json") as out:
argv = ["-Dbenchmark.runs={}".format(repetitions),
"-Dbenchmark.resultfile={}".format(out.name),
"-Dbenchmark.resultformat=json"]
if self.benchmark_filter:
argv.append(
"-Dbenchmark.filter={}".format(self.benchmark_filter)
)
self.build.benchmark(*argv, check=True)
return json.load(out)
class JavaMicrobenchmarkHarnessObservation:
""" Represents one run of a single Java Microbenchmark Harness
"""
def __init__(self, benchmark, primaryMetric,
forks, warmupIterations, measurementIterations, **counters):
self.name = benchmark
self.primaryMetric = primaryMetric
self.score = primaryMetric["score"]
self.score_unit = primaryMetric["scoreUnit"]
self.forks = forks
self.warmups = warmupIterations
self.runs = measurementIterations
self.counters = {
"mode": counters["mode"],
"threads": counters["threads"],
"warmups": warmupIterations,
"warmupTime": counters["warmupTime"],
"measurements": measurementIterations,
"measurementTime": counters["measurementTime"],
"jvmArgs": counters["jvmArgs"]
}
self.reciprocal_value = self.score_unit.endswith("/op")
if self.score_unit.startswith("ops/"):
idx = self.score_unit.find("/")
self.normalizePerSec(self.score_unit[idx+1:])
elif self.score_unit.endswith("/op"):
idx = self.score_unit.find("/")
self.normalizePerSec(self.score_unit[:idx])
else:
self.normalizeFactor = 1
@property
def value(self):
""" Return the benchmark value."""
val = 1 / self.score if self.reciprocal_value else self.score
return val * self.normalizeFactor
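# Worked example (illustrative): a JMH result with score=2 and score_unit="us/op" is
# reciprocal (2 microseconds per operation), so value = (1 / 2) * 1_000_000 = 500000,
# i.e. 500000 operations per second.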
def normalizePerSec(self, unit):
if unit == "ns":
self.normalizeFactor = 1000 * 1000 * 1000
elif unit == "us":
self.normalizeFactor = 1000 * 1000
elif unit == "ms":
self.normalizeFactor = 1000
elif unit == "min":
self.normalizeFactor = 1 / 60
elif unit == "hr":
self.normalizeFactor = 1 / (60 * 60)
elif unit == "day":
self.normalizeFactor = 1 / (60 * 60 * 24)
else:
self.normalizeFactor = 1
@property
def unit(self):
if self.score_unit.startswith("ops/"):
return "items_per_second"
elif self.score_unit.endswith("/op"):
return "items_per_second"
else:
return "?"
def __repr__(self):
return str(self.value)
class JavaMicrobenchmarkHarness(Benchmark):
""" A set of JavaMicrobenchmarkHarnessObservations. """
def __init__(self, name, runs):
""" Initialize a JavaMicrobenchmarkHarness.
Parameters
----------
name: str
Name of the benchmark
runs: list(JavaMicrobenchmarkHarnessObservation)
Repetitions of JavaMicrobenchmarkHarnessObservation run.
"""
self.name = name
self.runs = sorted(runs, key=lambda b: b.value)
unit = self.runs[0].unit
time_unit = "N/A"
less_is_better = not unit.endswith("per_second")
values = [b.value for b in self.runs]
times = []
# Slight kludge to extract the UserCounters for each benchmark
counters = self.runs[0].counters
super().__init__(name, unit, less_is_better, values, time_unit, times,
counters)
def __repr__(self):
return "JavaMicrobenchmark[name={},runs={}]".format(
self.name, self.runs)
@classmethod
def from_json(cls, payload):
def group_key(x):
return x.name
benchmarks = map(
lambda x: JavaMicrobenchmarkHarnessObservation(**x), payload)
groups = groupby(sorted(benchmarks, key=group_key), group_key)
return [cls(k, list(bs)) for k, bs in groups]
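# Illustrative example (field values are hypothetical, but the keys mirror what
# JavaMicrobenchmarkHarnessObservation.__init__ consumes from a JMH JSON result):
#
# payload = [{
#     "benchmark": "org.apache.arrow.vector.IntBenchmarks.setIntDirectly",
#     "primaryMetric": {"score": 2.0, "scoreUnit": "us/op"},
#     "forks": 1, "warmupIterations": 5, "measurementIterations": 5,
#     "mode": "avgt", "threads": 1, "warmupTime": "1 s",
#     "measurementTime": "1 s", "jvmArgs": []
# }]
# benchmarks = JavaMicrobenchmarkHarness.from_json(payload) # one entry per benchmark name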
|
from flask import Blueprint,render_template,redirect,url_for,request,flash
from flask import current_app as app
from .. import login_manager
from ..utils import role_required
from ..models import User,Role
from flask_login import login_required, logout_user, current_user, login_user
# Blueprint Configuration
users_bp = Blueprint(
'users_bp', __name__,url_prefix='/users')
@users_bp.before_request
@login_required
def before_request():
if (current_user.role.name=='admin'):
pass
else:
flash('No está autorizado para acceder a esta sección','error')
return redirect(url_for('home_bp.dashboard'))
pass
@users_bp.route('/')
def home():
users = User.query.all()
return render_template(
'users/index.html',
segment = 'users',
users = users,
current_user=current_user,
)
@users_bp.route('/edit/<id>',methods=['GET','POST'])
def edit(id):
user = User.query.get(id)
if(request.method=="POST"):
user= User.query.get(request.values.get('user_id'))
user.name=request.values.get('name')
user.lastname=request.values.get('lastname')
user.role_id = request.values.get('role_id')
user.email=request.values.get('email')
password = request.values.get('password')
password_confirm = request.values.get('password_confirm')
if password and password_confirm:
if password_confirm == password:
user.set_password(password)
flash('Password actualizada','success')
user.update()
flash('Datos modificados','success')
return redirect(url_for('users_bp.home'))
else:
roles = Role.query.all()
return render_template(
'users/edit-form.html',
user = user,
segment = 'users',
roles = roles,
current_user = current_user
)
@users_bp.route('/add',methods=['GET','POST'])
def add():
if request.method=='POST':
newUser= User(email=request.values.get('email'),
name=request.values.get('name'),
lastname=request.values.get('lastname'),
role_id=request.values.get('role_id'))
password = request.values.get('password')
password_confirm = request.values.get('password_confirm')
if password_confirm == password:
newUser.set_password(password)
try:
newUser.create()
flash('Nuevo usuario registrado','success')
except:
flash('Ha ocurrido un error','error')
return redirect(url_for('users_bp.home'))
else:
roles = Role.query.all()
return render_template(
'users/add-form.html',
segment = 'users',
roles = roles,
current_user=current_user,
)
@users_bp.route('/delete/<id>')
def delete(id):
user = User.query.get(id)
if user:
user.delete()
flash('Usuario eliminado','success')
else:
flash('Usuario no encontrado','error')
return redirect(url_for('users_bp.home')) |
#This variable is only used for a system printing issue. Related to Print_Screen_Off(exp)
sys_stdout_old=None
#----------------------------
#Storing a dictionary for the elements and their information that will be shown on the process model
#This variable belongs to Visualization.Log_Filter2()
deviant_element_plot={}
deviant_element_timeStamp_plot={}
#----------------------------
#This variable stores objects that contain events which are deviant in terms of timestamps.
#See Deviation.Deviation_Timestamp()
deviant_obj_timestamp=[]
#---------------------------
m_reduced=[]
m2_reduced=[]
stat_test_result={}
#It contains the list of deviant objects. Each object has several attributes. See Deviation.py
deviant_obj=[]
#----------------------------
#The following variables will be used for waiting time analysis
#The logs are raw, which means that they contain duplicate traces.
log1_row=[]
log1_timestamps=[]
log2_row=[]
log2_timestamps=[]
#----------------------------
#Dictionaries that store unique traces with frequencies
#like {0: '0,1,2,3,4,5,6,7,8,9',
# 1: '10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49'}
id1={}
id2={}
#Storing unique logs
log1=[]
log2=[] |
# Generated by Django 3.0.4 on 2020-05-09 16:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ArtiqueApp', '0011_auto_20200509_2006'),
]
operations = [
migrations.AlterField(
model_name='appointment',
name='emailid',
field=models.CharField(max_length=500),
),
migrations.AlterField(
model_name='appointment',
name='msg',
field=models.TextField(max_length=1000, null=True),
),
]
|
import sys
import csv
#time, ?, ?, ?, ?, type, mac, name
#1410992732,Radio,181,2462,1152,Beacn,00:12:0e:85:70:58,Jane's Wireless
names = set()
devices = {}
routers = {}
with open(sys.argv[1], 'rb') as f:
reader = csv.reader(f, delimiter=',')
for row in reader:
if len(row) > 6:
time = row[0]
typ = row[5]
mac = row[6]
name = row[7]
if typ == 'Probe' and name != 'BROADCAST':
names.add(name)
if mac in devices:
# print devices[mac]
if name in devices[mac] :
# print name + " is duplicate"
pass
else:
print name
devices[mac].add(name)
else:
devices[mac] = set()
devices[mac].add(name)
for name in names:
print name
names_count = {}
for mac in devices.keys():
for name in devices[mac]:
if name in names_count:
names_count[name] += 1
else:
names_count[name] = 1
#prints mac address
# output = mac + ': ' + ', '.join(devices[mac])
# print output
#print names_count
## prints the unique names
#for w in sorted(names_count, key=names_count.get, reverse=True):
# print w + ': ' + str(names_count[w])
|
count_list_elements = int(input())
list_numbers = [int(i) for i in input().split()]
def definition_two_min_elem(array_elements: list = list_numbers) -> tuple:
"""Find the two smallest numbers in a list"""
# find the smallest number
index_first_smallest_element = 0
for i in range(1, len(array_elements)):
if array_elements[index_first_smallest_element] > array_elements[i]:
index_first_smallest_element = i
# find the second smallest number, starting from an index other than the first minimum
index_second_smallest_element = 1 if index_first_smallest_element == 0 else 0
for i in range(len(array_elements)):
if array_elements[i] < array_elements[index_second_smallest_element] and i != index_first_smallest_element:
index_second_smallest_element = i
return array_elements[index_first_smallest_element], array_elements[index_second_smallest_element] # why
# did it return a tuple?
for elem in definition_two_min_elem():
print(int(elem), end=' ')
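# Illustrative run: entering 5 and then "4 2 7 1 9" prints "1 2", the two smallest values.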
|
from django.shortcuts import render,redirect
from .models import products
from django.contrib import messages
# Create your views here.
def store(request):
book = products.objects.all()
return render(request,'store.html',{'book':book})
def search(request):
name = request.GET['bkname']
category = products.objects.filter(bkname__icontains=name)
if category:
return render(request,'category.html',{'category':category})
else:
msg = ['Sorry!! No Book Found.']
return render(request,'category.html',{'msg':msg})
def more(request):
bkinfo = request.GET['bkbtn']
category = products.objects.filter(id=bkinfo)
return render(request,'more.html',{'category':category})
def category(request):
cate = request.GET['cate']
if cate == 'novel':
category = products.objects.filter(bkcatgory="Novels")
if category:
return render(request,'category.html',{'category':category})
else:
msg = ['Sorry!! No Book Available.']
return render(request,'category.html',{'msg':msg})
elif cate == 'Academic':
category = products.objects.filter(bkcatgory="Academics")
if category:
return render(request,'category.html',{'category':category})
else:
msg = ['Sorry!! No Book Available.']
return render(request,'category.html',{'msg':msg})
elif cate == 'Reference':
category = products.objects.filter(bkcatgory="Reference")
if category:
return render(request,'category.html',{'category':category})
else:
msg = ['Sorry!! No Book Available.']
return render(request,'category.html',{'msg':msg})
def seller(request):
if request.method == 'POST' and request.FILES['img']:
seller_name = request.POST['seller_name']
price = request.POST['price']
img = request.FILES['img']
bkname = request.POST['bkname']
bkauth = request.POST['bkauth']
bkcatgory = request.POST['bkcatgory']
bkedtion = request.POST['bkedtion']
seller_address = request.POST['seller_address']
seller_email = request.POST['seller_email']
product = products(seller_email=seller_email,seller_address=seller_address,bkedtion=bkedtion,bkcatgory=bkcatgory,bkauth=bkauth,bkname=bkname,seller_name=seller_name,price=price,img=img)
product.save()
print("added")
return redirect('/store')
|
import os
import numpy as np
import pandas as pd
from ..datasets.dataset_base import DatasetBase
from ..utils.constants import (
DEFAULT_FLAG_COL,
DEFAULT_ITEM_COL,
DEFAULT_ORDER_COL,
DEFAULT_RATING_COL,
DEFAULT_TIMESTAMP_COL,
DEFAULT_USER_COL,
)
# download_url
TAFENG_URL = r"https://1drv.ms/u/s!AjMahLyQeZqugjc2k3eCAwKavccB?e=Qn5ppw"
# processed data url
TAFENG_LEAVE_ONE_OUT_URL = r"https://1drv.ms/u/s!AjMahLyQeZqugWw1iWQHgI2NNbuM?e=LwEbEc"
TAFENG_RANDOM_SPLIT_URL = r"https://1drv.ms/u/s!AjMahLyQeZqugWbXQ__YWqF9v_7x?e=NjX5VQ"
TAFENG_TEMPORAL_SPLIT_URL = r"https://1drv.ms/u/s!AjMahLyQeZqugWp1Y1JefMXZr0ng?e=OoAgwD"
class Tafeng(DatasetBase):
"""Tafeng Dataset.
The dataset cannot be downloaded automatically from the url; you need to download it from
'https://1drv.ms/u/s!AjMahLyQeZqugjc2k3eCAwKavccB?e=Qn5ppw' and then put it into the directory `tafeng/raw`.
"""
def __init__(
self, dataset_name="tafeng", min_u_c=0, min_i_c=3, min_o_c=0, root_dir=None
):
"""Init Tafeng Class."""
super().__init__(
dataset_name=dataset_name,
min_u_c=min_u_c,
min_i_c=min_i_c,
min_o_c=min_o_c,
root_dir=root_dir,
url=TAFENG_URL,
manual_download_url=TAFENG_URL,
processed_random_split_url=TAFENG_RANDOM_SPLIT_URL,
)
def preprocess(self):
"""Preprocess the raw file.
Preprocess the file downloaded via the url, convert it to a dataframe consisting of the user-item interactions,
and save it in the processed directory.
"""
file_name = os.path.join(self.raw_path, "train.txt")
if not os.path.exists(file_name):
self.download()
original_train_file = os.path.join(self.raw_path, "train.txt")
original_test_file = os.path.join(self.raw_path, "test.txt")
# initial dataframe
interaction_list = []
with open(original_train_file) as ori_train_df:
for line in ori_train_df:
temp_list = line.replace("\n", "\t").split("\t")
# replace '\n' in the end of the line by '\t'
# split line by '\t'
# store split items in a list
order_id = temp_list[0]
item_ids_list = temp_list[1:-3] # item_ids
time_order = temp_list[-2].replace("-", "")
user_id = temp_list[-3]
for item_id in item_ids_list:
interaction_list.append(
[order_id, user_id, item_id, "train", "1", time_order]
)
print(len(interaction_list))
with open(original_test_file) as ori_test_df:
for line in ori_test_df:
temp_list = line.replace("\n", "\t").split("\t")
# replace '\n' in the end of the line by '\t'
# split line by '\t'
# store split items in a list
order_id = temp_list[0]
item_ids_list = temp_list[1:-3] # item_ids
time_order = temp_list[-2].replace("-", "")
user_id = temp_list[-3]
for item_id in item_ids_list:
interaction_list.append(
[order_id, user_id, item_id, "train", "1", time_order]
)
print(len(interaction_list))
interactions = np.array(interaction_list)
full_data = pd.DataFrame(
data={
DEFAULT_ORDER_COL: interactions[:, 0],
DEFAULT_USER_COL: interactions[:, 1],
DEFAULT_ITEM_COL: interactions[:, 2],
DEFAULT_FLAG_COL: interactions[:, 3],
DEFAULT_RATING_COL: interactions[:, 4],
DEFAULT_TIMESTAMP_COL: interactions[:, 5],
}
)
self.save_dataframe_as_npz(
full_data,
os.path.join(self.processed_path, f"{self.dataset_name}_interaction.npz"),
)
|
import os
import argparse
import copy
import socket
from dataclasses import dataclass
import numpy as np
from astropy import log
from astropy.coordinates import SkyCoord
from astropy.table import Table, QTable, unique, vstack
import astropy.units as u
import pyvo as vo
socket.setdefaulttimeout(600) # set timeout to 10 minutes
@dataclass
class StandardTableInfo(object):
tablename: str
time: str = "time"
end_time: str = 'end_time'
obsid: str = 'obsid'
ra: str = 'ra'
dec: str = 'dec'
name: str = 'name'
mode_entries: list = None
def set_default_mission_info():
chandra_dict = StandardTableInfo('chanmaster', end_time=None)
hitomi_dict = StandardTableInfo('hitomaster', end_time='stop_time')
integral_dict = StandardTableInfo(
'intscw', name='obs_type',
time='start_date', end_time='end_date', obsid='scw_id',
mode_entries=['spi_mode', 'ibis_mode',
'jemx1_mode', 'jemx2_mode', 'omc_mode'])
nicer_dict = StandardTableInfo('nicermastr')
nustar_dict = StandardTableInfo('numaster')
suzaku_dict = StandardTableInfo('suzamaster', end_time='stop_time')
swift_dict = StandardTableInfo('swiftmastr', time='start_time',
end_time='stop_time')
xmm_dict = StandardTableInfo(
'xmmmaster', mode_entries=['mos1_mode', 'mos2_mode',
'pn_mode', 'rgs1_mode', 'rgs2_mode'])
xte_dict = StandardTableInfo('xtemaster',
end_time=None, name='target_name')
mission_info = {'nustar': nustar_dict,
'chandra': chandra_dict,
'nicer': nicer_dict,
'xmm': xmm_dict,
'integral': integral_dict,
'hitomi': hitomi_dict,
'suzaku': suzaku_dict,
'swift': swift_dict,
'xte': xte_dict
}
return mission_info
mission_info = set_default_mission_info()
def get_rows_from_times(mission_table, times):
"""
Examples
--------
>>> start = np.arange(2, 4)
>>> end = np.arange(3, 5)
>>> labels = ['AA', 'BB']
>>> mission_table = QTable({'mjdstart': start, 'mjdend': end, 'label': labels})
>>> times = np.array([1.5, 2.1, 3.5, 4.5, 5.5])
>>> table = get_rows_from_times(mission_table, times)
>>> np.allclose(table['mjdstart'], [0, 2, 3, 0, 0])
True
>>> np.allclose(table['mjdend'], [0, 3, 4, 0, 0])
True
>>> np.all(table['label'] == np.array(['', 'AA', 'BB', '', '']))
True
"""
# Avoid invalid values
mission_table = mission_table[mission_table['mjdstart'] > 0]
start, end = mission_table['mjdstart'], mission_table['mjdend']
idxs = np.searchsorted(start, times + 1 / 86400)
result_table = QTable()
good = (times >= start[0]) & (times <= end[-1])
places_to_change = mission_table[idxs[good] - 1]
for col in mission_table.colnames:
newarr = np.zeros(times.size, dtype=mission_table[col].dtype)
newarr[good] = places_to_change[col]
result_table[col] = newarr
return result_table
def get_table_from_heasarc(
mission, max_entries=10000000, ignore_cache=False):
settings = mission_info[mission]
cache_file = f'_{settings.tablename}_table_cache.hdf5'
if mission == 'integral':
log.warning(
"The target name is not available in the INTEGRAL master table")
if not ignore_cache and os.path.exists(cache_file):
log.info(f"Getting cached table {cache_file}...")
table = Table.read(cache_file)
log.info("Done")
return table
heasarc_tap = vo.dal.TAPService(
"https://heasarc.gsfc.nasa.gov/xamin/vo/tap/")
colnames = (f"{settings.time},"
f"{settings.ra},{settings.dec},"
f"{settings.name},{settings.obsid}")
if settings.end_time is not None:
colnames += f",{settings.end_time}"
if settings.mode_entries is not None:
entries = ','.join(settings.mode_entries)
colnames += f",{entries}"
print(colnames)
query = f"""SELECT
TOP {max_entries}
"__row", {colnames}
FROM {settings.tablename}
"""
log.info(f"Querying {settings.tablename} table...")
table = heasarc_tap.search(query).to_table()
# print(table)
for key in ['obsid', 'name']:
col = getattr(settings, key)
values = [value for value in table[col].iter_str_vals()]
table.remove_column(col)
table[key] = values
if settings.mode_entries is not None:
for col in settings.mode_entries:
values = [value for value in table[col].iter_str_vals()]
table.remove_column(col)
table[col] = values
for col in ['__row']:
values = [float(value) for value in table[col]]
table.remove_column(col)
table[col] = values
table.rename_column(settings.time, 'mjdstart')
table.sort('mjdstart')
if settings.end_time is None:
table['mjdend'] = \
np.concatenate((table['mjdstart'][1:], table['mjdstart'][-1:] + 1))
else:
table.rename_column(settings.end_time, 'mjdend')
good = table['mjdend'] > table['mjdstart']
table = table[good]
log.info("Writing table to cache...")
table.write(cache_file, serialize_meta=True, overwrite=True)
return table
def get_all_change_times(missions=None, mjdstart=None, mjdstop=None,
ignore_cache=False):
"""
Examples
--------
>>> table1 = Table({'mjdstart': np.arange(4), 'mjdend': np.arange(1, 5)})
>>> table2 = Table({'mjdstart': np.arange(2.5, 5.5), 'mjdend': np.arange(3, 6)})
>>> table3 = Table({'mjdstart': np.arange(2.5, 5.5), 'mjdend': np.arange(3, 6)})
>>> table2['mjdstart'][:] = np.nan
>>> all_times = get_all_change_times(missions=[table1, table2, table3],
... mjdstart=None, mjdstop=4.1)
>>> np.allclose(all_times, [1, 2, 2.5, 3, 3.5, 4, 4.1])
True
"""
if missions is None or len(missions) == 0:
missions = list(mission_info.keys())
if mjdstart is not None:
change_times = [[mjdstart]]
else:
change_times = []
for mission in missions:
if isinstance(mission, Table): # Mainly for testing purposes
mission_table = mission
else:
mission_table = \
get_table_from_heasarc(mission, ignore_cache=ignore_cache)
alltimes = np.transpose(np.vstack(
(np.array(mission_table['mjdstart']),
np.array(mission_table['mjdend']))
)).flatten()
good = ~np.isnan(alltimes)
if mjdstart is not None:
good = good & (alltimes >= mjdstart)
if mjdstop is not None:
good = good & (alltimes <= mjdstop)
change_times.append(alltimes[good])
if mjdstop is not None:
change_times.append([mjdstop])
change_times = np.unique(np.concatenate(change_times))
return change_times[change_times > 0]
def filter_table_with_obsids(mission_table, obsid_list):
"""
Examples
--------
>>> table = Table({'obsid': ['0101', '0101', '2345', '5656', '9090'],
... 'mjd': [57000, 57000, 58000, 59000, 60000]})
>>> filt = filter_table_with_obsids(table, ['0101', '5656', '5656'])
>>> np.allclose(filt['mjd'], [57000, 57000, 59000, 59000])
True
"""
tables = []
for obsid in obsid_list:
if obsid == "":
log.error(f"Invalid obsid value: {obsid}")
# print(obsid, type(obsid))
mask = (mission_table['obsid'] == obsid)
tables.append(mission_table[mask])
return vstack(tables)
def sync_all_timelines(mjdstart=None, mjdend=None, missions=None,
ignore_cache=False):
if missions is None or len(missions) == 0:
missions = list(mission_info.keys())
all_times = get_all_change_times(
missions, mjdstart=mjdstart, mjdstop=mjdend, ignore_cache=ignore_cache)
result_table = QTable({'mjd': all_times})
for mission in missions:
mission_table = get_table_from_heasarc(mission)
cols = 'mjdstart,mjdend,ra,dec,obsid,name'.split(',')
restab = get_rows_from_times(mission_table[cols], all_times)
restab['skycoords'] = \
SkyCoord(np.array(restab['ra']),
np.array(restab['dec']), unit=('degree', 'degree'))
for col in cols:
result_table[f'{mission} {col}'] = restab[col]
result_table[f'{mission} coords'] = restab['skycoords']
return result_table
def get_all_separations(table, keyword='coords'):
"""
Examples
--------
>>> a = Table({'ra': [234, 122, 0.0], 'dec': [45, 0, 0.0]})
>>> b = Table({'ra': [234, 123, 1.], 'dec': [46, 0, 2.]})
>>> table = QTable()
>>> table['a coords'] = SkyCoord(a['ra'], a['dec'], unit=('degree', 'degree'))
>>> table['b coords'] = SkyCoord(b['ra'], b['dec'], unit=('degree', 'degree'))
>>> table = get_all_separations(table)
>>> np.allclose(table['dist_a--b'].value[:2], [1, 1])
True
>>> np.isnan(table['dist_a--b'].value[2])
True
"""
coord_cols = [col for col in table.colnames if keyword in col]
for i, coord1_col in enumerate(coord_cols):
for coord2_col in coord_cols[i + 1:]:
sep = table[coord1_col].separation(table[coord2_col])
bad_1 = (table[coord1_col].ra.value == 0.0) & (table[coord1_col].dec.value == 0.0)
bad_2 = (table[coord2_col].ra.value == 0.0) & (table[coord2_col].dec.value == 0.0)
sep[bad_1 | bad_2] = np.nan * u.deg
newcolname = \
(f'dist_{coord1_col.replace(keyword, "").strip()}--'
f'{coord2_col.replace(keyword, "").strip()}')
table[newcolname] = sep
return table
def filter_for_low_separations(table, max_dist=7 * u.arcmin, keyword='dist'):
"""
Examples
--------
>>> table = QTable({'dist_1--2': [1, 4, 0.003] * u.deg,
... 'dist_2--3': [0.001, np.nan, np.nan] * u.deg,
... 'dsdfasdfas': [0, 1, 3]})
>>> newtable = filter_for_low_separations(table)
>>> len(newtable['dist_1--2'])
2
>>> newtable['dist_1--2'][0].value
1.0
>>> newtable['dist_1--2'][1].value
0.003
"""
dist_cols = [col for col in table.colnames if keyword in col]
mask = np.zeros(len(table), dtype=bool)
for col in dist_cols:
good = ~np.isnan(table[col])
good = good & (table[col] <= max_dist)
mask = mask | good
return table[mask]
def main(args=None):
description = 'List all (quasi-)simultaneous observations between ' \
'high-energy missions '
parser = argparse.ArgumentParser(description=description)
parser.add_argument("missions",
help="Mission tables. Leave "
"blank for all supported missions",
type=str, nargs='*')
parser.add_argument("--mjdstart",
help="MJD start",
default=None, type=float)
parser.add_argument("--mjdstop",
help="MJD stop",
default=None, type=float)
parser.add_argument("--ignore-cache", action='store_true',
help="Ignore cache file",
default=False)
args = parser.parse_args(args)
mjdlabel = ''
if args.mjdstart is not None:
mjdlabel += f'_gt{args.mjdstart:g}'
if args.mjdstop is not None:
mjdlabel += f'_lt{args.mjdstop:g}'
missionlabel = '_all'
if len(args.missions) > 0:
missionlabel = "_" + '+'.join(args.missions)
cache_filename = f"_timeline{missionlabel}{mjdlabel}.hdf5"
log.info("Loading requested mission tables...")
if os.path.exists(cache_filename) and not args.ignore_cache:
synced_table = QTable.read(cache_filename)
else:
synced_table = sync_all_timelines(mjdstart=args.mjdstart,
mjdend=args.mjdstop,
missions=args.missions,
ignore_cache=args.ignore_cache)
log.info("Calculating separations...")
synced_table = get_all_separations(synced_table, keyword='coords')
synced_table.write(cache_filename, overwrite=True,
serialize_meta=True)
cols = [col for col in synced_table.colnames if 'dist_' in col]
for col in cols:
mission1, mission2 = col.replace('dist_', '').split('--')
log.info(f"Searching for matches between {mission1} and {mission2}")
good = ~np.isnan(synced_table[col])
good = good & (synced_table[col] <= 30 * u.arcmin)
res = copy.deepcopy(synced_table[good])
if len(res) == 0:
log.warning("No combinations here.")
continue
for dcol in res.colnames:
if dcol == col:
continue
elif '--' in dcol:
res.remove_column(dcol)
elif (mission1 not in dcol) and (mission2 not in dcol):
res.remove_column(dcol)
o1, o2 = f'{mission1} obsid', f'{mission2} obsid'
res['obsid_pairs'] = [
f'{obsid1},{obsid2}'
for obsid1, obsid2 in zip(res[o1], res[o2])]
res = unique(res, keys=['obsid_pairs'])
res.remove_column('obsid_pairs')
mission_table1 = filter_table_with_obsids(
get_table_from_heasarc(mission1), res[o1])
mission_table2 = filter_table_with_obsids(
get_table_from_heasarc(mission2), res[o2])
if mission_info[mission1].mode_entries is not None:
for col in mission_info[mission1].mode_entries:
res[f'{mission1} {col}'] = mission_table1[col]
if mission_info[mission2].mode_entries is not None:
for col in mission_info[mission2].mode_entries:
res[f'{mission2} {col}'] = mission_table2[col]
res.write(f'{mission1}-{mission2}{mjdlabel}.hdf5', serialize_meta=True,
overwrite=True)
res.write(f'{mission1}-{mission2}{mjdlabel}.ecsv',
overwrite=True)
def split_missions_and_dates(fname):
"""
Examples
--------
>>> fname = 'nustar-nicer_gt55000_lt58000.csv'
>>> outdict = split_missions_and_dates(fname)
>>> outdict['mission1']
'nustar'
>>> outdict['mission2']
'nicer'
>>> outdict['mjdstart']
'MJD 55000'
>>> outdict['mjdstop']
'MJD 58000'
>>> fname = 'nustar-nicer.csv'
>>> outdict = split_missions_and_dates(fname)
>>> outdict['mission1']
'nustar'
>>> outdict['mission2']
'nicer'
>>> outdict['mjdstart']
'Mission start'
>>> outdict['mjdstop']
'Today'
"""
no_ext = os.path.splitext(fname)[0]
split_date = no_ext.split('_')
mjdstart = 'Mission start'
mjdstop = 'Today'
if len(split_date) > 1:
for date_str in split_date[1:]:
if 'gt' in date_str:
mjdstart = 'MJD ' + date_str.replace('gt', '')
elif 'lt' in date_str:
mjdstop = 'MJD ' + date_str.replace('lt', '')
mission1, mission2 = split_date[0].split('-')
outdict = {'mission1': mission1, 'mission2': mission2,
'mjdstart': mjdstart, 'mjdstop': mjdstop}
return outdict
def summary(args=None):
description = 'Create summary page'
parser = argparse.ArgumentParser(description=description)
parser.add_argument("files",
help="List of files of kind "
"mission1-mission2.{hdf5,csv}",
type=str, nargs='+')
parser.add_argument("-o", '--output', help='Output rst file',
default='outpage.rst', type=str)
args = parser.parse_args(args)
with open(args.output, 'w') as fobj:
for fname in args.files:
outdict = split_missions_and_dates(fname)
mission1 = outdict['mission1'].capitalize()
mission2 = outdict['mission2'].capitalize()
start = outdict['mjdstart']
stop = outdict['mjdstop']
title_str = (f'{mission1} - {mission2} matches'
f' (between {start} and {stop})')
print(title_str, file=fobj)
print('-' * len(title_str) + '\n', file=fobj)
table = QTable.read(fname)
cols = [col for col in table.colnames if
'obsid' in col or 'mjd' in col or 'name' in col]
table[cols].write(fobj, format='ascii.rst')
print(file=fobj)
|
from csv import DictReader
import io
from urllib.request import urlopen
from 臺灣言語工具.解析整理.拆文分析器 import 拆文分析器
from 臺灣言語服務.models import 訓練過渡格式
from 匯入.指令 import 匯入枋模
class Command(匯入枋模):
help = 'https://github.com/Taiwanese-Corpus/TaiOanUe-LiLiKhokKhok-SuPio'
資料網址 = 'https://github.com/Taiwanese-Corpus/TaiOanUe-LiLiKhokKhok-SuPio/raw/master/sooji.csv'
公家內容 = {
'種類': '字詞',
'來源': '台語數字',
'年代': '2019',
}
def 全部資料(self, *args, **參數):
匯入數量 = 0
for 台語物件 in self._全部資料():
yield 訓練過渡格式(
文本=台語物件.看分詞(),
**self.公家內容
)
匯入數量 += 1
if 匯入數量 % 1000 == 0:
self.stdout.write('匯入 {} 筆'.format(匯入數量))
def _全部資料(self):
with urlopen(self.資料網址) as tsuliau:
with io.StringIO(tsuliau.read().decode('utf-8')) as tong:
for pit in DictReader(tong):
yield 拆文分析器.建立句物件(pit['漢字'], pit['羅馬字'])
|
# encoding: utf-8
import dbhelper_read
from datasys import timeHelper
from datasys.memcachedHelper import memcachedStatic
import service_config
# mc = memcachedStatic.getMemCache()
#
# def getMemcacheKeyForCatalogIndex(catalog_id):
# mckey_prefix = service_config.MEMCACHE_KEY_FOR_CATALOG_INDEX
# mc_key = "%s::%s" %(mckey_prefix,catalog_id)
# return mc_key
#
# def generateIndexOnCatalog():
# sql = 'select catalog_id from jd_catalog'
# catrows = dbhelper_read.executeSqlRead(sql)
# catlist = ['_EXPENSIVE_','_ALL_']
# for row in catrows:
# catlist.append(row['catalog_id'])
# for item in catlist:
# catalog_dict = {}
# rows = get_catalog_Rows(item)
# catalog_dict['data'] = rows
# catalog_dict['num'] = len(rows)
# mc_key = getMemcacheKeyForCatalogIndex(item)
# mcret = mc.set(mc_key,catalog_dict)
# print len(rows)
# print mcret
#
# def testGetIndexOnCatalog():
# sql = 'select catalog_id from jd_catalog'
# catrows = dbhelper_read.executeSqlRead(sql)
# catlist = ['_EXPENSIVE_','_ALL_']
# for row in catrows:
# catlist.append(row['catalog_id'])
# tdict = {}
# for item in catlist:
# mc_key = getMemcacheKeyForCatalogIndex(item)
# cat_dict = mc.get(mc_key)
# tdict[item] = cat_dict
# pass
#
# def get_catalog_Rows(catalog_name):
# startpos = 1
#
# catalog_sql_part = " catalog_id is not null and "
# if catalog_name == "_ALL_":
# pass
# elif catalog_name == "_EXPENSIVE_":
# catalog_sql_part += ' catalog_id<>1000 and catalog_id<>2000 and catalog_id<>3000 and '
# min_allowed_price = service_config.SKU_LIST_MIN_PRICE_FOR_EXPENSIVE
# else:
# catalog_sql_part = 'catalog_id = %s and ' %int(catalog_name)
#
# dt = timeHelper.getTimeAheadOfNowHours(service_config.SKU_LIST_APP_WORTHY_RECENCY_HOURS, timeHelper.FORMAT_LONG)
# sql = '''
# select
# *
# -- ,if(a=34,0,1) as stock_bit
# from
# jd_worthy_latest
# where
# %s
# worthy_value1 < %s
# and current_price >= %s
# and current_price < %s
# and this_update_time > '%s'
# -- and a <> 34 -- 34 is the in-stock/out-of-stock flag
# order by
# -- stock_bit DESC,
# worthy_value1 ASC
# -- limit %s, %s
# limit %s
# ''' %( catalog_sql_part,
# service_config.SKU_LIST_MIN_ALLOWED_WORTHY_VALUE,
# service_config.SKU_LIST_MIN_ALLOWED_PRICE,
# service_config.SKU_LIST_MAX_ALLOWED_PRICE,
# dt,
# startpos,
# service_config.SKU_LIST_FRAME_SIZE,
# service_config.SKU_LIST_MAX_RECALL_NUM
# )
# print sql
# retrows = dbhelper_read.executeSqlRead(sql,is_dirty=True)
# return retrows
#
#
# if __name__ == "__main__":
#
# generateIndexOnCatalog()
# testGetIndexOnCatalog()
# pass |
import unittest
from dal.sparql_queries import get_countries_by_risk_score, get_countries_with_risk_score
from util.pd_utils import get_as_df
class TestSparql(unittest.TestCase):
def test_get_countries(self):
x = get_countries_by_risk_score(3)
self.assertTrue(len(x) > 5, "There should be more countries with risk level 3")
def test_get_risk_scores(self):
x = get_countries_with_risk_score()
y = get_as_df(x, ['country', 'risk_level'])
self.assertIsNotNone(x, "No countries found")
|
from pathlib import Path
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %% LOAD DATA i.e. ALL CSV FILES
data_folder = Path('./data/raw_sections')
all_files = data_folder.glob("*.csv")
data = pd.concat([
pd.read_csv(file, index_col=0).assign(Filename=file.stem)
for file in all_files
])
data[['Stage', 'Sample', 'AP', 'CultureTime']] = data.Filename.str.split(' ', expand=True)
data = data.groupby(['Stage', 'Sample', 'AP']).agg({'Angle': 'mean', 'Length': 'mean'})
# %% SAVE DATA AS AN EXCEL FILE
#if __name__ == '__Sections__':
# writer = pd.ExcelWriter('results/data_sections.xlsx')
# data.to_excel(writer, sheet_name='data')
# writer.save()
# %% CALCULATE Z SCORES GROUPED BY STAGE & AP
data.groupby(['Stage', 'AP']).mean()
data['Standardised Angle'] = (data['Angle'] - data.groupby(['Stage', 'AP']).Angle.transform('mean')) / data.groupby(['Stage', 'AP']).Angle.transform('std', ddof=0)
data['Standardised Length'] = (data['Length'] - data.groupby(['Stage', 'AP']).Length.transform('mean')) / data.groupby(['Stage', 'AP']).Length.transform('std', ddof=0)
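# i.e. each measurement becomes a z-score within its (Stage, AP) group:
# z = (x - group mean) / group standard deviation, with ddof=0 (population std)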
data = data.drop(columns=['Angle', 'Length'])
data = data.reset_index()
data = data.set_index(['Stage', 'Sample'], append=True)
# %% PLOT GRAPH
# %% Scatter graphs against time
#with sns.axes_style('white'):
# g = sns.jointplot(x='Time', y='Standardised AP', data=data, kind='kde');
# g.set_axis_labels('Time (E12.5 + hours)', 'Standardised Shelf Length', fontsize=16)
#g = sns.regplot(x='Time', y='Standardised AP', data=fixed_data, color=".3", fit_reg=False, x_jitter=.1)
#g.set(xlabel='Embryonic Age (E12.5 + hours)', ylabel='Standardised Shelf Length')
# %% Scatter + histo + density graphs
g = sns.PairGrid(data, hue='AP')
g.map_upper(sns.scatterplot)
g.map_lower(sns.kdeplot)
g.map_diag(sns.kdeplot, lw=2, bw_method=.5)
g.add_legend()
plt.show() |
from os import CLD_CONTINUED, stat
from django.shortcuts import render
from django.views import generic
from django.contrib.auth.models import User
from django.core import serializers
from django.http import HttpResponse
from rest_framework import viewsets
import rest_framework
from rest_framework.response import Response
from .serializers import ClientSerializer
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.views import APIView
from rest_framework import generics
import requests
from .models import Client
# Create your views here.
class Index(generic.TemplateView):
template_name = "home/index.html"
def get(self, request):
numbers = [i for i in range(10)]
queryset = User.objects.all()
context = {
'message': "Hola mundo desde CS",
'numbers': numbers,
'users': queryset
}
return render(request, "home/index.html", context)
class ListClientView(generic.ListView):
template_name = "home/list_client.html"
model = Client
# queryset = Client.objects.all() -----> select * from Client
class PokeView(generic.TemplateView):
template_name = "home/pokelist.html"
def get (self, request):
url = "https://pokeapi.co/api/v2/pokemon/gengar"
response = requests.get(url)
r = response.json()
context = {
"pokemon": r["moves"]
}
return render(request, "home/pokelist.html", context)
def wsClient(request):
queryset = Client.objects.all()
data = serializers.serialize('json', queryset)
return HttpResponse(data, content_type="application/json")
class ViewWsClient(generic.TemplateView):
template_name = "home/view_ws_client.html"
def get(self, request):
url = "http://localhost:8000/home/ws/clients/"
response = requests.get(url)
context = {
"obj": response.json()
}
return render(request, "home/view_ws_client.html", context)
class ClientViewSet(viewsets.ModelViewSet):
queryset = Client.objects.all()
serializer_class = ClientSerializer
@api_view(["GET", "POST"])
def client_list(request):
if request.method=="GET":
clients = Client.objects.all()
data = ClientSerializer(clients, many = True)
return Response(data.data, status=status.HTTP_200_OK)
elif request.method=="POST":
data = ClientSerializer(data = request.data)
if data.is_valid():
data.save()
return Response(data.data, status=status.HTTP_201_CREATED)
return Response(data.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(["GET", "PUT", "DELETE"])
def client_update(request, pk=None):
client = Client.objects.filter(id=pk).first()
if client:
if request.method=="GET":
data = ClientSerializer(client)
return Response(data.data, status=status.HTTP_200_OK)
elif request.method=="PUT":
data = ClientSerializer(client, data = request.data)
if data.is_valid():
data.save()
return Response(data.data, status=status.HTTP_200_OK)
return Response(data.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method=="DELETE":
return Response()
return Response({"message": "Client Not Found"}, status=status.HTTP_400_BAD_REQUEST)
class ClientListAPIView(APIView):
def get(self, request):
clients = Client.objects.all()
data = ClientSerializer(clients, many=True)
return Response(data.data,status=status.HTTP_200_OK)
class DetailClientAPIView(APIView):
def get(self, request, pk):
client = Client.objects.filter(id=pk).first()
data = ClientSerializer(client)
return Response(data.data, status=status.HTTP_200_OK)
class ClientListCreate(generics.ListCreateAPIView):
serializer_class = ClientSerializer
queryset = ClientSerializer.Meta.model.objects.filter(status=True)#Client.objects.filter(status=True)
class ClientRetrieveUpdateDestroy(generics.RetrieveUpdateDestroyAPIView):
serializer_class = ClientSerializer
queryset = ClientSerializer.Meta.model.objects.all()
|
#!/usr/bin/env python3
# this file is part of the github repository: https://github.com/nwhoppe/nanobody_fitness
# author: nwhoppe
# created: 3/17/20
import argparse
from datetime import datetime
import pandas as pd
import pickle
from sklearn import tree
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
def split_dataframes_train_test(class0_dataframe_csv, class1_dataframe_csv, test_size):
"""split data for each cass independently to keep the proportions of classes equal between train and test"""
class0 = pd.read_csv(class0_dataframe_csv, index_col=0)
class0.loc[:, 'class'] = 0
class1 = pd.read_csv(class1_dataframe_csv, index_col=0)
class1.loc[:, 'class'] = 1
if test_size > 0:
train_class0, test_class0 = train_test_split(class0, test_size=test_size)
train_class1, test_class1 = train_test_split(class1, test_size=test_size)
train = pd.concat([train_class0, train_class1])
test = pd.concat([test_class0, test_class1])
test_true_class = test.loc[:, 'class']
test.drop(columns=['class'], inplace=True)
else:
train = pd.concat([class0, class1])
test = pd.DataFrame()
test_true_class = pd.DataFrame()
train_true_class = train.loc[:, 'class']
train.drop(columns=['class'], inplace=True)
# make sure counts equal fasta files
print('class0 count: {0}'.format(class0.shape))
print('class1 count: {0}'.format(class1.shape))
print('training set size: {0}'.format(train.shape))
print('test set size: {0}'.format(test.shape))
return train, train_true_class, test, test_true_class
def calculate_roc_stats(true_classes, predicted_classes):
c_matrix = confusion_matrix(true_classes, predicted_classes)
tn, fp, fn, tp = c_matrix.ravel()
print('Confusion matrix:')
print(c_matrix)
print("True positive rate = {0}".format(float(tp)/(tp + fn)))
print("False positive rate = {0}".format(float(fp)/(fp + tn)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="""use a decision tree to classify nanobody sequences as high or
low fitness. required input is one-hot encoded csv dataframes of CDR sequences""")
required = parser.add_argument_group('required')
required.add_argument('-d0', '--dataframe_csv0', required=True,
help='input dataframe csv from encode_one_hot_cdrs_from_fasta.')
required.add_argument('-d1', '--dataframe_csv1', required=True,
help='input dataframe csv from encode_one_hot_cdrs_from_fasta.')
parser.add_argument('-lm', '--min_samples_leaf', type=int, default=1)
parser.add_argument('-m', '--max_depth', type=int, default=None)
# parser.add_argument('-r', '--random_seed', type=int, default=0) # doesnt make sense to have this with random split
parser.add_argument('-t', '--test_size', type=float, default=0.2)
args = parser.parse_args()
df_train, df_train_class, df_test, df_test_class = split_dataframes_train_test(
args.dataframe_csv0, args.dataframe_csv1, args.test_size)
# seed_int = args.random_seed
depth = args.max_depth
leaf_min = args.min_samples_leaf
date_time_string = datetime.now().strftime('%Y%m%d_%H.%M.%S')
output_file_name = 'decision_tree_model_depth{0}_leaf_min{1}_{2}'.format(depth, leaf_min, date_time_string)
dt_classifier = tree.DecisionTreeClassifier(max_depth=depth, min_samples_leaf=leaf_min).fit(df_train, df_train_class)
tree.export_graphviz(dt_classifier, out_file='{0}.dot'.format(output_file_name), feature_names=list(
df_train.columns.values),
class_names=['Low', 'High'], rounded=True, filled=True)
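# The exported .dot file can be rendered with Graphviz (assumed to be installed), e.g.:
#   dot -Tpng <output_file_name>.dot -o <output_file_name>.png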
pickle.dump(dt_classifier, open('{0}.p'.format(output_file_name), 'wb'))
print("Model saved to: {0}".format(output_file_name))
if args.test_size > 0:
predicted_classes = dt_classifier.predict(df_test)
calculate_roc_stats(df_test_class, predicted_classes)
print(dt_classifier.feature_importances_)
|
import time
from collections import defaultdict
from ndc_api import ndc_api
'''
Module: va_metadata_builder.py
Author: Jay Pedersen, UNMC, Pathology/Microbiology Department, Feb 9, 2017
Purpose:
(a) Define va_metadata_builder class to build the VA hierarchy drug class
for use in SCILHS metadata.
'''
class va_metadata_builder():
# --------------------------------------------------------------------------------------|
# VA Drug Classification -- Metadata Build (Olivier Bodenrieder input) |
# --------------------------------------------------------------------------------------|
def __init__(self, rxnav, rxnorm_coding):
self.rxnav = rxnav
self.rxnorm_coding = rxnorm_coding
self.start_t = None # set by build method
self.ndc_api = ndc_api()
self.metadata_rows = {}
self.classid_paths = {}
self.scd_for_va_classid = {} # generic drugs associated with each VA class
self.ingred_for_va_classid = {} # ingredient rxcui associated with each VA class
self.scd_for_ingred_for_va_classid = {} # generic drugs associated with each ingredient, specific for VA class
self.ingredient_rxcui_to_paths = {}
self.rootpath = None # set in build_va_folders
self.ndfrt_name = {} # given NDFRT code, return associated name, set in build_va_folders
def show_hlevels(self):
hlevels = defaultdict(int) # default value is zero
for x in self.metadata_rows.keys(): hlevels[self.metadata_rows[x]['c_hlevel']] += 1
for x in sorted(hlevels.keys()): print('Level %2d ==> %5d' % (x, hlevels[x]))
def determine_va_readable_path(self, mypath):
readable_path = ''
# determine the c_name for the first element in the VA drug class path
# For example, the path
# \PCORI\MEDICATION\RXNORM_CUI\N0000029074\
# Is for "Antimicrobials
# \PCORI\MEDICATION\RXNORM_CUI\N0000029074\N0000029080\"
# Is "Chloramphenicol" child of "Antimicrobials"
# Find the c_name for the first two NDFRT codes in the path
pathlist = mypath.split('\\') # eg: ['PCORI','MEDICATION','RXNORM_CUI','N0000029074']
va_classes = [x for x in pathlist if x.startswith('N') and (x[1:]).isdigit()] # N<digits> are NDFRT codes
ndfrt_list = va_classes[:3] # get up to three NDFRT codes
readable_path = ''
for list_idx,ndfrt_code in enumerate(ndfrt_list):
if len(readable_path)==0: readable_path = ', VA: '
readable_path += (', ' if list_idx > 0 else '') + self.ndfrt_name[ndfrt_code]
return readable_path
def build_va_folders(self, tlist, level, parent_path, rootpath):
self.rootpath = rootpath
for x in tlist:
myclassid = x['rxclassMinConceptItem']['classId'] # eg: N0000010574, which are NDFRT codes, per Jeff Klann ==> NDFRT:<code>
if parent_path!=rootpath:
self.metadata_rows[parent_path]['children'] += 1
mypath = parent_path+myclassid+'\\'
name = x['rxclassMinConceptItem']['className'].lower().capitalize()
self.ndfrt_name[myclassid] = name
else:
mypath = parent_path+'RXNORM_CUI'+'\\' # Match Jeff Klann, want \PCORI\MEDICATION\RXNORM_CUI\% paths
name = 'VA Drug Classes' # Match Jeff Klann, replace 'VA Classes (VA)'
# x['rxclassMinConceptItem'] example ==> {u'classId': u'N0000029360', u'className': u'MULTIVITAMINS', u'classType': u'VA'}
# ==> Use className -- but lowercase and then capitalize (only uppercase first letter)
c_basecode = 'VACLASS:%s' % myclassid # no longer NDFRT, per Lee Peters, Aug 2018
c_fullname = mypath
readable_path = self.determine_va_readable_path(c_fullname)
self.metadata_rows[mypath] = \
{ 'c_fullname': c_fullname,
'c_hlevel': level,
'c_name': name,
'c_basecode': c_basecode,
'c_tooltip': 'VA drug class%s' % readable_path,
'classId': myclassid,
'children': 0,
'tty': 'VAclass',
'rxcui': None }
self.classid_paths[myclassid] = mypath
if 'rxclassTree' in x: # build next level of tree (recursive, depth-first)
self.build_va_folders(x['rxclassTree'], level+1, mypath, rootpath)
return
def rxcui_type_name(self, tty):
return 'Ingredient' if tty=='IN' else 'Orderable Drug'
def add_rxcui_metadata_child(self, child_rxcui, my_parent_path, child_path, child_name, tty):
self.metadata_rows[my_parent_path]['children'] += 1
c_basecode = 'RXNORM:%d' % child_rxcui
c_fullname = child_path
hlevel = self.metadata_rows[my_parent_path]['c_hlevel']+1
readable_path = self.determine_va_readable_path(c_fullname)
if hlevel>12: print('[HIST3] hlevel=%d, parent [%s]\n child [%s]' %
(hlevel,my_parent_path,child_path)) # DEBUG
self.metadata_rows[child_path] = \
{ 'c_fullname': c_fullname,
'c_hlevel': hlevel,
'c_name': child_name,
'c_basecode': c_basecode,
'c_tooltip': '%s (RxNAV tty:%s)%s' % (self.rxcui_type_name(tty), tty, readable_path),
'children': 0,
'tty': tty,
'rxcui': child_rxcui }
def find_generic_drugs_for_VA_classes(self):
print('Obtaining generic drugs for VA classes from NLM (RxClass)')
for va_classid in self.classid_paths.keys():
self.scd_for_va_classid[va_classid] = set() # only care at leaf level
if self.metadata_rows[ self.classid_paths[va_classid] ]['children'] == 0: # leaf
result_rxcuis = self.rxnav.get_generic_drugs_for_VA_class(va_classid)
for rxcui in result_rxcuis:
self.scd_for_va_classid[va_classid].add(rxcui)
# end for va_classid
# end find_generic_drugs_for_VA_classes
def find_ingredients_for_generic_drugs(self):
print('Obtaining ingredients for generic drugs returned for VA classes from NLM (RxClass)')
for va_classid in sorted(self.classid_paths.keys()):
self.ingred_for_va_classid[va_classid] = set()
for scd_rxcui in self.scd_for_va_classid[va_classid]: # next generic drug associated with class
ingredient_rxcuis = self.rxnav.get_ingredients_for_generic_drug(scd_rxcui)
for ingred_rxcui in ingredient_rxcuis: # (rxcui,name,tty) of next ingredient for this generic drug
self.ingred_for_va_classid[va_classid].add(ingred_rxcui) # ingredient for this VA class
# Want to know the class-specific set of generic drugs associated with the ingredient.
# ==> we are going through the list of generic drugs for the class one by one
# ==> the generic drug defined by 'scd' is a generic drug associated with the ingredient
# ==> we are processing for this class (and associated at the class level).
if va_classid not in self.scd_for_ingred_for_va_classid:
self.scd_for_ingred_for_va_classid[va_classid] = {}
if ingred_rxcui not in self.scd_for_ingred_for_va_classid[va_classid]:
self.scd_for_ingred_for_va_classid[va_classid][ingred_rxcui] = set()
self.scd_for_ingred_for_va_classid[va_classid][ingred_rxcui].add(scd_rxcui)
# self.ingredient_rxcui_to_paths -- needed for historical code processing
if ingred_rxcui not in self.ingredient_rxcui_to_paths:
self.ingredient_rxcui_to_paths[ingred_rxcui] = set()
self.ingredient_rxcui_to_paths[ingred_rxcui]\
.add(self.classid_paths[va_classid]+str(ingred_rxcui)+'\\')
return
def find_associated_branded_drugs(self):
print('Determine associated branded drugs for generic drugs returned for VA classes from NLM (RxClass)')
# Find generic drugs
generic_drug_paths = [x for x in self.metadata_rows.keys() if (self.metadata_rows[x]['tty'] in ['SCD','GPCK']) ]
for generic_drug_path in generic_drug_paths:
scd_rxcui = self.metadata_rows[generic_drug_path]['rxcui']
# See if there is a branded drug associated with the generic drug whose RXCUI is held in scd
branded_drug_rxcuis = self.rxnav.get_branded_drugs_for_generic_drug(scd_rxcui)
for sbd_rxcui in branded_drug_rxcuis:
name = self.rxnorm_coding.get_rxcui_name(sbd_rxcui)
tty = self.rxnorm_coding.get_rxcui_tty(sbd_rxcui)
my_parent_path = '\\'.join(['']+[x for x in generic_drug_path.split('\\')
if len(x) > 0][:-1]+['']) # ingredient path, was -- generic_drug_path
child_name = name
child_path = my_parent_path+str(sbd_rxcui)+'\\'
if child_path not in self.metadata_rows: # may have already been seen
self.add_rxcui_metadata_child(sbd_rxcui, my_parent_path, child_path, child_name, tty)
# end for generic_drug_path
# end find_associated_branded_drugs
def find_ndc_codes_for_drugs(self):
print('Computing NDCs for branded and generic drugs')
drug_paths = [x for x in self.metadata_rows.keys()
if (self.metadata_rows[x]['tty'] in ['SBD','BPCK','SCD','GPCK']) ]
for drug_path in drug_paths:
my_parent_path, sbd_rxcui = drug_path, self.metadata_rows[drug_path]['rxcui'] # was -- = tup
ndcs_for_sbd_rxcui = self.rxnav.get_ndcs_for_drug(sbd_rxcui) # list of ndc codes
for ndc in ndcs_for_sbd_rxcui:
child_path = my_parent_path+ndc+'\\'
child_name = self.ndc_api.get_ndc_name(ndc)
if child_name == str(ndc): child_name = '(%s) %s' % (ndc, self.metadata_rows[my_parent_path]['c_name'])
self.metadata_rows[my_parent_path]['children'] += 1
c_basecode = 'NDC:%s' % ndc
c_fullname = child_path
readable_path = self.determine_va_readable_path(c_fullname)
self.metadata_rows[child_path] = \
{ 'c_fullname': c_fullname,
'c_hlevel': self.metadata_rows[my_parent_path]['c_hlevel']+1,
'c_name': child_name, # descriptive name for NDC when available
'c_basecode': c_basecode,
'c_tooltip': 'Package for Orderable Drug %s%s' %
(self.metadata_rows[my_parent_path]['c_basecode'], readable_path),
'children': 0,
'tty': 'NDC',
'rxcui': None }
print('After NDCs: %d REST API requests,\nSeconds since start: %s' %
(self.rxnav.get_request_count(),str(time.time()-self.start_t)))
print('Cache hits: %s' % self.rxnav.get_cache_usage())
self.show_hlevels()
def build(self, path_prefix, metadata_root_level):
self.start_t = time.time()
# (Levels 1 to 5) Determine VA drug hierarchy
print('Computing VA drug hierarchy metadata from classTree obtained from RxNav')
VA_classId = 'VA000' # Per Lee Peters -- as of Aug 6, 2018 -- NLM supports VA class ids and not NDFRT ids
d = self.rxnav.get_class_tree(VA_classId) # obtain VA hierarchy tree from NLM, VA hierarchy root 'VA000'
root_path = '\\'+path_prefix+'\\'
self.build_va_folders(d['rxclassTree'], metadata_root_level, root_path, root_path)
print('hlevel values after compute VA hierarchy'); self.show_hlevels()
print('After VA hierarchy: %d REST API requests,\nSeconds since start: %s' %
(self.rxnav.get_request_count(),str(time.time()-self.start_t)))
print('Cache hits: %s' % self.rxnav.get_cache_usage())
# find path to the MISCELLANEOUS AGENTS folder, classId 'N0000029353'
misc_agents_classId = 'XX000' # hardcode of MISCELLANEOUS AGENTS class (was NDFRT 'N0000029353')
misc_agents_path = self.classid_paths[misc_agents_classId]
print('c_name for MISCELLANEOUS classid [%s] is [%s]' %
(misc_agents_classId,self.metadata_rows[misc_agents_path]['c_name']))
# Determine generic drugs for the VA classes
self.find_generic_drugs_for_VA_classes()
print('After find generic drugs for VA: %d REST API requests,\nSeconds since start: %s' %
(self.rxnav.get_request_count(),str(time.time()-self.start_t)))
print('Cache hits: %s' % self.rxnav.get_cache_usage())
# Determine ingredients associated with generic drugs
self.find_ingredients_for_generic_drugs()
print('After find ingredients for generic drugs for VA: %d REST API requests,\nSeconds since start: %s' %
(self.rxnav.get_request_count(),str(time.time()-self.start_t)))
print('Cache hits: %s' % self.rxnav.get_cache_usage())
# Create metadata rows for ingredients and generic drugs
for va_classid in sorted(self.classid_paths.keys()):
va_classpath = self.classid_paths[va_classid]
for ingred_rxcui in self.ingred_for_va_classid[va_classid]:
my_parent_path = va_classpath
child_name = self.rxnorm_coding.get_rxcui_name(ingred_rxcui)
child_path = my_parent_path+str(ingred_rxcui)+'\\'
self.add_rxcui_metadata_child(ingred_rxcui, my_parent_path, child_path, child_name, 'IN')
for scd_rxcui in self.scd_for_ingred_for_va_classid[va_classid][ingred_rxcui]:
my_parent_path = va_classpath+str(ingred_rxcui)+'\\'
child_name = self.rxnorm_coding.get_rxcui_name(scd_rxcui)
child_path = my_parent_path+str(scd_rxcui)+'\\'
self.add_rxcui_metadata_child(scd_rxcui, my_parent_path, child_path, child_name, 'SCD')
print('Created metadata rows for ingredients and generic drugs'); self.show_hlevels()
# (Level 6) add in historical RxNorm generic drug codes, based on ingredients
# JGP -- 2018-03-01, suspending this for now.
# self.place_historical_rxnorm_codes(misc_agents_path)
# We are NOT missing any RXCUI codes by not doing this, as our historical comprehensiveness
# comes from the by-ingredient hierarchy and not the VA hierarchy.
        # This method is not currently sustainable (there is no way to determine these replacements versus
        # obsoletes going forward, nor knowledge of which drug replaced which drug).
# There is no way to know if a historical code is one that should be associated with the VA class --
# as that is curated. We don't have a history of that curation.
# Find branded drugs associated with generic drugs
self.find_associated_branded_drugs()
print('Created metadata rows for branded drugs'); self.show_hlevels()
# (Level 8) Determine NDC codes for branded and generic drugs
self.find_ndc_codes_for_drugs()
return self.metadata_rows # Done
def get_ingredient_rxcui_set(self):
return set(self.ingredient_rxcui_to_paths.keys())
# end class va_metadata_builder
|
import random
from settings import *
from block import Block
from gamewall import Wall
import pygame
class GameState:
def __init__(self, screen):
self.screen = screen
self.wall = Wall(screen)
self.block = None
self.next_block = None
self.timer_interval = TIMER_INTERVAL
# self.set_timer(self.timer_interval)
self.game_score = 0
self.stopped = True
self.paused = False
self.play_times = 0
self.level = 1
def set_timer(self, timer_interval):
pygame.time.set_timer(pygame.USEREVENT, timer_interval)
def stop_timer(self):
pygame.time.set_timer(pygame.USEREVENT, 0) # clear timer
def add_score(self, score):
self.game_score += score
level = self.game_score // LEVEL_RANGE + 1
if level > self.level:
self.level += 1
if self.timer_interval >= 500:
self.timer_interval -= 50
elif 200 <= self.timer_interval < 500:
self.timer_interval -= 30
elif 100 <= self.timer_interval < 200:
self.timer_interval -= 20
elif 0 < self.timer_interval < 100:
self.timer_interval -= 10
pygame.time.set_timer(pygame.USEREVENT, self.timer_interval)
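        # Illustrative behaviour (TIMER_INTERVAL and LEVEL_RANGE come from settings.py and their
        # values are not known here): each time the score crosses another LEVEL_RANGE the level
        # rises and the drop timer shrinks in progressively smaller steps (50ms, 30ms, 20ms, 10ms),
        # so blocks fall faster at higher levels.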
def start_game(self):
self.stopped = False
self.set_timer(TIMER_INTERVAL)
self.timer_interval = TIMER_INTERVAL
        self.block = self.new_block()  # first call only primes next_block (block is still None)
        self.block = self.new_block()  # second call promotes the primed next_block to the current block
self.play_times += 1
self.wall.clear()
self.game_score = 0
self.paused = False
# random.seed(int(time.time()))
def new_block(self):
self.block = self.next_block
a = random.choice(BLOCK_TYPES)
self.next_block = Block(a, random.randint(0, len(BLOCK[a]) - 1), self.screen, self.wall)
return self.block
def pause_game(self):
self.stop_timer()
self.paused = True
def resume_game(self):
self.set_timer(self.timer_interval)
self.paused = False
def touch_bottom(self):
self.wall.add_to_wall(self.block)
self.add_score(self.wall.eliminate_line())
for c in range(COLUMN_NUM):
if self.wall.is_wall(0, c): # game over
self.stopped = True
break
if not self.stopped:
self.block = self.new_block()
if self.block.hit_wall():
self.stopped = True
if self.stopped:
self.stop_timer()
|
from sklearn.tree import DecisionTreeRegressor, export_graphviz, plot_tree
from sklearn.ensemble import RandomForestRegressor
from data_utils import load_adcl, standardize_data, split_dataset, nash_sutcliffe
import matplotlib.pyplot as plt
from sklearn.multioutput import MultiOutputRegressor
import xgboost as xgb
def build_decision_tree():
_model = DecisionTreeRegressor()
return _model
def build_random_forest():
_model = RandomForestRegressor(n_estimators=500, n_jobs=5)
return _model
def build_xgboost():
_model = MultiOutputRegressor(xgb.XGBRegressor(objective="reg:squaredlogerror", max_depth=10, verbosity=2))
return _model
if __name__ == '__main__':
dataset, split_target_label_index, demand_indexes, test_size, abs_levels = load_adcl()
n_steps = 1
dataset_values = dataset.values
dataset_values[:, demand_indexes[0]:demand_indexes[1]], scaler = \
standardize_data(dataset_values[:, demand_indexes[0]:demand_indexes[1]])
train, test = split_dataset(dataset_values, test_size)
model = build_random_forest() # build_xgboost()
model.fit(train[:, split_target_label_index:], train[:, :split_target_label_index])
y_true = test[0, :split_target_label_index].reshape((1, -1))
y_pred = model.predict(test[0, split_target_label_index:].reshape((1, -1)))
nash = nash_sutcliffe(y_true, y_pred)
print(nash.numpy())
# plt.figure(figsize=(50, 50))
# plot_tree(model, filled=True)
# plt.savefig("tree.pdf")
# plt.show()
# export_graphviz(model, out_file='dt.dot') |
import matplotlib
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
from IPython.display import display
import os
from smif.controller.build import get_model_run_definition, build_model_run
from smif.controller.load import load_resolution_sets
from smif.data_layer.datafile_interface import DatafileInterface
from smif.data_layer.data_handle import DataHandle
from smif.model.scenario_model import ScenarioModel
handler = DatafileInterface('./')
available_modelrun = widgets.RadioButtons(
description='Model Runs:',
options=sorted([x['name'] for x in handler.read_sos_model_runs()]))
plt.ioff()
dep_ax=plt.gca()
show_dep_graph = widgets.Output()
global models
global store
global modelrun
global dep_graph
def plot_dep_graph(dep_graph):
show_dep_graph.clear_output(wait=True)
with show_dep_graph:
dep_ax.clear()
dep_graph_relabelled = nx.relabel_nodes(dep_graph, {x: x.name for x in dep_graph}, copy=True)
nx.draw(dep_graph_relabelled, ax=dep_ax, with_labels=True)
display(dep_ax.figure)
def load_model_run_results(click):
model_run_config = get_model_run_definition('./', available_modelrun.value)
global modelrun
modelrun = build_model_run(model_run_config)
global store
store = DatafileInterface('./')
global dep_graph
modelrun.sos_model.make_dependency_graph()
dep_graph = modelrun.sos_model.dependency_graph
try:
plot_dep_graph(dep_graph)
except ValueError:
pass
global models
models = modelrun.sos_model.models
initialise_viewer(dep_graph, modelrun, models)
initialise_model_viewer(dep_graph, models)
def initialise_viewer(dep_graph, modelrun, models):
year.options = modelrun.model_horizon
model.options = [x.name for x in dep_graph.nodes()]
from_model.options = [x.name for x in dep_graph.predecessors(models[model.value])]
to_model.options = [x.name for x in dep_graph[models[model.value]]]
data_in.options = get_predecessor_outputs(models, model.value, from_model.value)
data_out.options = get_predecessor_outputs(models, to_model.value, model.value)
click_from(None)
click_to(None)
load_button = widgets.Button(
description="Load Results")
load_button.on_click(load_model_run_results)
def get_predecessor_outputs(models, model_name, source_model_name):
"""
Returns
=======
list
"""
outputs = []
if model_name in models and source_model_name in models:
deps = models[model_name].deps
for x in deps.values():
if x.source_model.name == source_model_name:
outputs.append(x.source.name)
return sorted(outputs)
def get_outputs(models, model_name):
"""
Returns
=======
list
"""
outputs = []
if model_name in models:
outputs = sorted(models[model_name].outputs.names)
return outputs
def plot_subgraph(model):
d = [x for x in dep_graph.predecessors(model)]
d.append(model)
sub_graph = dep_graph.subgraph(d)
sub_graph_relabelled = nx.relabel_nodes(sub_graph, {x: x.name for x in sub_graph}, copy=True)
nx.draw(sub_graph_relabelled, with_labels=True)
def plot_results(store, modelrun, model, parameter, year, axes):
axes.clear()
handle = DataHandle(store, modelrun.name, year, modelrun.model_horizon, model,
decision_iteration=0)
spatial_resolution = model.outputs[parameter].spatial_resolution
temporal_resolution = model.outputs[parameter].temporal_resolution
if isinstance(model, ScenarioModel):
data = handle._store.read_scenario_data(
model.scenario_name, # read from scenario
parameter, # using output (parameter) name
spatial_resolution.name,
temporal_resolution.name,
year)
data = data.sum(axis=0)
# names = temporal_resolution.get_entry_names()
# plt.plot(names, data)
plt.plot(data)
else:
data = handle.get_results(parameter)
data = data.sum(axis=0)
plt.plot(data)
units = model.outputs.get_units(parameter)
axes.set_ylabel(units)
axes.set_xlabel(temporal_resolution.name)
axes.set_title(model.name + ': ' + parameter)
axes.set_ylim(0,)
display(axes.figure)
def save_data_as_csv(store, modelrun, model, parameter, year):
if isinstance(model, ScenarioModel):
msg = "No need to use this function as data is already in csv format"
raise ValueError(msg)
else:
spatial_resolution = model.outputs[parameter].spatial_resolution.name
temporal_resolution = model.outputs[parameter].temporal_resolution.name
handle = DataHandle(store, modelrun.name, year, modelrun.model_horizon,
model, decision_iteration=0)
data = handle.get_results(parameter)
results_path_dat = store._get_results_path(
modelrun.name, model.name, parameter, spatial_resolution,
temporal_resolution,
year, None, 0)
results_path = results_path_dat[:-4] + ".csv"
os.makedirs(os.path.dirname(results_path), exist_ok=True)
region_names = store.read_region_names(spatial_resolution)
interval_names = store.read_interval_names(temporal_resolution)
assert data.shape == (len(region_names), len(interval_names))
csv_data = store.ndarray_to_data_list(data,
region_names,
interval_names,
timestep=year)
store._write_data_to_csv(results_path, csv_data)
print("Writing file to %s", results_path)
def on_model_change(change):
if model.value in models:
from_model.options = [x.name for x in dep_graph.predecessors(models[model.value])]
if from_model.value in models:
data_in.options = get_predecessor_outputs(models, model.value, from_model.value)
else:
data_in.options = []
to_model.options = [x.name for x in dep_graph[models[model.value]]]
if to_model.value in models:
data_out.options = get_predecessor_outputs(models, to_model.value, model.value)
else:
data_out.options = []
def from_model_change(change):
if from_model.value in models:
data_in.options = get_predecessor_outputs(models, model.value, from_model.value)
else:
data_in.options = []
def to_model_change(change):
if to_model.value in models:
data_out.options = get_predecessor_outputs(models, to_model.value, model.value)
else:
data_out.options = []
def click_convert(b):
save_data_as_csv(store, modelrun, models[from_model.value], data_in.value, year.value)
def click_from(b):
outputs_from.clear_output(wait=True)
if from_model.value in models and model.value and data_in.value:
with outputs_from:
plot_results(store, modelrun, models[from_model.value], data_in.value, year.value, ax_from)
def click_to(b):
outputs_to.clear_output(wait=True)
if model.value in models and to_model.value and data_out.value:
with outputs_to:
plot_results(store, modelrun, models[model.value], data_out.value, year.value, ax_to)
year = widgets.Dropdown(
options=[],
description='Year:',
disabled=False)
model = widgets.Dropdown(
options=[],
description='Model:',
disabled=False
)
model.observe(on_model_change, names='value')
from_model = widgets.Dropdown(
options=[],
description='From Model:',
disabled=False)
from_model.observe(from_model_change, names='value')
to_model = widgets.Dropdown(
options=[],
description='To Model:',
disabled=False)
to_model.observe(to_model_change, names='value')
data_in = widgets.Dropdown(
options=[],
description='Data In:',
disabled=False)
data_out = widgets.Dropdown(
options=[],
description='Data Out:',
disabled=False)
outputs_from = widgets.Output()
ax_from=plt.gca()
outputs_to = widgets.Output()
ax_to=plt.gca()
button_from = widgets.Button(
description='Show')
button_to = widgets.Button(
description='Show')
button_from.on_click(click_from)
button_to.on_click(click_to)
button_convert = widgets.Button(
description="Save CSV")
button_convert.on_click(click_convert)
def initialise_model_viewer(dep_graph, models):
model_only.options = [x.name for x in dep_graph.nodes()]
outputs.options = get_outputs(models, model_only.value)
outputs_change(None)
output_ax = plt.gca()
model_only = widgets.Dropdown(
options=[],
description='Model:',
disabled=False,
)
outputs = widgets.Dropdown(
options=[],
description='Data Out:',
disabled=False,
)
plot = widgets.Output()
def model_only_change(change):
if model_only.value in models:
outputs.options = get_outputs(models, model_only.value)
def outputs_change(change):
plot.clear_output(wait=True)
if outputs.value:
with plot:
plot_results(store, modelrun, models[model_only.value], outputs.value, year.value, output_ax)
model_only.observe(model_only_change, names='value')
outputs.observe(outputs_change, names='value')
choose_modelrun = widgets.VBox([
widgets.HBox([available_modelrun, load_button]),
show_dep_graph
])
view_results = widgets.VBox([
widgets.HBox([
year, model]),
widgets.HBox([
widgets.VBox([from_model, data_in, button_from, outputs_from]),
widgets.VBox([to_model, data_out, button_to, outputs_to]),
button_convert
])
])
view_outputs = widgets.VBox([model_only, outputs, plot]) |
print('Count the occurrences of English words')
# Count each word in the line
def processLine(line, wordCounts):
line = replacePunctuations(line) # Replace punctuations with space
words = line.split() # Get words from each line
for word in words:
if word in wordCounts:
wordCounts[word] += 1
else:
wordCounts[word] = 1
# Replace punctuations in the line with space
def replacePunctuations(line):
for ch in line:
if ch in '~@#$%^&*()_-+=~"<>?/,.;!{}[]|':
line = line.replace(ch, " ")
return line
filename = 'C:/_git/vcs/_1.data/______test_files1/wordCounts1.txt'
infile = open(filename, "r") # Open the file
wordCounts = {} # Create an empty dictionary to count words
for line in infile:
processLine(line.lower(), wordCounts)
#print(wordCounts)
pairs = list(wordCounts.items()) # Get pairs from the dictionary
items = [[x, y] for (y, x) in pairs] # Reverse pairs in the list
items.sort() # Sort pairs in items
for i in range(len(items) - 1, len(items) - 11, -1):
print(items[i][1] + "\t" + str(items[i][0]))
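# An equivalent top-10 report can be produced with collections.Counter; this is an
# alternative sketch, not part of the original script (shown as comments only):
# from collections import Counter
# with open(filename) as f:
#     counts = Counter(w for line in f for w in replacePunctuations(line.lower()).split())
# for word, count in counts.most_common(10):
#     print(word + "\t" + str(count))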
|
#!/usr/bin/env python
#
# Author: Daniela Duricekova <daniela.duricekova@gmail.com>
#
import requests
import threading
URLS = [
'https://xkcd.com/138/',
'https://xkcd.com/149/',
'https://xkcd.com/285/',
'https://xkcd.com/303/',
'https://xkcd.com/327/',
'https://xkcd.com/387/',
'https://xkcd.com/612/',
'https://xkcd.com/648/'
]
class ContentLenThread(threading.Thread):
def __init__(self, url):
super().__init__()
self.url = url
self.content_len = None
def run(self):
r = requests.get(self.url)
self.content_len = len(r.text)
if __name__ == '__main__':
threads = []
for url in URLS:
t = ContentLenThread(url)
threads.append(t)
t.start()
for t in threads:
t.join()
for t in threads:
print((t.url, t.content_len))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import sys
import json
from collections import defaultdict
def freq(seq):
fq = {}
for e in seq:
if e not in fq: fq[e] = 0
fq[e] += 1
return fq
def sorted_by_freq(seq):
byfq = [(v,k) for k,v in freq(seq).items()]
commonest = ''.join([k for v,k in sorted(byfq, reverse=True)])
return commonest
def regexify(prefix):
return re.compile('^[' + prefix + ']+$')
def make_levels(words):
levels = []
commonest_letters = sorted_by_freq(''.join(words))
limit = len(commonest_letters)
for i in range(limit):
prefix = commonest_letters[:i+1]
level = {
'new_letter' : commonest_letters[i],
'romaji' : kana2romaji(commonest_letters[i]),
'sofar' : commonest_letters[:i],
'regex' : regexify(prefix),
'number' : "%.2d" % i,
'next' : "%.2d" % (i+1) if (i < limit) else "%.2d" % 0 ,
'next_letter' : commonest_letters[(i+1) % limit],
'prev_letter' : commonest_letters[(i-1)] if i > 1 else commonest_letters[0],
'wordlist' : [],
'prev' : "%.2d" % (i-1) if (i > 0) else "%.2d" % 0,
}
levels.append( level )
return levels
def level_words(words):
levels = make_levels(words)
for word in words:
for level in levels:
if level['regex'].match(word):
if len(word) < 12 and len(level['wordlist']) < 50: level['wordlist'].append(word)
break
return levels
def read_romaji2katakana(jsonfile='katakana.json'):
rules = json.loads(open(jsonfile).read().decode('utf-8'))
return dict(rules)
def romaji2kana(romaji):
ruledict = read_romaji2katakana()
return ruledict[romaji]
def invert_dict(d):
return dict([(v,k) for k,v in d.items()])
def kana2romaji(kana):
ruledict = invert_dict(read_romaji2katakana())
if kana in ruledict: return ruledict[kana]
else: return kana
if __name__ == "__main__":
from string import Template
text = sys.stdin.read().decode('utf-8').replace('\n', ' ')
page_template = Template(open('page.template').read().decode('utf-8'))
word_template = Template(open('word.template').read().decode('utf-8'))
words = text.split()
levels = level_words(words)
for level in levels:
level['page_title'] = 'Learn Katakana: ' + level['new_letter']
level['wordlist'] = '\n'.join([word_template.substitute(word=w) for w in level['wordlist']])
page = page_template.safe_substitute(level)
outfile = "levels/%s.html" % level['number']
out = open(outfile, 'w')
out.write(page.encode('utf-8'))
out.close()
|
"""
Program to calculate average marks and percentage
"""
sub_1 = eval(input("enter the marks obtained in subject 1"))
sub_2 = eval(input("enter the marks obtained in subject 2"))
sub_3 = eval(input("enter the marks obtained in subject 3"))
sub_4 = eval(input("enter the marks obtained in subject 4"))
sub_5 = eval(input("enter the marks obtained in subject 5"))
totMks = sub_1+sub_2+sub_3+sub_4+sub_5
avg = totMks/5  # average of the five subjects
per = totMks/5  # percentage, assuming each subject is marked out of 100 (totMks/500*100 == totMks/5)
print("total marks are", totMks)
print("average of marks is", avg)
print("percentage of marks is", per) |
from __future__ import absolute_import
import binascii
import json
import os
from flask import Blueprint
from flask import current_app
from flask import request
from scheduler.models import Task
from scheduler.decorators import token_protected_endpoint
scheduler = Blueprint('scheduler', __name__)
@scheduler.route("/schedule", methods=['POST'])
@token_protected_endpoint
def schedule():
"""HTTP endpoint for scheduling tasks
If a task with the same code already exists, the one with the shorter
interval will be made active.
"""
code = request.form['code']
interval = int(request.form['interval'])
task_id = binascii.b2a_hex(os.urandom(5))
new_task = Task(id=task_id)
new_task.active = True
new_task.code = code
new_task.interval = interval
# TODO(derek): Assert there is only one other_task
other_task = Task.query.filter_by(code=code, active=True).first()
if other_task:
if other_task.interval <= new_task.interval:
new_task.active = False
else:
other_task.active = False
other_task.save()
current_app.scheduler.cancel(other_task.id)
if new_task.active:
        print(current_app.scheduler.schedule)
current_app.scheduler.schedule({
'id': task_id,
'code': new_task.code,
'interval': new_task.interval
})
new_task.save()
return json.dumps({
'status': 'success',
'id': task_id,
})
@scheduler.route("/cancel", methods=['POST'])
@token_protected_endpoint
def cancel():
"""HTTP endpoint for canceling tasks
If an active task is cancelled, an inactive task with the same code and the
smallest interval will be activated if it exists.
"""
task_id = request.form['id']
task = Task.query.get(task_id)
if not task:
return json.dumps({
'status': 'success',
'id': None,
})
task.delete()
if task.active:
current_app.scheduler.cancel(task_id)
code = task.code
other_task = Task.query.filter_by(code=code).order_by('interval').first()
if other_task:
other_task.active = True
other_task.save()
current_app.scheduler.schedule({
'id': other_task.id,
'code': other_task.code,
'interval': other_task.interval
})
return json.dumps({
'status': 'success',
'id': task_id,
})
|
# conditions in python
x,y = 100,100
if(x<y):
st = "x is less than y"
elif(x==y):
st = "x is eqal to y"
else:
st = "x is greater than y"
print(st) |
def recall_password(cipher_grille, ciphered_password):
result = ''
for rotation in range(4):
for line in range(4):
for c in range(4):
if cipher_grille[line][c] == 'X':
result += ciphered_password[line][c]
cipher_grille = list(zip(*reversed(cipher_grille)))
return result
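# A minimal usage sketch (the grille and grid below are hypothetical, not from the original);
# a valid grille exposes each of the 16 cells exactly once across the four rotations.
if __name__ == '__main__':
    example_grille = ['XXX.',
                      '.X..',
                      '....',
                      '....']
    example_grid = ['abcd',
                    'efgh',
                    'ijkl',
                    'mnop']
    # holes read per rotation: 'abcf', 'dghl', 'knop', 'eijm'
    print(recall_password(example_grille, example_grid))  # -> abcfdghlknopeijm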
|
# get train data
# get test data
# create model
# load or init model
# compile the model
# fit data to model
# save weights
'''
A simple Conv3D with Keras for aimbot detection
'''
import argparse
import os
import cv2
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv3D, MaxPooling3D, Dropout
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.utils import plot_model
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
def video_to_array(video_name: str, no_frames=10, flip=False):
array_video = []
clip = cv2.VideoCapture(video_name)
nframe = clip.get(cv2.CAP_PROP_FRAME_COUNT)
frames = [x * nframe / no_frames for x in range(no_frames)]
for i in range(no_frames):
clip.set(cv2.CAP_PROP_POS_FRAMES, frames[i])
ret, frame = clip.read()
frame = cv2.resize(frame, (32, 32))
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if flip:
frame = cv2.flip(frame, 1)
array_video.append(frame)
clip.release()
return np.array(array_video)
# -- Command line arguments --
parser = argparse.ArgumentParser(description="A simple Conv3D for aimbot detection")
parser.add_argument("--output", type=str, required=True)
parser.add_argument("--test", type=bool, default=False)
parser.add_argument("--load", type=str, default=False)
parser.add_argument("--epochs", type=int, default=100)
args = parser.parse_args()
if args.test is True and not args.load:
raise ValueError("For a test run you need to provide a file with the weights. Use --load.")
# -- Preparatory code --
# Model configuration
batch_size = 100
no_epochs = args.epochs
learning_rate = 0.001
no_classes = 2
verbosity = 1
X_train = []
labels_train = []
if not args.test:
train_files = os.listdir("dataset_processed/train/")
progress_bar = tqdm(total=len(train_files))
for filename in train_files:
progress_bar.update(1)
if filename.endswith("-context.mp4"):
continue
file_path = os.path.join("dataset_processed/train/", filename)
label = 1 if filename.startswith("cheater") else 0
labels_train.append(label)
X_train.append(video_to_array(file_path))
labels_train.append(label)
X_train.append(video_to_array(file_path, flip=True))
progress_bar.close()
X_train = np.array(X_train).transpose((0, 2, 3, 1))
X_train = X_train.reshape((X_train.shape[0], 32, 32, 10, 1))
X_train = X_train.astype("float32")
Y_train = to_categorical(labels_train, 2)
print('X_shape:{}\nY_shape:{}'.format(X_train.shape, Y_train.shape))
X_test = []
labels_test = []
test_files = os.listdir("dataset_processed/test/")
progress_bar = tqdm(total=len(test_files))
for filename in test_files:
progress_bar.update(1)
if filename.endswith("-context.mp4"):
continue
file_path = os.path.join("dataset_processed/test/", filename)
label = 1 if filename.startswith("cheater") else 0
labels_test.append(label)
X_test.append(video_to_array(file_path))
progress_bar.close()
X_test = np.array(X_test).transpose((0, 2, 3, 1))
X_test = X_test.reshape((X_test.shape[0], 32, 32, 10, 1))
X_test = X_test.astype("float32")
Y_test = to_categorical(labels_test, 2)
print('X_shape:{}\nY_shape:{}'.format(X_test.shape, Y_test.shape))
input_shape = X_train.shape[1:] if len(X_train) else X_test.shape[1:]
# Create the model
model = Sequential()
model.add(Conv3D(32, kernel_size=(3, 3, 3), activation='relu', kernel_initializer='he_uniform',
input_shape=(input_shape), padding="same"))
model.add(Conv3D(32, kernel_size=(3, 3, 3), activation='softmax', padding="same"))
model.add(MaxPooling3D(pool_size=(3, 3, 3), padding="same"))
model.add(Dropout(0.25))
model.add(Conv3D(64, kernel_size=(3, 3, 3), activation='relu', padding="same"))
model.add(Conv3D(64, kernel_size=(3, 3, 3), activation='softmax', padding="same"))
model.add(MaxPooling3D(pool_size=(3, 3, 3), padding="same"))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='sigmoid'))
model.add(Dropout(0.5))
model.add(Dense(no_classes, activation='softmax'))
# Compile the model
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
metrics=['accuracy'])
model.summary()
plot_model(model, show_shapes=True,
to_file=os.path.join('model.png'))
# Load weights if provided
if args.load is not False:
model.load_weights(args.load)
# Fit data to model
if not args.test:
history = model.fit(X_train, Y_train,
validation_data=(X_test, Y_test),
batch_size=batch_size,
epochs=no_epochs,
verbose=verbosity,
shuffle=True)
# # Generate generalization metrics
loss, acc = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', loss)
print('Test accuracy:', acc)
if not os.path.isdir(args.output):
os.mkdir(args.output)
if not args.test:
    model.save_weights(os.path.join(args.output, "3dcnn-{0}-{1}-acc-{2}.h5".format(batch_size, no_epochs, round(acc, 2))))
    # Plot history: Categorical crossentropy & Accuracy
    # (kept inside the training branch: `history` only exists after model.fit has run)
    plt.plot(history.history['loss'], label='Categorical crossentropy (training data)')
    plt.plot(history.history['val_loss'], label='Categorical crossentropy (validation data)')
    plt.plot(history.history['accuracy'], label='Accuracy (training data)')
    plt.plot(history.history['val_accuracy'], label='Accuracy (validation data)')
    plt.title('Model performance for Conv3D for aimbot detection')
    plt.ylabel('Loss value')
    plt.xlabel('No. epoch')
    plt.legend(loc="upper left")
    plt.show() |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'acc.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(595, 588)
self.tabWidget = QtWidgets.QTabWidget(Dialog)
self.tabWidget.setGeometry(QtCore.QRect(10, 10, 571, 521))
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.Debit = QtWidgets.QComboBox(self.tab)
self.Debit.setGeometry(QtCore.QRect(150, 80, 131, 22))
self.Debit.setEditable(True)
self.Debit.setObjectName("Debit")
self.Debit.addItem(None)
self.Debit.addItem("Cash")
self.Debit.addItem("Accounts Receivable")
self.Debit.addItem("Notes Receivable")
self.Debit.addItem("Supplies")
self.Debit.addItem("Equipment")
self.Debit.addItem("Accounts Payable")
self.Debit.addItem("Notes Payable")
self.Debit.addItem("Unearned Revenue")
self.Debit.addItem("Service Revenue")
self.Debit.addItem("Capital")
self.Debit.addItem("Drawings")
self.Debit.addItem("Rent Expense")
self.Debit.addItem("Salaries Expense")
self.label = QtWidgets.QLabel(self.tab)
self.label.setGeometry(QtCore.QRect(40, 80, 47, 13))
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.tab)
self.label_2.setGeometry(QtCore.QRect(40, 130, 47, 13))
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.tab)
self.label_3.setGeometry(QtCore.QRect(40, 180, 47, 13))
self.label_3.setObjectName("label_3")
self.Value = QtWidgets.QLineEdit(self.tab)
self.Value.setGeometry(QtCore.QRect(150, 180, 131, 20))
self.Value.setObjectName("Value")
self.Credit = QtWidgets.QComboBox(self.tab)
self.Credit.setGeometry(QtCore.QRect(150, 130, 131, 22))
self.Credit.setEditable(True)
self.Credit.setObjectName("Credit")
self.Credit.addItem(None)
self.Credit.addItem("Cash")
self.Credit.addItem("Accounts Receivable")
self.Credit.addItem("Notes Receivable")
self.Credit.addItem("Supplies")
self.Credit.addItem("Equipment")
self.Credit.addItem("Accounts Payable")
self.Credit.addItem("Notes Payable")
self.Credit.addItem("Unearned Revenue")
self.Credit.addItem("Service Revenue")
self.Credit.addItem("Capital")
self.Credit.addItem("Drawings")
self.Credit.addItem("Rent Expense")
self.Credit.addItem("Salaries Expense")
self.Submit = QtWidgets.QPushButton(self.tab)
self.Submit.setGeometry(QtCore.QRect(260, 220, 75, 23))
self.Submit.setObjectName("Submit")
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.tableWidget = QtWidgets.QTableWidget(self.tab_2)
self.tableWidget.setGeometry(QtCore.QRect(0, 0, 571, 501))
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(4)
self.tableWidget.setRowCount(1)
self.tabWidget.addTab(self.tab_2, "")
self.tab_3 = QtWidgets.QWidget()
self.tab_3.setObjectName("tab_3")
self.tableWidget_2 = QtWidgets.QTableWidget(self.tab_3)
self.tableWidget_2.setGeometry(QtCore.QRect(-5, -9, 571, 511))
self.tableWidget_2.setObjectName("tableWidget_2")
self.tableWidget_2.setColumnCount(4)
self.tableWidget_2.setRowCount(0)
self.tabWidget.addTab(self.tab_3, "")
self.tab_4 = QtWidgets.QWidget()
self.tab_4.setObjectName("tab_4")
self.tableWidget_3 = QtWidgets.QTableWidget(self.tab_4)
self.tableWidget_3.setGeometry(QtCore.QRect(-10, -10, 581, 511))
self.tableWidget_3.setObjectName("tableWidget_3")
self.tableWidget_3.setColumnCount(10)
self.tableWidget_3.setRowCount(0)
self.tabWidget.addTab(self.tab_4, "")
self.ShowTrial = QtWidgets.QPushButton(Dialog)
self.ShowTrial.setGeometry(QtCore.QRect(500, 550, 75, 23))
self.ShowTrial.setObjectName("ShowTrial")
self.ShowJournal = QtWidgets.QPushButton(Dialog)
self.ShowJournal.setGeometry(QtCore.QRect(400, 550, 75, 23))
self.ShowJournal.setObjectName("ShowJournal")
self.ShowLedger = QtWidgets.QPushButton(Dialog)
self.ShowLedger.setGeometry(QtCore.QRect(300, 550, 75, 23))
self.ShowLedger.setObjectName("ShowLedger")
self.retranslateUi(Dialog)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.Debit.setItemText(0,_translate("Dialog",""))
self.Debit.setItemText(1, _translate("Dialog", "Cash"))
self.Debit.setItemText(2, _translate("Dialog", "Accounts Receivable"))
self.Debit.setItemText(3, _translate("Dialog", "Notes Receivable"))
self.Debit.setItemText(4, _translate("Dialog", "Supplies"))
self.Debit.setItemText(5, _translate("Dialog", "Equipment"))
self.Debit.setItemText(6, _translate("Dialog", "Accounts Payable"))
self.Debit.setItemText(7, _translate("Dialog", "Notes Payable"))
self.Debit.setItemText(8, _translate("Dialog", "Unearned Revenue"))
self.Debit.setItemText(9, _translate("Dialog", "Service Revenue"))
self.Debit.setItemText(10, _translate("Dialog", "Capital"))
self.Debit.setItemText(11, _translate("Dialog", "Drawings"))
self.Debit.setItemText(12, _translate("Dialog", "Rent Expense"))
self.Debit.setItemText(13, _translate("Dialog", "Salaries Expense"))
self.label.setText(_translate("Dialog", "Debit"))
self.label_2.setText(_translate("Dialog", "Credit"))
self.label_3.setText(_translate("Dialog", "Value"))
self.Credit.setItemText(0,_translate("Dialog",""))
self.Credit.setItemText(1, _translate("Dialog", "Cash"))
self.Credit.setItemText(2, _translate("Dialog", "Accounts Receivable"))
self.Credit.setItemText(3, _translate("Dialog", "Notes Receivable"))
self.Credit.setItemText(4, _translate("Dialog", "Supplies"))
self.Credit.setItemText(5, _translate("Dialog", "Equipment"))
self.Credit.setItemText(6, _translate("Dialog", "Accounts Payable"))
self.Credit.setItemText(7, _translate("Dialog", "Notes Payable"))
self.Credit.setItemText(8, _translate("Dialog", "Unearned Revenue"))
self.Credit.setItemText(9, _translate("Dialog", "Service Revenue"))
self.Credit.setItemText(10, _translate("Dialog", "Capital"))
self.Credit.setItemText(11, _translate("Dialog", "Drawings"))
self.Credit.setItemText(12, _translate("Dialog", "Rent Expense"))
self.Credit.setItemText(13, _translate("Dialog", "Salaries Expense"))
self.Submit.setText(_translate("Dialog", "Submit"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("Dialog", "Input"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("Dialog", "Trial"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("Dialog", "Journal"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _translate("Dialog", "Ledger"))
self.ShowTrial.setText(_translate("Dialog", "Trial"))
self.ShowJournal.setText(_translate("Dialog", "Journal"))
self.ShowLedger.setText(_translate("Dialog", "Ledger"))
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)  # a QApplication must exist before creating any QWidget
    print("0")
    msg = QtWidgets.QMessageBox()
    print("1")
    msg.setIcon(QtWidgets.QMessageBox.Critical)
    msg.setText("Error")
    msg.setInformativeText('More information')
    msg.setWindowTitle("Error")
    msg.exec_()
|
from pandas import read_csv
import pandas as pd
import math, datetime
import numpy as np
import sklearn
from sklearn import preprocessing, svm
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
from matplotlib import style
from datetime import datetime, timedelta
style.use('ggplot')
df = read_csv('/../data/random_data.csv', header=0, index_col=0)
df = df[['zugkraft', 'drehwinkel']]
tot_work = []
sum_work = 0
for x in df['zugkraft']:
sum_work += x*0.5
tot_work.append(sum_work)
df['arbeit'] = (tot_work)
df['arbeit'].plot(label='Eingetreten')
forecast_col = 'arbeit'
df.fillna(-9999, inplace=True)
forecast_out = int(math.ceil(0.17*len(df)))
print('forecast_out in minutes ' + str(forecast_out/2))
df['label'] = df[forecast_col].shift(-forecast_out)
X = np.array(df.drop(['label'],1))
X = preprocessing.scale(X)
X = X[:-forecast_out]
X_lately = X[-forecast_out:]
df.dropna(inplace=True)
y = np.array(df['label'])
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2)
clf_SVM = svm.SVR(kernel='linear')
clf_SVM.fit(X_train, y_train)
accuracy_SVM= clf_SVM.score(X_test,y_test)
print('accuracy_SVM ' + str(accuracy_SVM))
forecast_set = clf_SVM.predict(X_lately)
#print(forecast_set , accuracy_LinReg, forecast_out)
df['Forecast'] = np.nan
last_date = df.iloc[-1].name
last_date_time = datetime.strptime(last_date, '%H:%M:%S')
next_unix = last_date_time + timedelta(seconds=30)
for i in forecast_set:
next_date = next_unix
next_unix += timedelta(seconds=30)
df.loc[next_date.time()] = [np.nan for _ in range(len(df.columns)-1)] + [i]
df['arbeit'].plot()
df['Forecast'].plot(label='Vorhergesagt', color='green')
plt.legend(loc=4)
plt.xlabel('Date')
plt.ylabel('Arbeitslast')
plt.show() |
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pyexcel
from youtube_dl import YoutubeDL
# song_dl = {
# 'default_search': 'ytsearch',
# 'max_downloads': 1
# }
# dl = YoutubeDL(song_dl)
url = 'https://www.apple.com/itunes/charts/songs/'
#1. Download webpage
song_content = urlopen(url).read().decode("utf-8")
#2. Extract ROI
soup = BeautifulSoup(song_content,'html.parser')
sec = soup.find('section','section chart-grid')
#3. Extract song list
song_list = []
li_list = sec.find_all('li')
for li in li_list:
h3 = li.h3
h4 = li.h4
songs = {}
songs['Name of Song'] = h3.string
songs['Artist'] = h4.string
song_list.append(songs)
# download = []
# download = h3.string + ' ' + h4.string
# dl.download([download])
pyexcel.save_as(records = song_list, dest_file_name = 'itunes_top100_song.xlsx')
|
global prev # use global keyword if you want to change a global variable, not just have access to it
prev = None # need to initialise prev to some value
def check_value():
print("NOW:", prev)
def change_value():
global prev # reference the global variable before making changes
prev = 'pie'
print("BEFORE CHANGES:", prev)
change_value()
check_value() |
# Attempt at a script to import an mbox into a CSV.
# It does not work because my mbox is very large and runs out of memory.
# I think my mbox does not include the LABELS, which is why I cannot import them.
# I am leaving it here in case someone wants to use it as a base for something better.
# Made with the help of GPT-4.
# To export everything from the MBOX file except the attachment contents, you can use a Python script to parse the MBOX file and save the data in a CSV file, which is easy to handle and analyze. Here's a step-by-step guide:
# The script will create a CSV file named `output.csv` containing the email data without the attachment contents. You can now open this file with any spreadsheet software like Microsoft Excel or Google Sheets to analyze the data. The output CSV file will contain columns for Subject, From, Date, To, Cc, Bcc, Message-ID, In-Reply-To, References, X-GM-Labels, X-GM-THRID, Attachments, and Body.
# The `clean_body()` function normalizes Unicode characters, decodes Quoted-Printable and Base64 encodings, removes HTML tags, and replaces multiple whitespaces with a single space. I have also improved the handling of MIME content and extraction of the desired data.
# This script should now properly clean up the text of an email body and handle different MIME content types. The output CSV file will contain columns for Subject, From, Date, To, Cc, Bcc, Message-ID, In-Reply-To, References, X-GM-Labels, X-GM-THRID, Attachments, and Body.
# The issue is that the 'X-GM-Labels' header might need to be decoded before writing it to the CSV file. You can use the `decode_header` function from the `email.header` module to do this.
# These fields are encoded using the "Quoted-Printable" encoding, and you can decode them using the `decode_header` function from the `email.header` module, which I've already added in the previous solution. To make sure all fields are properly decoded, you can use the `decode_header_value` function that I defined in the updated script. This function takes care of decoding the "Quoted-Printable" encoded fields.
# The `decode_header_value` function will decode the "Quoted-Printable" encoded fields and return a string. You can use this function to decode the "X-GM-Labels" header and write it to the CSV file.
import mailbox
import csv
import email
import re
import html
from email.header import decode_header
from multiprocessing import Pool, cpu_count
def get_attachments(message):
attachments = []
for part in message.walk():
if part.get_content_maintype() == 'multipart':
continue
if part.get('Content-Disposition') is None:
continue
filename = part.get_filename()
decoded_filename = decode_header_value(filename)
attachments.append(decoded_filename)
return attachments
def clean_body(text):
text = html.unescape(text) # Normalize Unicode characters
text = re.sub(r'<.*?>', '', text) # Remove HTML tags
# Replace multiple whitespaces with a single space
text = re.sub(r'\s+', ' ', text)
return text.strip()
def get_body(message):
body = ""
if message.is_multipart():
for part in message.walk():
if part.is_multipart():
continue
content_type = part.get_content_type()
content_disposition = part.get('Content-Disposition', None)
if content_type == 'text/plain' and content_disposition != 'attachment':
charset = part.get_content_charset() or 'utf-8'
payload = part.get_payload(decode=True).decode(
charset, errors='replace')
body += clean_body(payload)
else:
body = clean_body(message.get_payload(
decode=True).decode('utf-8', errors='replace'))
return body
# The `decode_header_value` function will decode the "Quoted-Printable" encoded fields and return a string. You can use this function to decode the "X-GM-Labels" header and write it to the CSV file.
def decode_header_value(header_value):
if header_value is None:
return None
decoded_values = decode_header(header_value)
decoded_value = " ".join(
[
value.decode(encoding, errors="replace")
if isinstance(value, bytes) and encoding is not None and encoding != "unknown-8bit"
else (value.decode("utf-8", errors="replace") if isinstance(value, bytes) else value)
for value, encoding in decoded_values
]
)
return decoded_value
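# For instance (illustrative value, not from the original data): an RFC 2047 encoded header
# such as '=?utf-8?q?Hol=C3=A0?=' comes back from decode_header_value() as the text 'Holà'.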
def mbox_messages(mbox):
for message in mbox:
yield message
def process_message(args):
index, message = args
# print(f"Processing message {index+1}/{total_messages}")
msg = message
subject = decode_header_value(msg['subject'])
sender = decode_header_value(msg['from'])
date = decode_header_value(msg['date'])
recipients = decode_header_value(msg['to'])
cc = decode_header_value(msg['cc'])
bcc = decode_header_value(msg['bcc'])
msg_id = decode_header_value(msg['message-id'])
in_reply_to = decode_header_value(msg['in-reply-to'])
references = decode_header_value(msg['references'])
gm_labels = decode_header_value(msg['x-gm-labels'])
gm_thrid = decode_header_value(msg['x-gm-thrid'])
attachments = get_attachments(msg)
body = get_body(msg)
return index, [subject, sender, date, recipients, cc, bcc, msg_id, in_reply_to, references, gm_labels, gm_thrid, attachments, body]
if __name__ == '__main__':
# Replace with your MBOX file path
mbox_file = '/Users/joseanu/Downloads/All mail Including Spam and Trash-002.mbox'
csv_output = 'output.csv' # Name of the output CSV file
mbox = mailbox.mbox(mbox_file)
# total_messages = len(mbox) # Total number of emails in the mbox file (slow)
messages = mbox_messages(mbox)
with open(csv_output, 'w', newline='', encoding='utf-8') as csvfile:
csv_writer = csv.writer(csvfile, escapechar='\\', quoting=csv.QUOTE_NONE)
header_list = ['Subject', 'From', 'Date', 'To', 'Cc', 'Bcc', 'Message-ID',
'In-Reply-To', 'References', 'X-GM-Labels', 'X-GM-THRID', 'Attachments', 'Body']
csv_writer.writerow(header_list)
with Pool(cpu_count()) as pool:
indexed_messages = enumerate(messages)
for index, row in pool.imap_unordered(process_message, indexed_messages):
# gm_labels = row[9]
# print(
# f"Processed message {index+1}/{total_messages}: Labels = {gm_labels}")
csv_writer.writerow(row)
# will this work? I don't know. I'm just trying to get it to work. |
# coding=utf-8
# Time: 2019-09-29-17:38
# Author: dongshichao
'''
Given two arrays, write a function to compute their intersection.
Example 1:
Input: nums1 = [1,2,2,1], nums2 = [2,2]
Output: [2,2]
'''
class Solution(object):
def intersect(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
count={}
res=[]
for i in nums1:
if i not in count:
count[i] = 1
else:
count[i] += 1
for i in nums2:
if i in count and count[i] >0 :
res.append(i)
count[i] -=1
return res
if __name__ == "__main__":
s= Solution()
print(s.intersect([1,4,6,8,3,5],[2,4,5,6,3])) |
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, SubmitField, FieldList, FormField, HiddenField
from back_end.calc import calc_event_positions
from back_end.interface import get_event, save_event_score, get_event_card, \
get_player, is_event_result_editable, get_event_scores, save_event_result
class EventCardItemForm(FlaskForm):
hole = IntegerField(label='Hole')
par = IntegerField(label='Par')
si = IntegerField(label='SI')
shots = IntegerField(label='Shots')
points = IntegerField(label='Points')
class EventCardForm(FlaskForm):
scoresOut = FieldList(FormField(EventCardItemForm))
scoresIn = FieldList(FormField(EventCardItemForm))
event_name = StringField(label='event_name')
player = StringField(label='Player')
handicap = StringField(label='Handicap')
editable = HiddenField(label=None, id='editable')
totalShotsOut = StringField(label='TotalShotsOut')
totalPointsOut = StringField(label='TotalPointsOut')
totalShotsIn = StringField(label='TotalShotsIn')
totalPointsIn = StringField(label='TotalPointsIn')
totalShots = StringField(label='TotalShots')
totalPoints = StringField(label='TotalPoints')
totalShotsReturn = HiddenField(label=None, id='totalShotsReturn')
totalPointsReturn = HiddenField(label=None, id='totalPointsReturn')
positionReturn = HiddenField(label=None, id='positionReturn')
handicapReturn = HiddenField(label=None, id='handicapReturn')
statusReturn = HiddenField(label=None, id='statusReturn')
save_card = SubmitField(label='Save')
def populate_card(self, event_id, player_id, position, handicap, status):
event = get_event(event_id)
course_data = event.course.course_data_as_of(event.date.year)
card = get_event_card(event_id, player_id)
self.event_name.data = event.full_name()
self.player.data = get_player(player_id).full_name()
self.handicap.data = handicap
self.positionReturn.data = position
self.handicapReturn.data = handicap
self.statusReturn.data = status
self.editable.data = is_event_result_editable(event)
holes = range(1, 19)
for hole in holes:
i = hole - 1
shots = "-" if card[i] == 99 or card[i] is None else card[i]
item_form = EventCardItemForm()
item_form.hole = hole
item_form.par = course_data.par[i]
item_form.si = course_data.si[i]
item_form.shots = shots
item_form.points = 0
if hole <= 9:
self.scoresOut.append_entry(item_form)
else:
self.scoresIn.append_entry(item_form)
def save_event_card(self, event_id, player_id, form):
errors = self.errors
if len(errors) > 0:
return False
card = [d['shots'] for d in form.scoresOut.data] + [d['shots'] for d in form.scoresIn.data]
card = [99 if v is None else v for v in card]
total_shots = form.totalShotsReturn.data
total_points = form.totalPointsReturn.data
save_event_score(event_id, int(player_id), 0, card, total_shots, total_points)
all_scores = get_event_scores(event_id)
result = calc_event_positions(event_id, all_scores)
save_event_result(event_id, result)
return True
|
n = input()
def dist(x, y):
    # squared Euclidean distance; the square root is unnecessary when only comparing
    return (x[0]-y[0])**2 + (x[1]-y[1])**2
points = []
ans = 0
for _ in xrange(n):
point = map(int, raw_input().split())
for xy in points:
ans = max(ans, dist(xy, point))
print ans
points.append(point)
|
from __future__ import unicode_literals
from django.db import models
from django.core.exceptions import (
MultipleObjectsReturned, ObjectDoesNotExist)
from ipm_system.core.utils import gera_license
import hashlib
import pickle
# Create your models here.
class Hardware(models.Model):
class Meta:
app_label = 'core'
verbose_name = u'Hardware'
verbose_name_plural = u'Hardwares'
ordering = ('name',)
customer = models.ForeignKey('core.Customer')
name = models.CharField(verbose_name="Nome",
default="", max_length=255)
key = models.CharField(max_length=255, default='')
def __unicode__(self):
return u"Hardware %s - %s" % (self.name, self.customer)
def save(self, *args, **kwargs):
if not self.key:
h = hashlib.md5()
h.update(
self.__unicode__().encode('utf-8')
)
self.key = h.hexdigest()
super(Hardware, self).save(*args, **kwargs)
def get_license(self):
try:
license = self.license_set.get()
except MultipleObjectsReturned:
lics = self.license_set.filter()
license = lics[0]
for c in lics[1:]:
c.delete()
except ObjectDoesNotExist:
license = self.license_set.create(customer=self.customer,
hardware=self)
license.license = pickle.dumps(gera_license())
license.save()
return license
|
from rest_framework.renderers import BrowsableAPIRenderer
class NoFormBrowsableAPIRenderer(BrowsableAPIRenderer):
"""
We don't want the HTML forms and filters to be rendered in the browsable API.
It can be very slow for Porras for example, where the view tries to load all Dorsals.
The browsable API is only used in DEBUG mode anyway, so it only affects development.
"""
def show_form_for_method(self, view, method, request, obj):
return False # pragma: no cover
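# A sketch of how a renderer like this is usually enabled in Django settings; the module
# path below is an assumption for illustration, not taken from this project:
#
# REST_FRAMEWORK = {
#     'DEFAULT_RENDERER_CLASSES': [
#         'rest_framework.renderers.JSONRenderer',
#         'myproject.renderers.NoFormBrowsableAPIRenderer',  # hypothetical import path
#     ],
# }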
|
# Solve a 2D logistic regression problem to generate a dataframe
# with columns: iterations, epochs, time and weights
# from time import time
# input
# >>>> plot2D conv_2D.csv
# Example of `input.csv' for n_samples = 100
# with gradient descent (so 1 epoch per iteration)
# iteration, epoch, time, beta0, beta1
# 0, 0, .0, 1., 1.
# 1, 0, .2, 1., .5
# 2, 0, .6, 1., .0
# 3, 0, 1.2, .0, .0
# Problem: problem.loss function is required...
# if __name__ == "__main__":
import numpy as np
import pandas as pd
import os
df2D = pd.DataFrame(np.array([[0, 0, .0, 1., 1.],
[1, 100, .2, 1., .5],
[2, 200, .6, 1., .0],
[3, 300, 1.2, .0, .0]]),
columns=["iteration", "epoch", "time", "beta0", "beta1"])
df2D = df2D.astype({"iteration": int,
"epoch": float,
"time": float,
"beta0": float,
"beta1": float})
df2D
folder = os.path.join(os.getcwd(), "conv_tables")
os.makedirs(folder, exist_ok=True)  # make sure the output folder exists before writing
full_path = os.path.join(folder, "conv_2D.csv")
df2D.to_csv(full_path, index=False, header=True)
df3D = pd.DataFrame(np.array([[0, 0, .0, 1., 1., 1.],
[1, 100, .2, 1., .5, 1.],
[2, 200, .6, 1., .0, 1.],
[3, 300, 1.2, .0, .0, .5],
[4, 400, 2.4, .0, .0, .0]]),
columns=["iteration", "epoch", "time", "beta0", "beta1", "beta2"])
df3D = df3D.astype({"iteration": int,
                    "epoch": float,
                    "time": float,
                    "beta0": float,
                    "beta1": float,
                    "beta2": float})
df3D
folder = os.path.join(os.getcwd(), "conv_tables")
full_path = os.path.join(folder, "conv_3D.csv")
df3D.to_csv(full_path, index=False, header=True)
|
# -*- coding:utf-8 -*-
class BaseNode(object):
    # Basic attributes of a node
    def __init__(self, item):
        self.item = item # holds the data element
        self.next = None # reference to the next node (a node object)
class SingleLinkList(object):
""" 单向链表 """
# 链表基本属性,自动当前链表获取头结点信息
def __init__(self, node=None):
""" 链表头结点__haed可能为空 """
self.__head = node # 单向列表特点只知道自己的头结点
def is_empty(self):
""" 判断单向链表头部是否为空 """
return self.__head is None
def length(self):
""" 求单向链表的长度 """
# 当前头结点
cur = self.__head
# 设计计数初始值为0
if self.is_empty(): # 链表头都没有为空
return 0
else:
count = 0
while cur is not None: # 不能.next
count += 1
cur = cur.next # 外部变量重新赋值,下一次循环 cur 就是 next 的对象
# 返回总计数
return count
def travel(self):
""" 打印展示所有链表内容 """
cur = self.__head
print("self.__head -->", cur) # cur 一个BaseNode next 另一个BaseNode, item 值
if self.is_empty(): # 等同于 self.__head is None
print("")
else:
while cur is not None: # 不能.next
print(cur.item, end=" ")
cur = cur.next
print("")
def search(self, item):
""" 搜索 """
cur = self.__head
if self.is_empty(): # 等同于 self.__head is None
return False
else:
while cur is not None: # 下一项不为空没到最后一个
if cur.item == item:
return True # --> 返回找到即可
cur = cur.next # 切记!下一项赋值给外部变量下一次循环使用
return False # 循环完一个也没找到
def add(self, item):
""" 头部添加BaseNode """
node = BaseNode(item)
node.next = self.__head # next 是头结点node对象
self.__head = node # 自己的头结点改为新插入的结点
def append(self, item):
""" 尾部追加 """
node = BaseNode(item) # 结点对象
if self.is_empty(): # self.__head is None
self.__head = node # 空链表追加一个
else:
cur = self.__head
while cur.next is not None:
cur = cur.next # 循环赋值变量相对于
# 循环完毕到最后一个,cur.next 是空! now cur 是尾结点
cur.next = node
def insert(self, pos, item):
        node = BaseNode(item) # node object
        if pos <= 0:
            self.add(item)
        elif pos >= self.length():
            self.append(item)
        # middle position
        else:
            cur = self.__head
            count = 0 # counter
            while count < (pos - 1):
                count += 1
                cur = cur.next
            # cur is now the element just before the insertion point; rewire the links
            node.next = cur.next # must happen before cur.next = node
            cur.next = node # link the previous node to the new one
def remove(self, item):
""" 单向链表删除 """
if self.is_empty():
return False # 删除失败
else:
cur = self.__head
pre = None # 自己定义的上个结点,为了方便寻找
while cur is not None:
if cur.item == item:
# 头结点:
if cur == self.__head:
self.__head = cur.next # 直接给接班人
# pre = None --> 省略不写
# 其他位置,pre 肯定不为None
else:
# 只需把上一个元素的next 改为我的next 因为要删除自己
pre.next = cur.next # 退位让贤,接班人旧人交接后来新人pre.next 是 cur.next
return True # or break 删除成功
# 异常情况
pre = cur
cur = cur.next #
if __name__ == '__main__':
# node = BaseNode(100)
# print(node.item)
# print(node.next)
lian = SingleLinkList()
lian.add(1)
lian.add(2)
lian.add(3)
lian.add(4)
# lian.append(3123123)
# print(lian.is_empty())
# print(lian.length())
# print(lian.search(7))
# lian.insert(666)
lian.remove(1)
lian.travel()
|
#!/usr/bin/env python
# Copyright (C) 2008-2011 University of Maryland
# All rights reserved.
# See LICENSE.txt for details.
# Author: Christopher Metting
# changed
"""
This script starts the OsRefl Off-Specular Reflectometry application.
"""
import os
import sys
import matplotlib
matplotlib.use('WXAgg')
# Normally the inversion package will be installed, but if it is not installed,
# augment sys.path to include the parent directory of the package. This
# assures that the module search path will include the package namespace and
# allows the application to be run directly from the source tree, even if the
# package has not been installed.
try:
import osrefl
except:
this_dir_path = os.path.dirname(os.path.abspath(__file__))
if os.path.basename(this_dir_path) == 'osrefl':
sys.path.insert(1, (os.path.dirname(this_dir_path)))
else:
print """\
*** To run this script, either install the inversion package or
*** place this module in the top-level directory of the package."""
if __name__ == "__main__":
if len(sys.argv) == 1:
file = os.path.join('examples', 'AuFit.py')
else:
file = sys.argv[1]
try:
f = open(file)
f.close()
except:
print "*** Script not found:", file
sys.exit()
print "Executing script", file, "...\n"
execfile(file)
|
import random
from state import *
class Searcher:
""" A class for objects that perform random state-space
search on an Eight Puzzle.
This will also be used as a superclass of classes for
other state-space search algorithms.
"""
def __init__(self, init_state, depth_limit):
'''constructs a new Searcher object'''
self.states = [init_state]
self.num_tested = 0
self.depth_limit = depth_limit
def should_add(self, state):
'''takes a State object called state and returns True if the called
Searcher should add state to its list of untested states, and False
otherwise.
'''
if (self.depth_limit != -1 and state.num_moves > self.depth_limit):
return False
if state.creates_cycle():
return False
return True
def add_state(self, new_state):
        '''takes a single State object called new_state and adds it to the
        Searcher's list of untested states
'''
self.states.append(new_state)
def add_states(self, new_states):
        '''takes a list of State objects called new_states and processes
the elements of new_states one at a time
'''
for s in new_states:
if self.should_add(s):
self.add_state(s)
def next_state(self):
""" chooses the next state to be tested from the list of
untested states, removing it from the list and returning it
"""
s = random.choice(self.states)
self.states.remove(s)
return s
def find_solution(self):
'''performs a full random state-space search, stopping when the
goal state is found or when the Searcher runs out of untested
states.
'''
while self.states != []:
s = self.next_state()
self.num_tested += 1
if s.is_goal():
return s
else:
self.add_states(s.generate_successors())
return None
def __repr__(self):
""" returns a string representation of the Searcher object
referred to by self.
"""
# You should *NOT* change this method.
s = str(len(self.states)) + ' untested, '
s += str(self.num_tested) + ' tested, '
if self.depth_limit == -1:
s += 'no depth limit'
else:
s += 'depth limit = ' + str(self.depth_limit)
return s
######## Searchers ######
class BFSearcher(Searcher):
'''a subclass of the Searcher, BFS performs breadth-first
search instead of random search
'''
def next_state(self):
'''overrides the next_state method that is inherited from Searcher.
this version of next_state follows FIFO ordering,
choosing the state that has been in the list the longest.
'''
s = self.states[0]
self.states.remove(s)
return s
class DFSearcher(Searcher):
'''a subclass of the Searcher, DFS performs depth-first
search instead of random search
'''
def next_state(self):
'''overrides the next_state method that is inherited from Searcher.
this version of next_state follows LIFO ordering,
choosing the state that has been in the list the shortest.
'''
s = self.states[-1]
self.states.remove(s)
return s
class GreedySearcher(Searcher):
'''a subclass of the Searcher, GreedySearcher performs greedy
search instead of random search using a heuristic
'''
def priority(self, state):
'''takes a State object called state and computes and
returns the priority of that state
'''
if self.heuristic == 1:
pri = -1 * (state.board.sum_dis())
elif self.heuristic == 2:
pri = -1 * (state.board.sum_dis() + state.board.find_min_dis())
else:
pri = -1 * state.board.num_misplaced()
return pri
def __init__(self, init_state, heuristic, depth_limit):
""" constructor for a GreedySearcher object
inputs:
* init_state - a State object for the initial state
* heuristic - an integer specifying which heuristic
function should be used when computing the priority
of a state
* depth_limit - the depth limit of the searcher
"""
self.heuristic = heuristic
self.states = [[self.priority(init_state), init_state]]
self.num_tested = 0
self.depth_limit = depth_limit
def add_state(self, state):
'''overrides the add_state method that is inherited from Searcher.
adds a sublist that is a [priority, state] pair
'''
self.states.append([self.priority(state), state])
def next_state(self):
'''overrides the next_state method that is inherited from Searcher.
this chooses one of the states with the highest priority.
'''
s = max(self.states)
self.states.remove(s)
s = s[1]
return s
class AStarSearcher(GreedySearcher):
'''a subclass of GreedySearcher; AStarSearcher performs A* search,
whose priority adds the number of moves made so far to the heuristic
'''
def priority(self, state):
'''overrides the priority method in GreedySearcher; takes a State
object called state and computes and
returns the priority of that state
'''
if self.heuristic == 1:
pri = -1 * (state.board.sum_dis() + state.num_moves)
elif self.heuristic == 2:
pri = -1 * (state.board.sum_dis() + state.num_moves + state.board.find_min_dis())
else:
pri = -1 * (state.board.num_misplaced() + state.num_moves)
return pri
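# Usage sketch (illustrative; assumes the State class from state.py provides
# is_goal(), generate_successors(), creates_cycle(), num_moves, and the board
# methods used by the heuristics):
#   searcher = AStarSearcher(init_state, heuristic=1, depth_limit=-1)
#   goal = searcher.find_solution()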
|
import sys
import logging
import logging.handlers
from random import random
import string
import datetime
import tempfile
import codecs
from io import StringIO
|
#!/usr/bin/python3
# -*-coding:Utf-8 -*
#Deus, in adjutorium meum intende
"""Add non breaking space when necessary
"""
import re
def add_non_breaking_space_for(file_name):
with open(file_name) as f:
content = f.read()
content = add_non_breaking_space(content)
with open(file_name,'w') as f:
f.write(content)
def add_non_breaking_space(data: str) -> str:
"""
data = re.sub(r'(?<! ):',' :',data)
data = re.sub(r'(?<! )!',' !',data)
data = re.sub(r'(?<! )\?',' ?',data)
data = re.sub(r'(?<! );',' ;',data)
return data
"""
chars = "! ? ; :".split()
for c in chars:
regex = r'(?<! )' + re.escape(c)
replacement = ' ' + c  # plain character in the replacement; escaping belongs only in the pattern
data = re.sub(regex, replacement, data)
return data
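# Illustrative behaviour (with the character list above):
#   add_non_breaking_space("Vraiment?")  -> "Vraiment ?"
#   add_non_breaking_space("Attends !")  -> "Attends !"   (already spaced, unchanged)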
|
from pytest import approx
from math import sqrt
def test_multiplication():
a, b = 2, 3
n = 1/2
assert (a * b) ** n == approx((a ** n) * (b ** n))
def test_exponentiation():
a = 2
n, m = 1/2, 4
assert (a ** n) ** m == approx((a ** m) ** n)
def test_roots_algo():
assert roots(4, 2) == approx(2)
assert roots(7, 8) == approx(7 ** (1 / 8))
assert roots(.1, 8) == approx(.1 ** (1 / 8))
assert roots(-8, 3) == approx(-8 ** (1 / 3))
def roots(a, n):
x, e, m = 1, 1e-8, n - 1
while abs(x - (x :=(m * x + a / x ** m) / n)) > e:
pass
return x
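# Note: roots() is Newton's method applied to f(x) = x**n - a; the update
#   x_{k+1} = x_k - (x_k**n - a) / (n * x_k**(n-1)) = ((n-1)*x_k + a / x_k**(n-1)) / n
# is exactly the assignment expression above, iterated until successive
# estimates differ by less than e.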
def test_identities_formula():
a, b = 2, 3
assert sqrt(a) - sqrt(b) \
== approx((sqrt(a) - sqrt(b)) * (sqrt(a) + sqrt(b)) / (sqrt(a) + sqrt(b))) \
== approx((a - b) / (sqrt(a) + sqrt(b)))
assert (sqrt(a) - sqrt(b)) ** -1 \
== approx(1 / (sqrt(a) - sqrt(b))) \
== approx((sqrt(a) + sqrt(b)) / (a -b))
|
"""QJ Exception Classes"""
class QJException(Exception):
"""Base exception class for all QJ thrown exceptions"""
class JobInvalid(QJException):
"""A specified Job is invalid"""
class JobNotFound(QJException):
"""A specified Job could not be found"""
class JobVersionNotFound(QJException):
"""A specified JobVersion could not be found"""
class ActiveJobVersionNotFound(QJException):
"""An active JobVersion for a specified Job could not be found"""
class JobQueryMissingAccountId(QJException):
"""A Job's query is missing the required account_id field"""
class JobQueryInvalid(QJException):
"""A Job's query is invalid SPARQL"""
class ResultSetNotFound(QJException):
"""A specified ResultSet could not be found"""
class ResultSetResultsLimitExceeded(QJException):
"""The number of Results in a ResultSet exceeds the configured maximum"""
class ResultSizeExceeded(QJException):
"""The size of an individual result exceeds the configured maximum"""
class RemediationError(Exception):
"""An error during Remediation"""
|
n = int(input())
if ((n%400 == 0) or (n%4 == 0 and n%100 != 0)):
print("YES")
else:
print("NO") |
def max_prod(a):
print(a)
mx1 = a[0]
mn1 = a[0]
maxAns = a[0]
for i in range(1, len(a)):
t = a[i]
mx = mx1
mn = mn1
mx1 = max(max(mx*t, t), mn*t)
mn1 = min(min(mx*t, t), mn*t)
maxAns = max(mx1, maxAns)
print(mx1, mn1, maxAns)
return maxAns
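# Tracking both the running maximum and minimum product is what makes this work:
# a negative element can turn the smallest product into the largest one.
# The two calls below should return 48 (from 2*3*-2*-4) and 6 (from 2*3).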
a=[2,3,-2,-4]
print(max_prod(a))
a=[2,3,-2,4]
print(max_prod(a)) |
from unittest import mock
import pytest
from albu_scheduler import TransformMultiStepScheduler, TransformSchedulerOnPlateau
class TestTransformStepScheduler:
def test_ok(self, image):
transforms = [mock.MagicMock() for _ in range(4)]
scheduled_transform = TransformMultiStepScheduler(
transforms=transforms, milestones=[0, 5, 10]
)
scheduled_transform(image=image)
transforms[0].assert_called_with(image=image)
for _ in range(5):
scheduled_transform.step()
scheduled_transform(image=image)
transforms[1].assert_called_with(image=image)
transforms[2].assert_not_called()
transforms[3].assert_not_called()
def test_no_zero_milestone(self):
transforms = [mock.MagicMock() for _ in range(4)]
scheduled_transform = TransformMultiStepScheduler(
transforms=transforms, milestones=[5, 10]
)
assert scheduled_transform.cur_transform.__class__.__name__ == "NoOp"
def test_too_much_milestones_fails(self):
transforms = [mock.MagicMock()]
milestones = [i for i in range(100)]
with pytest.raises(ValueError):
TransformMultiStepScheduler(transforms=transforms, milestones=milestones)
class TestTransformSchedulerOnPlateau:
@pytest.mark.parametrize(
"mode, metric_values",
[("max", [1, 2, 3, 3, 3, 1]), ("min", [10, 9, 8, 8, 8, 100])],
)
def test_ok(self, image, mode, metric_values):
transforms = [mock.MagicMock() for _ in range(4)]
scheduled_transform = TransformSchedulerOnPlateau(
transforms=transforms, mode=mode, patience=2
)
scheduled_transform(image=image)
transforms[0].assert_called_with(image=image)
for metric_value in metric_values[:-1]:
scheduled_transform.step(metric_value)
scheduled_transform(image=image)
transforms[0].assert_called_with(image=image)
transforms[0].reset_mock()
scheduled_transform.step(metric_values[-1])
scheduled_transform(image=image)
transforms[1].assert_called_with(image=image)
for _ in range(100):
scheduled_transform(image=image)
transforms[2].assert_not_called()
|
# -*- coding: utf-8 -*-
# 11724. Number of connected components
import sys
sys.setrecursionlimit(10**6)
class Graph:
def __init__(self, N):
self.N=N
self.relation=[ [] for _ in range(N)] # adjacency lists
self.visited=[False]*N # visited flags
def dfs(self, v):
if not self.visited[v-1]:
self.visited[v-1]=True # mark node v as visited
# find the nodes connected to node v.
# only nodes that have not been visited yet are followed.
for x in self.relation[v-1]:
if not self.visited[x-1]:
self.dfs(x)
def main():
N,M= map(int, sys.stdin.readline().split())
cnt=0
if (M>=0) and (M<=N*(N-1)//2):
g=Graph(N) # create the graph object
for _ in range(M): # connect the graph vertices
u,v=map(int, sys.stdin.readline().split())
g.relation[u-1].append(v)
g.relation[v-1].append(u)
# explore the nodes connected to node v
# if v has not been visited yet, start a new DFS and increase cnt
for v in range(1,N+1):
if not g.visited[v-1]:
g.dfs(v)
cnt+=1
print(cnt)
if __name__=='__main__':
main()
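# Each DFS call marks one whole connected component as visited, so cnt ends up
# equal to the number of connected components; the total cost is O(N + M).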
|
lst = ["2", "3", "4"]  # str.join() requires string elements
a = "beautiful numbers to follow: {}".format(" ".join(lst))
a = "beautiful numbers to follow: {}".format("\t".join(lst))
a = "beautiful numbers to follow: {}".format(' '.join(lst))
a = "beautiful numbers to follow: {}".format('\n'.join(lst))
b = 'beautiful numbers to follow: {}'.format(" ".join(lst))
b = 'beautiful numbers to follow: {}'.format("\n".join(lst))
b = 'beautiful numbers to follow: {}'.format(' '.join(lst))
b = 'beautiful numbers to follow: {}'.format(','.join(lst)) |
from sys import argv
from bottle import route, run, static_file, template
import bottle as b
@route('/')
def index():
return template("index.html", root='')
@route('/css/<filename:re:.*css>')
def css(filename):
return static_file(filename, root='css')
@route('/js/<filename:re:.*js>')
def js(filename):
return static_file(filename, root='js')
@route('/images/<filename>')
def images(filename):
return static_file(filename, root='images')
@route('/sound/<filename>')
def sound(filename):
return static_file(filename, root='sound')
def main():
b.run(host='0.0.0.0', port=int(argv[1]))
if __name__ == '__main__':
main()
|
# -*- coding: utf8 -*-
import tornado.web
from lib import userstruct
import time
import random
import hashlib
from lib.log import logger
def md5(txt):
m = hashlib.md5()
m.update(txt)
return m.hexdigest()
def handle(param):
ret = 0
userid = param.get('userid')
skey = param.get('skey')
otherid = param.get('otherid')
logger.info("%s,%s", userid, skey)
if userid and skey and otherid:
tmp = userstruct.read_redis(userid)
if not tmp or tmp.skey != skey:
return {'ret':0, 'data':{'des': 'skey error'}}
userstruct.write_redis_dict(tmp.userid, {})
other = userstruct.read_user(otherid)
if not other:
return {'ret':0, 'data':{'des': 'otherid error'}}
return {'ret':1, 'data':other.todict()}
return {'ret':0, 'data': {}} |
import json
import os
import logging
logger = logging.getLogger("vaex.webserver")
def exception(exception):
logger.exception("handled exception at server, all fine: %r", exception)
return ({"exception": {"class": str(exception.__class__.__name__), "msg": str(exception)}})
def error(msg):
return ({"error": msg})
def get_overrides():
return json.loads(os.environ.get('VAEX_SERVER_OVERRIDE', '{}'))
def hostname_override(hostname):
overrides = get_overrides()
if hostname in overrides:
override = overrides[hostname]
logger.warning('overriding hostname %s with %s', hostname, override)
return override
else:
return hostname
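# Example override map (illustrative): setting the environment variable
#   VAEX_SERVER_OVERRIDE='{"internal-db-host": "public.example.org"}'
# makes hostname_override("internal-db-host") return "public.example.org".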
|
print('======= Desafio 4 =======')
n1 = int(input('Digite um numero:'))
n2 = int(input('Digite outro numero:'))
r = n1 + n2
print('A soma entre {} e {} tem o resultado {}'.format(n1, n2, r))
|
### Part that reads the data from HDFS ###
import pandas as pd
import numpy as np
import pydoop.hdfs as hd
from lxml import objectify
with hd.open("/user/datostiempo/20160525_1341.xml") as archivo:
parsed = objectify.parse(archivo)
root = parsed.getroot()
prob_precipitacion = []
estado_cielo =[]
viento = []
temperatura = []
tempmax = []
tempmin = []
iteraccion = 0
errores = []
print "root : ", root
for row in root.prediccion.dia:
for row_precipitacion in row.prob_precipitacion:
aux_precipitacion = []
if (row_precipitacion != ''):
aux_precipitacion.append(row_precipitacion)
else:
errores.append(1)
prob_precipitacion.append(str(sum(aux_precipitacion) / float(len(aux_precipitacion))))
for row_cielo in row.estado_cielo:
aux_cielo = []
if (row_cielo != ''):
try:
int(row_cielo)
aux_cielo.append(row_cielo)
except ValueError:
errores.append(1)
else:
errores.append(1)
estado_cielo.append(str(sum(aux_cielo) / len(aux_cielo)))
for row_viento in row.viento:
aux_viento = []
if (row_viento.velocidad != ''):
aux_viento.append(row_viento.velocidad)
else:
errores.append(1)
viento.append(str(sum(aux_viento) / float(len(aux_viento))))
for row_temp in row.temperatura:
aux_temp = []
tempmax.append(str(row_temp.maxima))
tempmin.append(str(row_temp.minima))
if (iteraccion < 2):
for datos in row_temp.dato:
aux_temp.append(datos)
temperatura.append(str(sum(aux_temp) / float(len(aux_temp))))
iteraccion = iteraccion + 1
fila = prob_precipitacion+estado_cielo+viento+tempmax+tempmin
### Part that loads the data into HBase through the connector ###
import csv
import happybase
import time
batch_size = 1000
#host = "192.168.1.108"
host = 'localhost'
namespace = "calidadaire"
row_count = 0
start_time = time.time()
table_name = "medicion_tiempo"
def connect_to_hbase():
""" Connect to HBase server.
This will use the host, namespace, table name, and batch size as defined in
the global variables above.
"""
conn = happybase.Connection(host = host,
table_prefix = namespace,
table_prefix_separator = ":")
conn.open()
table = conn.table(table_name)
batch = table.batch(batch_size = batch_size)
return conn, batch
def insert_row(batch, row):
""" Insert a row into HBase.
Write the row to the batch. When the batch size is reached, rows will be
sent to the database.
Rows have the following schema:
[ precipitacion:1-7,estado_cielo:1-7,viento:1-7,
temperatura_maxima:1-7,temperatura_minima:1-7 ]
"""
batch.put("20160621",{"precipitaciones:1":row[0],"precipitaciones:2":row[1],"precipitaciones:3":row[2],"precipitaciones:4":row[3],"precipitaciones:5":row[4],"precipitaciones:6":row[5],"precipitaciones:7":row[6],
"estado_cielo:1":row[7],"estado_cielo:2":row[8],"estado_cielo:3":row[9],"estado_cielo:4":row[10],"estado_cielo:5":row[11],"estado_cielo:6":row[12],"estado_cielo:7":row[13],
"viento:1":row[14],"viento:2":row[15],"viento:3":row[16],"viento:4":row[17],"viento:5":row[18],"viento:6":row[19],"viento:7":row[20],
"temperatura_maxima:1":row[21],"temperatura_maxima:2":row[22],"temperatura_maxima:3":row[23],"temperatura_maxima:4":row[24],"temperatura_maxima:5":row[25],"temperatura_maxima:6":row[26],"temperatura_maxima:7":row[27],
"temperatura_minima:1":row[28],"temperatura_minima:2":row[29],"temperatura_minima:3":row[30],"temperatura_minima:4":row[31],"temperatura_minima:5":row[32],"temperatura_minima:6":row[33],"temperatura_minima:7":row[34]})
# After everything has been defined, run the script.
conn, batch = connect_to_hbase()
# print "Connect to HBase. table name: %s, batch size: %i" % (table_name, batch_size)
try:
insert_row(batch, fila)
batch.send()
finally:
# No matter what happens, close the file handle.
conn.close()
|
import matplotlib.pyplot as plt
import numpy as np
from math import *
n= np.arange(0,38, 0.1);
x = np.sin((pi*n)/(19))
plt.plot(n,np.abs(x))
plt.grid()
plt.xlabel("n")
plt.ylabel("|x[n]|")
plt.show() |
# Solution of the heat equation via the Crank-Nicolson (CN) method
import matplotlib.pylab as p
from mpl_toolkits.mplot3d import Axes3D
from numpy import *
import numpy
Max = 51; n = 50; m = 50
Ta = zeros((Max),float); Tb =zeros((Max),float); Tc = zeros((Max),float)
Td = zeros((Max),float); a = zeros((Max),float); b = zeros((Max),float)
c = zeros((Max),float); d = zeros((Max),float); x = zeros((Max),float)
t = zeros( (Max, Max),float)
def Tridiag(a, d, c, b, Ta, Td, Tc, Tb, x, n):
Max = 51
h = zeros( (Max), float )
p = zeros( (Max), float )
for i in range(1,n+1):
a[i] = Ta[i]
b[i] = Tb[i]
c[i] = Tc[i]
d[i] = Td[i]
h[1] = c[1]/d[1]
p[1] = b[1]/d[1]
for i in range(2,n+1):
h[i] = c[i] / (d[i]-a[i]*h[i-1])
p[i] = (b[i] - a[i]*p[i-1]) / (d[i]-a[i]*h[i-1])
x[n] = p[n]
for i in range( n - 1, 1,-1 ): x[i] = p[i] - h[i]*x[i+1]
width = 1.0; height = 0.1; ct = 1.0
for i in range(0, n): t[i,0] = 0.0
for i in range( 1, m): t[0][i] = 0.0
h = width / ( n - 1 )
k = height / ( m - 1 )
r = ct * ct * k / ( h * h )
for j in range(1,m+1):
t[1,j] = 0.0
t[n,j] = 0.0 # BCs
for i in range( 2, n): t[i][1] = sin( pi * h *i) # ICs
for i in range(1, n+1): Td[i] = 2. + 2./r
Td[1] = 1.; Td[n] = 1.
for i in range(1,n ): Ta[i] = -1.0; Tc[i] = -1.0; # Off diagonal
Ta[n-1] = 0.0; Tc[1] = 0.0; Tb[1] = 0.0; Tb[n] = 0.0
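# The tridiagonal system assembled above is the Crank-Nicolson discretisation of
# T_t = ct^2 * T_xx with r = ct^2 * k / h^2:
#   -T[i-1,j] + (2 + 2/r) T[i,j] - T[i+1,j] = T[i-1,j-1] + (2/r - 2) T[i,j-1] + T[i+1,j-1]
# i.e. diagonal Td = 2 + 2/r, off-diagonals Ta = Tc = -1, with the right-hand side Tb
# rebuilt from the previous time level inside the j-loop below.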
print("I'm working hard, wait for fig while I count to 50")
for j in range(2,m+1):
print(j)
for i in range(2,n): Tb[i] = t[i-1][j-1] + t[i+1][j-1] \
+ (2/r-2) * t[i][j-1]
Tridiag(a, d, c, b, Ta, Td, Tc, Tb, x, n) # Solve system
for i in range(1, n+1): t[i][j] = x[i]
print("Finished")
x = list(range(1, m+1)) # Plot every other x
y = list(range(1, n+1)) # every other y
X, Y = p.meshgrid(x,y)
def functz(t): # Potential
z = t[X, Y]
return z
Z = functz(t)
fig = p.figure()
ax = Axes3D(fig)
ax.plot_wireframe(X, Y, Z, color= 'r')
ax.set_xlabel('t')
ax.set_ylabel('x')
ax.set_zlabel('T')
p.show() # Display figure
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^product', views.product, name='product'),
url(r'^cart', views.cart, name='cart'),
]
|
# -*- coding: utf-8 -*-
# Copyright 2019 Pierre-Luc Delisle. All Rights Reserved.
# #
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# https://opensource.org/licenses/MIT
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import yaml
import logging
from samitorch.configs.configurations import Configuration
from samitorch.factories.parsers import AbstractConfigurationParserFactory
from samitorch.configs.configurations import UNetModelConfiguration
from UNet3D.config.configurations import UNet3DDatasetConfiguration, UNet3DTrainingConfiguration, \
VariableConfiguration, LoggerConfiguration
class UNet3DModelsParserFactory(AbstractConfigurationParserFactory):
def parse(self, path: str):
with open(path, 'r') as config_file:
try:
config = yaml.load(config_file, Loader=yaml.FullLoader)
unets = UNetModelConfiguration(config["segmenter"])
return unets
except yaml.YAMLError as e:
logging.error(
"Unable to read the config file: {} with error {}".format(path, e))
def register(self, model_type: str, configuration_class):
pass
class UNet3DDatasetConfigurationParserFactory(AbstractConfigurationParserFactory):
def __init__(self) -> None:
pass
def parse(self, path: str):
"""
Parse a dataset configuration file.
Args:
path (str): Configuration YAML file path.
Returns:
:obj:`samitorch.config.configurations.DatasetConfiguration`: An object containing dataset's properties.
"""
with open(path, 'r') as config_file:
try:
config = yaml.load(config_file, Loader=yaml.FullLoader)
return UNet3DDatasetConfiguration(config["dataset"]["MRBrainS"])
except yaml.YAMLError as e:
logging.error(
"Unable to read the config file: {} with error {}".format(path, e))
def register(self, model_type: str, configuration_class: Configuration):
pass
class TrainingConfigurationParserFactory(AbstractConfigurationParserFactory):
def __init__(self):
pass
def parse(self, path: str):
"""
Parse a training configuration file.
Args:
path (str): Configuration YAML file path.
Returns:
:obj:`samitorch.config.configurations.DatasetConfiguration`: An object containing dataset's properties.
"""
with open(path, 'r') as config_file:
try:
config = yaml.load(config_file, Loader=yaml.FullLoader)
config = UNet3DTrainingConfiguration(config["training"])
return config
except yaml.YAMLError as e:
logging.error(
"Unable to read the config file: {} with error {}".format(path, e))
def register(self, model_type: str, configuration_class):
pass
class VariableConfigurationParserFactory(AbstractConfigurationParserFactory):
def __init__(self):
pass
def parse(self, path: str):
with open(path, 'r') as config_file:
try:
config = yaml.load(config_file, Loader=yaml.FullLoader)
config = VariableConfiguration(config["variables"])
return config
except yaml.YAMLError as e:
logging.error(
"Unable to read the config file: {} with error {}".format(path, e))
def register(self, model_type: str, configuration_class):
pass
class LoggerConfigurationParserFactory(AbstractConfigurationParserFactory):
def __init__(self):
pass
def parse(self, path: str):
with open(path, 'r') as config_file:
try:
config = yaml.load(config_file, Loader=yaml.FullLoader)
config = LoggerConfiguration(config["logger"])
return config
except yaml.YAMLError as e:
logging.error(
"Unable to read the config file: {} with error {}".format(path, e))
def register(self, model_type: str, configuration_class):
pass
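# Usage sketch (hypothetical path and YAML layout): each factory reads one top-level
# section of the same YAML file, e.g.
#   training_cfg = TrainingConfigurationParserFactory().parse("config/config.yaml")
#   dataset_cfg = UNet3DDatasetConfigurationParserFactory().parse("config/config.yaml")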
|
import serial as ser
PORT = "/dev/ttyACM0"
BAUDRATE = 230400
TIMEOUT = 1
class SerialPort:
def __init__(self, PORT="/dev/ttyACM0", BAUD=9600, timeout_secs=TIMEOUT):
self.SUCCESS_OPEN = False
try:
self.serialObj = ser.Serial(PORT, BAUD, timeout=timeout_secs)
self.SUCCESS_OPEN = True
except ser.SerialException as e:
print("Cannot open serial port, try again")
print(f"Cause: {e}")
self.x_data_buffer = None
self.y_data_buffer = None
def getNewData(self):
try:
data = str(self.serialObj.readline()).strip("b''")
data = data[0 : data.find("\\")]
return data
except:
exit(1)
if __name__ == '__main__':
UART = SerialPort(PORT, BAUDRATE)
flagFound = False
if UART.SUCCESS_OPEN:
print("Serial port successfully open!")
print(f"{PORT}, {BAUDRATE} bps")
while True:
data = UART.getNewData()
if data.find("Test", 0):
flagFound = True
if flagFound:
print(data)
|
class Carrito:
# Create the shopping cart in the session variable
def __init__(self, request):
# Initialize the cart if it does not exist yet
self.request = request
self.session = request.session
carrito = self.session.get("carrito")
if not carrito:
carrito = self.session["carrito"] = {}
# This line tells Django to drop the cart session when the browser is closed
request.session.set_expiry(0)
# End
self.carrito = carrito
def add(self, producto):
# Store the product in the cart if it is not there yet
if str(producto.id) not in self.carrito.keys():
self.carrito[producto.id] = {
"producto_id": producto.id,
"nom_producto": producto.nombre,
"descripcion": producto.descripcion,
"cantidad": 1,
"precio": str(producto.precio_unitario),
"foto": producto.ruta_foto.url if producto.ruta_foto else False,
"empresa": producto.empresa.nom_empresa,
}
# If the product already exists, add one to its quantity
else:
for key, value in self.carrito.items():
if key == str(producto.id):
value["cantidad"] += 1
break
self.save()
def add_cart(self, cart):
# Store the product in the cart when the customer logs in
if str(cart.producto_id) not in self.carrito.keys():
self.carrito[cart.producto_id] = {
"producto_id": cart.producto_id,
"nom_producto": cart.producto.nombre,
"descripcion": cart.producto.descripcion,
"cantidad": cart.cantidad,
"precio": str(cart.producto.precio_unitario),
"foto": cart.producto.ruta_foto.url if cart.producto.ruta_foto else False,
"empresa": cart.producto.empresa.nom_empresa,
}
else:
for key, value in self.carrito.items():
if key == str(cart.producto.id):
value["cantidad"] += 1
break
self.save()
def save(self):
self.session["carrito"] = self.carrito
self.session.modified = True
def remove(self, producto):
# Remove the product from the cart
producto_id = str(producto.id)
if producto_id in self.carrito:
del self.carrito[producto_id]
self.save()
def decrement(self, producto):
# Decrease the product quantity
for key, value in self.carrito.items():
if key == str(producto.id):
value["cantidad"] -= 1
if value["cantidad"] < 1:
self.remove(producto)
else:
self.save()
break
def clear(self):
self.session["carrito"] = {}
self.session.modified = True
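# Usage sketch from a Django view (illustrative):
#   carrito = Carrito(request)
#   carrito.add(producto)       # add or increment a product
#   carrito.decrement(producto) # decrease, removing it when the count reaches 0
#   carrito.clear()             # empty the session cart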
|
class Solution:
# cyclic-sort approach: place each value v at index v - 1, then scan for the slot that disagrees
def find_error(self, nums):
if nums is not None:
i = 0
while i < len(nums):
while nums[i] != i + 1 and nums[nums[i] - 1] != nums[i]:
nums[i], nums[nums[i] - 1] = nums[nums[i] - 1], nums[i]
i = i + 1
print(nums)
return [[x, i + 1] for i, x in enumerate(nums) if x != i + 1][0]  # [duplicate, missing]
def findErrorNums(self, nums):
if nums is not None:
cl_nums = [-1] * (len(nums) + 1)
dup = None
for x in nums:
if dup is None and cl_nums[x] == x:
dup = cl_nums[x]
cl_nums[x] = x
return [dup, [i for i, x in enumerate(cl_nums) if x == -1 and i != 0][0]]
s = Solution()
print(s.findErrorNums([1,2,2,4]))
print(s.find_error([4,2,2,1]))
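# Both approaches should report the duplicate followed by the missing value,
# i.e. [2, 3] for the two sample inputs above.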
|
import tornado.ioloop # type: ignore
import tornado.web # type: ignore
import tornado.websocket # type: ignore
import json
import secrets
from typing import List, Dict, Any, Tuple
from enum import Enum, auto
from zipper import Zipper
class Set(object):
def __init__(self, question: str, answers: List[str], correct_answer: str) -> None:
self.question = question
self.answers = answers
self.correct_answer = correct_answer
def is_correct(self, answer: str) -> bool:
return answer == self.correct_answer
def __str__(self):
return f'{self.question}: {self.answers}'
def to_json(self) -> Dict[str, Any]:
return {
'question': self.question,
'answers': self.answers
}
class AutoName(Enum):
def _generate_next_value_(name, start, count, last_values):
return name
class Response(AutoName):
OPENED = auto()
CURRENT_QUESTION = auto()
ANSWER_SET = auto()
CURRENT_PLAYER_COUNT = auto()
CURRENT_PLAYER_NAMES = auto()
NEXT_QUESTION = auto()
QUESTIONS_INFO = auto()
NO_SUCH_ROOM = auto()
class Request(AutoName):
CURRENT_QUESTION = auto()
SET_ANSWER = auto()
JOIN_ROOM = auto()
class Engine(object):
def __init__(self) -> None:
self.questions = Zipper(
Set("What is your name?", ["a", "b", "c", "d"], "b"),
[ Set("Is this the next question?", ["yes", "no", "maybe", "what's it to you?"], "yes") ]
)
def next_question(self) -> None:
self.questions.next()
@property
def current_question(self):
return self.questions.current
class Room(object):
def __init__(self, key=None) -> None:
self.engine = Engine()
self.active_connections : List[EchoWebSocket] = []
self.key = key
def publish_message(self, message: str) -> None:
for connection in self.active_connections:
connection.publish(message)
def add_connection(self, connection: Any) -> None:
print('adding connection..')
self.active_connections.append(connection)
connection.answer = None
self.send_response_to_single(Response.CURRENT_QUESTION, connection)
self.send_response_to_all(Response.CURRENT_PLAYER_COUNT)
self.send_response_to_all(Response.CURRENT_PLAYER_NAMES)
self.send_response_to_all(Response.QUESTIONS_INFO)
def remove_connection(self, connection: Any) -> None:
self.active_connections.remove(connection)
self.send_response_to_all(Response.CURRENT_PLAYER_COUNT)
@property
def all_answered(self):
return all (connection.answer is not None for connection in self.active_connections)
@property
def number_of_active_connections(self) -> int:
return len(self.active_connections)
def make_response_json(self, response: Response) -> str:
type_ = response.name
props: Dict[str, Any] = {}
if response is Response.OPENED:
pass
elif response is Response.CURRENT_QUESTION:
props.update(self.engine.current_question.to_json())
elif response is Response.ANSWER_SET:
pass
elif response is Response.CURRENT_PLAYER_COUNT:
props.update({'count': self.number_of_active_connections})
elif response is Response.CURRENT_PLAYER_NAMES:
props.update({'names': [con.name for con in self.active_connections] })
print('sending', props)
elif response is Response.NEXT_QUESTION:
type_ = Response.CURRENT_QUESTION.name
props.update(self.engine.current_question.to_json())
elif response is Response.QUESTIONS_INFO:
props.update({
'amount': self.engine.questions.size,
'index': self.engine.questions.current_index
})
return json.dumps({
"response": type_,
"props": props
})
def process_request(self, connection: Any, request: Request, args: Dict[str, Any]) -> None:
if request is Request.CURRENT_QUESTION:
self.send_response_to_single(Response.CURRENT_QUESTION, connection)
elif request is Request.SET_ANSWER:
connection.answer = args['answer']
self.send_response_to_single(Response.ANSWER_SET, connection)
if self.all_answered:
self.engine.next_question()
self.send_response_to_all(Response.NEXT_QUESTION)
self.send_response_to_all(Response.QUESTIONS_INFO)
def send_response_to_all(self, response: Response):
self.publish_message(self.make_response_json(response))
def send_response_to_single(self, response: Response, connection):
connection.write_message(self.make_response_json(response))
def parse_request_json(request: str) -> Tuple[Request, Any]:
as_json = json.loads(request)
if 'request' not in as_json:
if 'JOIN_ROOM' in as_json:
return (Request.JOIN_ROOM, as_json['JOIN_ROOM'])
return None
request = as_json['request']
if request == 'CURRENT_QUESTION':
return (Request.CURRENT_QUESTION, None)
elif request == 'SET_ANSWER':
return (Request.SET_ANSWER, as_json['props'])
return None
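# Illustrative wire format handled by parse_request_json:
#   {"JOIN_ROOM": "<room_key>"}
#   {"request": "CURRENT_QUESTION"}
#   {"request": "SET_ANSWER", "props": {"answer": "b"}}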
rooms : List[Room] = []
def create_new_room() -> Room:
global rooms
known_keys = [room.key for room in rooms]
while True:
token = secrets.token_hex(4)
if token not in known_keys:
break
new_room = Room(token)
rooms.append(new_room)
return new_room
def find_room(room_key: str) -> Room:
global rooms
print(rooms)
for room in rooms:
if room.key == room_key:
return room
return None
class EchoWebSocket(tornado.websocket.WebSocketHandler):
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Headers", "x-requested-with")
self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')
def check_origin(self, origin):
return True
def publish(self, message: str) -> None:
self.write_message(message)
def open(self):
self.room = None
@property
def name(self):
return 'Jim'
def on_message(self, message):
parsed = parse_request_json(message)
print('Got request', parsed)
if parsed is None:
return
(request, args) = parsed
if request is Request.JOIN_ROOM:
self.room = find_room(args)
if self.room is None:
self.publish(json.dumps({
'response':'NO_SUCH_ROOM'
}))
return
self.room.add_connection(self)
return
elif self.room is None:
return
self.room.process_request(self, request, args)
def on_close(self):
if self.room is None:
return
self.room.remove_connection(self)
class CreateRoomHandler(tornado.web.RequestHandler):
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Headers", "x-requested-with")
self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')
def check_origin(self, origin):
return True
def get(self):
room = create_new_room()
self.write(json.dumps({
'room_name': room.key
}))
class JoinRoomHandler(tornado.web.RequestHandler):
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Headers", "x-requested-with")
self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')
def check_origin(self, origin):
return True
def get(self):
room_key = self.get_query_argument('room_key')
room = find_room(room_key)
if room is None:
self.write(json.dumps({
'error': 'Room not found'
}))
else:
self.write(json.dumps({
}))
if __name__ == "__main__":
import sh
import os
os.chdir("../")
elm_make = sh.Command("elm-make")
output = elm_make("src/Page/Main.elm", output="elm.js")
os.chdir("server")
print(output)
print('Finished building Elm file..')
settings = {
'debug': True,
'static_path': '../'
}
application = tornado.web.Application([
(r"/websocket", EchoWebSocket),
(r"/create_room", CreateRoomHandler),
(r"/join_room", JoinRoomHandler),
], **settings)
print('Listening on http://localhost:8888..')
application.listen(8888)
tornado.ioloop.IOLoop.current().start() |
# Fit a classifier to the 20 newsgroup dataset
#https://scikit-learn.org/dev/tutorial/text_analytics/working_with_text_data.html
#
#https://scikit-learn.org/stable/modules/generated/sklearn.datasets.fetch_20newsgroups_vectorized.html
import superimport
import numpy as np
categories = ['alt.atheism', 'soc.religion.christian','comp.graphics', 'sci.med']
from sklearn.datasets import fetch_20newsgroups
data = fetch_20newsgroups(
subset='train', categories=categories, shuffle=True, random_state=42)
print("Num. training docs {}".format(len(data.data))) # 2257
for line in data.data[0].split("\n"): print(line)
"""
Doc 0 has length 21, class id is 1, label is comp.graphics
From: sd345@city.ac.uk (Michael Collier)
Subject: Converting images to HP LaserJet III?
Nntp-Posting-Host: hampton
Organization: The City University
Lines: 14
Does anyone know of a good way (standard PC application/PD utility) to
convert tif/img/tga files into LaserJet III format. We would also like to
do the same, converting to HPGL (HP plotter) files.
Please email any response.
Is this the correct group?
Thanks in advance. Michael.
--
Michael Collier (Programmer) The Computer Unit,
Email: M.P.Collier@uk.ac.city The City University,
Tel: 071 477-8000 x3769 London,
Fax: 071 477-8565 EC1V 0HB.
"""
#from sklearn.model_selection import train_test_split
X_train = data.data
y_train = data.target
data_test = fetch_20newsgroups(subset='test',
categories=categories, shuffle=True, random_state=42)
X_test = data_test.data
y_test = data_test.target
print("Num. testing docs {}".format(len(data_test.data))) # 1502
###################
# Fit logreg using tfidf
from sklearn.pipeline import Pipeline
import nltk
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
pipeline = Pipeline([
('bow', CountVectorizer(tokenizer=nltk.word_tokenize)),
('tfidf', TfidfTransformer(use_idf=True, smooth_idf=True))
])
X_train_tfidf = pipeline.fit_transform(X_train)
print(np.shape(X_train_tfidf)) # (2257, 42100)
X_test_tfidf = pipeline.transform(X_test)
print(np.shape(X_test_tfidf)) # (1502, 42100)
from sklearn.linear_model import LogisticRegression
# For simplicity, we set the L2 regularizer to a constant lambda=1/C=1e-3
logreg_tfidf = LogisticRegression(C=1e3, solver='lbfgs', multi_class='multinomial')
logreg_tfidf.fit(X_train_tfidf, y_train)
ypred_tfidf = logreg_tfidf.predict(X_test_tfidf)
accuracy_tfidf = np.mean(ypred_tfidf == y_test)
print(accuracy_tfidf) # 90.6%
# According to https://scikit-learn.org/dev/tutorial/text_analytics/working_with_text_data.html,
# multinomial naive Bayes (on tf-idf rep) gets 83.5% and an SVM gets 91%
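# A minimal sketch (not part of the original run): the multinomial naive Bayes
# baseline mentioned above, fitted on the same tf-idf features for comparison.
from sklearn.naive_bayes import MultinomialNB
nb = MultinomialNB()
nb.fit(X_train_tfidf, y_train)
print(np.mean(nb.predict(X_test_tfidf) == y_test))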
###############
# Now use word embeddings
# https://spacy.io/
import spacy
nlp = spacy.load('en_core_web_md', disable=['tagger','parser','ner']) # Just tokenize
X_train_embed = [nlp(doc).vector for doc in X_train]
print(np.shape(X_train_embed)) # (2257, 300)
X_test_embed = [nlp(doc).vector for doc in X_test]
print(np.shape(X_test_embed)) # (1502, 300)
from sklearn.linear_model import LogisticRegression
# For simplicity, we set the L2 regularizer to a constant lambda=1/C=1e-3
logreg_embed = LogisticRegression(C=1e3, solver='lbfgs', multi_class='multinomial')
logreg_embed.fit(X_train_embed, y_train)
ypred_embed = logreg_embed.predict(X_test_embed)
accuracy_embed = np.mean(ypred_embed == y_test)
print(accuracy_embed) # 86.9%
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 2 16:43:33 2022
@author: Dartoon
"""
import numpy as np
import matplotlib.pyplot as plt
np.set_printoptions(precision=4)
import matplotlib as mat
import matplotlib.lines as mlines
from matplotlib import colors
mat.rcParams['font.family'] = 'STIXGeneral'
host=plt.figure(figsize=(14.5,12))
ax=host.add_subplot(111) #to get the log(1+z) and z label
import matplotlib as mpl
mpl.rc('image', cmap='jet')
# import sys
# sys.path.insert(0,'../py_tools')
########## input local data ####
#==============================================================================
# The selection for dm, host_total and dmag is done locally here
#==============================================================================
#from dmag import pass_dmag
########input 25 local by Bennert++2011 ############
bloc = np.array([[ 0.054 , 10.12 , 0.24 , 7.436 , 0.4 ],
[ 0.043 , 10.95 , 0.23 , 8.006 , 0.4 ],
[ 0.076 , 10.33 , 0.22 , 7.436 , 0.4 ],
[ 0.041 , 10.38 , 0.23 , 7.166 , 0.4 ],
[ 0.051 , 10.5 , 0.23 , 8.516 , 0.4 ],
[ 0.0524, 10.32 , 0.23 , 6.976 , 0.4 ],
[ 0.0475, 9.83 , 0.24 , 7.666 , 0.4 ],
[ 0.055 , 10.41 , 0.23 , 7.836 , 0.4 ],
[ 0.0355, 10.33 , 0.22 , 7.766 , 0.4 ],
[ 0.021 , 10.2 , 0.22 , 7.366 , 0.4 ],
[ 0.038 , 10.26 , 0.22 , 7.356 , 0.4 ],
[ 0.0229, 9.94 , 0.24 , 7.496 , 0.4 ],
[ 0.047 , 10.14 , 0.22 , 7.646 , 0.4 ],
[ 0.0559, 9.65 , 0.22 , 6.976 , 0.4 ],
[ 0.0501, 10.11 , 0.23 , 7.956 , 0.4 ],
[ 0.0541, 10.04 , 0.23 , 7.076 , 0.4 ],
[ 0.0558, 10.73 , 0.21 , 7.506 , 0.4 ],
[ 0.0365, 10.3 , 0.24 , 7.476 , 0.4 ],
[ 0.0304, 10.24 , 0.24 , 7.826 , 0.4 ],
[ 0.0481, 9.92 , 0.22 , 7.276 , 0.4 ],
[ 0.0483, 10. , 0.23 , 7.626 , 0.4 ],
[ 0.0465, 9.82 , 0.23 , 7.546 , 0.4 ],
[ 0.0532, 9.95 , 0.23 , 7.776 , 0.4 ],
[ 0.0585, 10.33 , 0.22 , 7.216 , 0.4 ],
[ 0.0409, 10.33 , 0.22 , 7.34 , 0.4 ]])
########input 30 local by Haring 04 ############
hloc = np.array([[3.7484e-03, 1.1778e+01, 1.8000e-01, 9.4771e+00, 2.1298e-01],
[3.4930e-03, 1.0362e+01, 1.8000e-01, 7.1461e+00, 1.4262e-01],
[2.4703e-03, 1.0833e+01, 1.8000e-01, 8.0000e+00, 1.0206e-01],
[4.2822e-03, 1.1556e+01, 1.8000e-01, 8.6335e+00, 2.1306e-01],
[7.3369e-03, 1.1556e+01, 1.8000e-01, 8.7160e+00, 3.2389e-01],
[2.4297e-02, 1.1748e+01, 1.8000e-01, 8.7243e+00, 3.1358e-01],
[1.3564e-02, 1.1462e+01, 1.8000e-01, 8.5185e+00, 2.0838e-01],
[3.6091e-03, 9.7924e+00, 1.8000e-01, 7.1461e+00, 1.8221e-01],
[5.6030e-03, 1.1114e+01, 1.8000e-01, 7.5682e+00, 1.6526e-01],
[6.7825e-03, 1.1462e+01, 1.8000e-01, 9.3979e+00, 3.9967e-01],
[1.7743e-04, 1.0568e+01, 1.8000e-01, 7.6532e+00, 1.8656e-01],
[1.8910e-04, 8.9031e+00, 1.8000e-01, 6.3979e+00, 3.4062e-01],
[2.6564e-03, 1.0839e+01, 1.8000e-01, 7.6435e+00, 4.6942e-01],
[5.3251e-03, 1.0881e+01, 1.8000e-01, 7.1461e+00, 2.2797e-01],
[2.2610e-03, 1.1079e+01, 1.8000e-01, 9.0000e+00, 2.3856e-01],
[4.8618e-03, 1.0833e+01, 1.8000e-01, 8.3222e+00, 2.8354e-01],
[2.6099e-03, 1.0204e+01, 1.8000e-01, 8.0000e+00, 6.1650e-01],
[2.7029e-03, 1.0301e+01, 1.8000e-01, 7.2041e+00, 4.3571e-01],
[5.3483e-03, 1.0987e+01, 1.8000e-01, 8.2788e+00, 2.5972e-01],
[6.0889e-03, 1.1114e+01, 1.8000e-01, 8.4914e+00, 2.0586e-01],
[3.5627e-03, 1.0079e+01, 1.8000e-01, 8.4771e+00, 2.4800e-01],
[3.6556e-03, 1.0964e+01, 1.8000e-01, 8.0414e+00, 5.6735e-01],
[3.4930e-03, 1.0643e+01, 1.8000e-01, 7.7482e+00, 4.3388e-01],
[2.2842e-03, 1.1431e+01, 1.8000e-01, 9.0000e+00, 3.3450e-01],
[3.9109e-03, 1.1690e+01, 1.8000e-01, 9.3010e+00, 4.8455e-02],
[2.7262e-03, 1.1041e+01, 1.8000e-01, 8.2304e+00, 6.2621e-01],
[6.0195e-03, 1.0568e+01, 1.8000e-01, 8.3802e+00, 1.0654e-01],
[5.3483e-03, 1.0176e+01, 1.8000e-01, 7.1139e+00, 1.8447e-01],
[3.0748e-03, 9.8451e+00, 1.8000e-01, 6.5441e+00, 1.4739e-01],
[2.3349e-05, 1.0041e+01, 1.8000e-01, 6.5682e+00, 1.5707e-01]])
zs = np.array([3.7484e-03, 3.4930e-03, 2.4703e-03, 4.2822e-03, 7.3369e-03,
2.4297e-02, 1.3564e-02, 3.6091e-03, 5.6030e-03, 6.7825e-03,
1.7743e-04, 1.8910e-04, 2.6564e-03, 5.3251e-03, 2.2610e-03,
4.8618e-03, 2.6099e-03, 2.7029e-03, 5.3483e-03, 6.0889e-03,
3.5627e-03, 3.6556e-03, 3.4930e-03, 2.2842e-03, 3.9109e-03,
2.7262e-03, 6.0195e-03, 5.3483e-03, 3.0748e-03, 2.3349e-05])
hloc[:,0] = zs
#############################################################
###################fitting with MCMC#########################
x=np.append(bloc[:,1], hloc[:,1])
y=np.append(bloc[:,3], hloc[:,3])
yerr=(np.append(bloc[:,2], hloc[:,2])**2+np.append(bloc[:,4], hloc[:,4])**2)**0.5 # 0.2 is the uncertainty level for the L_R
def lnlike(theta, x, y, yerr):
m, b, sint= theta
model = m * x + b
sigma2 = (yerr**2 + sint**2)
if sint>=0 :
return -0.5*(np.sum((y-model)**2/sigma2)+np.sum(np.log(2*np.pi*sigma2)))
else:
return -np.inf
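# lnlike is the standard Gaussian log-likelihood with an intrinsic scatter term:
#   ln L = -1/2 * sum[ (y_i - m*x_i - b)^2 / s_i^2 + ln(2*pi*s_i^2) ],  s_i^2 = yerr_i^2 + sint^2
# so the scatter sint is fitted alongside the slope m and intercept b.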
import scipy.optimize as op
nll = lambda *args: -lnlike(*args)
result = op.minimize(nll, [1.036, -1.947, 0.3], args=(x, y, yerr))
m_ml, b_ml,sint_ml= result["x"]
def lnprior(theta):
m, b, sint = theta
if -5.0 < m < 5 and -10 < b < 10.0 and 0 < sint < 10:
return 0.0
return -np.inf
def lnprob(theta, x, y, yerr):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr)
ndim, nwalkers = 3, 100
pos = [result["x"] + 1e-4*np.random.randn(ndim) for i in range(nwalkers)]
import emcee
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(x, y, yerr))
sampler.run_mcmc(pos, 500)
samples = sampler.chain[:, 50:, :].reshape((-1, ndim))
m_mid, b_mid, sint_mid =np.percentile(samples, 50,axis=0)
######################
style = 1#input("0 as SS13, 1 as delta(logMBH):\n")
if style == 0:
plt.plot(np.log10(hloc[:,0]+1),
10**(hloc[:,3]-hloc[:,1]), '.',color='black',markersize=10)
plt.plot(np.log10(bloc[:,0]+1),
10**(bloc[:,3]-bloc[:,1]),'.',color='gray',markersize=10)
elif style ==1:
xl = np.linspace(-0.9, 13, 100)
plt.errorbar(np.log10(hloc[:,0]+1),
hloc[:,3]-(m_ml*hloc[:,1]+b_ml),yerr=(hloc[:,2]**2 + hloc[:,4]**2)**0.5 ,fmt='.',color='black',markersize=10)
plt.errorbar(np.log10(bloc[:,0]+1),
bloc[:,3]-(m_ml*bloc[:,1]+b_ml),yerr=(bloc[:,2]**2 + bloc[:,4]**2)**0.5 ,fmt='.',color='gray',markersize=10)
ty=xl*0
ty1=xl*0+np.std(y-(m_ml*x+b_ml))
ty2=xl*0-np.std(y-(m_ml*x+b_ml))
plt.fill_between(xl,ty1,ty2,color='lightgray',zorder=-50, alpha = 0.5)
Bkc=mlines.Line2D([], [], color='gray', ls='', marker='.', markersize=15)
Hkc=mlines.Line2D([], [], color='black', ls='', marker='.', markersize=15)
######################
# f0 ='data/SS13_MM.txt'
# ss = np.loadtxt(f0)[:,1:] #0 redshift; 1 M*; 2 BH mass;
# f1 ='data/B11_MM.txt'
# b11 = np.loadtxt(f1)[:,1:] #0 redshift; 1 M*; 2 BH mass;
# f2 = 'data/Cisternas_data.txt'
# cis11 = np.loadtxt(f2) #0 redshift;
# f3 = 'data/high_edd_agn.txt'
# Knud = np.loadtxt(f3)[:,2:] # 0 redshift; 1 L_bol; 2 M_BH; 3 M_acc; 4 M_*
ss = np.array([[ 0.717, 10.66 , 6.936, 1. ],
[ 1.065, 9.8 , 6.836, 0.5 ],
[ 0.96 , 10.49 , 7.116, 0.23 ],
[ 0.97 , 10.29 , 7.986, 1. ],
[ 0.544, 11.01 , 8.376, 1. ],
[ 1.044, 10.33 , 7.656, 0.35 ],
[ 0.675, 10.96 , 7.596, 1. ],
[ 0.569, 10.55 , 7.636, 0.4 ],
[ 0.737, 10.66 , 8.896, 0.32 ],
[ 0.664, 10.19 , 6.806, 0.41 ],
[ 0.837, 10.39 , 8.106, 0.33 ],
[ 0.74 , 10.76 , 7.776, 0.24 ],
[ 0.733, 10.88 , 7.656, 0.67 ],
[ 0.622, 10.88 , 7.376, 0.49 ],
[ 1.034, 10.91 , 7.866, 0.77 ],
[ 0.841, 11.34 , 8.406, 0.7 ]])
b11 = np.array([[ 1.227, 10.58 , 8.87 , 9.83 ],
[ 1.9 , 10.64 , 9.17 , 10.64 ],
[ 1.22 , 10.54 , 8.24 , 10.54 ],
[ 1.031, 10.78 , 7.85 , 9.53 ],
[ 1.617, 10.61 , 8.08 , 10.61 ],
[ 1.615, 10.45 , 8.3 , 10.45 ],
[ 1.037, 9.62 , 7.75 , 9.62 ],
[ 1.218, 10.71 , 8.37 , 10.71 ],
[ 1.371, 10.9 , 8.27 , 9.99 ],
[ 1.021, 10.96 , 8.35 , 9.29 ],
[ 1.45 , 10.74 , 8.77 , 10.74 ]])
cis11 = np.array([[ 0.73, 7.72, 10.3 ],
[ 0.34, 8.29, 11.23],
[ 0.34, 8.08, 10.65],
[ 0.34, 8.39, 11.02],
[ 0.35, 7.39, 10.54],
[ 0.34, 8.66, 11.14],
[ 0.38, 7.77, 10.68],
[ 0.35, 7.24, 10.95],
[ 0.85, 8.29, 11.07],
[ 0.7 , 8.15, 11.17],
[ 0.44, 7.79, 10.57],
[ 0.35, 7.59, 10.47],
[ 0.37, 8.58, 10.57],
[ 0.77, 8.49, 10.86],
[ 0.73, 8.03, 11.01],
[ 0.83, 8.07, 10.81],
[ 0.52, 8.01, 10.54],
[ 0.73, 7.41, 10.36],
[ 0.36, 8.07, 11.28],
[ 0.55, 7.75, 11.08],
[ 0.69, 7.91, 10.66],
[ 0.53, 8.22, 10.84],
[ 0.62, 7.35, 10.53],
[ 0.67, 7.73, 10.75],
[ 0.79, 8.24, 10.53],
[ 0.52, 8.38, 11.15],
[ 0.37, 7.7 , 10.48],
[ 0.55, 8.61, 11.2 ],
[ 0.63, 7.5 , 10.73],
[ 0.82, 7.82, 10.77],
[ 0.66, 8.19, 11.03],
[ 0.38, 8.25, 11.08]])
# Knud = np.array([[ 1.841, 46.58 , 8.55 , 6.56 , 10.69 ],
# [ 2.039, 47.15 , 8.68 , 24.63 , 10.81 ],
# [ 2.086, 46.76 , 8.67 , 10.22 , 10.8 ],
# [ 1.96 , 46.92 , 8.65 , 14.33 , 10.77 ],
# [ 2.064, 46.88 , 8.67 , 13.11 , 10.79 ],
# [ 1.86 , 46.72 , 8.6 , 9.15 , 10.73 ],
# [ 1.879, 46.61 , 8.62 , 7.21 , 10.75 ],
# [ 1.953, 46.89 , 8.67 , 13.74 , 10.79 ],
# [ 1.894, 46.96 , 8.6 , 15.88 , 10.73 ],
# [ 1.807, 46.66 , 8.64 , 7.92 , 10.77 ],
# [ 1.846, 46.59 , 8.63 , 6.78 , 10.76 ],
# [ 1.873, 46.85 , 8.58 , 12.38 , 10.72 ],
# [ 2.023, 46.55 , 8.58 , 6.19 , 10.72 ],
# [ 1.934, 46.74 , 8.64 , 9.77 , 10.77 ],
# [ 1.96 , 46.83 , 8.56 , 11.95 , 10.69 ],
# [ 1.943, 46.71 , 8.68 , 8.96 , 10.81 ],
# [ 1.966, 46.66 , 8.69 , 8.14 , 10.81 ],
# [ 2.145, 46.83 , 8.61 , 11.67 , 10.74 ],
# [ 1.984, 46.95 , 8.62 , 15.9 , 10.75 ],
# [ 1.914, 47.02 , 8.51 , 18.22 , 10.65 ],
# [ 2.004, 46.7 , 8.63 , 8.67 , 10.76 ]])
#%%
#==============================================================================
# 32 QSO in Ding2020
#==============================================================================
# from load_result import load_host_p, load_MBH, load_err
# from load_result import load_zs, load_n
ID = ['CDFS-1', 'CID543','CID70', 'SXDS-X735', 'CDFS-229', 'CDFS-321', 'CID1174',\
'CID216', 'CID237','CID3242','CID3570','CID452', 'CID454',\
'CID50','CID607','LID1273', 'LID1538','LID360','SXDS-X1136',\
'SXDS-X50', 'SXDS-X717','SXDS-X763','SXDS-X969','XID2138','XID2202',\
'XID2396', 'CID206', 'ECDFS-358', 'CDFS-724', 'CID597','CID1281','CID255']
MB_ID = ['CDFS-1', 'CID543','CID70', 'SXDS-X735', 'CDFS-229', 'ECDFS-321', 'CID1174',\
'CID216', 'CID237','CID3242','CID3570','CID452', 'CID454',\
'CID50','CID607','LID1273', 'LID1538','LID360','SXDS-X1136',\
'SXDS-X50', 'SXDS-X717','SXDS-X763','SXDS-X969','LID1820','LID1622',\
'LID1878', 'CID206', 'ECDFS-358', 'CDFS-724', 'CID597','CID1281','CID255']
zs = np.array([1.63 , 1.301, 1.667, 1.447, 1.326, 1.57 , 1.552, 1.567, 1.618,
1.532, 1.244, 1.407, 1.478, 1.239, 1.294, 1.617, 1.527, 1.579,
1.325, 1.411, 1.276, 1.412, 1.585, 1.551, 1.516, 1.6 , 1.483,
1.626, 1.337, 1.272, 1.445, 1.664])
host_n = np.array([4.8064, 0.4933, 3.616 , 2.0273, 0.4632, 2.2601, 1.8625, 6.1593,
4.7361, 6.1424, 0.7278, 1.4017, 0.6239, 3.2167, 3.418 , 1.2204,
2.8195, 0.7517, 1.9862, 1.6483, 5.58 , 2.3938, 2.0529, 1.2396,
3.9565, 0.7716, 3.0717, 1.6708, 1.6177, 1.7523, 3.1462, 4.2099])
Mstar = np.array([10.2864, 10.4345, 10.5518, 10.7849, 10.6283, 11.1009, 10.6314,
10.6296, 10.7542, 10.747 , 10.7134, 10.8578, 10.6966, 10.801 ,
10.7502, 10.8897, 10.7078, 10.6594, 10.4861, 10.5363, 10.5055,
9.6791, 10.6088, 10.4758, 10.7369, 10.6927, 10.4496, 10.7348,
9.7876, 10.4582, 9.9995, 10.6496])
MBs = np.array([7.8378, 8.1868, 8.2689, 8.4403, 8.1997, 8.457 , 7.9915, 7.8539,
8.2891, 8.4494, 7.8876, 8.2953, 8.3114, 8.4229, 8.5263, 8.5606,
8.4694, 8.4497, 8.3326, 7.9417, 8.2039, 8.4646, 8.1956, 8.5519,
8.4632, 8.455 , 8.5278, 8.1218, 8.0246, 7.8128, 7.7488, 8.2727])
Mstar_err = np.array([[-0.15, 0.19], [-0.15, 0.19], [-0.14, 0.16], [-0.14, 0.17],
[-0.11, 0.12], [-0.2 , 0.3 ], [-0.15, 0.18], [-0.1 , 0.1 ],
[-0.13, 0.14], [-0.15, 0.17], [-0.1 , 0.1 ], [-0.1 , 0.1 ],
[-0.1 , 0.1 ], [-0.21, 0.36], [-0.18, 0.25], [-0.12, 0.13],
[-0.12, 0.13], [-0.11, 0.11], [-0.13, 0.14], [-0.13, 0.15],
[-0.12, 0.12], [-0.24, 0.48], [-0.17, 0.23], [-0.12, 0.12],
[-0.14, 0.16], [-0.19, 0.28], [-0.25, 0.53], [-0.14, 0.16],
[-0.18, 0.25], [-0.18, 0.24], [-0.15, 0.18], [-0.15, 0.18]])
yerr_highz = [((m_ml*Mstar_err[:,0])**2+0.4**2)**0.5, ((m_ml*Mstar_err[:,1])**2+0.4**2)**0.5]
plt.errorbar(np.log10(1+ss[:,0]),ss[:,2]-(m_ml*ss[:,1]+b_ml),yerr=(0.4**2+(m_ml*0.2)**2)**0.5,fmt='^',color='darkseagreen',markersize=9)
plt.errorbar(np.log10(1+b11[:,0]),b11[:,2]-(m_ml*b11[:,1]+b_ml),yerr=(0.4**2+(m_ml*0.2)**2)**0.5,fmt='^',color='darkseagreen',markersize=9)
plt.errorbar(np.log10(1+cis11[:,0]),cis11[:,1]-(m_ml*cis11[:,2]+b_ml),yerr=(0.4**2+(m_ml*0.35)**2)**0.5,fmt='^',color='darkseagreen',markersize=9)
# plt.errorbar(np.log10(1+Knud[:,0]),Knud[:,2]-(m_ml*Knud[:,4]+b_ml),yerr=0,fmt='o',color='blue',markersize=9)
ding20_sample = np.log10(1+zs),MBs-(m_ml*Mstar+b_ml)
plt.scatter(np.log10(1+zs),MBs-(m_ml*Mstar+b_ml),c='lightsalmon',
s=420,marker=".",zorder=300, vmin=0.3, vmax=5, edgecolors='k', alpha = 0.8)
plt.errorbar(np.log10(1+zs),MBs-(m_ml*Mstar+b_ml),
yerr= yerr_highz,
color='lightsalmon', fmt='.',markersize=1)
# #####fit the evolution##########
# ################################
z_ss, y_ss = ss[:,0], ss[:,2]-(m_ml*ss[:,1]+b_ml)
z_b11, y_b11 = b11[:,0], b11[:,2]-(m_ml*b11[:,1]+b_ml)
z_cis, y_cis = cis11[:,0], cis11[:,1]-(m_ml*cis11[:,2]+b_ml)
z_cosmos, y_cosmos = zs, MBs-(m_ml*Mstar+b_ml)
yerr_hz = (yerr_highz[0]+ yerr_highz[1])/2
# z=np.concatenate((z_ss, z_b11, z_cis, z_cosmos),axis=0)
# y=np.concatenate((y_ss, y_b11, y_cis, y_cosmos),axis=0)
# yerr_imd= np.zeros(len(z_ss)+len(z_b11))+(0.4**2+(m_ml*0.2)**2)**0.5 # the error for the fitting
# yerr_cis = np.zeros(len(z_cis)) + (0.4**2+(m_ml*0.35)**2)**0.5
# yerr = np.concatenate((yerr_imd,yerr_cis, yerr_hz),axis=0)
#if consider 32 AGN only:
z=z_cosmos
y=y_cosmos
yerr = yerr_hz
yerr = np.sqrt(yerr**2 + sint_ml**2)
#### fit with emcee ###############
x=np.log10(1+z)
y=y
def lnlike(theta, x, y, yerr):
b, sint= theta
model = b*x
sigma2 = (yerr**2 + sint**2)
if sint>=0 :
return -0.5*(np.sum((y-model)**2/sigma2)+np.sum(np.log(2*np.pi*sigma2)))
else:
return -np.inf
import scipy.optimize as op
nll = lambda *args: -lnlike(*args)
result = op.minimize(nll, [1.8, 0.3], args=(x, y, yerr))
b_ml_offset,_= result["x"]
xp = np.array([5, 13])
def lnprior(theta):
b, sint = theta
if -10 < b < 10.0 and 0 < sint < 10:
return 0.0
return -np.inf
def lnprob(theta, x, y, yerr):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr)
ndim, nwalkers = 2, 100
pos = [result["x"] + 1e-4*np.random.randn(ndim) for i in range(nwalkers)]
import emcee
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(x, y, yerr))
sampler.run_mcmc(pos, 500)
samples = sampler.chain[:, 50:, :].reshape((-1, ndim))
b_ml_offset, _ =np.percentile(samples, 50,axis=0)
#print "lnlike=",lnlike(theta=[b_ml_offset, sint_mid],x=x, y=y, yerr=yerr)
xl = np.linspace(0, 5, 100)
plt.plot(xl, xl*0+xl*b_ml_offset, color="red", linewidth=4.0,zorder=0)
plt.plot(xl, xl*0, color="black", linewidth=2.0,zorder=0)
def find_n(array,value): #get the corresponding b for a given m
idx= (np.abs(array-value)).argmin()
return array[idx]
b=np.percentile(samples,50,axis=0)[0]
#print samples[:,1][samples[:,0]==find_n(samples[:,0],m)]
for i in range(100):
posi=np.random.uniform(16,84)
b=np.percentile(samples,posi,axis=0)[0]
#print b
plt.plot(xl, xl*0+xl*b, color="pink", alpha=0.1,linewidth=7.0,zorder=-1+np.random.normal(0,0.02))
# value=round(b_ml_offset,2)
#####################
value,sig=round(b_ml_offset,2),round((np.percentile(samples,84,axis=0)[0]-np.percentile(samples,16,axis=0)[0])/2,2)
print(value,sig)
#%%
f = open("table_summary.txt","r")
string = f.read()
lines = string.split('\n') # Split in to \n
my_t1 = np.loadtxt('./table_sersic_Re_n.txt', dtype='str')
my_mbh_tb = np.loadtxt('./table_cal_MBH.txt', dtype='str')
my_CAS = np.loadtxt('./table_asy_concentration.txt', dtype='str')
result = []
my_bh_type = 2 # Halpha 1, Hbeta 2
for line in lines[1:-1]:
results = line.split()
ID, z, smass, Mbh, magG, magR, magI, magZ, magY = results
sersic_n = float(my_t1[ID == my_t1[:,0]][0][6])
asy = float(my_CAS[ID == my_CAS[:,0]][0][5]) #I band asy
conc = float(my_CAS[ID == my_CAS[:,0]][0][6]) #I band conc
my_mbh = my_mbh_tb[ID == my_mbh_tb[:,0]][0][my_bh_type]
if float(smass) >0:
result.append([float(z), float(smass), float(my_mbh), sersic_n, asy, conc])
result = np.array(result)
inf_z, inf_Mstar,inf_MBHs,inf_n, inf_asy, inf_conc =result[:,0], result[:,1], result[:,2], result[:,3], result[:,4], result[:,5]
inf_z = inf_z
inf_Mstar = inf_Mstar
inf_MBHs = inf_MBHs
inf_MBHs_err = inf_MBHs*0 + 0.5
yerr_highz = ((m_ml*np.ones_like(inf_Mstar)*0.2)**2+0.4**2)**0.5
# plt.errorbar(np.log10(1+inf_z),inf_MBHs-(m_ml*inf_Mstar+b_ml),
# yerr= yerr_highz,fmt='^',color='gray',markersize=4, )
inf_x=np.log10(1+inf_z)
inf_y=inf_MBHs-(m_ml*inf_Mstar+b_ml)
inf_x = inf_x
yerr_highz = yerr_highz
inf_y = inf_y
# plt.scatter(inf_x,inf_y,c='blue',
# s=220, marker=".",zorder=-1, edgecolors='k', alpha = 0.4)
bool_ = (inf_n>0) * (inf_MBHs>0)* (inf_asy<0.07)
import seaborn as sns
# sns.kdeplot(inf_x[bool_],inf_y[bool_], linewidths = 2, color = 'blue',
# fill=True, alpha=0.6, zorder = 1)
# plt.scatter(inf_x[bool_],inf_y[bool_],c=inf_n[bool_],
# s=220, marker=".",zorder=100, alpha = 0.5)
# cbar = plt.colorbar()
# plt.clim(0, 4)
# cbar.ax.tick_params(labelsize=20)
# cbar.ax.set_ylabel('sersic n', rotation=270, fontsize = 25, labelpad=25)
# print(np.mean(inf_y[bool_]))
# plt.scatter(inf_x[bool_],inf_y[bool_],c=inf_asy[bool_],
# s=220, marker=".",zorder=100, alpha = 0.5)
# cbar = plt.colorbar()
# plt.clim(0, 0.4)
# cbar.ax.tick_params(labelsize=20)
# cbar.ax.set_ylabel('asy', rotation=270, fontsize = 25, labelpad=25)
plt.scatter(inf_x[bool_],inf_y[bool_],c=inf_conc[bool_],
s=220, marker=".",zorder=100, alpha = 0.5)
cbar = plt.colorbar()
plt.clim(1, 5)
cbar.ax.tick_params(labelsize=20)
cbar.ax.set_ylabel('concentration', rotation=270, fontsize = 25, labelpad=25)
#%% Where loop ends
plt.xlabel(r"log(1+z)",fontsize=45)
ding_sample = mlines.Line2D([], [], color='lightsalmon', ls='', marker='.', markersize=20,markeredgecolor='k')
plt.xticks(np.arange(-0.1,1,0.1))
xl=-0.01
xh=np.log10(1+2.5)
if style ==0:
ax.set_yscale('log')
plt.axis([xl,xh,0,0.5])
plt.ylabel(r"M$_{\rm BH}$/M$_*$",fontsize=45)
if style ==1:
plt.yticks(np.arange(-5.5,6,0.5))
plt.axis([xl,xh,-2.0,3.5])
plt.ylim([-3.5,3.5])
plt.ylabel(r"$\Delta$logM$_{\rm BH}$ (vs M$_*$)",fontsize=45)
plt.grid()
plt.tick_params(labelsize=35)
ax2=ax.twiny()
tticks=np.array([10**xl-1,0.5,1,1.5,2,10**xh-1])
ax2.set_xticks([np.log10(t+1) for t in tticks]) # for the entire scale (same log10(1+z) units as the main axis)
ax2.set_xticklabels([0,0.5,1,1.5,2,2.5]) # 0 actually corresponds to 10**-0.01-1
ax2.set_xlabel('z',fontsize=45)
plt.tick_params(labelsize=35)
SS13 = mlines.Line2D([], [], color='darkseagreen', ls='', marker='^', markersize=13)
plt.legend([Bkc, Hkc, SS13, ding_sample],[
'Local by Bennert+11',\
"Local by H&R",
"Intermediate redshift AGNs",
"$1.2<z<1.7$ AGNs by D20"
],scatterpoints=1,numpoints=1,loc=3,prop={'size':22,'family': 'Arial'},ncol=2,handletextpad=0)
# plt.savefig("MBH-Mstar_vz.pdf")
plt.show()
|
import torch
from transformers import T5Model, T5Tokenizer
# from transformers import T5Stack, T5Model, T5Tokenizer
from fastNLP import seq_len_to_mask
from fastNLP.modules import Seq2SeqEncoder, Seq2SeqDecoder, State
import torch.nn.functional as F
from fastNLP.models import Seq2SeqModel
from torch import nn
import logging
from collections import namedtuple
from .adapter import Adapter, AdapterT5Block
import random
EOS_ID = 1
PAD_ID = 0
class MyAdapterT5Stack(nn.Module):
def __init__(self, model, adapter_list, model_parallel, adapter_size):
super().__init__()
self.model = model
for p in self.model.t5_encoder.parameters():
p.requires_grad = False
for i, block in enumerate(self.model.t5_encoder.block):
if i in adapter_list:
self.model.t5_encoder.block[i] = AdapterT5Block(
block,
project_hidden_size=self.model.t5_encoder.embed_tokens.weight.shape[1],
adapter_size=adapter_size,
model_parallel=model_parallel,
)
self.embed_tokens = self.model.t5_encoder.embed_tokens
def forward(self, *args, **kwargs):
return self.model(*args, **kwargs)
class AdapterT5Stack(nn.Module):
def __init__(self, model, adapter_list, model_parallel, adapter_size):
super().__init__()
self.model = model
self.adapter_num = len(adapter_list)
self.adapter_skip_layers = 6
self.adapter_list = adapter_list
if model_parallel:
self.parallelize()
for p in model.parameters():
p.requires_grad = False
self.adapter = nn.ModuleList([Adapter({
'project_hidden_size': model.embed_tokens.weight.shape[1],
'adapter_size': adapter_size,
'num_hidden_layers': 2,
'adapter_initializer_range': 0.0002,
'device': next(self.model.block[i].parameters()).device,
'is_decoder': self.model.is_decoder
}) for i in self.adapter_list])
self.embed_tokens = self.model.embed_tokens
def parallelize(self):
self.model.parallelize()
def forward(self, encoder_hidden_states=None, *args, **kwargs):
kwargs['output_hidden_states'] = True
outputs = self.model(*args, **kwargs)
sequence_output = outputs['last_hidden_state']
hidden_states = outputs['hidden_states']
num = len(hidden_states)
hidden_states_last = torch.zeros(sequence_output.size()).to(sequence_output.device)
adapter_hidden_states = []
adapter_hidden_states_count = 0
for i, adapter_module in enumerate(self.adapter):
hidden_states_last = hidden_states_last.to(hidden_states[self.adapter_list[i]].device)
fusion_state = hidden_states[self.adapter_list[i]] + hidden_states_last
hidden_states_last = adapter_module(fusion_state, encoder_hidden_states=encoder_hidden_states)
adapter_hidden_states.append(hidden_states_last)
adapter_hidden_states_count += 1
if self.adapter_skip_layers >= 1: # if adapter_skip_layers>=1, skip connection
if adapter_hidden_states_count % self.adapter_skip_layers == 0:
hidden_states_last = hidden_states_last + adapter_hidden_states[int(adapter_hidden_states_count/self.adapter_skip_layers)]
outputs['last_hidden_state'] = hidden_states_last
return outputs
class DummyFT5Decoder(Seq2SeqDecoder):
def __init__(self, decoder):
super().__init__()
# FIXME: variable name t5_encoder is for compatibility, change it later.
self.t5_encoder = decoder
self.embed_tokens = self.t5_encoder.embed_tokens
def forward(self, *args, **kwargs):
return self.t5_encoder(*args, **kwargs)
class PromptFT5Encoder(Seq2SeqEncoder):
def __init__(self, encoder, n, first_extra_token_idx=10):
super().__init__()
        assert n + first_extra_token_idx <= 100, "T5 extra tokens must be <= 100"
self.n = n
# assert isinstance(encoder, T5Stack)
self.t5_encoder = encoder
original_embed = encoder.embed_tokens.weight
self.soft_prompt_embed = nn.Embedding(n, original_embed.size(1))
self.soft_prompt_embed.weight.data = encoder.embed_tokens.weight[32000+first_extra_token_idx:32000+first_extra_token_idx+n].clone().detach().to(original_embed.device)
self.soft_prompt_embed.requires_grad_(True)
# self.register_buffer('soft_prompt_embed', soft_prompt_embed)
def forward(self, src_tokens, src_seq_len):
embeddings = self.t5_encoder.embed_tokens(src_tokens)
prompt_embeddings = self.soft_prompt_embed.weight.repeat((src_tokens.size(0), 1, 1))
embeddings = torch.cat([prompt_embeddings, embeddings], dim=1)
mask = seq_len_to_mask(src_seq_len+self.n, max_len=embeddings.size(1))
dict = self.t5_encoder(inputs_embeds=embeddings, attention_mask=mask, return_dict=True,
output_hidden_states=True)
encoder_outputs = dict.last_hidden_state[:, self.n:, :]
hidden_states = tuple(x[:, self.n:, :] for x in dict.hidden_states)
return encoder_outputs, mask[:, self.n:], hidden_states
def parallelize(self):
self.t5_encoder.parallelize()
class PromptFT5Decoder(PromptFT5Encoder):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.embed_tokens = self.t5_encoder.embed_tokens
self.decoder_soft_prompt_embed = nn.Embedding(self.n, self.embed_tokens.weight.data.size(1))
self.decoder_soft_prompt_embed.weight.data = self.soft_prompt_embed.weight.data.clone().detach().to(self.embed_tokens.weight.data.device)
self.decoder_soft_prompt_embed.weight.data += torch.randn_like(self.decoder_soft_prompt_embed.weight.data)
self.decoder_soft_prompt_embed.requires_grad_(True)
def forward(self, input_ids, *args, **kwargs):
embeddings = self.t5_encoder.embed_tokens(input_ids)
prompt_embeddings = self.decoder_soft_prompt_embed.weight.repeat((input_ids.size(0), 1, 1))
embeddings = torch.cat([prompt_embeddings, embeddings], dim=1)
# FIXME: Need this line?
# mask = seq_len_to_mask(src_seq_len+self.n, max_len=embeddings.size(1))
dict = self.t5_encoder(inputs_embeds=embeddings, *args, **kwargs)
if dict.hidden_states:
dict.hidden_states = tuple(x[:, self.n:, :] for x in dict.hidden_states)
dict.last_hidden_state = dict.last_hidden_state[:, self.n:, :]
return dict
class FT5Encoder(Seq2SeqEncoder):
def __init__(self, encoder):
super().__init__()
# assert isinstance(encoder, T5Stack)
self.t5_encoder = encoder
def forward(self, src_tokens, src_seq_len):
mask = seq_len_to_mask(src_seq_len, max_len=src_tokens.size(1))
dict = self.t5_encoder(input_ids=src_tokens, attention_mask=mask, return_dict=True,
output_hidden_states=True)
encoder_outputs = dict.last_hidden_state
hidden_states = dict.hidden_states
return encoder_outputs, mask, hidden_states
def parallelize(self):
self.t5_encoder.parallelize()
class FT5Decoder(Seq2SeqDecoder):
def __init__(self, decoder, pad_token_id, label_ids, use_encoder_mlp, truncate_decoded=True):
super().__init__()
# assert isinstance(decoder, T5Stack)
self.decoder = decoder
self.pad_token_id = pad_token_id
self.label_start_id = min(label_ids)
self.label_end_id = max(label_ids)+1
mapping = torch.LongTensor([EOS_ID]+label_ids) # T5 has no bos; eos is 1
self.register_buffer('mapping', mapping)
        self.src_start_index = len(mapping)  # add one more
self.truncate_decoded = truncate_decoded
if use_encoder_mlp:
hidden_size = decoder.embed_tokens.weight.size(1)
self.encoder_mlp = nn.Sequential(nn.Linear(hidden_size, hidden_size),
nn.Dropout(0.3),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size))
logging.warning("FIXME: This version of T5Decoder forces `first` to be None.")
def parallelize(self):
self.decoder.parallelize()
def forward(self, tokens, state):
encoder_outputs = state.encoder_output
first = state.first
        # the zeros after the tokens are all padding, because 1 is eos, as specified in the pipe
cumsum = tokens.eq(PAD_ID).flip(dims=[1]).cumsum(dim=-1)
tgt_pad_mask = cumsum.flip(dims=[1]).ne(cumsum[:, -1:])
        # map the input tokens
        mapping_token_mask = tokens.lt(self.src_start_index)  # positions where this is 1 take their index from mapping
mapped_tokens = tokens.masked_fill(tokens.ge(self.src_start_index), 0)
self.mapping = self.mapping.to(mapped_tokens.device)
tag_mapped_tokens = self.mapping[mapped_tokens]
src_tokens_index = tokens - self.src_start_index # bsz x num_src_token
src_tokens_index = src_tokens_index.masked_fill(src_tokens_index.lt(0), 0)
src_tokens = state.src_tokens
first = None
if first is not None:
src_tokens = src_tokens.gather(index=first, dim=1)
word_mapped_tokens = src_tokens.gather(index=src_tokens_index, dim=1)
tokens = torch.where(mapping_token_mask, tag_mapped_tokens, word_mapped_tokens)
tokens = tokens.masked_fill(tgt_pad_mask, self.pad_token_id)
if self.training:
tokens = self._shift_right(tokens)
if self.truncate_decoded and random.random() < 0.05:
T = tokens.size(1)
tokens = tokens[:, :random.randint(1, T)]
dict = self.decoder(input_ids=tokens,
encoder_hidden_states=encoder_outputs,
return_dict=True)
else:
if state.past_key_values is None:
tokens = torch.full(tokens.shape, PAD_ID, device=tokens.device)
past_key_values = state.past_key_values
dict = self.decoder(input_ids=tokens,
encoder_hidden_states=encoder_outputs,
past_key_values=past_key_values,
use_cache=True,
return_dict=True)
hidden_state = dict.last_hidden_state # bsz x max_len x hidden_size
if not self.training:
state.past_key_values = dict.past_key_values
logits = hidden_state.new_full((hidden_state.size(0), hidden_state.size(1), self.src_start_index+src_tokens.size(-1)),
fill_value=-1e24)
        # the first thing computed is:
# eos_scores = F.linear(hidden_state, self.decoder.embed_tokens.weight[EOS_ID:EOS_ID+1]) # bsz x max_len x 1
hidden_state = hidden_state.to(self.decoder.embed_tokens.weight.device)
tag_scores = F.linear(hidden_state, self.decoder.embed_tokens.weight[self.label_start_id:self.label_end_id]) # bsz x max_len x num_class
eos_scores = F.linear(hidden_state, self.decoder.embed_tokens.weight[self.label_end_id:self.label_end_id+1]) # bsz x max_len x 1
# bsz x max_word_len x hidden_size
src_outputs = state.encoder_output
src_outputs = src_outputs.to(self.decoder.embed_tokens.weight.device)
if first is not None:
            mask = first.eq(0)  # bsz x 1 x max_word_len, positions that are 1 are padding
src_outputs = src_outputs.gather(index=first.unsqueeze(2).repeat(1, 1, src_outputs.size(-1)), dim=1)
else:
mask = state.encoder_mask.eq(0)
if hasattr(self, 'encoder_mlp'):
self.encoder_mlp.to(src_outputs.device)
src_outputs = self.encoder_mlp(src_outputs)
# If prompt is added, remove it for decoder
if src_tokens.shape != mask.shape:
mask = mask[:, 1:]
src_outputs = src_outputs[:, 1:]
mask = mask.unsqueeze(1).__or__(src_tokens.eq(EOS_ID).cumsum(dim=1).bool().unsqueeze(1))
word_scores = torch.einsum('blh,bnh->bln', hidden_state, src_outputs) # bsz x max_len x max_word_len
word_scores = word_scores.masked_fill(mask, -1e32)
logits[:, :, :1] = eos_scores
logits[:, :, 1:self.src_start_index] = tag_scores
logits[:, :, self.src_start_index:] = word_scores
return logits
def decode(self, tokens, state):
return self(tokens, state)[:, -1]
def _shift_right(self, input_ids):
assert self.pad_token_id is not None
decoder_start_token_id = self.pad_token_id
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
shifted_input_ids.masked_fill_(shifted_input_ids == -100, self.pad_token_id)
assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only positive values"
return shifted_input_ids
def fix_loaded_state_dict(trained_state_dict):
for k, v in trained_state_dict.items():
if k == 'seq2seq_model.decoder.mapping':
yield 'shared.weight', trained_state_dict['seq2seq_model.encoder.t5_encoder.embed_tokens.weight']
elif 'encoder' in k:
i = k.rfind('encoder')
yield k[i:], v
elif 'decoder' in k:
i = k.rfind('decoder')
yield k[i:], v
class T5Seq2SeqModel(Seq2SeqModel):
@classmethod
def build_model(cls, bart_model, tokenizer, label_ids, decoder_type=None,
use_encoder_mlp=False, num_prompt_tokens=False, checkpoint_path=None,
model_parallel=False, use_adapter=False, adapter_size=-1):
model = T5Model.from_pretrained(bart_model)
if model_parallel:
model.parallelize()
if num_prompt_tokens > 0:
encoder = PromptFT5Encoder(model.encoder, n=num_prompt_tokens)
# decoder = PromptFT5Decoder(model.decoder, n=num_prompt_tokens)
decoder = DummyFT5Decoder(model.decoder)
else:
encoder = FT5Encoder(model.encoder)
decoder = DummyFT5Decoder(model.decoder)
enc = encoder.t5_encoder
num_tokens, _ = enc.embed_tokens.weight.shape
# FIXME: Speed up T5: T5's vocab of 32128 has no need to resize_token_embeddings here
# model.resize_token_embeddings(len(tokenizer.unique_no_split_tokens)+num_tokens)
if use_adapter:
N = len(enc.block)
adapter_list = [0, N // 3 - 1, N // 3 * 2 - 1, N-1]
encoder = MyAdapterT5Stack(encoder, adapter_list, model_parallel, adapter_size)
decoder = MyAdapterT5Stack(decoder, adapter_list, model_parallel, adapter_size)
else:
encoder = encoder
decoder = decoder
__normalize = lambda x: (x - x.mean()) / x.std() * 0.4356 - 0.0094
_tokenizer = T5Tokenizer.from_pretrained(bart_model, local_files_only=True)
for token in tokenizer.unique_no_split_tokens:
            if token[:2] == '<<':  # special token
index = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(token))
if len(index)>1:
raise RuntimeError(f"{token} wrong split")
else:
index = index[0]
                if not index >= num_tokens:
                    logging.warning(f"special token {token} has index {index}, which is smaller than {num_tokens}; this is possible for T5 though. See https://github.com/huggingface/transformers/issues/4875")
indexes = _tokenizer.convert_tokens_to_ids(_tokenizer.tokenize(token[2:-2]))
embed = enc.embed_tokens.weight.data[indexes[0]]
for i in indexes[1:]:
embed += model.decoder.embed_tokens.weight.data[i]
embed /= len(indexes)
model.decoder.embed_tokens.weight.data[index] = __normalize(embed)
if decoder_type is None:
decoder = FT5Decoder(decoder, pad_token_id=tokenizer.pad_token_id, label_ids=label_ids, use_encoder_mlp=use_encoder_mlp)
else:
raise RuntimeError("Unsupported feature.")
decoder.decoder.embed_tokens.weight.data[decoder.label_end_id] = __normalize(decoder.decoder.embed_tokens.weight.data[EOS_ID])
model = cls(encoder=encoder, decoder=decoder)
if checkpoint_path is not None:
trained = torch.load(checkpoint_path)
if hasattr(trained, 'state_dict'):
trained = trained.state_dict()
if trained.keys() != model.state_dict().keys():
trained = {k[len('seq2seq_model.'):]: v \
for k, v in trained.items()}
if trained.keys() != model.state_dict().keys():
import pdb; pdb.set_trace()
trained = dict(fix_loaded_state_dict(trained))
model.load_state_dict(trained)
logging.info(f"Loading {checkpoint_path} succeeded.")
return model
def prepare_state(self, src_tokens, src_seq_len=None, first=None, tgt_seq_len=None):
encoder_outputs, encoder_mask, hidden_states = self.encoder(src_tokens, src_seq_len)
src_embed_outputs = hidden_states[0]
state = BartState(encoder_outputs, encoder_mask, src_tokens, first, src_embed_outputs)
# setattr(state, 'tgt_seq_len', tgt_seq_len)
return state
def forward(self, src_tokens, tgt_tokens, src_seq_len, tgt_seq_len, first):
"""
        :param torch.LongTensor src_tokens: source tokens
        :param torch.LongTensor tgt_tokens: target tokens
        :param torch.LongTensor first: marks each word, bsz x max_word_len
        :param torch.LongTensor src_seq_len: length of src
        :param torch.LongTensor tgt_seq_len: length of target, unused by default
        :return: {'pred': torch.Tensor}, where pred has shape bsz x max_len x vocab_size
"""
state = self.prepare_state(src_tokens, src_seq_len, first, tgt_seq_len)
decoder_output = self.decoder(tgt_tokens, state)
if isinstance(decoder_output, torch.Tensor):
return {'pred': decoder_output}
elif isinstance(decoder_output, (tuple, list)):
return {'pred': decoder_output[0]}
else:
raise TypeError(f"Unsupported return type from Decoder:{type(self.decoder)}")
    def parallelize(self):
        self.encoder.parallelize()
        self.decoder.parallelize()
class BartState(State):
def __init__(self, encoder_output, encoder_mask, src_tokens, first, src_embed_outputs):
super().__init__(encoder_output, encoder_mask)
self.past_key_values = None
self.src_tokens = src_tokens
self.first = first
self.src_embed_outputs = src_embed_outputs
def reorder_state(self, indices: torch.LongTensor):
print("What? This function can be called? I have not tested it yet!")
import pdb; pdb.set_trace()
super().reorder_state(indices)
self.src_tokens = self._reorder_state(self.src_tokens, indices)
if self.first is not None:
self.first = self._reorder_state(self.first, indices)
self.src_embed_outputs = self._reorder_state(self.src_embed_outputs, indices)
if self.past_key_values is not None:
new = []
for layer in self.past_key_values:
new_layer = {}
for key1 in list(layer.keys()):
new_layer_ = {}
for key2 in list(layer[key1].keys()):
if layer[key1][key2] is not None:
layer[key1][key2] = self._reorder_state(layer[key1][key2], indices)
# print(key1, key2, layer[key1][key2].shape)
new_layer_[key2] = layer[key1][key2]
new_layer[key1] = new_layer_
new.append(new_layer)
self.past_key_values = new
# class T5Seq2SeqModel(OldT5Seq2SeqModel):
# def __init__(self, encoder, decoder):
# super().__init__(encoder, decoder)
# special_token = 32127
# special_token = encoder.t5_encoder.embed_tokens.weight[-1].clone().detach()
# special_token.requires_grad_(True)
# self.register_buffer('special_token', special_token)
# def forward(self, src_tokens, tgt_tokens, src_seq_len, tgt_seq_len, first):
# with torch.no_grad():
# # assert self.special_token not in src_tokens
# B = src_tokens.size(0)
# special_tokens = repeat(self.special_token, f'd -> {B} d')
# src_tokens = torch.cat((special_tokens, src_tokens), dim=1)
# src_seq_len += 1
# first += (first > 0)
# # FIXME: first at padding
# return super().forward(src_tokens, tgt_tokens, src_seq_len, tgt_seq_len, first)
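# Illustrative sketch only: how build_model above is typically called. The model
# name and arguments here are placeholders; in this project the tokenizer and
# label_ids come from the data pipe, which also registers the '<<...>>' label
# tokens on the tokenizer.
def _example_build_t5_seq2seq(tokenizer, label_ids):
    model = T5Seq2SeqModel.build_model(
        't5-base', tokenizer, label_ids=label_ids,
        use_encoder_mlp=True, num_prompt_tokens=0,
    )
    return model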
|
"""Word completion for GNU readline.
The completer completes keywords, built-ins and globals in a selectable
namespace (which defaults to __main__); when completing NAME.NAME..., it
evaluates (!) the expression up to the last dot and completes its attributes.
It's very cool to do "import sys" type "sys.", hit the completion key (twice),
and see the list of names defined by the sys module!
Tip: to use the tab key as the completion key, call
readline.parse_and_bind("tab: complete")
Notes:
- Exceptions raised by the completer function are *ignored* (and generally cause
the completion to fail). This is a feature -- since readline sets the tty
device in raw (or cbreak) mode, printing a traceback wouldn't work well
without some complicated hoopla to save, reset and restore the tty state.
- The evaluation of the NAME.NAME... form may cause arbitrary application
defined code to be executed if an object with a __getattr__ hook is found.
Since it is the responsibility of the application (or the user) to enable this
feature, I consider this an acceptable risk. More complicated expressions
(e.g. function calls or indexing operations) are *not* evaluated.
- GNU readline is also used by the built-in functions input() and
raw_input(), and thus these also benefit/suffer from the completer
features. Clearly an interactive application can benefit by
specifying its own completer function and using raw_input() for all
its input.
- When the original stdin is not a tty device, GNU readline is never
used, and this module (and the readline module) are silently inactive.
"""
import __builtin__
import __main__
__all__ = ["Completer"]
class Completer:
def __init__(self, namespace = None):
"""Create a new completer for the command line.
Completer([namespace]) -> completer instance.
If unspecified, the default namespace where completions are performed
is __main__ (technically, __main__.__dict__). Namespaces should be
given as dictionaries.
Completer instances should be used as the completion mechanism of
readline via the set_completer() call:
readline.set_completer(Completer(my_namespace).complete)
"""
if namespace and not isinstance(namespace, dict):
raise TypeError,'namespace must be a dictionary'
# Don't bind to namespace quite yet, but flag whether the user wants a
# specific namespace or to use __main__.__dict__. This will allow us
# to bind to __main__.__dict__ at completion time, not now.
if namespace is None:
self.use_main_ns = 1
else:
self.use_main_ns = 0
self.namespace = namespace
def complete(self, text, state):
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if self.use_main_ns:
self.namespace = __main__.__dict__
if state == 0:
if "." in text:
self.matches = self.attr_matches(text)
else:
self.matches = self.global_matches(text)
try:
return self.matches[state]
except IndexError:
return None
def _callable_postfix(self, val, word):
if hasattr(val, '__call__'):
word = word + "("
return word
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace that match.
"""
import keyword
matches = []
seen = {"__builtins__"}
n = len(text)
for word in keyword.kwlist:
if word[:n] == text:
seen.add(word)
matches.append(word)
for nspace in [self.namespace, __builtin__.__dict__]:
for word, val in nspace.items():
if word[:n] == text and word not in seen:
seen.add(word)
matches.append(self._callable_postfix(val, word))
return matches
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluable in self.namespace, it will be evaluated and its attributes
(as revealed by dir()) are used as possible completions. (For class
instances, class members are also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
import re
m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
if not m:
return []
expr, attr = m.group(1, 3)
try:
thisobject = eval(expr, self.namespace)
except Exception:
return []
# get the content of the object, except __builtins__
words = set(dir(thisobject))
words.discard("__builtins__")
if hasattr(thisobject, '__class__'):
words.add('__class__')
words.update(get_class_members(thisobject.__class__))
matches = []
n = len(attr)
for word in words:
if word[:n] == attr:
try:
val = getattr(thisobject, word)
except Exception:
continue # Exclude properties that are not set
word = self._callable_postfix(val, "%s.%s" % (expr, word))
matches.append(word)
matches.sort()
return matches
def get_class_members(klass):
ret = dir(klass)
if hasattr(klass,'__bases__'):
for base in klass.__bases__:
ret = ret + get_class_members(base)
return ret
try:
import readline
except ImportError:
pass
else:
readline.set_completer(Completer().complete)
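# Illustrative usage (kept commented so importing this module stays side-effect
# free beyond the set_completer call above): bind the tab key and use a custom
# namespace, as suggested in the module docstring.
#
#   import readline
#   readline.set_completer(Completer(my_namespace).complete)
#   readline.parse_and_bind("tab: complete")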
|
import urllib.parse
import RawArchiver.ModuleBase
class ElscioneRawModule(RawArchiver.ModuleBase.RawScraperModuleBase):
module_name = "MiscRawModule"
target_urls = [
'https://server.elscione.com/',
]
target_tlds = [urllib.parse.urlparse(tmp).netloc for tmp in target_urls]
badwords = [
'server.elscione.com/Music/',
'server.elscione.com/Anime/',
]
@classmethod
def cares_about_url(cls, url):
if any([badword in url for badword in cls.badwords]):
return False
if RawArchiver.ModuleBase.duplicate_path_fragments(url):
return False
return urllib.parse.urlparse(url).netloc in cls.target_tlds
@classmethod
def get_start_urls(cls):
return [tmp for tmp in cls.target_urls]
@staticmethod
def get_max_active_jobs():
return 5
|
import time
def complex_computation(a,b):
time.sleep(.5)
return a+b
cache = {}
def cached_computation(a,b):
if (a,b) in cache:
return cache[(a,b)]
else:
result = complex_computation(a,b)
cache[(a,b)] = result
return result
def top_arts():
    key = 'top'
    if key in cache:
        return cache[key]
    else:
        cache[key] = ['s', 's']
        return cache[key]
print top_arts()
"""start_time = time.time()
print cached_computation(5, 3)
print "the first computation took %f seconds" % (time.time() - start_time)
start_time2 = time.time()
print cached_computation(5, 3)
print "the second computation took %f seconds" % (time.time() - start_time2)
"""
|
import os
import unittest
from robot_skills.mockbot import Mockbot
from robot_smach_states.human_interaction import ShowImage
class TestShowImage(unittest.TestCase):
def test_show_image(self):
robot = Mockbot()
# Test if state returns succeeded if file exists
with open("/tmp/foo", "w") as f:
f.write("bar")
state = ShowImage(robot, "/tmp/foo")
self.assertEqual(state.execute(), "succeeded")
os.remove("/tmp/foo")
# Test if state returns failed if file does not exist
state = ShowImage(robot, "/tmp/bar")
self.assertEqual(state.execute(), "failed")
if __name__ == '__main__':
unittest.main()
|
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from model_utils.models import TimeStampedModel
# from questions.models import Question
from profiles.models import Profile
class QuestionList(TimeStampedModel):
name = models.CharField(max_length=20)
slug = models.SlugField(unique=True)
user = models.ForeignKey(User)
num_questions = models.SmallIntegerField(default=0)
def __str__(self):
return self.name
class QuestionListItem(models.Model):
list = models.ForeignKey(QuestionList)
question = models.ForeignKey('questions.Question')
created = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        list_ = self.list if self.list else ""
        question_slug = self.question.slug if self.question else ""
        return "%s %s %s" % (self.pk, question_slug, list_)
|
import numpy as np
from .base import Base
class Step(Base):
def __init__(self, return_test=True, scale_X=True, scale_y=True,
mean_normalize_y=False, test_train_ratio=2, s_to_n_ratio=None,
noise_variance=10**-4, scaler='std', Min=-1, Max=1, num_low=25, num_high=25,
gap=-0.1, random_state=0, backend=None):
synthetic = True
file_name = None
X = np.vstack((np.linspace(Min, -gap/2.0, num_low)[:, np.newaxis],
np.linspace(gap/2.0, Max, num_high)[:, np.newaxis]))
y = np.vstack((np.zeros((num_low, 1)), np.ones((num_high, 1))))
f = lambda x: y
X_names = ['X']
y_names = ['y']
super().__init__(X=X,
y=y,
f=f,
file_name=file_name,
X_names=X_names,
y_names=y_names,
return_test=return_test,
scale_X=scale_X,
scale_y=scale_y,
mean_normalize_y=mean_normalize_y,
test_train_ratio=test_train_ratio,
s_to_n_ratio=s_to_n_ratio,
noise_variance=noise_variance,
scaler=scaler,
backend=backend,
random_state=random_state,
synthetic=synthetic)
|
#AoC2015 day3 part1
def go (c, a):
if c == '^': a[0]+=1
if c == 'v': a[0]-=1
if c == '>': a[1]+=1
if c == '<': a[1]-=1
#return a
a = [0,0]  # vertical, horizontal
llist = ['0,0']
counter = 1
with open("input") as f:
while True:
c = f.read(1)
if not c: break
go (c,a)
#print a
x = ','.join(str(i) for i in a)
if x not in llist:
llist.append(x)
#print llist
counter+=1
print counter
|
import numpy as np
import pyopencl as cl
from kemp.fdtd3d.util import common, common_gpu
from fields import Fields
class CoreSplit2:
def __init__(self, fields):
"""
"""
common.check_type('fields', fields, Fields)
# local variables
context = fields.context
ns_pitch = fields.ns_pitch
pad = fields.pad
precision_float = fields.precision_float
dtype_str_list = fields.dtype_str_list
ce_on = fields.ce_on
ch_on = fields.ch_on
eh_bufs = fields.eh_bufs
if ce_on:
ce_bufs = fields.ce_bufs
if ch_on:
ch_bufs = fields.ch_bufs
ls = fields.ls
# program
str_pad = '' if pad==0 else '-%s' % pad
coeff_constant = {'single': '0.5f', 'double': '0.5'}[precision_float]
macros = ['ARGS_CE', 'CEX', 'CEY', 'CEZ', \
'ARGS_CH', 'CHX', 'CHY', 'CHZ', \
'DX', 'PAD', 'DTYPE', 'PRAGMA_fp64']
values = ['', coeff_constant, coeff_constant, coeff_constant, \
'', coeff_constant, coeff_constant, coeff_constant, \
str(ls), str_pad] + dtype_str_list
if ce_on:
values[:4] = [ \
', __global DTYPE *cex, __global DTYPE *cey, __global DTYPE *cez', \
'cex[idx]', 'cey[idx]', 'cez[idx]']
if ch_on:
values[4:8] = [ \
', __global DTYPE *chx, __global DTYPE *chy, __global DTYPE *chz', \
'chx[idx]', 'chy[idx]', 'chz[idx]']
ksrc = common.replace_template_code( \
open(common_gpu.src_path + 'core_split.cl').read(), macros, values)
program = cl.Program(context, ksrc).build()
# arguments
e_args = ns_pitch + eh_bufs
h_args = ns_pitch + eh_bufs
if ce_on:
e_args += ce_bufs
if ch_on:
h_args += ch_bufs
nx, ny, nz_pitch = ns_pitch
nyzp = ny * nz_pitch
e_args_dict = { \
'': [np.int32(0), np.int32(nx*nyzp)] + e_args, \
'pre': [np.int32(0), np.int32(nyzp)] + e_args, \
'post': [np.int32(nyzp), np.int32(nx*nyzp)] + e_args}
h_args_dict = { \
'': [np.int32(0), np.int32(nx*nyzp)] + h_args, \
'pre': [np.int32((nx-1)*nyzp), np.int32(nx*nyzp)] + h_args, \
'post': [np.int32(0), np.int32((nx-1)*nyzp)] + h_args}
gs = lambda n: int(n) if (n % fields.ls) == 0 else int(n - (n % fields.ls) + fields.ls)
gs_dict = { \
'': gs(nx*nyzp), \
'pre': gs(nyzp), \
'post': gs((nx-1)*nyzp)}
# global variables and functions
self.mainf = fields
self.program = program
self.e_args_dict = e_args_dict
self.h_args_dict = h_args_dict
self.gs_dict = gs_dict
# append to the update list
#self.priority_type = 'core'
#self.mainf.append_instance(self)
def update_e(self, part=''):
self.program.update_e(self.mainf.queue, (self.gs_dict[part],), (self.mainf.ls,), *self.e_args_dict[part])
def update_h(self, part=''):
self.program.update_h(self.mainf.queue, (self.gs_dict[part],), (self.mainf.ls,), *self.h_args_dict[part])
|
import datetime
import os
import sys
import time
from pathlib import Path
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
README = """Module to prepare data for modelling
Sources:
- interactions
- books string features
- books numeric features
- full_interactions.feather - 228 million reader-book interactions
- interactions.feather - 88 million reader-book interactions (with known books)
- `simple_book_features.feather` - numeric book features
"""
# SAMPLE PARAMS
MIN_READS = 50
TOP_BOOKS = 1_000
TOP_USES = 50_000
def active_filter(data, min_reads=50) -> pd.DataFrame:
"""Filter interactions data by activity threshold (min_reads=50).
"""
df_user_counts = pd.DataFrame(data.groupby('user_id').size(), columns=['count'])
df_book_counts = pd.DataFrame(data.groupby('book_id').size(), columns=['count'])
active_users = df_user_counts[df_user_counts['count'] >= min_reads].index.values
active_user_filter = data['user_id'].isin(active_users).values
    popular_books = df_book_counts[df_book_counts['count'] >= min_reads].index.values
popular_books_filter = data['book_id'].isin(popular_books).values
return data[active_user_filter & popular_books_filter]
def get_top_x_users(data, top_x=10_000):
"""Get top (x) users by interactions
returns: array of book_ids
"""
df_user_counts = pd.DataFrame(data.groupby('user_id').size(), columns=['count'])
return df_user_counts.sort_values('count', ascending=False).head(top_x).index.values
def get_top_x_books(data, top_x=10_000):
"""Get top (x) books by interactions
returns: array of book_ids
"""
df_book_counts = pd.DataFrame(data.groupby('book_id').size(), columns=['count'])
return df_book_counts.sort_values('count', ascending=False).head(top_x).index.values
if __name__ == '__main__':
# contains some one-time data processing
    # # reads all 228 million user-book interactions
# # limit only interactions with known books
# dfi = dfi.merge(dfb[['book_id']])
# # 228 million (with 2.3 million books) to 88 million (with 438k books)
start = time.time()
titles = pd.read_parquet('data/titles.snap.parquet')
dfi = pd.read_parquet('data/goodreads_interactions.snap.parquet')
dfbooks = pd.read_parquet('data/books_extra_features.snap.parquet')
dff = active_filter(dfi, min_reads=MIN_READS)
time_taken = time.time() - start
### Select a representative sample to build off
## grab 10k from popular online set
## supplement with top 1000k by straight interactions
df_books = pd.read_csv('data/df_books.csv')
sample_book_ids = df_books['goodreads_book_id']
top_x_book_ids = get_top_x_books(dff, 1000)
# sample_books_ids
# randomly sample 8_000
user_ids = get_top_x_users(dff, 75_000)
book_ids = set(set(sample_book_ids.values).union(top_x_book_ids))
# template with normalized book_id and title
dftmp = dft.loc[dft['book_id'].isin(book_ids)].reset_index(drop=True)
dftmp['goodreads_book_id'] = dftmp['book_id']
    dftmp = dftmp.sort_values('book_id').reset_index(drop=True)
dftmp['book_id'] = range(dftmp.shape[0])
    dftmp = dftmp.set_index('book_id')
dftmp.to_pickle('data/filter/filtered_titles.pkl')
dff.rename(columns={'book_id':'goodreads_book_id'}, inplace=True)
    df_ = dftmp.merge(dff, right_on=['goodreads_book_id'], left_on=['goodreads_book_id'])
# df_.to_feather('data/filter/all_interactions.feather')
is_read = (df_.is_read == 1)
is_top_user = (df_.user_id.isin(user_ids))
dff = df_.loc[is_read & is_top_user]
book_user_mat = dff.pivot(
index='book_id', columns='user_id', values='rating').fillna(0)
book_to_idx = {
book: i for i, book in
enumerate(list(dftmp.loc[book_user_mat.index].title))
}
# dffilter = dff.loc[dff.book_id.isin(top_x_book_ids)]
## will need to down sample do to time constraints
# recommend based on top 10k books by popularity + 750
# by interactions
df_filter = dff.loc[dff['book_id'].isin(book_ids)]
    df_filter = df_filter.loc[df_filter['user_id'].isin(user_ids)]
df_filter.shape # ~50 million interactions
# ## Item Based Recommendation with KNN
# # Format data for user-item recommenations
# # Transform df_ratings (dfi) into an (m x n) array
# # m (# books)
# # n (# users)
start = time.time()
book_user_matrix = df_filter.pivot(
index='book_id', columns='user_id', values='rating').fillna(0)
# book_user_matrix.to_pickle('data/filter/book_user_matrix_.pkl')
seconds = time.time() - start
dft.set_index('book_id').loc[matrix.index]
# dfi.merge(df_books.rename('good_reads_book_id': 'book_id'), on=[])
dfq = df_books.drop('book_id', 1).merge(dfi, left_on=['goodreads_book_id'], right_on=['book_id'])
dft.set_index('book_id').loc[sample_book_ids]
start = time.time()
book_user_matrix = dfq.pivot(
index='book_id', columns='user_id', values='rating').fillna(0)
# book_user_mat.to_pickle('data/filter/book_user_matrix_.pkl')
seconds = time.time() - start
# think
dfr = dfi.loc[dfi.is_read]
#### most basic
    df_books = pd.read_csv('data/df_books.csv')
    df_ratings = pd.read_csv('data/df_ratings.csv')
book_user_mat = df_ratings.pivot(index='book_id', columns='user_id', values='rating').fillna(0)
book_to_idx = {
book: i for i, book in
enumerate(list(df_books.set_index('book_id').loc[book_user_mat.index].title))
}
sparse_matrix = csr_matrix(book_user_mat.values)
model_knn = NearestNeighbors(metric='cosine',
algorithm='brute',
n_neighbors=20,
n_jobs=-1)
model_knn.fit(sparse_matrix)
book_name = 'Harry Botter and the Chamber of Secrets'
idx = fuzzy_matching(book_to_idx, book_name)
### sampling technique
# revisit
# df_books = pd.read_csv('data/df_books.csv')
# sample_book_ids = df_books['goodreads_book_id']
dfbooks = dft.loc[dft['book_id'].isin(sample_book_ids)]
top1000books = get_top_x_books(dfi, 1000)
# # recommend based on top 10k books by popularity
# top_x = 750
# top_ids = get_top_x_books(dff)
df = dff |
# Copyright (c) 2016-2018, University of Idaho
# All rights reserved.
#
# Roger Lew (rogerlew@gmail.com)
#
# The project described was supported by NSF award number IIA-1301792
# from the NSF Idaho EPSCoR Program and by the National Science Foundation.
# standard libraries
import os
from os.path import join as _join
from os.path import exists as _exists
import time
# non-standard
import jsonpickle
# weppy submodules
from .base import NoDbBase
from wepppy.all_your_base import isfloat
class PrepNoDbLockedException(Exception):
pass
class Prep(NoDbBase):
__name__ = 'Prep'
def __init__(self, wd, cfg_fn):
super(Prep, self).__init__(wd, cfg_fn)
self.lock()
# noinspection PyBroadException
try:
self._sbs_required = False
self._has_sbs = False
self._timestamps = {}
self.dump_and_unlock()
except Exception:
self.unlock('-f')
raise
# noinspection PyPep8Naming
@staticmethod
def getInstance(wd):
with open(_join(wd, 'prep.nodb')) as fp:
db = jsonpickle.decode(fp.read())
assert isinstance(db, Prep), db
if _exists(_join(wd, 'READONLY')):
db.wd = os.path.abspath(wd)
return db
if os.path.abspath(wd) != os.path.abspath(db.wd):
db.wd = wd
db.lock()
db.dump_and_unlock()
return db
@property
def sbs_required(self):
return getattr(self, '_sbs_required', False)
@sbs_required.setter
def sbs_required(self, v: bool):
self.lock()
try:
self._sbs_required = v
self.dump_and_unlock()
except:
self.unlock('-f')
raise
@property
def has_sbs(self):
return getattr(self, '_has_sbs', False)
@has_sbs.setter
def has_sbs(self, v: bool):
self.lock()
try:
self._has_sbs = v
self.dump_and_unlock()
except:
self.unlock('-f')
raise
@property
def _nodb(self):
return _join(self.wd, 'prep.nodb')
@property
def _lock(self):
return _join(self.wd, 'prep.nodb.lock')
def timestamp(self, key):
now = int(time.time())
self.__setitem__(key, now)
def __setitem__(self, key, value: int):
self.lock()
try:
self._timestamps[key] = value
self.dump_and_unlock()
except:
self.unlock('-f')
raise
def __getitem__(self, key):
return self._timestamps.get(key, None)
|
from genetic_algo import genetic_optimize
from graph import plot_graph
import time
from file_handle import write_csv_file
# This tells the number of times the algorithm will run
total_no_of_iter = 10
# POP_SIZE = 2048
# MUT_PROB = 0.25
# ELITE_RATIO = 0.1
# GENERATION = 96
# final parameter
POP_SIZE = 2500
MUT_PROB = 0.11
ELITE_RATIO = 0.5
GENERATION = 50
# POP_SIZE = 1700
# MUT_PROB = 0.03
# ELITE_RATIO = 0.5
# GENERATION = 200
CSV_FILE_PATH = 'multiple_mutations.csv'
def get_average(nested_lst):
result_list = [sum(i)/len(nested_lst) for i in zip(*nested_lst)]
return result_list
def test_with_multiple_generations(domain, schedule_cost):
# This is for seeing the costs with different generation for Genetic Algorithm
# all_gen_cost_lst = []
# all_gen_time_lst = []
for num in range(1, total_no_of_iter + 1):
print(f'\n Iteration {num} / {total_no_of_iter}: \n')
print(f' gen, cost, time')
sol, sol_cost, n = genetic_optimize(domain, schedule_cost,
popsize=POP_SIZE, mut_prob=MUT_PROB,
elite_ratio=ELITE_RATIO, n=250)
print('--------------------')
'''
def test_with_multiple_generations(domain, schedule_cost):
# This is for seeing the costs with different generation for Genetic Algorithm
all_gen_cost_lst = []
all_gen_time_lst = []
for num in range(1, total_no_of_iter + 1):
print(f'\n Iteration {num} / {total_no_of_iter}: \n')
print(f' gen, cost, time')
gen_cost_lst = []
gen_time_lst = []
gen_lst = []
gen_counter = 50
while gen_counter <= 250:
# store starting time
begin = time.time()
sol, sol_cost, n = genetic_optimize(domain, schedule_cost,
popsize=POP_SIZE, mut_prob=MUT_PROB,
elite_ratio=ELITE_RATIO, n=gen_counter)
time.sleep(1)
# store end time
end = time.time()
time_diff = end - begin
print(f' {gen_counter}, {sol_cost}, {time_diff}')
# write to csv file
#write_csv_file(CSV_FILE_PATH, [POP_SIZE, MUT_PROB, ELITE_RATIO, gen_counter, sol_cost, time_diff])
gen_lst.append(gen_counter)
gen_cost_lst.append(sol_cost)
gen_time_lst.append(time_diff)
#gen_counter += 100
gen_counter += 50
all_gen_cost_lst.append(gen_cost_lst)
all_gen_time_lst.append(gen_time_lst)
print('---------------------')
print('\n -------- Average costs and time --------- \n')
average_costs = get_average(all_gen_cost_lst)
average_time = get_average(all_gen_time_lst)
column_names = ['Generation', 'Average Cost', 'Average time(in seconds)']
print(f'Gen, Avg Cost, Avg time(s)')
for indx in range(len(average_costs)):
# write to csv file
rounded_average_cost = round(average_costs[indx], 3)
rounded_average_time = round(average_time[indx], 3)
write_csv_file(CSV_FILE_PATH, [gen_lst[indx], rounded_average_cost, rounded_average_time], column_names)
print(f'{gen_lst[indx]}, {rounded_average_cost}, {rounded_average_time}')
# plot graph and save
plot_graph(x_lst=gen_lst, y_lst=average_costs, title="Genetic Algorithm: Generation vs Costs", x_label="Generation", y_label="costs depending on generation", filename='multiple_generations')
'''
# Average of 10 iteration: 8.8
# Popsize = 3000, MUT_PROB = 0.25, ELITE_RATIO = 0.1, GENERATION = 50
def test_with_multiple_population(domain, schedule_cost):
# This is for seeing the costs with different population size for Genetic Algorithm
all_popsize_cost_lst = []
all_popsize_time_lst = []
for num in range(1, total_no_of_iter + 1):
print(f'\n Iteration {num} / {total_no_of_iter}: \n')
print(f' pop, cost, time')
popsize_cost_lst = []
popsize_lst = []
popsize_time_lst = []
popsize_counter = 500
while popsize_counter <= 2500:
# store starting time
begin = time.time()
sol, sol_cost, n = genetic_optimize(domain, schedule_cost,
popsize=popsize_counter, mut_prob=MUT_PROB,
elite_ratio=ELITE_RATIO, n=GENERATION)
time.sleep(1)
# store end time
end = time.time()
time_diff = end - begin
print(f' {popsize_counter}, {sol_cost}, {time_diff}')
# write to csv file
#write_csv_file(CSV_FILE_PATH, [popsize_counter, MUT_PROB, ELITE_RATIO, GENERATION, sol_cost, time_diff])
popsize_lst.append(popsize_counter)
popsize_cost_lst.append(sol_cost)
popsize_time_lst.append(time_diff)
popsize_counter += 500
all_popsize_cost_lst.append(popsize_cost_lst)
all_popsize_time_lst.append(popsize_time_lst)
print('---------------------')
print('\n -------- Average costs and time --------- \n')
average_costs = get_average(all_popsize_cost_lst)
average_time = get_average(all_popsize_time_lst)
column_names = ['Population', 'Average Cost', 'Average time(in seconds)']
print(f'Pop, Avg Cost, Avg time(s)')
for indx in range(len(average_costs)):
# write to csv file
rounded_average_cost = round(average_costs[indx], 3)
rounded_average_time = round(average_time[indx], 3)
write_csv_file(CSV_FILE_PATH, [popsize_lst[indx], rounded_average_cost, rounded_average_time], column_names)
print(f'{popsize_lst[indx]}, {rounded_average_cost}, {rounded_average_time}')
# plot graph and save
plot_graph(x_lst=popsize_lst, y_lst=average_costs, title="Genetic Algorithm: Population Size vs Costs", x_label="Population Size", y_label="costs depending on population size", filename='multiple_populations')
def test_with_multiple_mut_prob(domain, schedule_cost):
# This is for seeing the costs with different mutation probability for Genetic Algorithm
all_mut_cost_lst = []
all_mut_time_lst = []
for num in range(1, total_no_of_iter + 1):
print(f'\n Iteration {num} / {total_no_of_iter}: \n')
print(f' mut_prob, cost, time')
mut_cost_lst = []
mut_lst = []
mut_time_lst = []
mut_counter = 0.01
while mut_counter <= 0.26:
# store starting time
begin = time.time()
sol, sol_cost, n = genetic_optimize(domain, schedule_cost,
popsize=POP_SIZE, mut_prob=mut_counter,
elite_ratio=ELITE_RATIO, n=GENERATION)
time.sleep(1)
# store end time
end = time.time()
time_diff = end - begin
print(f' {mut_counter}, {sol_cost}, {time_diff}')
# write to csv file
#write_csv_file(CSV_FILE_PATH, [POP_SIZE, mut_counter, ELITE_RATIO, GENERATION, sol_cost, time_diff])
mut_lst.append(mut_counter)
mut_cost_lst.append(sol_cost)
mut_time_lst.append(time_diff)
mut_counter += 0.05
all_mut_cost_lst.append(mut_cost_lst)
all_mut_time_lst.append(mut_time_lst)
print('---------------------')
print('\n -------- Average costs and time --------- \n')
average_costs = get_average(all_mut_cost_lst)
average_time = get_average(all_mut_time_lst)
column_names = ['Mutation Probability', 'Average Cost', 'Average time(in seconds)']
print(f'Mut, Avg Cost, Avg time(s)')
for indx in range(len(average_costs)):
# write to csv file
rounded_average_cost = round(average_costs[indx], 3)
rounded_average_time = round(average_time[indx], 3)
write_csv_file(CSV_FILE_PATH, [mut_lst[indx], rounded_average_cost, rounded_average_time], column_names)
print(f'{mut_lst[indx]}, {rounded_average_cost}, {rounded_average_time}')
# plot graph and save
title = "Genetic Algorithm: Mutation Probability vs Costs"
x_label = "Mutation Probability"
y_label = "costs depending on mutation probability"
plot_graph(x_lst=mut_lst, y_lst=average_costs, title=title, x_label=x_label, y_label=y_label, filename='multiple_mut_prob')
def test_with_multiple_elite_ratio(domain, schedule_cost):
all_elite_cost_lst = []
all_elite_time_lst = []
for num in range(1, total_no_of_iter + 1):
print(f'\n Iteration {num} / {total_no_of_iter}: \n')
print(f' elite, cost, time')
elite_cost_lst = []
elite_lst = []
elite_time_lst = []
elite_counter = 0.1
while elite_counter <= 0.9:
# store starting time
begin = time.time()
sol, sol_cost, n = genetic_optimize(domain, schedule_cost,
popsize=POP_SIZE, mut_prob=MUT_PROB,
elite_ratio=elite_counter, n=GENERATION)
time.sleep(1)
# store end time
end = time.time()
time_diff = end - begin
print(f' {round(elite_counter, 3)}, {sol_cost}, {time_diff}')
# write to csv file
# write_csv_file(CSV_FILE_PATH, [POP_SIZE, MUT_PROB, elite_counter, GENERATION, sol_cost, time_diff])
elite_lst.append(elite_counter)
elite_cost_lst.append(sol_cost)
elite_time_lst.append(time_diff)
elite_counter += 0.2
all_elite_cost_lst.append(elite_cost_lst)
all_elite_time_lst.append(elite_time_lst)
print('---------------------')
print('\n -------- Average costs and time --------- \n')
average_costs = get_average(all_elite_cost_lst)
average_time = get_average(all_elite_time_lst)
column_names = ['Elite Ratio', 'Average Cost', 'Average time(in seconds)']
print(f'Elite, Avg Cost, Avg time(s)')
for indx in range(len(average_costs)):
# write to csv file
rounded_average_cost = round(average_costs[indx], 3)
rounded_average_time = round(average_time[indx], 3)
write_csv_file(CSV_FILE_PATH, [elite_lst[indx], rounded_average_cost, rounded_average_time], column_names)
print(f'{elite_lst[indx]}, {rounded_average_cost}, {rounded_average_time}')
# plot graph and save
title = "Genetic Algorithm: Elite Ratio vs Costs"
x_label = "Elite Ratio"
y_label = "costs depending on elite ratio"
plot_graph(x_lst=elite_lst, y_lst=average_costs, title=title, x_label=x_label, y_label=y_label, filename='multiple_elite_ratio')
# Average of 10 iteration: 8.6; Min cost: 8
def test_with_multiple_iterations(domain, schedule_cost):
genetic_optimize_costs_lst = []
iteration_num_lst = []
for num in range(1, total_no_of_iter + 1):
print(f'\nIteration {num} / {total_no_of_iter}: \n')
sol, sol_cost, n = genetic_optimize(domain, schedule_cost,
popsize=POP_SIZE, mut_prob=MUT_PROB,
elite_ratio=ELITE_RATIO, n=GENERATION)
print('cost: ', sol_cost)
iteration_num_lst.append(num)
genetic_optimize_costs_lst.append(sol_cost)
# getting average and min costs
genetic_avg = sum(genetic_optimize_costs_lst)/len(genetic_optimize_costs_lst)
print("Average of {} iteration: {}".format(total_no_of_iter, genetic_avg))
min_cost_from_iter = min(genetic_optimize_costs_lst)
print("Min cost: ", min_cost_from_iter)
# plot graph and save
plot_graph(x_lst=iteration_num_lst, y_lst=genetic_optimize_costs_lst, title="Genetic Algorithm", x_label="no of iterations", y_label="costs", filename='multiple_iterations')
if __name__ == '__main__':
    # multiple generation result
    # with population size 1700
    # mutation probability = 0.25
    # elite ratio = 0.1
# costs = [
# [10, 10, 10, 10, 10],
# [8, 8, 8, 8, 8],
# [10, 10, 10, 10, 10],
# [10, 10, 10, 10, 10],
# [8, 8, 8, 8, 8],
# [8, 8, 8, 8, 8],
# [8, 8, 8, 8, 8],
# [12, 12, 12, 12, 12],
# [8, 8, 8, 8, 8],
# [8, 8, 8, 8, 8],
# ]
# time = [
# [80.8660638332367, 161.4291741847992, 242.2736370563507, 324.7921440601349, 405.6109471321106],
# [86.08917498588562, 168.00921416282654, 250.22483706474304, 333.23261618614197, 414.80256938934326],
# [78.69724416732788, 161.3247241973877, 246.343092918396, 327.0176639556885, 408.06141996383667],
# [79.21017980575562, 160.41059398651123, 240.20571184158325, 321.06329703330994, 400.7412610054016],
# [80.64266920089722, 160.82655692100525, 241.96409010887146, 324.5357620716095, 405.4657301902771],
# [80.64799213409424, 162.8949110507965, 243.98874020576477, 325.65296506881714, 408.25901103019714],
# [83.83998894691467, 166.2589988708496, 250.05240178108215, 335.41282773017883, 421.65538001060486],
# [85.26930403709412, 171.09963083267212, 255.6997320652008, 336.31591510772705, 419.6387388706207],
# [83.02753710746765, 174.97867894172668, 266.77037525177, 356.705628156662, 450.05363297462463],
# [87.46368622779846, 175.32150506973267, 260.44370317459106, 346.2401821613312, 430.62544894218445],
# ]
# avg costs: [9.0, 9.0, 9.0, 9.0, 9.0]
# avg time: [82.57538404464722, 166.25539882183074, 249.79663214683532, 333.0969001531601, 416.4914139509201]
# with population size 1024
# mutation probability = 0.25
    # elite ratio = 0.1
costs = [
[10, 10, 10, 10, 10],
[10, 10, 10, 10, 10],
[10, 10, 10, 10, 10],
[10, 10, 10, 10, 10],
[10, 10, 10, 10, 10],
[10, 10, 10, 10, 10],
[12, 10, 10, 10, 10],
[10, 10, 10, 10, 10],
[8, 8, 8, 8, 8],
[12, 12, 12, 12, 12]
]
time = [
[53.067546129226685,105.32186698913574,156.91716027259827,209.58383417129517,260.500773191452],
[48.841296911239624,99.85183882713318,150.71128606796265,201.90972805023193,253.55273699760437],
[50.60228490829468,104.56625986099243,155.5186219215393,206.90059900283813,258.82944989204407],
[51.72835612297058, 105.23974990844727,160.7707781791687,212.56723308563232,265.2596092224121],
[52.62678289413452,105.75621199607849,157.723130941391,210.21576976776123,262.2023718357086],
[54.331494092941284,111.49562120437622,162.81883001327515,213.05765914916992, 264.4934070110321],
[50.97555708885193,108.77151417732239,162.28308701515198,219.71455717086792,278.35750818252563],
[51.85524606704712,105.30560207366943,158.21073627471924,210.90447211265564,268.5050492286682],
[57.558557987213135,113.31611609458923,167.27976512908936,222.54182887077332,275.3733699321747],
[52.750272035598755,105.70203495025635, 158.6443190574646,211.31386280059814,265.3457248210907]
]
# lst = get_average([[2,5,7,9], [3,3,5,6], [2,2,9,3]])
# print(lst)
average_costs = get_average(costs)
average_time = get_average(time)
print('avg costs: ', average_costs)
print('avg time: ', average_time)
#avg costs: [10.2, 10.0, 10.0, 10.0, 10.0]
#avg time: [52.43373942375183, 106.53268160820008, 159.087771487236, 211.87095441818238, 265.24200003147126]
gen_lst = [50, 100, 150, 200, 250]
plot_graph(x_lst=gen_lst, y_lst=average_costs, title="Genetic Algorithm: Generation vs Costs", x_label="Generation", y_label="costs depending on generation", filename='multiple_generations')
|
import sys, argparse
searchString = ""
startDate = ''
endDate = ''
parser = argparse.ArgumentParser()
parser.add_argument("searchString", help="String to search")
parser.add_argument("-s", "--startDate", help="Start Date")
parser.add_argument("-e", "--endDate", help="End Date")
args = parser.parse_args()
searchString = args.searchString
if args.startDate is not None:
startDate = args.startDate
if args.endDate is not None:
endDate = args.endDate
print ("Search String - "+searchString)
print ("Start Date - "+startDate)
print ("End Date - "+endDate) |
from typing import List


class Solution:
    # Process hits in reverse: erase all hit bricks first, mark every brick still
    # connected to the top row as "stuck" (inf), then restore the hits one by one,
    # counting how many other bricks become stuck thanks to each restored brick.
    def hitBricks(self, grid: List[List[int]], hits: List[List[int]]) -> List[int]:
        m, n = len(grid), len(grid[0])
        inf = float('inf')
        for (i, j) in hits:
            if grid[i][j] == 1:
                grid[i][j] = -1
def nbrs(i, j):
for di, dj in [(0, 1), (0, -1), (1, 0), (-1, 0)]:
ci, cj = i + di, j + dj
if ci < 0 or ci >= m or cj < 0 or cj >= n: continue
yield ci, cj
def stick(i, j):
grid[i][j] = inf
for ni, nj in nbrs(i, j):
if grid[ni][nj] == 1:
stick(ni, nj)
for j in range(n):
if grid[0][j] == 1: stick(0, j)
ans = []
def put_back(i, j, visited):
grid[i][j] = inf if i == 0 or any(grid[ni][nj] == inf for ni, nj in nbrs(i, j)) else 1
ans = 1 if grid[i][j] == inf else 0
for ni, nj in nbrs(i, j):
if grid[ni][nj] != 1 or (ni, nj) in visited: continue
visited.add((ni, nj))
ans += put_back(ni, nj, visited)
return ans
for (i, j) in hits[::-1]:
ans.append(put_back(i, j, set([(i, j)]))-(grid[i][j] == inf) if grid[i][j] == -1 else 0)
return ans[::-1]
|
#-*-encoding:utf-8-*-
from django.conf.urls import patterns, include, url
from weixin import views
urlpatterns = patterns('',
url(r'^testjs', views.testjs, name='weixin.testjs'),
url(r'^$', views.index, name='weixin.index'),
)
|
class Song:
"""Contains information about a song.
Attributes:
id (str): ID of the song.
title (str): Title of the song.
        audio_format (str): Audio format of the song (wav, mp3, etc.)
duration (int): Duration of the song (in seconds).
genre (str): Label for the song.
"""
def __init__(self, song_id, title, audio_format, duration, genre):
self.__id = song_id
self.__title = title
self.__format = audio_format
self.__duration = duration
self.__genre = genre
@staticmethod
def import_from_dictionary(dictionary):
"""Imports a song from a dictionary.
Args:
dictionary (dict): Dictionary of pairs of (attribute, value).
Returns:
song (songs.song.Song): created song from the dictionary.
"""
song = Song(None, None, None, None, None)
song.__id = dictionary["id"]
song.__title = dictionary["title"]
song.__format = dictionary["audio_format"]
song.__duration = dictionary["duration"]
song.__genre = dictionary["genre"]
return song
@property
def id(self):
return self.__id
@property
def title(self):
return self.__title
@property
def audio_format(self):
return self.__format
@property
def duration(self):
return self.__duration
@property
def genre(self):
return self.__genre
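# Illustrative round trip (values below are made up): import_from_dictionary
# expects the keys read above -- "id", "title", "audio_format", "duration", "genre".
if __name__ == "__main__":
    example = Song.import_from_dictionary({
        "id": "s-001",
        "title": "Example Track",
        "audio_format": "mp3",
        "duration": 215,
        "genre": "rock",
    })
    print(example.title, example.duration)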
|
rule population_assembly:
"""Assembly step 05: Create 'contig dictionary' of all unique contigs present in the study (aka: population assembly)"""
input:
os.path.join(dir.out.assembly, "all_sample_contigs.fasta.gz")
output:
assembly = os.path.join(dir.out.results, "cross_assembly.fasta"),
graph = os.path.join(dir.out.results, "cross_assembly_graph.gfa"),
stats = os.path.join(dir.out.assembly, "FLYE", "contig_dictionary.stats")
params:
flye_out = lambda w, output: os.path.split(output.stats)[0],
flye_params = config.assembly.flye,
assembly = os.path.join(dir.out.assembly, "FLYE", "assembly.fasta"),
graph = os.path.join(dir.out.assembly, "FLYE", "assembly_graph.gfa")
benchmark:
os.path.join(dir.out.bench, "population_assembly.txt")
log:
log1 = os.path.join(dir.out.stderr, "population_assembly.flye.log"),
log2 = os.path.join(dir.out.stderr, "population_assembly.stats.log")
resources:
mem_mb = config.resources.med.mem,
time = config.resources.med.time
threads:
config.resources.med.cpu
conda:
os.path.join(dir.env, "metaflye.yaml")
shell:
"""
flye --subassemblies {input} -t {threads} --plasmids -o {params.flye_out} {params.flye_params} &>> {log.log1}
rm {log.log1}
mv {params.assembly} {output.assembly}
mv {params.graph} {output.graph}
statswrapper.sh in={output.assembly} out={output.stats} \
format=2 \
ow=t 2> {log.log2}
rm {log.log2}
"""
rule create_contig_count_table:
"""Assembly step 08: Transcript Per Million (TPM) calculator
Useful resource: https://www.rna-seqblog.com/rpkm-fpkm-and-tpm-clearly-explained/"""
input:
rpkm = os.path.join(dir.out.mapping, "{sample}.rpkm"),
covstats = os.path.join(dir.out.mapping, "{sample}.cov_stats")
output:
count_tbl = temp(os.path.join(dir.out.mapping, "{sample}_contig_counts.tsv"))
benchmark:
os.path.join(dir.out.bench, "create_contig_count_table.{sample}.txt")
log:
os.path.join(dir.out.stderr, "create_contig_count_table.{sample}.log")
script:
os.path.join(dir.scripts, 'contigCountTable.py')
rule concatentate_contig_count_tables:
"""Assembly step 09: Concatenate contig count tables"""
input:
expand(os.path.join(dir.out.mapping, "{sample}_contig_counts.tsv"), sample=samples.names)
output:
os.path.join(dir.out.results, "contig_count_table.tsv")
benchmark:
os.path.join(dir.out.bench, "concatentate_contig_count_tables.txt")
log:
os.path.join(dir.out.stderr, "concatentate_contig_count_tables.log")
shell:
"""
{{
head -1 {input[0]} > {output};
tail -q -n +2 {input} | grep -vP '^Sample\s' >> {output};
}} 2> {log}
rm {log}
"""
|
import math
import random
n = int(input("de que tamaño es la matriz?: "))
a = n*n
matriz = []
for i in range (0,a):
matriz.append(a)
|
from flask import Flask, render_template, request
from Code import func
from Code import func1
from connection import data_insert
app = Flask(__name__)
@app.route('/', methods=['GET'])
def show_index_html():
return render_template('index.html')
@app.route('/update.html', methods=['POST'])
def show_index():
return render_template('update.html')
@app.route('/send_data', methods=['POST'])
def get_data_from_html():
pay = request.form['pay']
movies, poster, year, genre, rating, summary = func(pay)
length = len(movies)
return render_template('index.html', movies=movies, poster=poster, year=year, genre=genre, rating=rating, summary=summary, length=length)
@app.route('/send_Genres', methods=['POST'])
def get_data():
inp = request.form['Genres']
movies, poster, year, genre, rating, summary = func1(inp)
length = len(movies)
return render_template('index.html', movies=movies, poster=poster, year=year, genre=genre, rating=rating, summary=summary, length=length)
@app.route('/Add_data', methods=['POST'])
def add_data():
rating = request.form['rating']
name = request.form['name']
genre = request.form['genre']
year = request.form['year']
summary = request.form['summary']
cast = request.form['cast']
writer = request.form['writer']
poster = request.form['trailer']
director = request.form['director']
k=data_insert(name,genre,cast,director,writer,summary,poster,year,rating)
return render_template('index.html',k=k)
if __name__ == '__main__':
app.run(debug=True)
|
import logging.config
import os
import sys
TRUTH_STRINGS = ["True", "true", "1", "t", "y", "yes"]
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR, "templates")
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get(
"SECRET_KEY", default="wpqb4)9vq85zf$!f=x6@t75v0j*-nb^dizdjy(hovma57g*bgv"
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get("DEBUG", default="yes") in TRUTH_STRINGS
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.humanize",
"bootstrap3",
"leaflet",
"djgeojson",
"corsheaders",
"data",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = "aileen.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [TEMPLATE_DIR],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "aileen.wsgi.application"
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "Europe/Amsterdam"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = "/static/"
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATIC_ROOT = os.path.join(BASE_DIR, "assets")
LOGIN_REDIRECT_URL = "server:home"
LOGOUT_REDIRECT_URL = "server:logout"
# LEAFLET_CONFIG is needed to configure leaflet
LEAFLET_CONFIG = {
"DEFAULT_CENTER": (52.2, 4.7),
"DEFAULT_ZOOM": 10,
"RESET_VIEW": False,
}
# --- Custom logging config
LOGGING_CONFIG = None
aileen_logging_config = {
"version": 1,
"formatters": {
"default": {"format": "[Aileen][%(asctime)s] %(levelname)s: %(message)s"},
"detail": {
"format": "[Aileen][%(asctime)s] %(levelname)s: %(message)s [log made in %(pathname)s:%(lineno)d]"
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"stream": sys.stdout,
"formatter": "default",
},
"file": {
"class": "logging.handlers.RotatingFileHandler",
"level": "INFO",
"formatter": "detail",
"filename": BASE_DIR + "/aileen.log",
},
},
"root": {"level": "INFO", "handlers": ["console", "file"], "propagate": True},
}
logging.config.dictConfig(aileen_logging_config)
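# Note (illustrative, not part of the original settings): with the dictConfig above
# applied, module-level loggers inherit the root handlers, so a call such as
#   logging.getLogger(__name__).info("box started")
# is written both to stdout (default format) and to aileen.log (detail format).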
# -------- Custom settings added by the Aileen team ----------
# ----- some can be customised by environment variables ----
AILEEN_MODE = os.environ.get(
"AILEEN_MODE", default="box"
) # can also be "server" or "both" (for dev purposes)
if AILEEN_MODE in ("box", "both"):
INSTALLED_APPS.append("box")
if AILEEN_MODE in ("server", "both"):
INSTALLED_APPS.append("server")
# ---- General Settings
# Display this in front of simply stdout printing
TERM_LBL = "[Aileen-Core]"
# ---- Box Settings
# If you have installed aileen in a virtual env, the tmux session needs to know how to get that activated.
ACTIVATE_VENV_CMD = os.environ.get("AILEEN_ACTIVATE_VENV_CMD", default="")
ACTIVATE_VENV_CMD = (
f"{ACTIVATE_VENV_CMD};"
if ACTIVATE_VENV_CMD and not ACTIVATE_VENV_CMD.endswith(";")
else ACTIVATE_VENV_CMD
)
# Required: Specify which importable module contains the functions
# used by Aileen (start_sensing, latest_reading_as_df, check_preconditions).
# Make sure it is known to the Python interpreter, so e.g. you could:
#
# export AILEEN_SENSOR_MODULE=aileen_sensing_api
# export PYTHONPATH=$PYTHONPATH:./path/to/sen
SENSOR_MODULE = os.environ.get("AILEEN_SENSOR_MODULE", default="")
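# Illustrative sketch (assumption, not part of this settings file): a minimal sensor
# module satisfying the contract named above could look roughly like the following;
# the exact signatures are not specified here and are only guessed for illustration.
#
#   # aileen_sensing_api.py (hypothetical module name)
#   def check_preconditions():
#       """Verify the sensor hardware/drivers are available; raise or log otherwise."""
#
#   def start_sensing(tmp_path):
#       """Start the long-running sensing process, writing raw output under tmp_path."""
#
#   def latest_reading_as_df(tmp_path):
#       """Parse the newest sensor output under tmp_path and return it as a DataFrame."""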
# This can be used to find / clean up files produced by the sensor
SENSOR_FILE_PREFIX = os.environ.get("AILEEN_SENSOR_FILE_PREFIX", "")
# the port under which the local box server can be reached (e.g. 127.0.0.1:5656)
BOX_PORT = os.environ.get("AILEEN_BOX_PORT", default=5656)
SENSOR_LOG_INTERVAL_IN_SECONDS = int(
os.environ.get("AILEEN_SENSOR_LOG_INTERVAL_IN_SECONDS", default=5)
)
# if this is false, no uploading will take place
INTERNET_CONNECTION_AVAILABLE = (
os.environ.get("AILEEN_INTERNET_CONNECTION_AVAILABLE", default="yes")
in TRUTH_STRINGS
)
UPLOAD_INTERVAL_IN_SECONDS = int(
os.environ.get("AILEEN_UPLOAD_INTERVAL_IN_SECONDS", default=60)
)
UPLOAD_MAX_NUMBER_PER_REQUEST = int(
os.environ.get("AILEEN_UPLOAD_MAX_NUMBER_PER_REQUEST", default=500)
)
STATUS_MONITORING_INTERVAL_IN_SECONDS = int(
os.environ.get("AILEEN_STATUS_MONITORING_INTERVAL_IN_SECONDS", default=60)
)
PROCESS_RESTART_INTERVAL_IN_SECONDS = int(
os.environ.get("AILEEN_PROCESS_RESTART_INTERVAL_IN_SECONDS", default=600)
)
if PROCESS_RESTART_INTERVAL_IN_SECONDS % STATUS_MONITORING_INTERVAL_IN_SECONDS != 0:
print(
"Configuration problem in aileen/settings.py: Please make PROCESS_RESTART_INTERVAL_IN_SECONDS (now %d)"
" a multiple of STATUS_MONITORING_INTERVAL_IN_SECONDS (%d)."
% (PROCESS_RESTART_INTERVAL_IN_SECONDS, STATUS_MONITORING_INTERVAL_IN_SECONDS)
)
sys.exit(2)
# whether to hash observable IDs; defaults to false
HASH_OBSERVABLE_IDS = (
os.environ.get("AILEEN_HASH_OBSERVABLE_IDS", default="False") in TRUTH_STRINGS
)
HASH_ITERATIONS = int(
os.environ.get("AILEEN_HASH_ITERATIONS", default=500_000)
)  # as of 2013, the recommendation was "at least 100000" iterations
# whether boxes should upload events to the server (otherwise just aggregations)
UPLOAD_EVENTS = os.environ.get("AILEEN_UPLOAD_EVENTS", default="False") in TRUTH_STRINGS
# For tmux sessions and writing info to DB
TMUX_SESSION_NAME = "aileen_tmux_session"
# Name of temporary folder for sensor output
TMP_DIR_NAME = "aileen_sensor_data"
# ---- Server Settings
|
from typing import TYPE_CHECKING
from typing import Protocol
from uuid import UUID
from aristaeus.domain.entities.swarm import Swarm
__all__ = ["SwarmRepositoryAdapter"]
if TYPE_CHECKING:
Base = object
else:
Base = Protocol
class SwarmRepositoryAdapter(Base):
async def save(self, entity: Swarm) -> None:
...
async def get(self, public_id: UUID) -> Swarm:
...
async def update(self, swarm: Swarm) -> None:
...
async def delete(self, swarm: Swarm) -> None:
...
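# Illustrative only (not part of the original module): a minimal in-memory
# implementation of this adapter protocol, e.g. for unit tests. It assumes the
# Swarm entity exposes a `public_id: UUID` attribute, which this file does not confirm.
class InMemorySwarmRepository:
    def __init__(self) -> None:
        self._swarms = {}  # maps public_id (UUID) -> Swarm

    async def save(self, entity: Swarm) -> None:
        self._swarms[entity.public_id] = entity

    async def get(self, public_id: UUID) -> Swarm:
        return self._swarms[public_id]

    async def update(self, swarm: Swarm) -> None:
        self._swarms[swarm.public_id] = swarm

    async def delete(self, swarm: Swarm) -> None:
        self._swarms.pop(swarm.public_id, None)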
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2019-06-03 01:30
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0011_auto_20190603_0120'),
]
operations = [
migrations.AlterField(
model_name='assignment',
name='deadline',
field=models.DateTimeField(default=datetime.datetime(2019, 6, 3, 3, 30, 25, 671152)),
),
]
|
# Pool represents a process pool, used to manage multiple worker processes
from multiprocessing import Pool
import time, os, random
def run(name):
    start = time.time()
    print("Child process %d started -- %s" % (name, os.getpid()))
    time.sleep(random.choice([1, 2, 3]))
    end = time.time()
    print("Child process %d finished -- %s -- took %.3f s" % (name, os.getpid(), end - start))
if __name__ == "__main__":
    print("Parent process started")
    # create several child processes
    # via a process pool:
    # the argument is the number of processes allowed to run concurrently;
    # Pool defaults to the number of CPU cores
    pp = Pool(4)  # assume this machine has 4 CPU cores
    for i in range(3):
        # create a process and hand it to the pool for management
        pp.apply_async(run, args=(i, ))
    # when using a pool, close() must be called before join(); after close() no new processes can be added
    pp.close()
    # calling join() on the pool waits for all child processes to finish before the parent continues
    pp.join()
    print("Parent process finished") |
# coding= utf-8
class Person(object):
def __init__(self, clientSocket, account, name):
self.clientSocket = clientSocket
self.account = account
self.name = name
|
a1 = float(input("nota1: ")) * 1
a2 = float(input("nota2: ")) * 2
a3 = float(input("nota3: ")) * 3
a4 = float(input("nota4: ")) * 4
m = (a1 + a2 + a3 + a4) / 10
g = round(m, 2)
print(g) |
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.compute import ComputeManagementClient
import azure.mgmt.batchai as batchai
import azure.mgmt.batchai.models as baimodels
import os
import json
import sys
config_file = sys.argv[3]
with open(config_file) as f:
j = json.loads(f.read())
# Azure service principal login credentials
TENANT_ID = j['TENANT_ID']
CLIENT = j['CLIENT']
KEY = j['KEY']
# Batch AI cluster info
resource_group_name = j['resource_group_name']
subscription_id = str(j['subscription_id'])
cluster_name = j['cluster_name']
location = j['location']
command_line = j['command_line']
std_out_err_path_prefix = j['std_out_err_path_prefix']
config_file_path = j['config_file_path']
node_count = j['node_count']
# job parameters
ts_from = sys.argv[1]
ts_to = sys.argv[2]
device_ids = j['device_ids']
tags = j['tags']
job_name_template = j['job_name']
credentials = ServicePrincipalCredentials(
client_id=CLIENT,
secret=KEY,
tenant=TENANT_ID
)
batchai_client = batchai.BatchAIManagementClient(
credentials=credentials, subscription_id=subscription_id)
cluster = batchai_client.clusters.get(resource_group_name, cluster_name)
# run an async job for each sensor
for device_id in device_ids:
for tag in tags:
job_name = job_name_template.format(device_id, tag)
custom_settings = baimodels.CustomToolkitSettings(command_line=command_line.format(device_id, tag, ts_from, ts_to, config_file_path))
print('command line: ' + custom_settings.command_line)
params = baimodels.job_create_parameters.JobCreateParameters(location=location,
cluster=baimodels.ResourceId(
cluster.id),
node_count=node_count,
std_out_err_path_prefix=std_out_err_path_prefix,
custom_toolkit_settings=custom_settings
)
batchai_client.jobs.create(resource_group_name, job_name, params)
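# Illustrative invocation (the script file name is assumed, not given here):
#   python submit_batchai_jobs.py <ts_from> <ts_to> <config.json>
# where sys.argv[1]/[2] are the timestamp range substituted into the command line
# template and sys.argv[3] is the JSON config providing credentials, cluster info,
# device_ids, tags and the job_name/command_line templates used above.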
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import List
import numpy as np
import matplotlib.pyplot as plt
def plot_forecast_supportset_attention(
query_past: List[np.ndarray],
query_future: List[np.ndarray],
pred: List[np.ndarray],
supps: List[np.ndarray],
attention: List[np.ndarray],
quantiles: List[str],
) -> plt.Figure:
"""
Plots the provided forecasts for each sample with confidence intervals using all provided quantiles.
Furthermore, plots the time series in the support set of this sample aligned with their attention scores.
Parameters
----------
query_past: List[np.ndarray] of length n_samples, each array has shape [context length]
The past query which the model uses to make a prediction.
query_future: List[np.ndarray] of length n_samples, each array has shape [prediction horizon length]
The ground truth for the forecast.
pred: List[np.ndarray], of length n_samples, each array has shape [prediction horizon length, n_quantiles]
The prediction of the model.
    supps: List[np.ndarray], length is n_samples, each item has shape [supps_size, support ts length]
        The support sets for each query.
    attention: List[np.ndarray], length is n_samples, each item has shape [supps_size, attention length, n_heads]
        The attention scores for each support set time series.
    quantiles: List[str], shape [n_quantiles,]
Returns
-------
    plt.Figure
A plot containing one subplot for each sample. Each subplot displays the ground truth, predicted confidence
intervals, the predicted median. Furthermore, the time series in the support set aligned with their attention
scores.
"""
supps_size = len(supps[0])
n_samples = len(query_past)
plots_per_sample = supps_size + 1
n_plots = plots_per_sample * n_samples
fig, subs = plt.subplots(n_plots, figsize=(10, 4 * n_plots))
for i in range(n_samples):
# plot prediction
subs[plots_per_sample * i].set_title(f"query {i}")
_plot_quantile_forecast(
subs[plots_per_sample * i],
query_past[i],
query_future[i],
pred[i],
quantiles,
)
# plot support ts and attention weights
for j in range(1, supps_size + 1):
subs[plots_per_sample * i + j].set_title(
f"support ts {j} for query {i}"
)
subs[plots_per_sample * i + j].plot(supps[i][j - 1].squeeze())
subs[plots_per_sample * i + j].sharex(subs[plots_per_sample * i])
subs[plots_per_sample * i + j].sharey(subs[plots_per_sample * i])
# twin object for two different y-axis on the sample plot
ax2 = subs[plots_per_sample * i + j].twinx()
for head in range(attention[0][0].shape[1]):
n_att = len(attention[i][j - 1][:, head])
n_supps = len(supps[i][j - 1])
ax2.fill_between(
np.linspace(start=0, stop=n_supps, num=n_att),
attention[i][j - 1][:, head],
alpha=0.25,
# label=f"head {head}",
label="accumulated attention",
)
if j == 1:
ax_base = ax2
else:
ax2.sharey(ax_base)
ax2.set_ylabel("attention scores", color="red")
ax2.legend()
return fig
def plot_quantile_forecast(
query_past: List[np.ndarray],
query_future: List[np.ndarray],
pred: List[np.ndarray],
quantiles: List[str],
) -> plt.Figure:
"""
Plots the provided forecasts for each sample with confidence intervals using all provided quantiles.
Parameters
----------
query_past: List[np.ndarray] of length n_samples, each array has shape [context length]
The past query which the model uses to make a prediction.
query_future: List[np.ndarray] of length n_samples, each array has shape [prediction horizon length]
The ground truth for the forecast.
pred: List[np.ndarray], of length n_samples, each array has shape [prediction horizon length, n_quantiles]
The prediction of the model.
    quantiles: List[str], shape [n_quantiles,]
Returns
-------
    plt.Figure
A plot containing one subplot for each sample. Each subplot displays the ground truth, predicted confidence
intervals and the predicted median.
"""
n_samples = len(query_past)
fig, subs = plt.subplots(n_samples, figsize=(10, 4 * n_samples))
if n_samples > 1:
for i in range(n_samples):
subs[i].set_title(f"sample {i}")
_plot_quantile_forecast(
subs[i], query_past[i], query_future[i], pred[i], quantiles
)
else:
i = 0
subs.set_title(f"sample {i}")
_plot_quantile_forecast(
subs, query_past[i], query_future[i], pred[i], quantiles
)
return fig
def _plot_quantile_forecast(
sub: plt.Axes,
query_past: np.ndarray,
query_future: np.ndarray,
pred: np.ndarray,
quantiles: List[str],
):
"""
Plots the provided forecast with confidence intervals using all provided quantiles.
Parameters
----------
    sub: plt.Axes
        The Axes object to plot on.
query_past: np.ndarray, shape[context length]
The past query which the model uses to make a prediction.
query_future: np.ndarray, shape[prediction horizon length]
The ground truth for the forecast.
pred: np.ndarray, shape[prediction horizon length, n_quantiles]
The prediction of the model.
quantiles: List[str], shape[n_quantiles,]
"""
assert len(pred) == len(
query_future
), f"len pred is {len(pred)}, len query_future is {len(query_future)}"
query = np.concatenate([query_past, query_future])
sub.plot(query, label="gt")
sub.axvline(len(query_past) - 1, color="r") # end of train dataset
pred_time = np.arange(len(query_past), len(query_past) + len(pred))
cmap = plt.get_cmap("Oranges")
# Plot the shapes for the confidence intervals, starting with the outermost
num_intervals = pred.shape[-1] // 2
for i in range(num_intervals):
lower = pred[..., i]
upper = pred[..., -(i + 1)]
ci = 100 - 2 * int(float(quantiles[i]) * 100)
cmap_index = int((cmap.N / (num_intervals + 1)) * (i + 1))
if len(pred) > 1:
sub.fill_between(
pred_time,
lower,
upper,
color=cmap(cmap_index),
label=f"{ci}% CI",
)
else:
sub.fill_between(
np.append(pred_time, pred_time[0] + 0.5),
np.repeat(lower, 2),
np.repeat(upper, 2),
color=cmap(cmap_index),
label=f"{ci}% CI",
)
if len(pred) > 1:
sub.plot(
pred_time,
pred[..., num_intervals],
color="black",
label="pred median",
)
else:
sub.plot(
np.append(pred_time, pred_time[0] + 0.5),
np.repeat(pred[..., num_intervals], 2),
color="black",
label="pred median",
)
sub.legend()
def plot_point_forecast(sub, query_past, query_future, pred):
query = np.concatenate([query_past, query_future])
sub.plot(query, label="gt")
sub.axvline(len(query_past) - 1, color="r") # end of train dataset
pred_time = np.arange(len(query_past), len(query_past) + len(pred))
sub.plot(pred_time, pred, label="pred")
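# Illustrative usage sketch (synthetic data, not from the original codebase):
# build one sample with three quantile levels and render the quantile forecast plot.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    quantiles = ["0.1", "0.5", "0.9"]
    query_past = [rng.normal(size=24)]    # context window
    query_future = [rng.normal(size=12)]  # ground-truth horizon
    # prediction per sample: [prediction horizon length, n_quantiles], sorted so
    # the lower quantile stays below the upper one
    pred = [np.sort(rng.normal(size=(12, len(quantiles))), axis=-1)]
    fig = plot_quantile_forecast(query_past, query_future, pred, quantiles)
    fig.savefig("forecast_example.png")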
|
import socket
def check(l):
    # rows and columns
    for i in range(3):
        if all(l[i][j] == 1 for j in range(3)):
            return True
        if all(l[j][i] == 1 for j in range(3)):
            return True
    # diagonals
    if all(l[i][i] == 1 for i in range(3)):
        return True
    if all(l[i][2 - i] == 1 for i in range(3)):
        return True
    return False
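# Protocol note (inferred from the loop below, illustrative only): the matching
# client connects to this host on port 8009 and exchanges moves as "row col"
# strings (each coordinate in 0-2), or the literal string "End" once a player has won.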
s = socket.socket()
host = socket.gethostname()
port = 8009
s.bind((host, port))
s.listen(1)
c, addr = s.accept()
# l = [[0 for i in range(3)] for i in range(3)]
l = [[0,0,0],[0,0,0],[0,0,0]]
print("Enter moves: row(0,2) column(0,2")
while True:
client_msg = c.recv(1024).decode()
if(client_msg == "End"):
print("Second Player looses")
break
print("First Player Move: ",client_msg)
client_msg = client_msg.split(" ")
client_msg[0] = int(client_msg[0])
client_msg[1] = int(client_msg[1])
l[client_msg[0]][client_msg[1]] = -1
server_msg = input("Second Player Move: ")
    # a list can't be sent over the socket directly, so parse the move into a new list and send only the string
server_msg1 = server_msg.split(" ")
server_msg1[0] = int(server_msg1[0])
server_msg1[1] = int(server_msg1[1])
l[server_msg1[0]][server_msg1[1]] = 1
for x in l:
print(x)
    if check(l):
print("Second Player wins")
server_msg = "End"
c.send(server_msg.encode())
break
else:
c.send(server_msg.encode())
s.close() |
import sys
import os.path
import system
import dirutils
import tempfile
import shutil
from pathlib import Path
temp_path = os.path.abspath(sys.argv[1])
directory = os.path.abspath(sys.argv[2])
csv = os.path.abspath(sys.argv[3])
exe = sys.argv[4]
# create temporary dir to run the analyzer
tmpdir_path = os.path.join(str(Path.home()),"tmp", "flawfinder-" + next(tempfile._get_candidate_names()))
shutil.copytree(directory, tmpdir_path)
print("======[FLAWFINDER]=======")
print("[CWD]:", tmpdir_path)
print("[CSV]:", csv)
print("[EXE]:", exe)
source_files = dirutils.list_files(tmpdir_path, '.c') + dirutils.list_files(tmpdir_path, '.cpp')
dirutils.file_line_error_header(csv)
dirutils.reset_file(temp_path)
for source_file in source_files:
if source_file.endswith("main.c"):
continue
if source_file.endswith("invalid_extern_1.c"):
continue
if source_file.endswith("invalid_extern.c"):
source_file = source_file + " " + os.path.join(tmpdir_path, "invalid_extern_1.c")
flawfinder = exe + " " + source_file
(output, err, exit, time) = system.system_call(flawfinder, tmpdir_path)
dirutils.tool_exec_log(temp_path, flawfinder, output, err, exit)
all_lines = output.splitlines()
lines = []
line_codes = []
collect_flag = False
for line in all_lines:
dec = line.decode("utf-8").strip()
if (collect_flag):
lines.append(dec)
if (len(dec.split(":")) >= 3):
line_codes.append(True)
else:
line_codes.append(False)
if dec == "FINAL RESULTS:":
collect_flag = True
if dec == "ANALYSIS SUMMARY:":
break
sys.stdout = open(csv, "a")
for i in range(0,len(lines)):
if (line_codes[i]):
a = lines[i].split(":")
filename = os.path.basename(a[0])
line_no = a[1]
error_message = ""
j = 2
while (j < len(a)):
error_message = error_message + ":" + a[j]
j = j + 1
j = i + 1
while (j < len(lines)):
if (not line_codes[j]):
                error_message += " " + lines[j].strip()
j = j + 1
else:
                break
print(filename, ",", line_no, ",", "\"" + error_message + "\"")
sys.stdout = sys.__stdout__
print("[CLEANUP]: removing ", tmpdir_path)
shutil.rmtree(tmpdir_path)
print("======[DONE WITH FLAWFINDER]=======")
|
'''Task 5. A string is entered. Extract all
the numbers from it and put them into a list.'''
message = input('Enter something: ').strip()
nums = []
for symbol in message:
    if symbol.isdigit():
        nums.append(symbol)
print(nums)
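# Note (illustrative alternative, not part of the original solution): the loop above
# collects single digit characters; if the task is read as extracting whole,
# possibly multi-digit numbers, a regex can be used instead:
#   import re
#   numbers = [int(n) for n in re.findall(r"\d+", message)]
#   print(numbers)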
|