# coding=utf-8
'''
Created on 2016-7-26
@author: Jennifer
Project: Writing Web test cases
'''
import unittest
from test import test_baidu
from test import test_youdao
from test import test_json
# Build the test suite
suite = unittest.TestSuite()
suite.addTest(test_baidu.BaiduTest('test_baidu'))
suite.addTest(test_youdao.YoudaoTest('test_youdao'))
suite.addTest(test_json.MyTestSuite("test_image_match_001"))
if __name__=='__main__':
    # Run the tests
runner = unittest.TextTestRunner()
    runner.run(suite)
s = 0
for i in range(3, 118):
    if i % 15 == 0:
        s = s + 15
    elif i % 5 == 0:
        s = s + 5
    elif i % 3 == 0:
        s = s + 3
    else:
        s = s + 1
print(s)
import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
import base64
st.set_page_config(page_title=None, page_icon=None, layout='wide', initial_sidebar_state='auto')
DATA_URL = (
"OVO-absolute-data.csv"
)
st.title("OVO Share of Search Dashboard")
st.sidebar.title("Choose an Option")
@st.cache(persist=True)
def load_data():
data = pd.read_csv(DATA_URL)
data['Date'] = pd.to_datetime(data['Date'])
return data
data = load_data()
DATA_URL2 = (
"OVO-relative-data.csv"
)
@st.cache(persist=True)
def load_data_two():
    data_two = pd.read_csv(DATA_URL2)
    data_two['Date'] = pd.to_datetime(data_two['Date'])
    return data_two
data_two = load_data_two()
def get_table_download_link(df, filename="absolute-trends.csv"):
    """Generates a link allowing the data in a given pandas dataframe to be downloaded
    in: dataframe, download filename
    out: href string
    """
    csv = df.to_csv(index=False)
    b64 = base64.b64encode(csv.encode()).decode()  # some strings <-> bytes conversions necessary here
    href = f'<a href="data:file/csv;base64,{b64}" download="{filename}">Download csv file</a>'
    return href
st.sidebar.markdown("### Choose View")
select = st.sidebar.selectbox('Metric', ['Absolute Trends', 'Relative Trend'], key='5')
if not st.sidebar.checkbox("Hide", False, key=5):
if select == 'Absolute Trends':
st.markdown("#### The graphs show the trend for each brand in isolation on a weekly basis over the past 5 years. Zoom into to any time period over the past 5 years by selecting a portion of the graph, then zoom back out by double-clicking.")
fig_26 = px.line(data, x="Date", y="Bulb", title='Bulb Absolute Trend')
st.plotly_chart(fig_26, use_container_width=True)
fig_27 = px.line(data, x="Date", y="Octopus", title='Octopus Absolute Trend')
st.plotly_chart(fig_27, use_container_width=True)
fig_28 = px.line(data, x="Date", y="OVO", title='OVO Absolute Trend')
st.plotly_chart(fig_28, use_container_width=True)
        fig_29 = px.line(data, x="Date", y="Good Energy", title='Good Energy Absolute Trend')
st.plotly_chart(fig_29, use_container_width=True)
fig_30 = px.line(data, x="Date", y="People's Energy", title="People's Energy Absolute Trend")
st.plotly_chart(fig_30, use_container_width=True)
st.dataframe(data)
st.markdown(get_table_download_link(data), unsafe_allow_html=True)
if select == 'Relative Trend':
st.markdown("#### The graph shows estimated weekly search volume by brand - by combining a relative Google Trends score over the past 5 years with an estimated search volume for each brand. Zoom into to any time period over the past 5 years by selecting a portion of the graph, then zoom back out by double-clicking.")
fig_31 = px.line(data_two, x="Date", y="Estimated Weekly Search Volume", color="Brand", title='Relative Trends by Brand')
st.plotly_chart(fig_31, use_container_width=True)
st.dataframe(data_two)
        st.markdown(get_table_download_link(data_two, "relative-trends.csv"), unsafe_allow_html=True)
import FWCore.ParameterSet.Config as cms
from RecoHI.HiEgammaAlgos.HiIsolationCommonParameters_cff import *
# The producers below differ only in mode ("noBackgroundSubtracted" vs
# "BackgroundSubtracted"), iso ("Cx" vs "Rx") and the cone size x, so the
# variants are derived from isoC1 with clone() instead of repeating the
# full parameter list.
isoC1 = cms.EDProducer("HiEgammaIsolationProducer",
    isolationInputParameters,
    mode = cms.string("noBackgroundSubtracted"),
    iso = cms.string("Cx"),
    x = cms.double(1),
    y = cms.double(0),
)
isoC2 = isoC1.clone(x = cms.double(2))
isoC3 = isoC1.clone(x = cms.double(3))
isoC4 = isoC1.clone(x = cms.double(4))
isoC5 = isoC1.clone(x = cms.double(5))
isoCC1 = isoC1.clone(mode = cms.string("BackgroundSubtracted"))
isoCC2 = isoCC1.clone(x = cms.double(2))
isoCC3 = isoCC1.clone(x = cms.double(3))
isoCC4 = isoCC1.clone(x = cms.double(4))
isoCC5 = isoCC1.clone(x = cms.double(5))
isoR1 = isoC1.clone(iso = cms.string("Rx"))
isoR2 = isoR1.clone(x = cms.double(2))
isoR3 = isoR1.clone(x = cms.double(3))
isoR4 = isoR1.clone(x = cms.double(4))
isoR5 = isoR1.clone(x = cms.double(5))
isoCR1 = isoR1.clone(mode = cms.string("BackgroundSubtracted"))
isoCR2 = isoCR1.clone(x = cms.double(2))
isoCR3 = isoCR1.clone(x = cms.double(3))
isoCR4 = isoCR1.clone(x = cms.double(4))
isoCR5 = isoCR1.clone(x = cms.double(5))
hiEcalIsolation = cms.Sequence(isoC1+isoC2+isoC3+isoC4+isoC5)
hiEcalIsolationBckSubtracted = cms.Sequence(isoCC1+isoCC2+isoCC3+isoCC4+isoCC5)
hiHcalIsolation = cms.Sequence(isoR1+isoR2+isoR3+isoR4+isoR5)
hiHcalIsolationBckSubtracted = cms.Sequence(isoCR1+isoCR2+isoCR3+isoCR4+isoCR5)
hiCaloIsolation = cms.Sequence(hiEcalIsolation+hiHcalIsolation)
hiCaloIsolationBckSubtracted = cms.Sequence(hiEcalIsolationBckSubtracted+hiHcalIsolationBckSubtracted)
hiCaloIsolationAll = cms.Sequence(hiCaloIsolation+hiCaloIsolationBckSubtracted)
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
try:
url = "http://suninjuly.github.io/registration1.html"
browser = webdriver.Firefox()
browser.get(url)
    # Fill out the fields (Selenium 4 removed the find_element_by_* helpers,
    # so all lookups use find_element with By locators)
    first_name = browser.find_element(By.TAG_NAME, "input")
    first_name.send_keys("Ivan")
    last_name = browser.find_element(By.CSS_SELECTOR, "input.second")
    last_name.send_keys("Petrov")
    email = browser.find_element(By.CLASS_NAME, "third")
    email.send_keys("test@test.com")
    phone = browser.find_element(By.XPATH, "//label[text()='Phone:']/following-sibling::input")
    phone.send_keys("+777777777777")
    address = browser.find_element(By.XPATH, "//input[@placeholder='Input your address:']")
    address.send_keys("Russian, SP, Nevskiy av.")
    # Form submission
    button = browser.find_element(By.CSS_SELECTOR, "button.btn")
    button.click()
    time.sleep(1)
    welcome_text_elt = browser.find_element(By.TAG_NAME, "h1")
welcome_text = welcome_text_elt.text
assert "Congratulations! You have successfully registered!" == welcome_text
finally:
time.sleep(10)
browser.quit()
"""
Your task is to create function isDivideBy
(or is_divide_by) to check if an integer
number is divisible by each of two arguments.
A few cases:
(-12, 2, -6) -> true
(-12, 2, -5) -> false
(45, 1, 6) -> false
(45, 5, 15) -> true
(4, 1, 4) -> true
(15, -5, 3) -> true
"""
def is_divide_by(number, a, b):
return number % a == 0 and number % b == 0
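# Quick sanity checks against the cases documented in the docstring above
assert is_divide_by(-12, 2, -6)
assert not is_divide_by(-12, 2, -5)
assert not is_divide_by(45, 1, 6)
assert is_divide_by(45, 5, 15)
assert is_divide_by(4, 1, 4)
assert is_divide_by(15, -5, 3)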
print("Tests:")
print(is_divide_by(8, 2, 4))
print(is_divide_by(12, -3, 4))
print(is_divide_by(8, 3, 4))
print(is_divide_by(48, 2, -5))
print(is_divide_by(-100, -25, 10))
print(is_divide_by(10000, 5, -3))
print(is_divide_by(4, 4, 2))
print(is_divide_by(5, 2, 3))
print(is_divide_by(-96, 25, 17))
print(is_divide_by(33, 1, 33))
from myblog.models import BlogPost
from django.contrib import admin
class BlogAdmin(admin.ModelAdmin):
list_display=['title','timestamp','blog_type']
fieldsets=[
(None,{'fields':['title','timestamp','blog_type']}),
(None,{'fields':['body']}),
]
admin.site.register(BlogPost, BlogAdmin)
import gdalnumeric
# name of our source image
src = "FalseColor.tif"
# load the source image into an array
arr = gdalnumeric.LoadFile(src)
print(arr.flat)
# swap bands 1 and 2 for a natural color image.
# We will use numpy "advanced slicing" to reorder the bands.
# Save the reordered bands, using the source image as the prototype
gdalnumeric.SaveArray(arr[[1, 0, 2], :], "swap.tif", format="GTiff", prototype=src)
# Read integers until the running total reaches zero, then print the sum of their squares.
# Sample input: 1, -3, 5, -6, -10, 13
res = 0
sum_sq = 0
while True:
n = int(input())
res += n
sum_sq += n ** 2
if res == 0:
break
print(sum_sq)
"""
Main entry point
"""
from pyramid.config import Configurator
def main(global_config, **settings):
"""Basic settings, including route prefix and database access"""
config = Configurator(settings=settings)
config.route_prefix = 'v1'
config.include('cornice')
config.include('builddb_rest.couch_db')
config.include('builddb_rest.proddata')
config.include('builddb_rest.build_repos')
config.scan()
return config.make_wsgi_app()
users = [{"name": "aptrinsic_id",
"type": "varchar(256)"},
{"name": "identify_id",
"type": "varchar(256)"},
{"name": "type",
"type": "varchar(256)"},
{"name": "gender",
"type": "varchar(256)"},
{"name": "email",
"type": "varchar(256)"},
{"name": "first_name",
"type": "varchar(256)"},
{"name": "last_name",
"type": "varchar(256)"},
{"name": "last_seen_date",
"type": "timestamp"},
{"name": "sign_up_date",
"type": "timestamp"},
{"name": "first_visit_date",
"type": "timestamp"},
{"name": "title",
"type": "varchar(256)"},
{"name": "phone",
"type": "varchar(256)"},
{"name": "score",
"type": "int4"},
{"name": "role",
"type": "varchar(256)"},
{"name": "subscription_id",
"type": "varchar(256)"},
{"name": "account_id",
"type": "varchar(256)"},
{"name": "number_of_visits",
"type": "int4"},
{"name": "location_country_name",
"type": "varchar(256)"},
{"name": "location_country_code",
"type": "varchar(256)"},
{"name": "location_state_name",
"type": "varchar(256)"},
{"name": "location_state_code",
"type": "varchar(256)"},
{"name": "location_city",
"type": "varchar(256)"},
{"name": "location_street",
"type": "varchar(256)"},
{"name": "location_postal_code",
"type": "varchar(256)"},
{"name": "location_continent",
"type": "varchar(256)"},
{"name": "location_region_name",
"type": "varchar(256)"},
{"name": "location_time_zone",
"type": "varchar(256)"},
{"name": "location_coordinates_latitude",
"type": "decimal(9,6)"},
{"name": "location_coordinates_longitude",
"type": "decimal(9,6)"},
{"name": "property_keys",
"type": "varchar(512)"},
{"name": "create_date",
"type": "timestamp"},
{"name": "last_modified_date",
"type": "timestamp"},
{"name": "custom_attributes",
"type": "varchar(512)"},
{"name": "global_unsubscribe",
"type": "boolean"}]
# -*- coding: utf-8 -*-
def IOU(Reframe,GTframe):
"""
    Compute the IoU of two rectangles; each argument gives a rectangle's
    diagonal corners as (x1, y1, x2, y2) coordinates.
"""
x1 = Reframe[0]
y1 = Reframe[1]
width1 = Reframe[2]-Reframe[0]
height1 = Reframe[3]-Reframe[1]
x2 = GTframe[0]
y2 = GTframe[1]
width2 = GTframe[2]-GTframe[0]
height2 = GTframe[3]-GTframe[1]
endx = max(x1+width1,x2+width2)
startx = min(x1,x2)
width = width1+width2-(endx-startx)
endy = max(y1+height1,y2+height2)
starty = min(y1,y2)
height = height1+height2-(endy-starty)
    if width <= 0 or height <= 0:
        Acc_area = 0  # no overlap, so IoU is 0
        Prec_area = 0
    else:
        Area = width * height  # intersection area of the two rectangles
        Area1 = width1 * height1  # area of the reference box
        Area2 = width2 * height2  # area of the target box
        Acc_area = Area * 1. / (Area1 + Area2 - Area)  # IoU formula
        Prec_area = Area * 1. / Area2  # modified IoU: precision w.r.t. the target box
    # return IOU
    return Acc_area, Prec_area
if __name__ == "__main__":
Reframe1 = [0, 0, 10, 10]
Reframe2 = [10, 10, 100, 100]
GTframe = [5, 5, 15, 15]
IOU1 = IOU(Reframe1, GTframe)
IOU2 = IOU(Reframe2, GTframe)
print(IOU1)
print(IOU2)
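    # Expected output (worked by hand): [0,0,10,10] and [5,5,15,15] intersect
    # in a 5x5 square, so IOU1 = (25/175, 25/100) ≈ (0.143, 0.25);
    # [10,10,100,100] and [5,5,15,15] share the same 5x5 corner square, so
    # IOU2 = (25/8175, 25/100) ≈ (0.003, 0.25).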
    # Concatenate the two result tuples (IOU returns a tuple, so + joins them)
    IOU_all = IOU1 + IOU2
    print(IOU_all)
#Question 7
#Write a Python program to remove duplicates from a list.
my_list = ["a","list","of","duplicates","a",123,56,123,"of",123]
no_dup = []
for item in my_list:
if item not in no_dup:
no_dup.append(item)
print("List without duplicates", no_dup)
#Alternate
my_list = list(set(my_list))
print("List w/o dupes",my_list)
#Question 8
#Write a Python program to check a list is empty or not.
my_list = []
my_list1 = ['a']
if len(my_list) == 0:
print("Empty")
else:
print("Non-Empty")
#Alternate:
if not my_list:
print("Empty")
#Question 9
#Write a Python program to clone or copy a list.
alist = [1,2,3,4,5,6]
blist = alist        # Not a copy: blist is just another name for the same list
clist = alist.copy() # Shallow copy: a new list with the same elements
print(alist)
print(blist)
print(clist)
blist.pop()
print(alist)
print(blist)
print(clist)
#Question 10
#Write a Python program to find the list of words that are
#longer than n from a given list of words.
words = ["athleisure","squirm","extrapolate","excruciating","fun","enjoy","freedom"]
n = 5
satisfy = []
for item in words:
if len(item) > n:
satisfy.append(item)
print("Words that satisfy the condition:", satisfy)
#Question 11
#Write a Python function that takes two lists
#and returns True if they have at least one common member.
def list_match(list1,list2):
count = 0
for item in list1:
if item in list2:
count = count +1
if count > 0:
print("Both have at least one common entry")
else:
print("Nothing common found")
list_match(my_list,my_list1)
list_match(alist,blist)
#Question 12
#Write a Python program to print a specified list after removing the 0th, 4th and 5th elements.
#Sample List : ['Red', 'Green', 'White', 'Black', 'Pink', 'Yellow']
#Expected Output : ['Green', 'White', 'Black']
colors = ['Red', 'Green', 'White', 'Black', 'Pink', 'Yellow']
colors.pop(0)
colors.pop(3)
colors.pop(3)
print(colors)
#Alternate
colors = ['Red', 'Green', 'White', 'Black', 'Pink', 'Yellow']
colors = [x for (i,x) in enumerate(colors) if i not in (0,4,5)]
print(colors)
from django.urls import path
from first_app import views
app_name = 'first_app'
urlpatterns=[
## calling a class based view
path('', views.IndexView.as_view(), name='index'),
path('add_musician/', views.AddMusician.as_view(), name='add_musician'),
path('musician_detail/<pk>/', views.MusicianDetail.as_view(), name='musician_detail'),
path('musician_update/<pk>/', views.MusicianUpdate.as_view(), name='musician_update'),
path('musician_delete/<pk>/', views.MusicianDelete.as_view(), name='musician_delete'),
## old def based view
path('add_album/', views.album_form, name='album_form'),
    # NOTE: 'add_musician/' duplicates the class-based route above, which is
    # matched first, so this function-based view is never reached.
    path('add_musician/', views.musician_form, name='musician_form'),
path('album_list/<int:artist_id>/', views.album_list, name='album_list'),
path('edit_album/<int:album_id>/', views.edit_album, name='edit_album'),
path('delete_album/<int:album_id>/', views.delete_album, name='delete_album'),
path('delete_artist/<int:artist_id>/', views.delete_artist, name='delete_artist'),
path('all_albums/', views.all_albums, name='all_albums'),
path('all_musicians/', views.all_musicians, name='all_musicians'),
]
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 14 22:14:12 2019
@author: Ananye
"""
import numpy as np
import random
import matplotlib.pyplot as plt
import pycuda.driver as cuda
import pycuda.autoinit
from pycuda import compiler, gpuarray, tools
import time
"""
###############################################################################
define kernel codes and how to call them
###############################################################################
"""
class cuda_Transpose:
"""
Class of functions pertaining to computing a matrix transpose
using for loops and parallelized pyCuda code.
"""
def __init__(self):
        # Kernel code:
self.transpose_kernel_code = """
__global__ void parTranspose(float *idata, float *odata, int cols, int rows) {
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if ((ix < cols) && (iy < rows)) {
odata[iy*cols + ix] = idata[ix*rows + iy];
}
}
"""
def transpose_parallel(self, a_cpu):
self.x = a_cpu
x_gpu = gpuarray.to_gpu(self.x)
self.y_gpu = gpuarray.empty((self.x.shape[1], self.x.shape[0]), np.float32)
M = self.x.shape[0]
N = self.x.shape[1]
mod = compiler.SourceModule(self.transpose_kernel_code)
timing = []
cTranspose = mod.get_function("parTranspose")
cTranspose(
x_gpu,
self.y_gpu,
np.int32(self.x.shape[0]),
np.int32(self.x.shape[1]),
block = (32, 32, 1),
grid = (np.int(np.ceil(np.float32(M)/np.float32(32))), np.int(np.ceil(np.float32(N)/np.float32(32))), 1)
)
return self.y_gpu.get()
class gpuMul:
def __init__(self):
self.mul_kernel_code = """
#define BLOCK_SIZE 16
__global__ void kernel_MatMul(float *A, int rA, int cA, float *B, int rB, int cB, float *C) {
int bIDx = blockIdx.x, bIDy = blockIdx.y, tIDx = threadIdx.x, tIDy = threadIdx.y;
int row_ = bIDy * BLOCK_SIZE + tIDy;
int col_ = bIDx * BLOCK_SIZE + tIDx;
__shared__ float A_sub[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float B_sub[BLOCK_SIZE][BLOCK_SIZE];
float C_sub = 0.0;
for (int m = 0; m < (BLOCK_SIZE + cA - 1) / BLOCK_SIZE; m++) {
if (m * BLOCK_SIZE + tIDx < cA && row_ < rA) {
A_sub[tIDy][tIDx] = A[row_ * cA + m * BLOCK_SIZE + tIDx];
}
else {
A_sub[tIDy][tIDx] = 0.0;
}
if (m * BLOCK_SIZE + tIDy < rB && col_ < cB) {
B_sub[tIDy][tIDx] = B[(m * BLOCK_SIZE + tIDy) * cB + col_];
}
else {
B_sub[tIDy][tIDx] = 0.0;
}
__syncthreads();
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; k++) {
C_sub += A_sub[tIDy][k] * B_sub[k][tIDx];
}
__syncthreads();
}
if (row_ < rA && col_ < cB) {
C[cB * BLOCK_SIZE * bIDy + BLOCK_SIZE * bIDx + cB * tIDy + tIDx] = C_sub;
}
}
"""
def MatMul(self, A, rA, cA, B, rB, cB):
self.C_gpu = gpuarray.empty((A.shape[0], B.shape[1]), dtype = np.float32)
self.A_gpu = gpuarray.to_gpu(A)
self.B_gpu = gpuarray.to_gpu(B)
mod = compiler.SourceModule(self.mul_kernel_code)
dev_mul = mod.get_function("kernel_MatMul")
grid_x = np.int(np.ceil(cB*1.0/16))
grid_y = np.int(np.ceil(rA*1.0/16))
dev_mul(
self.A_gpu, rA, cA,
            self.B_gpu, rB, cB,
self.C_gpu,
block = (16, 16, 1),
grid = (grid_x, grid_y, 1)
)
return self.C_gpu.get()
# computeParams.compute_params
class computeParams:
def __init__(self):
self.compute_params_kernel_code = """
__global__ void kernel_compute_params(float *device_A, int P, int iter, float *device_sine, float *device_cosine, int *device_IterBlockToElem) {
/*1 Block, P/2 threads: threadID t handles params for its alloted pair (for a particular device_iter)*/
# define EPSILON 1e-4
int localID = threadIdx.x;
int k, l;
float elem, y, d, r, c, s; //,t
k = device_IterBlockToElem[iter*P+localID*2]; //row
l = device_IterBlockToElem[iter*P+localID*2+1]; //col
elem = device_A[k * P + l];
__syncthreads();
y = (device_A[l * P + l] - device_A[k * P + k]) * 0.5;
__syncthreads();
d = fabs(y) + sqrt(elem * elem + y * y);
r = sqrt(elem * elem + d * d);
if (r < EPSILON) {
c = 1.0;
s = 0.0;
}
else {
c = d / r;
s = y / fabs(y) * elem / r; //t=y/fabs(y)*p*p/d;
}
__syncthreads();
if (k<P && l<P){
device_cosine[k * P + l] = c;
device_sine[k * P + l] = s;
}
}
"""
def compute_params(self, A, P, itr, iterblock):
self.A_gpu = gpuarray.to_gpu(A)
self.iterBlock_device = gpuarray.to_gpu(iterblock)
self.dev_sin = gpuarray.empty((P, P), np.float32)
self.dev_cos = gpuarray.empty((P, P), np.float32)
if (P % 2 == 0):
grid_size = np.int(P / 2)
else:
grid_size = np.int(P / 2 + 1)
mod = compiler.SourceModule(self.compute_params_kernel_code)
compute_params_code = mod.get_function("kernel_compute_params")
compute_params_code(
self.A_gpu, P, itr,
self.dev_sin,
self.dev_cos,
self.iterBlock_device,
block = (grid_size, grid_size, 1))
# block size?
dc = self.dev_cos.get()
ds = self.dev_sin.get()
self.A_gpu.get()
self.iterBlock_device.get()
return ds, dc
class dimUpdate:
def __init__(self,P):
self.row_update_kernel_code = """
__global__ void kernel_row_update(int iter, float *device_A, float *device_X, int P, float *device_sine, float *device_cosine, int *device_IterBlockToElem) {
int localID = threadIdx.x;
int blockID = blockIdx.x;
/*Based on blockID [total blocks=P/2], compute the corresponding two rows: p,q for device_iter*/
__shared__ int row_pair[2];
__shared__ float params[2]; //[sin_, cos_]
if (localID == 0) //to minimize global memory access latency at the cost of divergence
{
row_pair[0] = device_IterBlockToElem[iter*P+blockID * 2];
row_pair[1] = device_IterBlockToElem[iter*P+blockID * 2 + 1];
params[0] = device_sine[row_pair[0] * P + row_pair[1]];
params[1] = device_cosine[row_pair[0] * P + row_pair[1]];
}
__syncthreads(); //all "P" threads in the block are synchronized and have access to row_pair(k,l) and params
//CHECKPOINT: Can you reduce shared-memory bank conflicts here?
int k = row_pair[0], l = row_pair[1];
float sin_ = params[0], cos_ = params[1], elem_k=device_A[k*P+localID], elem_l=device_A[l * P + localID];
/*Concurrent modifications to all row pairs(k,l) [different blocks]*/
/*Concurrent modifications to different-column elements of a row pair: ["P" threads of the block]*/
/*X is col-major, i.e. write in X-transpose*/
device_X[localID * P + k] = elem_k * cos_ - elem_l * sin_;
device_X[localID * P + l] = elem_k * sin_ + elem_l * cos_;
}
"""
self.col_update_kernel_code = """
__global__ void kernel_col_update(int iter, float *device_A, float *device_X, int P, float *device_eigenvectors, float *device_sine, float *device_cosine, int *device_IterBlockToElem) {
int localID = threadIdx.x;
int blockID = blockIdx.x;
/*Based on blockID [total blocks=P/2], compute the corresponding two cols: p,q for device_iter*/
__shared__ int col_pair[2];
__shared__ float params[2]; //[sin_, cos_]
if (localID == 0) //to minimize global memory access latency at the cost of divergence
{
col_pair[0] = device_IterBlockToElem[iter*P+blockID * 2];
col_pair[1] = device_IterBlockToElem[iter*P+blockID * 2 + 1];
params[0] = device_sine[col_pair[0] * P + col_pair[1]];
params[1] = device_cosine[col_pair[0] * P + col_pair[1]];
}
__syncthreads(); //all "P" threads in the block are synchronized and have access to row_pair(k,l) and params
//CHECKPOINT: Can you reduce shared-memory bank conflicts here? Is this better than computing pair(p,q) all over again
int k = col_pair[0], l = col_pair[1];
float sin_ = params[0], cos_ = params[1];
/*Concurrent modifications to all row pairs(k,l) [different blocks]*/
/*Concurrent modifications to different-column elements of a row pair: ["P" threads of the block]*/
float new_eigen_k, new_eigen_l;
/* col-wise access (inefficient):*/
//device_A[localID * P + k] = device_X[k * P + localID] * cos_ - device_X[l * P + localID] * sin_;
//device_A[localID * P + l] = device_X[k * P + localID] * sin_ + device_X[l * P + localID] * cos_;
//new_eigen_k = device_eigenvectors[localID * P + k]*cos_ - device_eigenvectors[localID*P+l]*sin_;
//new_eigen_l = device_eigenvectors[localID * P+k]*sin_ + device_eigenvectors[localID*P+l]*cos_;
//device_eigenvectors[localID * P + k] = new_eigen_k;
//device_eigenvectors[localID * P+l] = new_eigen_l;
/*row-wise access (efficient):*/
int kp = k*P + localID, lp = l *P+localID;
device_A[kp] = device_X[kp] * cos_ - device_X[lp] * sin_;
__syncthreads();
device_A[lp] = device_X[kp] * sin_ + device_X[lp] * cos_;
__syncthreads();
new_eigen_k = device_eigenvectors[kp]*cos_ - device_eigenvectors[lp]*sin_;
__syncthreads();
new_eigen_l = device_eigenvectors[kp]*sin_ + device_eigenvectors[lp]*cos_;
__syncthreads();
device_eigenvectors[kp] = new_eigen_k;
device_eigenvectors[lp] = new_eigen_l;
__syncthreads();
}
"""
E = np.diag(np.ones((P), dtype = np.float32))
self.device_eigenvectors = gpuarray.to_gpu(E)
def row_update(self, itr, A, X_device, P, sin, cos, iterBlock):
self.A_device = gpuarray.to_gpu(A)
self.X_device = gpuarray.to_gpu(X_device)
self.dev_sin = gpuarray.to_gpu(sin)
self.dev_cos = gpuarray.to_gpu(cos)
self.iterBlock_device = gpuarray.to_gpu(iterBlock)
mod1 = compiler.SourceModule(self.row_update_kernel_code)
row_update_code = mod1.get_function("kernel_row_update")
if (P % 2 == 0):
grid_size = P / 2
else:
grid_size = P / 2 + 1
row_update_code(
itr, self.A_device,
self.X_device, P,
self.dev_sin, self.dev_cos,
self.iterBlock_device,
block = (np.int(P), np.int(P), 1),
grid = (np.int(grid_size), np.int(grid_size),1)
)
return self.X_device.get()
def col_update(self, itr, A, X_device, P, sin, cos, iterBlock):
self.A_device = gpuarray.to_gpu(A)
self.X_device = gpuarray.to_gpu(X_device)
self.dev_sin = gpuarray.to_gpu(sin)
self.dev_cos = gpuarray.to_gpu(cos)
self.iterBlock_device = gpuarray.to_gpu(iterBlock)
if (P % 2 == 0):
grid_size = P / 2
else:
grid_size = P / 2 + 1
mod2 = compiler.SourceModule(self.col_update_kernel_code)
col_update_code = mod2.get_function("kernel_col_update")
col_update_code(
itr, self.A_device,
self.X_device, P,
self.device_eigenvectors,
self.dev_sin, self.dev_cos,
self.iterBlock_device,
block = (np.int(P), np.int(P), 1),
grid = (np.int(grid_size), np.int(grid_size),1)
)
return self.device_eigenvectors.get()
"""
###############################################################################
On to PCA and SVD
###############################################################################
"""
def cudaSVD(N, P, D):
# Perform SVD for D_T
# Get eigen values and eigen vectors for D_T*D
chess_params_kernel_code = """
__device__ void chess_tourney_params(int P, int *row_pair, int iter) {
//NOTE: here, row_pair is thread-local
int localID = threadIdx.x;
int index1, index2;
index1 = (localID + iter) % (P - 1);
if (localID != 0) {
index2 = (P - localID + iter - 1) % (P - 1);
}
else {
index2 = P - 1;
}
row_pair[0] = min(index1, index2);
row_pair[1] = max(index1, index2);
}
__global__ void kernel_compute_all_chess_params(int P, int *device_IterBlockToElem) {
int blockID = blockIdx.x;
//each ONE of the P-1 blocks is responsible for computing chess-tourney parameters for ONE of the P-1 iterations
int index = blockID*P + threadIdx.x*2;
int *row_pair = (int *) malloc(sizeof(int)*2);
chess_tourney_params(P, row_pair, blockID);
device_IterBlockToElem[index] = row_pair[0]; //|=(P-1)X(P/2*2)
device_IterBlockToElem[index+1] = row_pair[1];
free(row_pair);
}
"""
###########################################################################
# STREAM PARALLELIZATION
t = cuda_Transpose()
g = gpuMul()
iterBlock_device = gpuarray.empty(((P-1), np.int(np.ceil(P/2)), 2), np.int32)
mod = compiler.SourceModule(chess_params_kernel_code)
dev_chess = mod.get_function("kernel_compute_all_chess_params")
dev_chess(np.int32(P), iterBlock_device, block = (np.int(P-1), np.int(np.ceil(P/2)), 1),
grid = (np.int(P-1), np.int(P-1),1))
iterBlock = iterBlock_device.get()
# cudaAsynccopy something
D_T = t.transpose_parallel(D)
print("D",D)
print("DT",D_T)
###########################################################################
A = g.MatMul(D_T, np.int32(P), np.int32(N), D, np.int32(N), np.int32(P))
print(A)
eigenvectors = np.ones((P, P), np.float32)
counter = 0
    MAX_SWEEPS = 30
    EPSILON = 1e-4
    THRESHOLD = 1e-4
    MAX_BLOCK_SIZE = 1024
    MAX_ITER = 10000000
    MULTIPLY_BLOCK_SIZE = 16
itr = 0
cP = computeParams()
dU = dimUpdate(P)
X = np.zeros((P,P), dtype = np.float32)
while(itr < P - 1):
# Compute rotation parameters: sine and cosine
# for all (p, q), q>p
sin, cos = cP.compute_params(A, np.int32(P), np.int32(itr), iterBlock)
print(sin,cos)
# row update
X = dU.row_update(np.int32(itr), np.float32(A), np.float32(X),
np.int32(P), np.float32(sin), np.float32(cos), iterBlock)
print("X",X)
# col update
eigenvectors = dU.col_update(np.int32(itr), np.float32(A), np.float32(X),
np.int32(P), np.float32(sin), np.float32(cos), iterBlock)
print("Eigenvectors",eigenvectors)
itr += 1
print(itr)
eigenvectors_T = t.transpose_parallel(eigenvectors)
    eigenvalues = np.ones(P)
    for i in range(P):
        eigenvalues[i] = A[i][i]  # A is a 2D (P x P) array
    # sort eigenvalues in descending order along with corresponding indices
    new_indices = np.argsort(eigenvalues)[::-1]
    eigenvalues = eigenvalues[new_indices]
    # compute sigma
    sum_variance = np.sum(eigenvalues)
    SIGMA = np.sqrt(eigenvalues)
    # compute U from the accumulated rotation matrix, reordered by descending
    # eigenvalue (U and its source matrix were left undefined in the original)
    U = np.zeros((P, P), np.float32)
    for i in range(P):
        for j in range(P):
            U[i][j] = eigenvectors[i][new_indices[j]]
# calculate V_T
inv_SIGMA = np.ones((N, P), np.float32)
for i in range(P):
inv_SIGMA[i][i] = 1.0 / SIGMA[i]
U_T = t.transpose_parallel(U)
prod = g.MatMul(inv_SIGMA, N, P, U_T, P, P)
V_T = g.MatMul(prod, N, P, D_T, P, N)
return SIGMA, U, V_T
if __name__ =='__main__':
    np.random.seed(1)  # seed numpy's RNG (random.seed does not affect np.random)
A = np.random.randint(0,9,(3,3)).astype(np.float32)
#initialize A
#A = np.array([[4,0],[3,-5]])
#A = A.astype(np.float32)
    #calculate covariance matrix of A for numpy verification
A1 = np.dot(A.T,A)
    #Jacobi method for SVD (GPU implementation)
s, u, vt = cudaSVD(A.shape[0],A.shape[1],A)
print(A.shape[1])
#numpy verification
s1,v1 = np.linalg.eig(A1)
#print results
print("Serial Eigenvalues: \n", s)
print("Numpy Eigenvalues: \n",np.sqrt(s1))
print("Serial Eigenvectors: \n", u)
print("Numpy Eigenvectors: \n", v1) |
from rest_framework import serializers
from users.models import User
from courses.models import Course, Membership, Assignment, Environment, CourseCreationRequest
class CourseMembersSerializer(serializers.ModelSerializer):
email = serializers.EmailField(source='user.email')
user_id = serializers.IntegerField(source='user.id', read_only=True)
class Meta:
model = Membership
fields = ('user_id', 'email', 'role')
def create(self, validated_data):
course = self.context.get('course', None)
email = validated_data['user']['email']
user = User.objects.get(email=email)
role = validated_data['role']
return course.add_member(user, role)
class CourseSerializer(serializers.ModelSerializer):
class Meta:
model = Course
fields = ('id', 'title', 'description')
class CourseCreationRequestSerializer(serializers.ModelSerializer):
class Meta:
model = CourseCreationRequest
fields = ('id', 'title', 'description', 'status')
read_only_fields = ('id', 'status')
def create(self, validated_data):
user = self.context.get('user', None)
return CourseCreationRequest.objects.create(user=user, **validated_data)
class EnvironmentSerializer(serializers.ModelSerializer):
course = serializers.ReadOnlyField(source='course.id')
class Meta:
model = Environment
fields = ('id', 'title', 'course', 'tag', 'status', 'dockerfile_content')
read_only_fields = ('id', 'status')
def create(self, validated_data):
course = self.context.get('course', None)
return Environment.objects.create(course=course, **validated_data)
def update(self, instance, validated_data):
instance.title = validated_data.get('title', instance.title)
instance.tag = validated_data.get('tag', instance.tag)
instance.dockerfile_content = validated_data.get('dockerfile_content', instance.dockerfile_content)
instance.save(update_fields=validated_data.keys())
return instance
class AssignmentSerializer(serializers.ModelSerializer):
course_id = serializers.ReadOnlyField(source='course.id')
class Meta:
model = Assignment
fields = ('id', 'environment', 'course_id', 'title', 'description')
def update(self, instance, validated_data):
instance.title = validated_data.get('title', instance.title)
instance.description = validated_data.get('description', instance.description)
instance.save()
return instance
def create(self, validated_data):
course = self.context.get('course', None)
return course.add_assignment(**validated_data)
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from account.decorators import manager_required
from care_point.forms import ContractForm, WorksheetForm
from care_point.models import Contract, Caregiver
from care_point.view import caregiver
@manager_required
def contract(request):
contracts = Contract.objects.all()
return render(request, 'care_point/contract/contract.html', {'contracts': contracts})
@manager_required
def contract_update(request, contract_id):
c = get_object_or_404(Contract, pk=contract_id)
form = ContractForm(data=request.POST or None, instance=c)
if request.method == 'POST':
if form.is_valid():
new = form.save(commit=False)
new.save()
return redirect('care_point:contract')
return render(request, 'care_point/contract/contract_update.html', {'form': form})
@manager_required
def contract_details(request, contract_id):
contract = get_object_or_404(Contract, pk=contract_id)
return render(request, 'care_point/contract/contract_details.html', {'contract': contract})
@manager_required
def contract_delete(request, contract_id):
contract = get_object_or_404(Contract, pk=contract_id)
contract.delete()
return redirect('care_point:contract')
@manager_required
def contract_add(request):
if request.method == 'POST':
form = ContractForm(data=request.POST)
if form.is_valid():
new = form.save(commit=False)
new.save()
return redirect('care_point:contract')
else:
form = ContractForm()
return render(request, 'care_point/contract/contract_add.html', {'form': form})
@manager_required
def contract_add_caregiver(request):
if request.method == 'POST':
form = ContractForm(data=request.POST)
if form.is_valid():
new = form.save(commit=False)
new.save()
return redirect('care_point:caregiver')
else:
form = ContractForm()
return render(request, 'care_point/contract/contract_add.html', {'form': form})
@manager_required
def next_contract(request, caregiver_id):
if request.method == 'POST':
form = ContractForm(data=request.POST)
if form.is_valid():
new = form.save(commit=False)
caregiver = get_object_or_404(Caregiver, pk=caregiver_id)
new.save()
caregiver.contract_set.add(new)
return redirect('care_point:caregiver_details', caregiver_id=caregiver_id)
return render(request, 'care_point/contract/contract_add.html', {'form': form})
else:
form = ContractForm()
return render(request, 'care_point/contract/contract_add.html', {'form': form})
#
# @login_required
# def new_worksheet_caregiver(request, caregiver_id):
# if request.method == 'POST':
# form = WorksheetForm(data=request.POST)
# if form.is_valid():
# new = form.save(commit=False)
# new.save()
# caregiver_id_new = new.caregiver.id
# return redirect('care_point:caregiver')
# # return render(request, 'care_point/caregiver/caregiver_details.html', {'caregiver_id': caregiver_id})
# else:
# form = WorksheetForm()
# return render(request, 'care_point/worksheet/worksheet_add.html', {'form': form})
import json
import os
with open("stars.json") as g:
stars = json.load(g)
path = os.getcwd() + '/star_files/'
for i in range(len(stars)):
with open(path + 'star_'+ str(i) + '.json', 'w') as h:
json.dump(stars[i], h, indent=4)
# -*- coding:utf8 -*-
from MysqlHelper import MysqlHelper
sql='delete from users where id=9'
h=MysqlHelper('localhost',3306,'guanxi','root','Nmamtf@013')
res=h.cud(sql, [])
#res=h.cud(sql)  # doesn't work: the params argument is required
# coding=utf-8
import json
from collections import defaultdict
from elasticsearch import Elasticsearch
from elasticsearch.client import CatClient, IndicesClient, NodesClient
client = Elasticsearch('54.222.177.58:9200')
# client = Elasticsearch('54.223.226.77:9200')
cat_client = CatClient(client)
ind_client = IndicesClient(client)
node_client = NodesClient(client)
def get_indices():
result = cat_client.indices(format='json')
result = [ind['index'] for ind in result if ind['index'].startswith('can_')]
# result = [ind['index'] for ind in result if ind['index']]
return result
def get_indices_from_db():
indices = []
with open('indices.txt') as f:
for l in f:
indices.append(l.strip())
return indices
def create_index_with_alias(index):
if ind_client.exists(index):
ind_client.delete(index)
ind_client.create(index)
if index.startswith('can_'):
alias = index[4:]
ind_client.put_alias(index, alias)
def put_mapping(index, estype, mapping_file):
print('doc type: {}/{}'.format(index, estype))
with open(mapping_file) as f:
mapping = f.read()
resp = ind_client.put_mapping(estype, mapping, index=index)
print(resp)
return resp
def init_index(index):
print('start to init index: ', index)
create_index_with_alias(index)
put_mapping(index, 'job', 'job.json')
put_mapping(index, 'candidate', 'candidate.json')
put_mapping(index, 'job_candidate', 'job_candidate.json')
put_mapping(index, 'city', 'city.json')
def get_nodes():
print(node_client.info())
if __name__ == '__main__':
# indices = get_indices()
# json.dump(indices, open('indices.json', 'w'), ensure_ascii=False)
indices = get_indices_from_db()
# indices = ['can_ocean', 'can_tenant_neitui', 'can_tenant_jobs', 'can_tenant_qushixi']
for index in indices:
init_index(index)
# DESCRIPTION: This file implements an SVD-based kernel ridge regression model. For each user, the V matrix from SVD is fed to the ridge regression as features and the observed ratings as targets. The V matrix is normalized for each item. exp(2(xi.T*xj-1)) is used as the kernel (see kernel() below).
# USAGE: To train the model, run "python3 code/KRR.py -k=32", "python3 code/KRR.py -k=32 -d=1", "python3 code/KRR.py -i=2 -k=32" and "python3 code/KRR.py -i=2 -k=32 -d=1". "-i" specifies the SVD model used as the base, "-k" specifies the number of dimensions used in the SVD model and "-d" chooses the data split.
import Globals
from sklearn.kernel_ridge import KernelRidge
import numpy as np
import Initialization
import SVD
def kernel(x1,x2):
return np.exp(2*(np.dot(x1,x2)-1))
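# Note on the kernel above: the rows of V are normalized in KRR(), so
# np.dot(x1, x2) lies in [-1, 1] and the kernel value lies in [exp(-4), 1],
# peaking at 1 when the two feature vectors align.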
# count the number of observed ratings for each movie
def topRatedMovies(data):
for i in range(Globals.nItems):
count = np.count_nonzero(data[:,i])
print(count,)
# train and predict
def KRR(data,test, a=0.7):
suffix = '_fixed'+Globals.dataIdx+'.npy'
if not Globals.fixed:
suffix = '.npy'
known = data!=0
# read from previous trained result
if Globals.step == 0:
A = np.empty((Globals.nUsers,Globals.nItems))
else:
A = np.load('./log/KRR'+Globals.modelIdx+'_A_'+str(Globals.k)+suffix)
Vt = np.load('./log/RSVDF'+Globals.modelIdx+'_V_'+str(Globals.k)+suffix)
V = Vt.T
# normalize
for i in range(Globals.nItems):
V[i] /= np.linalg.norm(V[i])
# regression starts here
for i in range(Globals.step,Globals.nUsers):
known = data[i]!=0
y = data[i,known]
X = V[known]
clf = KernelRidge(alpha=a,kernel=kernel)
clf.fit(X, y)
pred = clf.predict(V)
A[i] = pred
if i%10 == 0:
print('user ',i+1)
score = SVD.evaluation2(A,test)
print('score =',score)
if i%1000 == 0:
np.save('./log/KRR'+Globals.modelIdx+'_A_'+str(Globals.k)+suffix,A)
score = SVD.evaluation2(A,test)
print('alpha =', a, 'test error =',score)
    # clip predictions to the valid rating range [1, 5]
mask = A>5
A[mask] = 5
mask = A<1
A[mask] = 1
score = SVD.evaluation2(A,test)
print('after clipping test error =',score)
return A
def chooseAlpha(data,test):
for a in np.arange(0.5,0.9,0.1):
KRR(data,test,a)
if __name__ == '__main__':
Initialization.initialization()
data, test = Initialization.readInData2(idx=Globals.dataIdx)
# choose the best alpha
if Globals.predict == 'a':
chooseAlpha(data,test)
# train and predict
else:
A = KRR(data,test)
np.save('./log/KRR'+Globals.modelIdx+'_A_'+str(Globals.k)+'_fixed'+Globals.dataIdx+'.npy',A)
import sublime
def get_plugin_settings():
setting_name = 'sublime_jedi.sublime-settings'
plugin_settings = sublime.load_settings(setting_name)
return plugin_settings
def get_settings_param(view, param_name, default=None):
plugin_settings = get_plugin_settings()
project_settings = view.settings()
return project_settings.get(
param_name,
plugin_settings.get(param_name, default)
)
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf  # TF1-style API (placeholder/session) is used below
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from base.model import *
class Agent(BaseSLModel):
def __init__(self, x_space, y_space, x_train, y_train, x_test, y_test, **options):
super(Agent, self).__init__(x_space, y_space, x_train, y_train, x_test, y_test, **options)
self._init_options(options)
self._init_input()
self._init_nn()
self._init_op()
self._init_saver()
self._init_summary_writer()
self.session.run(tf.global_variables_initializer())
def _init_input(self, *args):
self.x_input = tf.placeholder(tf.float32, [None, self.x_space])
self.y_input = tf.placeholder(tf.float32, [None, self.y_space])
def _init_nn(self, *args):
with tf.variable_scope('MLP'):
f_dense = tf.layers.dense(self.x_input, 32, tf.nn.relu)
s_dense = tf.layers.dense(f_dense, 32, tf.nn.relu)
y_predict = tf.layers.dense(s_dense, self.y_space)
self.y_predict = y_predict
def _init_op(self):
with tf.variable_scope('loss_func'):
# self.loss_func = tf.reduce_mean(tf.square(self.y_input - self.y_predict) * tf.abs(self.y_predict))
# self.loss_func = tf.reduce_mean(tf.square(self.y_input - self.y_predict) * tf.square(self.y_input))
self.loss_func = tf.losses.mean_squared_error(self.y_input, self.y_predict)
tf.summary.scalar('mse', self.loss_func)
with tf.variable_scope('optimizer'):
self.optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss_func)
def train(self):
# Get data size.
data_size = len(self.x_train)
for train_step in range(30000):
# Get mini batch.
# indices = np.random.choice(data_size, size=self.batch_size)
# x_batch = self.x_train[indices]
# y_batch = self.y_train[indices]
x_batch = self.x_train
y_batch = self.y_train
# Train op.
ops = [self.optimizer, self.loss_func]
if train_step % 500 == 0:
ops.append(self.merged_summary_op)
# Train.
results = self.session.run(ops, {
self.x_input: x_batch,
self.y_input: y_batch,
})
# Add summary.
if train_step % 500 == 0:
self.summary_writer.add_summary(results[-1], global_step=self.training_step)
# Log loss.
if train_step % 10 == 0:
self.save()
self.logger.warning('Step: {0}, Training loss: {1:.10f}'.format(train_step, results[1]))
self.evaluate()
self.training_step += 1
def predict(self, s):
y_predict = self.session.run(self.y_predict, {self.x_input: s})
return y_predict
def evaluate(self):
y_predict, loss = self.session.run([self.y_predict, self.loss_func], {
self.x_input: self.x_test,
self.y_input: self.y_test
})
self.logger.warning('Step: {0}, Testing loss: {1:.10f}'.format(self.training_step, loss))
if __name__ == '__main__':
x_train = np.linspace(-np.pi, np.pi, num=200).reshape((-1, 1)) + np.random.normal()
y_train = np.sin(x_train)
x_test = np.linspace(-np.pi, np.pi, num=50).reshape((-1, 1))
y_test = np.sin(x_test)
agent = Agent(x_train[0].shape[0],
1,
x_train,
y_train,
x_test,
y_test)
agent.train()
class Solution(object):
def numSplits(self, s):
"""
:type s: str
:rtype: int
"""
goodSplit = 0
left = [0] * 26
right = [0] * 26
        for i in range(len(s)):  # count every character's frequency on the right side
idx = ord(s[i]) - 97
right[idx] = right[idx] + 1
        for i in range(len(s)):  # split after index i: move s[i] from the right counts to the left
idx = ord(s[i]) - 97
left[idx] = left[idx] + 1
right[idx] = right[idx] - 1
distinctLeft = self.getDistinct(left) # get frequency in left string
distinctRight = self.getDistinct(right) # get frequency in right string
if distinctLeft == distinctRight:
goodSplit = goodSplit + 1
return goodSplit
def getDistinct(self, count):
c = 0
for i in count:
if i != 0:
c = c + 1
return c
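# Example (LeetCode 1525): "aacaba" has 2 good splits, "aac|aba" and "aaca|ba".
# print(Solution().numSplits("aacaba"))  # -> 2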
import os
from foodkm import config
from elasticsearch import Elasticsearch
from flask import Flask, jsonify, request
from flask_cors import CORS
import logging
from foodkm.geo_utils import get_latitude_longitude_google_api
app = Flask(__name__)
CORS(app)
es = Elasticsearch(
[os.environ['FOODKM_ES_HOST']],
scheme="http",
port=os.environ['FOODKM_ES_PORT']
)
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler())
def make_search_query(query, lat, lon, fields):
return {
"size": 50,
"_source": True,
"script_fields": {
"distance": {
"script": {
"lang": "painless",
"source": "doc['location'].arcDistance(params.lat,params.lon)",
"params": {
"lat": float(lat),
"lon": float(lon)
}
}
}
},
"query": {
"bool": {
# "must_not": {
# {
# "range": {
# "location.lat": {
# "gte": -0.01, "lte": 0.01
# }
# }
# }
# },
"filter": {
"multi_match": {
"fields": fields,
"query": query,
"type": 'cross_fields',
# "fuzziness": "0",
"operator": "and"
}
},
"should": [
{
"term": {
"product_name": {
"value": query,
"boost": 1.0
}
}
},
{
"term": {
"category_child2": {
"value": query,
"boost": 1.0
}
}
}
]
}
},
# "sort": [
# {
# "price": "asc"
# }
# ],
"suggest": {
"suggestions": {
"text": query,
"term": {
"suggest_mode": "popular",
"min_word_length": 3,
"field": "product_name",
"size": 5,
}
}
}
}
def parse_search_result(hit):
distance = hit['fields']["distance"][0] / 1000
co2 = distance * 10
return {**hit['_source'], 'distance': distance, 'co2': co2}
def parse_search_results(results):
hits = results['hits']['hits']
suggest = results['suggest']['suggestions']
if suggest:
suggest = suggest[0]['options']
return [parse_search_result(h) for h in hits], suggest
@app.route("/search")
def search():
req_args = ['query', 'lat', 'lon']
query_args = {ra: request.args.get(ra) for ra in req_args}
query = make_search_query(**query_args, fields=["product_description", "category_child1", "category_child2", "product_name"])
results = es.search(index="food_in_km", doc_type="_doc", body=query)
# if len(results['hits']['hits']) < 2:
# query = make_search_query(**query_args, fields=["category_child1", "category_child2", "product_name"])
# results = es.search(index="food_in_km", doc_type="_doc", body=query)
results, suggest = parse_search_results(results)
body = {'results': results, 'suggest': suggest}
return jsonify(body)
def get_user_location(postal_code):
address = postal_code + " " + config.USER_COUNTRY
log.info(config.GOOGLE_MAPS_API_KEY)
log.info(address)
geodata = get_latitude_longitude_google_api(
config.GOOGLE_MAPS_API_URL, config.GOOGLE_MAPS_API_KEY, address)
log.info(geodata)
return geodata['lat'], geodata['lon'], geodata['address']
@app.route("/location")
def location():
user_geo_location = {}
try:
lat, lon, address = get_user_location(request.args.get('query'))
user_geo_location['lat'] = lat
user_geo_location['lon'] = lon
user_geo_location['address'] = address
return jsonify(user_geo_location)
except Exception as exp:
return jsonify({'error': str(exp)})
def run():
app.run(debug=True)
board = [[' ', ' ', ' '], [' ', ' ', ' '], [' ', ' ', ' ']]
def displayBoard(board):
print(" 1 2 3")
for x in range(0, 3):
print('{} {} | {} | {}'.format(chr(x+65), board[x][0], board[x][1], board[x][2]))
if x < 2:
print(' ____________')
def getRowCol():
while True:
row = input("Enter the row (A, B, C): ")
if row == 'A':
row = 0
break
elif row == 'B':
row = 1
break
elif row == 'C':
row = 2
break
else:
continue
while True:
col = input("Enter the column (1, 2, 3): ")
if col == '1':
col = 0
break
        elif col == '2':
            col = 1
            break
        elif col == '3':
            col = 2
            break
        else:
            continue
return row, col
def isCellEmpty(row, col, board):
if board[row][col] == ' ':
return True
else:
return False
def checkWinner(board):
# all the options to win
winningOptions = [
[ [0,0], [0,1], [0,2] ],
[ [1,0], [1,1], [1,2] ],
[ [2,0], [2,1], [2,2] ],
[ [0,0], [1,1], [2,2] ],
[ [0,0], [1,0], [2,0] ],
[ [0,1], [1,1], [2,1] ],
[ [0,2], [1,2], [2,2] ],
[ [0,2], [1,1], [2,0] ]
]
for option in winningOptions:
row1 = option[0][0]
col1 = option[0][1]
row2 = option[1][0]
col2 = option[1][1]
row3 = option[2][0]
col3 = option[2][1]
        if (board[row1][col1] == 'x' and board[row2][col2] == 'x' and board[row3][col3] == 'x') or (board[row1][col1] == 'o' and board[row2][col2] == 'o' and board[row3][col3] == 'o'):
            return True
    return False
displayBoard(board)
player1 = input("Player 1, enter your name: ")
player2 = input("Player 2, enter your name: ")
mark = 'x'
turnCount = 1
while True:
print()
if mark == 'x':
print("{}, it is your turn.".format(player1))
isTurn = player1
else:
print("{}, it is your turn.".format(player2))
isTurn = player2
row, col = getRowCol()
if isCellEmpty(row, col, board) == False:
print()
print("Cell is occupied. Try again")
continue
board[row][col] = mark
displayBoard(board)
if turnCount > 4:
if checkWinner(board):
print()
print('Congratulations {}, you won!!'.format(isTurn))
break
else:
if turnCount == 9:
print()
print("Cat's got the game!")
break
if mark == 'x':
mark = 'o'
else:
mark = 'x'
    turnCount += 1
#import GPy
import GPyOpt
from numpy.random import seed
def myf(x):
return (2*x)**2
bounds = [{'name': 'var_1', 'type': 'continuous', 'domain': (-1,1)}]
max_iter = 15
myProblem = GPyOpt.methods.BayesianOptimization(myf,bounds)
myProblem.run_optimization(max_iter)
print (myProblem.x_opt)
print (myProblem.fx_opt)
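# For f(x) = (2x)^2 on [-1, 1] the minimum is at x = 0 with f(x) = 0, so
# x_opt should come out close to 0 and fx_opt close to 0.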
import zmq
import sys
import time
import os
import json
from optparse import OptionParser
import base64
import numpy as np
import matplotlib
import hashlib
import matplotlib.pyplot as plt
from jsonschema import validate, ValidationError
def leashsend(socket, mparts):
socket.send_multipart(mparts)
def addauthentication(request, conf):
"""
sign request for authentication
"""
m=hashlib.sha512()
request['time']=time.time()
request['sign']=""
m.update(json.dumps(request, sort_keys=True).encode('utf-8'))
m.update((conf['Secret']+"").encode('utf-8'))
request['sign']=m.hexdigest()
return request
def validateResponse(message):
"""
Validate response from saxsdog server against the schema.
"""
try:
resp=json.loads(message.decode('utf-8'))
respschema=json.load((open(os.path.dirname(__file__)+os.sep+'LeashResultSchema.json')) )
validate(resp, respschema)
return message
except ValidationError as e:
print("\nError in response data format:\n")
print(str(e))
json.dump(resp, open("dump.json", "w"), indent=2)
print("""Message dumped "dump.json""")
return json.dumps({"result":"Error","data":{"Error":str(e)}})
def sendclose(options, arg, socket, conf):
request={"command":"close","argument":{}}
leashsend(socket, [json.dumps(addauthentication(request, conf)).encode('utf-8')])
def sendabort(options, arg, socket, conf):
request={"command":"abort","argument":{}}
leashsend(socket, [json.dumps(addauthentication(request, conf)).encode('utf-8')])
def sendplotdata(options, arg, socket, conf):
request={"command":"plot","argument":{}}
leashsend(socket, [json.dumps(addauthentication(request, conf)).encode('utf-8')])
def sendplot(options, arg, socket, conf):
"""
remote plot visualization for command line mode
"""
plt.ion()
while True:
sendplotdata(options, arg, socket, conf)
object=json.loads( receive(socket))
#print json.dumps(object,indent=4, separators=(',', ': '))
if object["result"]=="Empty":
time.sleep(1)
continue
data=np.array(object['data']['array'][0]).transpose()
skip=options.skip
clip=options.clip
clipat=0
plt.plot(data[skip:-clip, 0], data[skip:-clip, 1])
plt.fill_between( data[skip:-clip, 0],
np.clip(data[skip:-clip, 1]-data[skip:-clip, 2], clipat, 1e300),
np.clip(data[skip:-clip, 1]+data[skip:-clip, 2], clipat, 1e300),
facecolor='blue', alpha=0.2, linewidth=0, label="Count Error")
plt.title(object['data']['filename'])
plt.ylabel('Intensity [counts/pixel]')
plt.xlabel('q [1/nm]')
plt.yscale(options.yax)
plt.xscale(options.xax)
plt.draw()
plt.clf()
time.sleep(.5)
def sendreaddir(options, arg, socket, conf):
"""
read all the files in the set directory and feed them into the processing server
"""
request={"command":"readdir","argument":{}}
leashsend(socket, [json.dumps(addauthentication(request, conf)).encode('utf-8')])
def sendstat(socket, conf):
request={"command":"stat","argument":{}}
leashsend(socket, [json.dumps(addauthentication(request, conf)).encode('utf-8')])
def sendgetmergedata(options, arg, socket, conf):
request={"command":"getmergedata","argument":{}}
leashsend(socket, [json.dumps(addauthentication(request, conf)).encode('utf-8')])
def sendmergestat(socket, conf):
request={"command":"mergestat","argument":{}}
leashsend(socket, [json.dumps(addauthentication(request, conf)).encode('utf-8')])
def sendget(socket, conf):
"""
get current calibration data
"""
request={"command":"get","argument":{}}
leashsend(socket, [json.dumps(addauthentication(request, conf)).encode('utf-8')] )
def sendgetfileslist(socket, conf):
"""
get list of chi files
"""
request={"command":"fileslist","argument":{}}
leashsend(socket, [json.dumps(addauthentication(request, conf)).encode('utf-8')])
def sendlistdir(arg, socket, conf):
"""
get directory contents
"""
if len (arg)<2:
arg.append(".")
if arg[1]=="":
arg[1]="."
request={"command":"listdir","argument":{"directory":arg[1].split(os.sep)}}
leashsend(socket, [json.dumps(addauthentication(request, conf)).encode('utf-8')])
def senddatamerge(options, arg, socket, conf):
cal=json.load(open(arg[1], "r"))
    # set default path for merged data
cal["OutputFileBaseName"]=".//results//logs//"
request={
"command":"mergedata",
"argument":{
"mergeconf": cal,
}
}
messageparts=(json.dumps(addauthentication(request, conf)).encode('utf-8'),)
for table in cal["LogDataTables"]:
for filedesc in table["Files"]:
if 'LocalPath' in filedesc and filedesc["LocalPath"]!="":
messageparts+=(json.dumps(
{"filename":filedesc["LocalPath"],
"data":open(filedesc["LocalPath"], "r").read()
}).encode('utf-8'),)
leashsend(socket, messageparts)
def sendnew(options, arg, socket, conf):
"""
upload new calibration for image processing
"""
request={
"command":"new",
"argument":{
"calibration":{},
"data":{}
}
}
if len(arg)>=2:
try:
if isinstance(arg[1], str):
cal=json.load(open(arg[1]))
elif isinstance(arg[1], dict):
cal=arg[1]
calschema=json.load(open(os.path.dirname(__file__)+'/schema.json'))
validate(cal, calschema)
request['argument']['calibration']=cal
except (ValueError) as e:
print(e)
print("The calibration File, "+arg[1]+",is not Valid")
sys.exit()
except ValidationError as e:
print(str(e))
print("The calibration File, "+arg[1]+",is not Valid")
sys.exit()
else:
print("Error")
print("new command:")
print("usage: leash new clibrationfile.json ")
sys.exit()
messageparts=(json.dumps(addauthentication(request, conf)).encode('utf-8'),)
for mask in cal["Masks"]:
maskfile=mask["MaskFile"]
messageparts+=(json.dumps(
{"filename":maskfile,
"data": base64.b64decode(base64.b64encode(open(maskfile, "rb").read())).decode('latin-1','ignore')
}).encode('utf-8'),)
leashsend(socket, messageparts)
def initcommand(options, arg, conf):
"""
Interface for issuing leash commands
"""
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.setsockopt(zmq.LINGER, 1)
if options.server=="":
server=conf['Server']
else:
server=options.server
if __name__=="__main__":print("conecting:", server)
socket.connect (server)
if arg[0]=="close":
result= sendclose(options, arg, socket, conf)
elif arg[0]=="new":
result= sendnew(options, arg, socket, conf)
elif arg[0]=="mergedata":
result=senddatamerge(options, arg, socket, conf)
elif arg[0]=="getmergedata":
result=sendgetmergedata(options, arg, socket, conf)
elif arg[0]=="getmergestat":
result= sendmergestat(socket, conf)
elif arg[0]=="abort":
result= sendabort(options, arg, socket, conf)
elif arg[0]=="plot":
result= sendplot(options, arg, socket, conf)
elif arg[0]=="plotdata":
result=sendplotdata(options, arg, socket, conf)
elif arg[0]=="readdir":
result= sendreaddir(options, arg, socket, conf)
elif arg[0]=="stat":
result= sendstat(socket, conf)
elif arg[0]=="get":
result=sendget(socket, conf)
elif arg[0]=="listdir":
result=sendlistdir(arg, socket, conf)
elif arg[0]=="fileslist":
result=sendgetfileslist(socket, conf)
else:
raise ValueError(arg[0])
answer=receive(socket)
socket.close()
context.term()
return answer.decode('utf-8')
def receive(socket):
poller = zmq.Poller()
poller.register(socket, zmq.POLLIN)
plist= poller.poll(100000)
if len(plist)==1:
data=socket.recv()
return validateResponse(data)
else:
return json.dumps({"result":"Error","data":{"Error":"Timeout"}}).encode('utf-8')
def parsecommandline(mode=""):
usage = ("usage: %prog "+
'|'.join(
json.load(open(os.path.dirname(__file__)+'/LeashRequestSchema.json')
)["properties"]["command"]['enum']
) +" [options] [arguments]"
)
parser = OptionParser(usage)
parser.add_option("-S", "--server", dest="server",
help='URL of "Saxsdog Server"', metavar="tcp://HOSTNAME:PORT", default="")
parser.add_option("-s", "--skip", dest="skip",
help="plot: Skip first N points."
, metavar="N", default=0, type="int")
parser.add_option("-k", "--clip", dest="clip",
help="plot: Clip last N points."
, metavar="N", default=1, type="int")
parser.add_option("-x", '--xaxsistype', dest='xax', metavar='TYPE', default='linear',
help="plot: Select type of X axis scale, might be [linear|log|symlog]")
parser.add_option("-y", '--yaxsistype', dest='yax', metavar='TYPE', default='linear',
help="plot: Select type of Y axis scale, might be [linear|log|symlog]")
parser.add_option("-N", '--serverno', dest='serverno', default=0,
help="select server from config list by index default:0")
(options, args) = parser.parse_args(args=None, values=None)
if mode=="commandline" and len(args)<1:
parser.error("incorrect number of arguments")
return (options, args)
def saxsleash():
"""
The command line leash.
"""
(options, arg)=parsecommandline(mode="commandline")
conf=json.load(open(os.path.expanduser("~"+os.sep+".saxsdognetwork")))
validate(conf, json.load(open(os.path.dirname(__file__)+os.sep+'NetworkSchema.json')))
try:
result=initcommand(options, arg, conf[int(options.serverno)])
except ValueError as e:
print('"'+arg[0]+'" is not a valid command. See -h for help.')
print(e)
sys.exit()
print(json.dumps(json.loads(result), indent=4, separators=(',', ': ')))
validateResponse(result)
if __name__ == '__main__':
saxsleash()
|
from torchvision import models
import torch.nn as nn
import pretrainedmodels
from efficientnet_pytorch import EfficientNet
class models_select:
def __init__(self,class_num=2,pretrained=False):
self.class_num=class_num
self.pretrained=pretrained
def net(self,net):
if net=="ResNet50":
Net=models.resnet50(pretrained=self.pretrained)
Net.fc=nn.Linear(2048,self.class_num)
elif net=='DenseNet121':
Net = models.densenet121(pretrained=self.pretrained)
# DenseNet exposes its classification head as `classifier`, not `fc`
Net.classifier=nn.Linear(1024,self.class_num)
elif net=='SENet':
# note: despite the label, this branch builds SqueezeNet 1.0
Net = models.squeezenet1_0(pretrained=self.pretrained)
Net.classifier = nn.Sequential(nn.Dropout(p=0.5), nn.Conv2d(512, self.class_num, kernel_size=1), nn.ReLU(inplace=True),
nn.AdaptiveAvgPool2d((1, 1)))
elif net=='pnasnet':
model_name = 'pnasnet5large'
if self.pretrained:
Net = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained='imagenet')
else:
Net = pretrainedmodels.__dict__[model_name](num_classes=1000)
Net.last_linear = nn.Linear(4320, self.class_num)
Net.eval()
elif net=='efficientNet':
if self.pretrained:
Net=EfficientNet.from_pretrained('efficientnet-b0')
else:
Net=EfficientNet.from_name('efficientnet-b0')
Net._fc = nn.Linear(1280, self.class_num, bias=True)
Net.eval()
return Net
if __name__=='__main__':
print("test")
Net=models_select(class_num=2,pretrained=False)
net=Net.net("efficientNet")
print(net) |
import uuid
import os
from django.db import models
from django.core.validators import RegexValidator, FileExtensionValidator
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
def user_directory_path(instance, filename):
ext = filename.split('.')[-1]
filename = '{}.{}'.format(uuid.uuid4().hex[:8], ext)
sub_folder = "img"
# `instance` here is the User itself, and os.path.join needs string parts
return os.path.join(str(instance.id), sub_folder, filename)
class UserManager(BaseUserManager):
use_in_migrations = True
def _create_user(self, telephone, username, password, **extra_fields):
if not telephone:
raise ValueError('Telephone number cannot be empty')
if not username:
raise ValueError('Username cannot be empty')
if not password:
raise ValueError('Password cannot be empty')
user = self.model(telephone=telephone, username=username, **extra_fields)
user.set_password(password)
user.save()
return user
def create_user(self, telephone, username, password, **extra_fields):
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(telephone, username, password, **extra_fields)
def create_superuser(self, telephone, username, password, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
return self._create_user(telephone, username, password, **extra_fields)
class User(AbstractBaseUser, PermissionsMixin):
telephone = models.CharField(max_length=11, validators=[RegexValidator(regex=r"^1[345678][0-9]{9}", message="Please enter a valid telephone number")], unique=True, verbose_name="telephone number")
username = models.CharField(max_length=20, verbose_name='username', unique=True)
img = models.ImageField(upload_to=user_directory_path, verbose_name="avatar", validators=[FileExtensionValidator(allowed_extensions=['png', 'jpeg', 'jpg'], message='Only png, jpeg and jpg avatars are allowed')], blank=True, null=True)
email = models.EmailField(blank=True, null=True, verbose_name='email')
is_staff = models.BooleanField(default=False, verbose_name='staff status')
is_active = models.BooleanField(default=True, verbose_name='active')
date_join = models.DateTimeField(auto_now_add=True, verbose_name="date joined")
objects = UserManager()
EMAIL_FIELD = 'email'
USERNAME_FIELD = 'telephone'
REQUIRED_FIELDS = ['username']
def __str__(self):
return self.username
class Meta:
verbose_name = 'user'
verbose_name_plural = 'users'
def get_full_name(self):
return self.username
def get_short_name(self):
return self.username
|
import pandas as pd
#it's in pip
import unidecode as uni
teams = pd.read_csv('fifa_team.csv', header=None)[2].drop_duplicates().dropna().to_numpy()
players = pd.read_csv('fifa_player.csv')['Club'].drop_duplicates().dropna().to_numpy()
# maps team-file club name -> player-file club name
mapping = {}
for player_team in players:
#add any exact matches
if player_team in teams:
mapping[player_team] = player_team
#add any accents
for team_team in teams:
if uni.unidecode(team_team) == uni.unidecode(player_team):
if team_team not in mapping:
mapping[team_team] = player_team
print(mapping, len(mapping))
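# Illustrative example of the accent folding this relies on:
# uni.unidecode('Atlético Madrid') == 'Atletico Madrid', so the accented
# and plain spellings of a club compare equal after transliteration.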
|
from mi.instrument.seabird.sbe54tps.driver import SBE54PlusInstrumentDriver
class InstrumentDriver(SBE54PlusInstrumentDriver):
"""
Specialization for this version of the 54 driver
"""
|
# Introduction to classes
# general template:
# class ClassName:
# def __init__(self):
# self.Attribute = 0
# def AnotherFunction(self):
# Action(s)
# First way
class Team:
def __init__(self):
self.TeamName = "NaN"
self.TeamOrigin = "NaN"
def DefineTeamName(self,Name):
self.TeamName = Name
def DefineTeamOrigin(self,NameOrigin):
self.TeamOrigin = NameOrigin
Team1 = Team()
print(Team1.TeamName) # prints the default "NaN"
print("Name:")
Team1.DefineTeamName("Leons") # sets "Leons" as the name
print(Team1.TeamName) # prints the name
print(Team1.TeamOrigin) # prints the default "NaN"
print("Origin:")
Team1.DefineTeamOrigin("NYC") # sets "NYC" as the origin
print(Team1.TeamOrigin) # prints the origin
# Second way
class Team2:
def __init__(self,Name,Origin):
self.TeamName = Name
self.NameOrigin = Origin
Team2 = Team2("Alianza Lima","Lima")
print(Team2.TeamName)
print(Team2.NameOrigin)
# Third way (default argument values)
class Team3:
def __init__(self,Name = "Universitario", Origin = "Lima"):
self.TeamName = Name
self.NameOrigin = Origin
Team3 = Team3()
print(Team3.TeamName)
print(Team3.NameOrigin) |
import numpy as np
a = [1,2,3,4,5,6]
print(type(a))
b = np.array(a)
print(type(b))
print(a) # the list prints with commas, the numpy array without
print(b) |
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
return type('Enum', (), enums)
# --------------------------------------------------------
Key = enum("JUMP", "GRAB", "CN", "CU", "CD", "CF", "CB", "VN", "VU", "VD", "VF", "VB")
# --------------------------------------------------------
MoveStyle = enum("ANIM_DRIVEN", "PHYSICS_DRIVEN")
# --------------------------------------------------------
DamageType = enum('NORMAL')
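# Usage sketch: the generated class holds each name as a class attribute
# with its index, so Key.JUMP == 0 and Key.GRAB == 1, while keyword
# arguments override the sequence, e.g. enum('A', B=10).B == 10.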
|
#!/usr/bin/env python
import time
import requests
from bs4 import BeautifulSoup
from entities import Route, Itinerary, Trajectory
from parsing import get_routes, get_itineraries, get_active_itinerary, \
get_company, get_price, get_info, \
get_coming_trajectory, get_going_trajectory
ROUTES_URL = 'http://200.238.84.28/site/consulta/itinerarios.asp'
ROUTE_URL = 'http://200.238.84.28/site/consulta/itinerarios.asp?linha={route}'
ITINERARY_URL = ('http://200.238.84.28/site/consulta/itinerarios.asp?'
'linha={route}&nomeitinerario={itinerary}')
if __name__ == "__main__":
routes_result = requests.get(ROUTES_URL)
soup = BeautifulSoup(routes_result.text)
routes = get_routes(soup)
indentation = ''
for route in routes:
time.sleep(3)
route_result = requests.get(ROUTE_URL.format(route=route.code))
soup = BeautifulSoup(route_result.text)
route.company = get_company(soup)
route.price = get_price(soup)
route.info = get_info(soup)
route.itineraries = get_itineraries(soup)
current_itinerary = get_active_itinerary(soup)
print indentation, route.name
indentation += '\t'
for itinerary in route.itineraries:
time.sleep(3)
if itinerary != current_itinerary:
itinerary_result = requests.get(ITINERARY_URL.format(
route=route.code,
itinerary=itinerary.code
))
soup = BeautifulSoup(itinerary_result.text)
itinerary.going = get_going_trajectory(soup)
itinerary.coming = get_coming_trajectory(soup)
print indentation, itinerary.name
indentation += '\t'
print indentation, itinerary.coming.name
indentation += '\t'
for place in itinerary.coming.places:
print indentation, place
indentation = indentation[:-1]
print indentation, itinerary.going.name
indentation += '\t'
for place in itinerary.going.places:
print indentation, place
indentation = indentation[:-2]
indentation = indentation[:-1]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-08 14:48
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Index',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image1', models.ImageField(upload_to='shouye/%Y/%m', verbose_name='homepage image 1')),
('image2', models.ImageField(upload_to='shouye/%Y/%m', verbose_name='homepage image 2')),
('image3', models.ImageField(upload_to='shouye/%Y/%m', verbose_name='homepage image 3')),
('image4', models.ImageField(upload_to='shouye/%Y/%m', verbose_name='homepage image 4')),
('add_time', models.DateField(default=datetime.datetime.now, verbose_name='added at')),
],
options={
'verbose_name': 'homepage',
'verbose_name_plural': 'homepage',
},
),
]
|
class Area:
sqmm, sqcm, sqdm, sqm, sqdam, sqhm, sqkm, sqM, sqyd, sqft, sqinch, ha, acre = [0] * 13
class AreaConverter:
def __init__(self):
self.area = Area()
self.area_conversion_value_table = {
'sqmm': 1, 'sqcm': 100, 'sqdm': 1000, 'sqm': 1e+6, 'sqdam': 1e+8, 'sqhm': 1e+10,
'sqkm': 1e+12, 'sqM': 2.59e+12, 'sqinch': 645.16, 'sqyd': 836127, 'sqft': 92903, 'ha': 1e+10,
'acre': 4.047e+9
}
self.name_conversion_table = {
'square millimeter': 'sqmm', 'squaremillimeter': 'sqmm',
'square centimeter': 'sqcm', 'squarecentimeter': 'sqcm',
'square decimeter': 'sqdm', 'squaredecimeter': 'sqdm',
'square meter': 'sqm', 'squaremeter': 'sqm',
'square decameter': 'sqdam', 'squaredecameter': 'sqdam',
'square hectometer': 'sqhm', 'squarehectometer': 'sqhm',
'square kilometer': 'sqkm', 'squarekilometer': 'sqkm',
'square mile': 'sqM', 'squaremile': 'sqM',
'square inch': 'sqinch', 'squareinch': 'sqinch',
'square yard': 'sqyd', 'squareyard': 'sqyd',
'squarefoot': 'sqft', 'square foot': 'sqft',
'hectare': 'ha', 'acre': 'acre'
}
self.area_units = [u for u in list(Area.__dict__.keys()) if not u.startswith('__')]
def convert(self, value: float, from_type: str):
from_type = from_type.rstrip('s')
if from_type in list(self.name_conversion_table.values()):
from_type_area_value = self.area_conversion_value_table[from_type]
elif from_type.lower() in list(self.name_conversion_table.keys()):
from_type = self.name_conversion_table[from_type]
from_type_area_value = self.area_conversion_value_table[from_type]
else:
raise KeyError(f'Invalid area unit type "{from_type}"')
value = value * from_type_area_value
for i in self.area_units:
self.area.__setattr__(i, value / self.area_conversion_value_table[i])
return self.area
if __name__ == '__main__':
c = AreaConverter()
s = c.convert(79835462.1234, 'sqhm')
print(s.__dict__)
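# The converter also accepts the long unit names and a trailing "s",
# e.g. c.convert(2.5, 'hectares').sqm equals c.convert(2.5, 'ha').sqm.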
|
from django.shortcuts import render,redirect,get_object_or_404
from django.urls import reverse_lazy
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic import ListView, DetailView, CreateView ,DeleteView ,UpdateView
from .models import Post,Comment,Like,Friend
from .forms import PostUpdateForm,CommentForm
from django.http import HttpResponse, request,JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib import messages
class PostListView(ListView):
model = Post
template_name = 'VJgramapp/mainpage.html'
context_object_name = 'posts'
ordering = ['-date_posted']
def get_context_data(self, **kwargs):
context = super(PostListView, self).get_context_data(**kwargs)
user = self.request.user
friends = [friend.following for friend in Friend.objects.filter(user=user)]
likes = [like for like in Like.objects.filter(user_id=user)]
context['friends'] = friends
context['comments'] = Comment.objects.all()
context['likes'] = likes
# And so on for more models
return context
class UserPostListView(ListView):
model = Post
template_name = 'VJgramapp/user_posts.html'
context_object_name = 'posts'
def get_queryset(self):
user = get_object_or_404(User, username=self.kwargs.get('username'))
return Post.objects.filter(author=user).order_by('-date_posted')
class PostDetailView(DetailView):
model = Post
def get_context_data(self, **kwargs):
context = super(PostDetailView, self).get_context_data(**kwargs)
postid = get_object_or_404(Post,id=self.kwargs.get('pk'))
context['comments'] = Comment.objects.filter(post_id=postid)
context['likes'] = Like.objects.filter(post_id=postid)
# And so on for more models
return context
class PostCreateView(LoginRequiredMixin,CreateView):
model = Post
fields = ['title','content','image']
def form_valid(self,form):
form.instance.author = self.request.user
return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin,UserPassesTestMixin,UpdateView):
model = Post
#template_name = 'post_form.html'
#context_object_name = 'posts'
fields = ['title','content','image']
def form_valid(self,form):
form.instance.author = self.request.user
return super().form_valid(form)
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
return False
class PostDeleteView(LoginRequiredMixin,UserPassesTestMixin,DeleteView):
model = Post
#template_name = 'post_confirm_delete.html'
success_url = '/home'
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
return False
class FriendListView(ListView):
model = Friend
template_name = 'VJgramapp/friends.html'
context_object_name = 'friends'
def get_queryset(self):
user = get_object_or_404(User,username=self.kwargs.get('username'))
return Friend.objects.filter(user=user)
class OthersUsersListView(ListView):
model = User
template_name = 'VJgramapp/otherusers.html'
context_object_name = 'otherusers'
def get_context_data(self, **kwargs):
context = super(OthersUsersListView, self).get_context_data(**kwargs)
user = self.request.user
friends = [friend.following for friend in Friend.objects.filter(user=user)]
users=[]
for user in User.objects.all():
if user not in friends:
users.append(user)
context['users'] = users
return context
""" def get_queryset(self,request):
current_user = request.user
user = get_object_or_404(User,username=self.kwargs.get('username'))
return Friend.objects.filter(user=user)
"""
@csrf_exempt
def addFriend(request):
if request.method == 'POST':
user_id = request.POST['user_id']
user = request.user
usert = User.objects.get(id=user_id)
m = Friend(user=user,following=usert) # Creating Friend Object
m.save()
return HttpResponse("Success!") # Sending an success response
else:
return HttpResponse("Request method is not a GET")
@csrf_exempt
def removeFriend(request):
if request.method == 'POST':
friend_id = request.POST['friend_id']
Friend.objects.get(friend_id=friend_id).delete() # Deleting Friend Object
return HttpResponse("Success!")
else:
return HttpResponse("Request method is not a GET")
@csrf_exempt
def likePost(request):
if request.method == 'POST':
post_id = request.POST['post_id']
user_id = request.user
likedpost = Post.objects.get(pk=post_id) #getting the liked posts
m = Like(post_id=likedpost,user_id=user_id,l=True) # Creating Like Object
m.save() # saving it to store in database
return HttpResponse("Success!") # Sending an success response
else:
return HttpResponse("Request method is not a GET")
@csrf_exempt
def commentPost(request):
if request.method == 'POST':
post_id = request.POST['post_id']
user_id = request.user
queryset = user_id.username
comment = request.POST.get('content',False)
commentedpost = Post.objects.get(pk=post_id) #getting the commented posts
m = Comment(post_id=commentedpost,user_id=user_id,content=comment) # Creating Comment Object
m.save()
return JsonResponse({'user_id':queryset,'content':comment})
else:
return HttpResponse("Request method is not a GET")
def home(request):
return render(request,'VJgramapp/home.html')
def about(request):
return render(request,'users/about.html')
def createcomment(request):
# stub: a Django view must return an HttpResponse, not an int
return HttpResponse(status=501)
|
from __future__ import annotations
from typing import Optional
from jsonclasses import jsonclass, types
@jsonclass
class TransformName:
name: Optional[str] = types.str.transform(lambda s: s + 'q')
@jsonclass
class CTransformName:
name: Optional[str] = types.str.transform(lambda s, c: s + c.val)
|
from django.urls import path, include
from . import views
from rest_framework import routers
from ReservaBarberia.views import RegistrarBarbero, RegistrarCliente, reserva_delete, reserva_edit, reserva_list, reserva_view
from ReservaBarberia import views
routers = routers.DefaultRouter()
routers.register('ReservaBarberia', views.BarberoList)
app_name = "barberosapp"
urlpatterns = [
path('registrarbarbero/', RegistrarBarbero.as_view(), name='registrar_barbero'),
path('listarbarberos/', views.barberos_listado, name='listar_barberos'),
path('registrocliente/',RegistrarCliente.as_view(),name='registrar_cliente'),
path('listado/',views.reservas_json,name='listado'),
path('listar/',views.reserva_list,name='reserva_list'),
path('nuevo/',views.reserva_view,name='reserva_view'),
path('editar/<codres>/',views.reserva_edit,name='reserva_edit'),
path('delete/<codres>/',views.reserva_delete,name='reserva_delete')
]
|
# -*- coding: utf-8 -*-
"""
@author: Manuel
"""
import time
reading_waiting_time = 0
def read_data_from_sensor():
hour = '10:00' # Hour
temperature = '18.00' # Temperature
humidity = '75.00' # Humidity
time.sleep(reading_waiting_time)
return (hour, temperature, humidity)
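# Example usage of the stub (it returns fixed strings for now):
# hour, temperature, humidity = read_data_from_sensor()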
|
from django.shortcuts import render
from rest_framework.response import Response
from rest_framework.status import (
HTTP_400_BAD_REQUEST,
HTTP_404_NOT_FOUND,
HTTP_200_OK
)
from django.contrib.auth import authenticate
from rest_framework.authtoken.models import Token
from django.views.decorators.csrf import csrf_exempt
from rest_framework.permissions import AllowAny
from rest_framework.decorators import api_view, permission_classes
from django.contrib.auth.models import User
from rest_framework.views import APIView
from .serializers import UserSerializer, UserTypeSerializer, UserTypeUpdateSerializer, UserExtensionSerializer, \
UserExtensionUpdateSerializer, CarouselSerializer, ContactSerializer, GallerySerializer, SubscriptionSerializer, \
BMRCalculatorSerializer, BMRValuesSerializer, FindTrainerSerializer, CouponSerializer, UserSubscriptionSerializer
from django.http import HttpResponse, Http404
from rest_framework import viewsets, generics, status
from .models import UserType, UserExtension, Carousel, ContactModel, Gallery, SubscriptionPlan, BMRValues, FindTrainer, Coupon, UserSubscription
# from twilio.rest import Client
import os
from rest_framework.parsers import MultiPartParser, FormParser, FileUploadParser
@csrf_exempt
@api_view(["POST"])
@permission_classes((AllowAny,))
def login_admin(request):
username = request.data.get("username")
password = request.data.get("password")
if username is None or password is None:
return Response({'error': 'Please provide both username and password'},
status=HTTP_400_BAD_REQUEST)
user = authenticate(username=username, password=password)
if user is None:
return Response({'error': 'Invalid Credentials'},
status=HTTP_404_NOT_FOUND)
# only look up the extension once we know the user exists
user_ext = UserExtension.objects.get(user=user)
if str(user_ext.user_type) != 'Admin':
return Response({'error': 'Invalid Credentials'},
status=HTTP_404_NOT_FOUND)
token, _ = Token.objects.get_or_create(user=user)
return Response({'token': token.key, 'user': user.id, 'username': user.username},
status=HTTP_200_OK)
@csrf_exempt
@api_view(["POST"])
@permission_classes((AllowAny,))
def login_customer(request):
username = request.data.get("username")
password = request.data.get("password")
if username is None or password is None:
return Response({'error': 'Please provide both username and password'},
status=HTTP_400_BAD_REQUEST)
user = authenticate(username=username, password=password)
if user is None:
return Response({'error': 'Invalid Credentials'},
status=HTTP_404_NOT_FOUND)
# only look up the extension once we know the user exists
user_ext = UserExtension.objects.get(user=user)
if str(user_ext.user_type) != 'Customer':
return Response({'error': 'Invalid Credentials'},
status=HTTP_404_NOT_FOUND)
token, _ = Token.objects.get_or_create(user=user)
return Response({'token': token.key, 'user': user.id, 'username': user.username},
status=HTTP_200_OK)
@csrf_exempt
@api_view(['POST'])
@permission_classes((AllowAny,))
def register(request):
username = request.data.get("username")
password = request.data.get("password")
email = request.data.get("email")
first_name = request.data.get("first_name")
if username is None or password is None:
return Response({'error': 'Please provide both username and password'},
status=HTTP_400_BAD_REQUEST)
user = User()
user.username = username
user.email = email
user.first_name = first_name
user.set_password(password)
user.save()
Token.objects.get_or_create(user=user)
return Response({'user_id': user.id},
status=HTTP_200_OK)
class UserListView(viewsets.ViewSet):
def userList(self, request):
queryset = User.objects.all()
serializer = UserSerializer(queryset, many=True)
return Response(serializer.data)
def post(self, request):
serializer = UserSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk):
# a plain ViewSet has no get_object(); fetch the row explicitly
user = User.objects.get(pk=pk)
user.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class UserExtensionListView(viewsets.ViewSet):
def userExtList(self, request):
queryset = UserExtension.objects.filter(user_type=1)
serializer = UserExtensionSerializer(queryset, many=True)
return Response(serializer.data)
class UserUpdateDetail(APIView):
def get_object(self, pk):
try:
return User.objects.get(pk=pk)
except User.DoesNotExist:
raise Http404
def get(self, request, pk):
user = self.get_object(pk)
serializer = UserSerializer(user)
return Response(serializer.data)
def put(self, request, pk, format=None):
user = self.get_object(pk)
serializer = UserSerializer(user, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk):
user = self.get_object(pk)
user.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
@permission_classes((AllowAny,))
class UserExtensionPostView(viewsets.ViewSet):
def user_extension_list(self, request):
queryset = UserExtension.objects.all()
serializer = UserExtensionUpdateSerializer(queryset, many=True)
return Response(serializer.data)
def post(self, request):
serializer = UserExtensionUpdateSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk):
# a plain ViewSet has no get_object(); fetch the row explicitly
user_extension = UserExtension.objects.get(pk=pk)
user_extension.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class UserTypePostView(viewsets.ViewSet):
def user_type_list(self, request):
queryset = UserType.objects.all()
serializer = UserTypeUpdateSerializer(queryset, many=True)
return Response(serializer.data)
def post(self, request):
serializer = UserTypeUpdateSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk):
# a plain ViewSet has no get_object(); fetch the row explicitly
user_type = UserType.objects.get(pk=pk)
user_type.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class UserTypeListView(viewsets.ViewSet):
def userTypeList(self, request):
queryset = UserType.objects.all()
serializer = UserTypeSerializer(queryset, many=True)
return Response(serializer.data)
class TrainerListView(viewsets.ViewSet):
def userTrainerList(self, request):
queryset = UserExtension.objects.filter(user_type=2)
serializer = UserExtensionSerializer(queryset, many=True)
return Response(serializer.data)
class CustomerListView(viewsets.ViewSet):
def userCustomerList(self, request):
queryset = UserExtension.objects.filter(user_type=3)
serializer = UserExtensionSerializer(queryset, many=True)
return Response(serializer.data)
class UserExt(APIView):
def get_object(self, pk):
try:
return UserExtension.objects.get(user=pk)
except UserExtension.DoesNotExist:
raise Http404
def get(self, request, pk):
user_ext = self.get_object(pk)
serializer = UserExtensionSerializer(user_ext)
return Response(serializer.data)
def put(self, request, pk, format=None):
user_ext = self.get_object(pk)
serializer = UserExtensionUpdateSerializer(user_ext, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class CarouselImageUploadView(APIView):
parser_classes = (FileUploadParser,)  # DRF reads parser_classes, not parser_class
def post(self, request, *args, **kwargs):
carousel_serializer = CarouselSerializer(data=request.data)
if carousel_serializer.is_valid():
carousel_serializer.save()
return Response(carousel_serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(carousel_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@permission_classes((AllowAny,))
class ImageView(generics.ListAPIView):
queryset = Carousel.objects.all()
serializer_class = CarouselSerializer
@permission_classes((AllowAny,))
class ContactListView(viewsets.ViewSet):
def contactList(self, request):
queryset = ContactModel.objects.all()
serializer = ContactSerializer(queryset, many=True)
return Response(serializer.data)
def post(self, request):
serializer = ContactSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@permission_classes((AllowAny,))
class GalleryView(viewsets.ViewSet):
def galleryImages(self, request):
queryset = Gallery.objects.all()
serializer = GallerySerializer(queryset, many=True)
return Response(serializer.data)
@permission_classes((AllowAny,))
class SubscriptionPlans(viewsets.ViewSet):
def plan_list(self, request):
queryset = SubscriptionPlan.objects.all()
serializer = SubscriptionSerializer(queryset, many=True)
return Response(serializer.data)
def post(self, request):
serializer = SubscriptionSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class SubscriptionEditDelete(APIView):
def get_object(self, pk):
try:
return SubscriptionPlan.objects.get(pk=pk)
except SubscriptionPlan.DoesNotExist:
raise Http404
def get(self, request, pk):
plan = self.get_object(pk)
Plan = SubscriptionSerializer(plan)
return Response(Plan.data)
def put(self, request, pk, format=None):
plan_obj = self.get_object(pk)
serializer = SubscriptionSerializer(plan_obj, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk):
plan_obj = self.get_object(pk)
plan_obj.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class BMRCalculator(APIView):
def get_object(self, pk):
try:
return UserExtension.objects.get(user=pk)
except UserExtension.DoesNotExist:
raise Http404
def get(self, request, pk):
user = self.get_object(pk)
serializer = BMRCalculatorSerializer(user, context={"request": request})
return Response(serializer.data)
def put(self, request, pk):
user = self.get_object(pk)
serializer = BMRCalculatorSerializer(user, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class BMRValuesByUser(APIView):
def get_object(self, pk):
try:
obj = BMRValues.objects.filter(user=pk)
return obj
except BMRValues.DoesNotExist:
raise Http404
def get(self, request, pk):
bmr = self.get_object(pk)
BMR = BMRValuesSerializer(bmr, many=True, context={"request": request})
return Response(BMR.data)
class PostBMR(viewsets.ViewSet):
def bmr_list(self, request):
queryset = BMRValues.objects.all()
serializer = BMRValuesSerializer(queryset, many=True)
return Response(serializer.data)
def post(self, request):
serializer = BMRValuesSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def handle__uploaded_file(f):
if not os.path.isdir("media/uppy_images/"):
os.makedirs("media/uppy_images/")
with open('media/uppy_images/'+f.name, 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
return f.name
@permission_classes((AllowAny,))
class ProductUploadImage(APIView):
parser_classes = (MultiPartParser, FormParser)
def get_serializer_context(self):
print(self.request.FILES)
def post(self, request, format=None):
res = {}
for i in self.request.FILES:
array = {}
array['success'] = 1
res['url'] = 'http://www.mytruestrength.com/backend/media/uppy_images/' + handle__uploaded_file(self.request.FILES[i])
array['file'] = res
return Response(array)
class GalleryImageUpdate(APIView):
def get_object(self, pk):
try:
return Gallery.objects.get(id=pk)
except Gallery.DoesNotExist:
raise Http404
def get(self, request, pk):
obj = self.get_object(pk)
Obj = GallerySerializer(obj, context={"request": request})
return Response(Obj.data)
def put(self, request, pk):
obj = self.get_object(pk)
serializer = GallerySerializer(obj, data=request.data, context={"request": request})
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class GalleryImages(viewsets.ViewSet):
def images(self, request):
queryset = Gallery.objects.all()
serializer = GallerySerializer(queryset, many=True)
return Response(serializer.data)
def post(self, request):
serializer = GallerySerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@permission_classes((AllowAny,))
class FindTrainerView(viewsets.ViewSet):
def list(self, request):
queryset = FindTrainer.objects.all()
serializer = FindTrainerSerializer(queryset, many=True)
return Response(serializer.data)
def post(self, request):
serializer = FindTrainerSerializer(data=request.data)
# account_sid = "ACa5cd6a809b1ddd9b8f111a6a9bdd9c0f"
# auth_token = "21ea5512e4f0ccb10ae519c6b8530e17"
# client = Client(account_sid, auth_token)
#
# client.messages.create(
# to="+91"+request.data['phone'],
# from_="+19105579284",
# body="Hii "+request.data['name'] + ', ' + 'we will find a trainer near you and contact you shortly. Join Transformers Fitness Academy today.',
# media_url="https://climacons.herokuapp.com/clear.png")
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class GenerateCoupon(viewsets.ViewSet):
def coupon_list(self, request):
queryset = Coupon.objects.all()
serializer = CouponSerializer(queryset, many=True)
return Response(serializer.data)
def post(self, request):
serializer = CouponSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class GetCouponCodeByUser(APIView):
def get_object(self, pk):
try:
return Coupon.objects.get(user=pk)
except Coupon.DoesNotExist:
raise Http404
def get(self, request, pk):
obj = self.get_object(pk)
Obj = CouponSerializer(obj)
return Response(Obj.data)
def put(self, request, pk):
obj = self.get_object(pk)
serializer = CouponSerializer(obj, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class UserSubscriptionPost(viewsets.ViewSet):
def s_list(self, request):
queryset = UserSubscription.objects.all()
serializer = UserSubscriptionSerializer(queryset, many=True)
return Response(serializer.data)
def post(self, request):
serializer = UserSubscriptionSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
# Jaccard Similarity Coefficient
import numpy as np
import scipy.spatial.distance as dist
mat1 = [1,1,0,1,0,1,0,0,1]
mat2 = [0,1,1,0,0,0,1,1,1]
mat3 = [1,1,0,1,0,1,0,0,1] #the same as mat1
mat4 = [0,0,1,0,1,0,1,1,0] #invert of mat1
matV = np.mat([mat1,mat4])
print('dist.jaccard : ')
print(dist.pdist(matV, 'jaccard'))
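# mat4 is the complement of mat1, so every position disagrees and the
# Jaccard distance (share of disagreements among positions where at
# least one vector is nonzero) comes out as 1.0.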
print('------------------------------------------------------------') # 60 dashes
import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate
from scipy import special
print('------------------------------------------------------------') # 60 dashes
a = special.exp10(3)
print('10^3 =', a)
b = special.exp2(3)
print('2^3 =', b)
c = special.sindg(90)
print('sind(90) =', c)
d = special.cosdg(45)
print('cosd(45) =', d)
print('Plot 10^x for x = 0 to 1.0')
x = np.linspace(0, 1, 100)
y = special.exp10(x)
plt.plot(x, y)
plt.show()
print('------------------------------------------------------------') # 60 dashes
print('Integration')
def func(x):
return special.exp10(x)
area, err = integrate.quad(func, 0, 1)
print(area)
def half_circle(x):
return (1-x**2)**0.5
area, err = integrate.quad(half_circle, -1, 1)
print(area)
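# Sanity check: the half-circle integral is pi/2, so this prints
# approximately 1.5707963.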
print('------------------------------------------------------------') # 60 dashes
import numpy as np
from scipy import linalg
A = np.array([[2,3], [5,7]])
B = linalg.inv(A)
print(B)
A = np.array([[3,8], [4,6]])
B = linalg.det(A)
print(B)
a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]])
b = np.array([2, 4, -1])
x = linalg.solve(a, b)
print(x)
print('------------------------------------------------------------') # 60 dashes
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
def f(x):
return x**2 + 15*np.sin(x)
x = np.arange(-10, 10, 0.1)
plt.plot(x, f(x))
plt.show()
print('------------------------------------------------------------') # 60 dashes
result = optimize.minimize(f, x0=0)
print(result.x)
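# Starting from x0=0 the optimizer follows the slope of the sine term
# and lands at the minimum near x = -1.4.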
plt.plot(x, f(x))
plt.plot(result.x, f(result.x), "o")
plt.show()
print('------------------------------------------------------------') # 60 dashes
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
from scipy import special
x = np.arange(5, 20)
y = special.exp2(x/3.0)
plt.plot(x, y, 'o')
plt.show()
print('------------------------------------------------------------') # 60 dashes
f = interpolate.interp1d(x, y)
x1 = np.arange(5, 20)
y1 = f(x1)
plt.plot(x, y, "o", x1, y1, "--")
plt.show()
print('------------------------------------------------------------') # 60 dashes
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
def normal_pdf(x, mu, sigma):
pi = np.pi
e = np.e
f = (1./np.sqrt(2*pi*sigma**2))*e**(-(x-mu)**2/(2.*sigma**2))
return f
ax = np.linspace(-5, 5, 100)
ay = [normal_pdf(x, 0, 1) for x in ax]
plt.plot(ax, ay)
plt.show()
x = [x/10.0 for x in range(-50, 60)]
plt.plot(x, stats.norm.pdf(x, 0, 1),
'r-',lw=1,alpha=0.6,label='mu=0,sigma=1')
plt.plot(x, stats.norm.pdf(x, 0, 2),
'b--',lw=1,alpha=0.6,label='mu=0,sigma=2')
plt.plot(x, stats.norm.pdf(x, 2, 1),
'g-.',lw=1,alpha=0.6,label='mu=2,sigma=1')
plt.legend()
plt.title("Various Normal PDF")
plt.show()
samples = [9, 3, 27]
desc = stats.describe(samples)
print(desc)
samples2 = [[1, 3, 27],
[3, 4, 6],
[7, 6, 3],
[3, 6, 8]]
desc = stats.describe(samples2, axis = 0)
print(desc)
desc = stats.describe(samples2, axis = 1)
print(desc)
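# axis=0 describes each of the three columns across the four rows;
# axis=1 describes each row across its three values.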
print('------------------------------------------------------------') # 60 dashes
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
t = np.linspace(6, 10, 500)
w = signal.chirp(t,f0=4,f1=2,t1=5,method='linear')
plt.plot(t, w)
plt.title("Linear Chirp")
plt.xlabel('time in sec')
plt.show()
img = np.load("data/digit8.npy")
plt.figure()
plt.imshow(img, cmap="gray")
plt.axis("off")
plt.show()
print('------------------------------------------------------------') # 60 dashes
edge = [
[0, 1, 0],
[1,-4, 1],
[0, 1, 0]
]
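# The 3x3 kernel above is a discrete Laplacian: it cancels out in flat
# regions and responds where intensity changes, which highlights edges.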
plt.figure()
plt.subplot(1, 2, 1)
plt.imshow(img, cmap="gray")
plt.axis("off")
plt.title("original image")
plt.subplot(1, 2, 2)
c_digit = signal.convolve2d(img, edge,
boundary="symm",
mode="same")
plt.imshow(c_digit, cmap="gray")
plt.axis("off")
plt.title("edge-detection image")
plt.show()
print('------------------------------------------------------------') # 60 dashes
sharpen = [
[0, -1, 0],
[-1, 5, -1],
[0, -1, 0]
]
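# This kernel is the identity plus a Laplacian, so it boosts each pixel
# against its neighbours and sharpens the image.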
plt.figure()
plt.subplot(1, 2, 1)
plt.imshow(img, cmap="gray")
plt.axis("off")
plt.title("original image")
plt.subplot(1, 2, 2)
c_digit = signal.convolve2d(img, sharpen,
boundary="symm",
mode="same")
plt.imshow(c_digit, cmap="gray")
plt.axis("off")
plt.title("sharpen image")
plt.show()
print('------------------------------------------------------------') # 60 dashes
img = np.load("data/digit3.npy")
filters = [[
[-1, -1, -1],
[ 1, 1, 1],
[ 0, 0, 0]],
[[-1, 1, 0],
[-1, 1, 0],
[-1, 1, 0]],
[[ 0, 0, 0],
[ 1, 1, 1],
[-1, -1, -1]],
[[ 0, 1, -1],
[ 0, 1, -1],
[ 0, 1, -1]]]
plt.figure()
plt.subplot(1, 5, 1)
plt.imshow(img, cmap="gray")
plt.axis("off")
plt.title("original")
for i in range(2, 6):
plt.subplot(1, 5, i)
c = signal.convolve2d(img,filters[i-2],
boundary="symm",
mode="same")
plt.imshow(c, cmap="gray")
plt.axis("off")
plt.title("filter"+str(i-1))
plt.show()
print('------------------------------------------------------------') # 60 dashes
|
# coding: utf-8
# impares_1
# raquel ambrozio
for numeros in range(1, 101, 2):
if numeros % 3 == 0 or numeros % 5 == 0:
numeros = "*"
print numeros
|
# Generated by Django 2.2.4 on 2021-06-11 03:02
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Hotel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20, verbose_name='hotel name')),
('entitle', models.CharField(max_length=50, verbose_name='English name')),
('score', models.FloatField(verbose_name='hotel rating')),
('city', models.CharField(max_length=10, verbose_name='city')),
('address', models.CharField(max_length=50, verbose_name='address')),
('checkin', models.CharField(max_length=50, verbose_name='check-in time')),
('checkout', models.CharField(max_length=50, verbose_name='check-out time')),
('year', models.IntegerField(blank=True, null=True, verbose_name='year built')),
('scale', models.CharField(blank=True, max_length=50, null=True, verbose_name='hotel scale')),
('near1', models.CharField(blank=True, max_length=20, null=True, verbose_name='nearby attraction 1')),
('near2', models.CharField(blank=True, max_length=20, null=True, verbose_name='nearby attraction 2')),
('near3', models.CharField(blank=True, max_length=20, null=True, verbose_name='nearby attraction 3')),
('method', models.TextField(blank=True, null=True, verbose_name='hotel guide')),
('tip', models.TextField(blank=True, null=True, verbose_name='travel tips')),
('main', models.CharField(choices=[('免费wifi', '免费wifi'), ('免费停车场', '免费停车场'), ('电梯', '电梯'), ('餐厅', '餐厅'), ('中文服务', '中文服务'), ('免费瓶装水', '免费瓶装水'), ('吹风机', '吹风机')], max_length=50, verbose_name='main facilities')),
('service', models.CharField(choices=[('外币兑换服务', '外币兑换服务'), ('洗衣服务', '洗衣服务'), ('旅游服务', '旅游服务'), ('客房服务', '客房服务'), ('干洗服务', '干洗服务'), ('快速入住服务', '快速入住服务'), ('每日客房清洁', '每日客房清洁'), ('礼宾服务', '礼宾服务'), ('英语', '英语'), ('中文-国语', '中文-国语'), ('日语', '日语'), ('24小时办理入住', '24小时办理入住')], max_length=50, verbose_name='hotel services')),
('hfacility', models.CharField(choices=[('咖啡厅', '咖啡厅'), ('酒吧', '酒吧'), ('健身房', '健身房'), ('商店', '商店')], max_length=50, verbose_name='hotel facilities')),
('rfacility', models.CharField(choices=[('叫醒服务', '叫醒服务'), ('保险箱', '保险箱'), ('浴衣', '浴衣'), ('浴缸', '浴缸'), ('宽带上网', '宽带上网'), ('空调', '空调'), ('洗漱用品', '洗漱用品'), ('电视', '电视'), ('衣架', '衣架'), ('冰箱', '冰箱'), ('毛巾', '毛巾'), ('拖鞋', '拖鞋'), ('书桌', '书桌'), ('铺设地毯的地板', '铺设地毯的地板'), ('闹钟', '闹钟'), ('电话', '电话')], max_length=50, verbose_name='room facilities')),
('views', models.PositiveIntegerField(default=0, verbose_name='view count')),
],
),
migrations.CreateModel(
name='UserComment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=20, null=True, verbose_name='user nickname')),
('content', models.TextField(blank=True, null=True, verbose_name='review content')),
('index', models.IntegerField(blank=True, null=True, verbose_name='review rating')),
('commentDate', models.DateTimeField(default=django.utils.timezone.now, verbose_name='posted at')),
('title', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='userComment', to='hotelApp.Hotel', verbose_name='user review')),
],
),
migrations.CreateModel(
name='RoomPrice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('roomType', models.CharField(blank=True, max_length=20, null=True, verbose_name='room type')),
('roomPeople', models.CharField(blank=True, max_length=20, null=True, verbose_name='occupancy')),
('roomPrice', models.CharField(blank=True, max_length=20, null=True, verbose_name='room price')),
('title', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='roomPrice', to='hotelApp.Hotel', verbose_name='room type and price history')),
],
),
migrations.CreateModel(
name='HotelImgs',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photoType', models.CharField(choices=[('外观', '外观'), ('内景', '内景'), ('房间', '房间'), ('环境', '环境'), ('餐厅', '餐厅'), ('其他', '其他')], max_length=50, verbose_name='image type')),
('photo', models.ImageField(blank=True, upload_to='Hotel/Comment/', verbose_name='hotel image')),
('title', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='hotelImgs', to='hotelApp.Hotel', verbose_name='hotel images')),
],
),
migrations.CreateModel(
name='CommentImgs',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.ImageField(blank=True, upload_to='Hotel/Comment/', verbose_name='review image')),
('name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='userImgs', to='hotelApp.UserComment', verbose_name='review images')),
],
),
]
|
#! /usr/bin/python
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
from ansible_storlet_management_vars import mgmt_vars
class Inventory(object):
'''
Ansible inventory, generated from a config file
'''
def __init__(self, fname):
self.__load_config__(fname)
def __load_config__(self, name):
with open(name) as f:
self.conf = json.loads(f.read())
def show_list(self):
g = {}
for group in ['storlet-mgmt', 'storlet-proxy', 'storlet-storage',
'docker']:
g[group] = dict()
g[group]['hosts'] = self.conf['groups'][group]
g[group]['vars'] = dict()
g[group]['vars'].update(self.conf['all'])
return g
def show_host(self, name):
res = dict()
res['ansible_ssh_user'] = self.conf['all']['ansible_ssh_user']
return res
def write_inventory(self, inventory_file):
inventory = dict()
inventory['groups'] = self.conf['groups']
for group in ['storlet-mgmt', 'storlet-proxy', 'storlet-storage',
'docker']:
for host in self.conf['groups'][group]:
if host not in inventory:
inventory[host] = dict()
if group == 'storlet-mgmt':
inventory[host]['ansible_ssh_user'] =\
self.conf['all']['storlets_management_user']
else:
inventory[host]['ansible_ssh_user'] =\
self.conf['all']['ansible_ssh_user']
all_vars = dict()
for v in mgmt_vars:
all_vars[v] = self.conf['all'][v]
inventory['all'] = all_vars
with open(inventory_file, 'w') as f:
f.write(json.dumps(inventory))
return inventory
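# Sketch of the dynamic inventory protocol this script follows: Ansible
# invokes it with --list to get every group with its hosts and vars, and
# with --host <name> to get the variables for a single host.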
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--list', action='store_true')
parser.add_argument('--host')
parser.add_argument('--inventory')
args = parser.parse_args()
inventory = Inventory('deploy/cluster_config.json')
out = {}
if args.list:
out = inventory.show_list()
if args.host:
out = inventory.show_host(args.host)
if args.inventory:
out = inventory.write_inventory(args.inventory)
print(json.dumps(out))
if __name__ == '__main__':
main()
|
import os
import chess
import chess.engine
import chess.pgn
from Interface import interface
import chess.svg
import time
engine = chess.engine.SimpleEngine.popen_uci("/usr/games/stockfish")
cwd_addr = os.getcwd()
game_file = open(cwd_addr+"/Database/MagnusCarlsen/2017/3/6.pgn", 'r')
game = chess.pgn.read_game(game_file)
board = game.board()
moves = list(game.mainline_moves())
chess_interface = interface()
chess_interface.update_board(board)
for move in moves:
time.sleep(2)
board.push(move)
chess_interface.update_board(board)
print("\n")
# def top_ten_moves(analysis_board, analysis_engine):
# best_moves = []
# for legal_move in analysis_board.legal_moves:
# analysis_board.push(legal_move)
# print(analysis_board.turn)
# info = analysis_engine.analyse(analysis_board, chess.engine.Limit(depth=10))
# best_moves.append([legal_move, info["score"].relative])
# analysis_board.pop()
# best_moves.sort(key=lambda x: x[1])
# return best_moves[0:10]
#
#
# this_list = top_ten_moves(board, engine)
# print(board.turn)
# print(this_list)
|
import unittest
# 97
def checkOneOff(shortWord, longWord):
lenShort, lenLong = 0, 0
oneOff = False
while lenShort < len(shortWord) and lenLong < len(longWord):
if shortWord[lenShort] != longWord[lenLong]:
if oneOff:
return False
oneOff = True
lenLong += 1
else:
lenLong += 1
lenShort += 1
return True
def checkDiff(word1, word2):
oneOff = False
for i in range(len(word1)):
if word1[i] != word2[i]:
if oneOff:
return False
oneOff = True
return True
def one_away(word1, word2):
if len(word1)+1 == len(word2): # word1 is shorter than word2
return checkOneOff(word1, word2)
elif len(word1)-1 == len(word2): # word2 is shorter than word1
return checkOneOff(word2, word1)
elif len(word1) == len(word2):
return checkDiff(word1, word2)
return False
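# For example, one_away('pale', 'ple') is True (a single deletion) while
# one_away('pale', 'bake') is False (two replacements are needed).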
class Test(unittest.TestCase):
'''Test Cases'''
data = [
('pale', 'ple', True),
('pales', 'pale', True),
('pale', 'bale', True),
('paleabc', 'pleabc', True),
('pale', 'ble', False),
('a', 'b', True),
('', 'd', True),
('d', 'de', True),
('pale', 'pale', True),
('pale', 'ple', True),
('ple', 'pale', True),
('pale', 'bale', True),
('pale', 'bake', False),
('pale', 'pse', False),
('ples', 'pales', True),
('pale', 'pas', False),
('pas', 'pale', False),
('pale', 'pkle', True),
('pkle', 'pable', False),
('pal', 'palks', False),
('palks', 'pal', False)
]
def test_one_away(self):
for [test_s1, test_s2, expected] in self.data:
actual = one_away(test_s1, test_s2)
self.assertEqual(actual, expected)
if __name__ == "__main__":
unittest.main() |
sentences = [
'Taki mamy klimat',
'Wszędzie dobrze ale w domu najlepiej',
'Wyskoczył jak Filip z konopii',
'Gdzie kucharek sześć tam nie ma co jeść',
'Nie ma to jak w domu',
'Konduktorze łaskawy zabierz nas do Warszawy',
'Jeżeli nie zjesz obiadu to nie dostaniesz deseru',
'Bez pracy nie ma kołaczy',
'Kto sieje wiatr ten zbiera burzę',
'Być szybkim jak wiatr',
'Kopać pod kimś dołki',
'Gdzie raki zimują',
'Gdzie pieprz rośnie',
'Swoją drogą to gdzie rośnie pieprz?',
'Mam nadzieję, że poradzisz sobie z tym zadaniem bez problemu',
'Nie powinno sprawić żadnego problemu, bo Google jest dozwolony',
]
word_list = []
words_frequency = {}
for sentence in sentences:
words = sentence.split()
for word in words:
word_list.append(word)
for word in word_list:
word = word.lower()
if word in words_frequency:
words_frequency[word] += 1
else:
words_frequency[word] = 1
print(words_frequency)
list_of_values = sorted(words_frequency.values())
top_three_counts = list_of_values[-3:]
# note: ties on the third-highest count will print more than three words
for key, value in words_frequency.items():
if value in top_three_counts:
print(f"{key} - {value}")
|
#!usr/bin/python3
import sys
import requests
import threading
import os
import tempfile
from urllib.request import urlretrieve
import bs4 as bs
from PIL import Image
from time import sleep
import re
def extract_values(pretty_soup):
list_args = pretty_soup.splitlines()
for i in range(0, 4):
list_args.pop(0)
list_args.pop()
whitespaces = re.compile(r'^\s+|\s+$')
all_items = dict()
for item in list_args:
item = re.sub(whitespaces, '', item)
kv = item.split(':', 1)
# print(kv)
lhs = ""
if len(kv) == 2:
lhs = kv[1]
if len(lhs) >= 2:
lhs = lhs[1:-2]
# print(kv[0], lhs)
if len(kv[0]) > 0:
all_items[kv[0]] = lhs
# print(item)
return all_items
def get_img_list(list_img):
list_img = list_img.split(',')
new_img_list = []
for image_str in list_img:
image_str = image_str[1:-1]
nil = image_str.split("\\")
new_img_str = ""
for item in nil:
new_img_str += item
new_img_list.append(new_img_str)
return new_img_list
def download_single_image(img_url, img_name):
# print("KEKW")
urlretrieve(img_url, img_name)
def make_pdf(path, file_list):
list_pages = []
save_path = cwd+'/'+name
# print(save_path)
head = Image.open(path+'/'+file_list[0])
head = head.convert('RGB')
for file in file_list[1:]:
img = Image.open(path+'/'+file)
img = img.convert('RGB')
list_pages.append(img)
head.save(save_path, save_all=True, append_images=list_pages)
def clean_up(tmp_dir):
os.chdir(tmp_dir)
for f in os.listdir(tmp_dir):
os.remove(os.path.join(tmp_dir, f))
os.rmdir(tmp_dir)
def download_images(list_img):
tmp_dir = tempfile.mkdtemp()
os.chdir(tmp_dir)
list_names = []
for img in list_img:
# get image name
split_url = img.split('?')
name_list = split_url[0].split('/')
img_name = name_list[len(name_list)-1]
list_names.append(img_name)
while threading.active_count() >= 100:
sleep(1)
threading.Thread(target=download_single_image, args=(img, img_name)).start()
# urllib.urlretrieve(img, "name")
while threading.active_count() != 1:
sleep(1)
list_names.sort()
# print(list_names)
make_pdf(tmp_dir, list_names)
clean_up(tmp_dir)
def main(res_url):
# get Pearson e-reader site
# must be in HU VPN
p_page = requests.get(res_url)
soup = bs.BeautifulSoup(p_page.content, 'html.parser')
script = soup.body.script
contents = script.prettify()
all_items = extract_values(contents)
list_images = all_items.get('list')
list_images = get_img_list(list_images)
download_images(list_images)
if __name__ == '__main__':
url = ""
name = ""
if len(sys.argv) == 1:
url = input("Give URL for resource: ")
name = input("Give desired filename: ")
# url = "https://www.pearson-studium.de/drm/reader/fr/usr/160309/isbn/9783863267391"
else:
assert len(sys.argv) == 3, "make sure to use it like that: 'scraper.py URL filename.pdf'"
url = sys.argv[1]
name = sys.argv[2]
cwd = os.getcwd()
# url = "https://www.pearson-studium.de/drm/reader/fr/usr/160309/isbn/9783863267391"
if "pearson-studium" not in url:
exit("Wrong URL!")
main(url)
|
from logger import *
# Plots several runs into one figure per metric:
# name each curve in algorithm_list[] and give the matching
# csv paths in csv_list[]
log_dir = './results/'
log_dir_dqn = './results/doudizhu_dqn_result'
log_dir_ddqn = './results/doudizhu_ddqn_result'
log_dir_dddqn = './results/doudizhu_dueling_ddqn_result'
log_dir_random = './results/doudizhu_random_agent_result'
save_path_performance = log_dir + '/general_performance_figure'
# EDIT ALGORITHM and CSV List for other plots together
algorithm_list = ['DQN', 'DDQN', 'Dueling DDQN', 'Random']
what_plot = 'reward' # agent_landlord_wins; agent_peasant_wins
# performance
csv_list_performance = [ log_dir_dqn + '/performance_0.csv', log_dir_ddqn + '/performance_1.csv', log_dir_dddqn + '/performance_2.csv', log_dir_random + '/performance_0.csv']
plot_figures_one(csv_list_performance, save_path_performance, algorithm_list, what_plot)
save_path_agent_l = log_dir + 'landlord_performance_figure'
what_plot = 'agent_landlord_wins'
csv_list_agents_landlord_wins = [log_dir_dqn + '/agent_landlord_perf_0.csv', log_dir_ddqn + '/agent_landlord_perf_1.csv', log_dir_dddqn + '/agent_landlord_perf_2.csv', log_dir_random +'/agent_landlord_perf_0.csv']
plot_figures_one(csv_list_agents_landlord_wins, save_path_agent_l,algorithm_list, what_plot)
save_path_agent_p = log_dir + 'peasant_performance_figure'
what_plot = 'agent_peasant_wins'
csv_list_agents_peasant_wins = [log_dir_dqn + '/agent_peasant_perf_0.csv', log_dir_ddqn + '/agent_peasant_perf_1.csv', log_dir_dddqn + '/agent_peasant_perf_2.csv', log_dir_random +'/agent_peasant_perf_0.csv']
plot_figures_one(csv_list_agents_peasant_wins, save_path_agent_p, algorithm_list, what_plot)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Raphaël Barrois
# This software is distributed under the two-clause BSD license.
import logging
import json
from django.db import transaction
from django import http
from . import models
logger = logging.getLogger(__name__)
def log_exceptions(view):
"""Simple decorator to get meaningful error logs in tests."""
def decorated(request, *args, **kwargs):
try:
return view(request, *args, **kwargs)
except Exception as e:
logger.exception("Error in %s.%s: %r", view.__module__, view.__name__, e)
raise
return decorated
@log_exceptions
def read(request):
qs = (models.Something.objects
.order_by('pk')
.values('pk', 'data')
)
data = json.dumps(list(qs))
return http.HttpResponse(data, content_type='application/json')
@log_exceptions
@transaction.atomic
def atomic_read(request):
return read(request)
|
import pytest
from ..models import Wishlist, WishlistItem
def test_remove_only_variant_also_removes_wishlist_item(customer_wishlist_item):
assert customer_wishlist_item.variants.count() == 1
variant = customer_wishlist_item.variants.first()
wishlist = customer_wishlist_item.wishlist
assert wishlist.items.count() == 1
wishlist.remove_variant(variant)
assert wishlist.items.count() == 0
with pytest.raises(WishlistItem.DoesNotExist):
customer_wishlist_item.refresh_from_db()
def test_remove_single_variant_from_wishlist_item(
customer_wishlist_item_with_two_variants,
):
assert customer_wishlist_item_with_two_variants.variants.count() == 2
[variant_1, variant_2] = customer_wishlist_item_with_two_variants.variants.all()
wishlist = customer_wishlist_item_with_two_variants.wishlist
wishlist.remove_variant(variant_1)
customer_wishlist_item_with_two_variants.refresh_from_db()
assert customer_wishlist_item_with_two_variants.variants.count() == 1
assert customer_wishlist_item_with_two_variants.variants.first() == variant_2
def test_move_items_between_wishlists_with_duplicates(variant, customer_wishlist_item):
dst_wishlist_item = customer_wishlist_item
assert dst_wishlist_item.variants.count() == 1
dst_wishlist = dst_wishlist_item.wishlist
assert dst_wishlist.items.count() == 1
dst_variant = dst_wishlist_item.variants.first()
assert variant.pk != dst_variant.pk
# Create the source wishlist
src_wishlist = Wishlist.objects.create()
# Add the new variant
src_item_1 = src_wishlist.add_variant(variant)
# Add the destination variants to the source wishlist (the duplicate case)
src_item_2 = src_wishlist.add_variant(dst_variant)
# Move items from the source to the destination wishlist
WishlistItem.objects.move_items_between_wishlists(src_wishlist, dst_wishlist)
# Check the source wishlist doesn't have any items
assert src_wishlist.items.count() == 0
# Check the destination wishlist has two items
assert dst_wishlist.items.count() == 2
# Check that the source wishlist item with the new variant was moved to the
# destination wishlist
src_item_1.refresh_from_db()
assert src_item_1.wishlist == dst_wishlist
# Check that the source wishlist item with the duplicate variant was removed
with pytest.raises(WishlistItem.DoesNotExist):
src_item_2.refresh_from_db()
|
"""
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
Author: Jun Zhu <jun.zhu@xfel.eu>
Copyright (C) European X-Ray Free-Electron Laser Facility GmbH.
All rights reserved.
"""
from PyQt5.QtCore import Qt, pyqtSlot
from PyQt5.QtWidgets import (
QCheckBox, QGridLayout, QHBoxLayout, QLabel, QPushButton,
)
from ..ctrl_widgets import _AbstractCtrlWidget
from ..ctrl_widgets.smart_widgets import SmartBoundaryLineEdit
from ..gui_helpers import create_icon_button
from ...config import config, GeomAssembler
from ...database import Metadata as mt
class MaskCtrlWidget(_AbstractCtrlWidget):
"""Widget for masking image in the ImageToolWindow."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.threshold_mask_le = SmartBoundaryLineEdit('-1e5, 1e5')
# avoid collapse on online and maxwell clusters
self.threshold_mask_le.setMinimumWidth(160)
self.mask_tile_cb = QCheckBox("Mask tile edges")
if config["MASK_TILE_EDGE"]:
self.mask_tile_cb.setChecked(True)
self.mask_asic_cb = QCheckBox("Mask ASIC edge")
if config["MASK_ASIC_EDGE"]:
self.mask_asic_cb.setChecked(True)
icon_size = 30
self.draw_mask_btn = create_icon_button(
"draw_mask.png", icon_size, description="Draw mask")
self.draw_mask_btn.setCheckable(True)
self.erase_mask_btn = create_icon_button(
"erase_mask.png", icon_size, description="Erase mask")
self.erase_mask_btn.setCheckable(True)
self.remove_btn = create_icon_button(
"remove_mask.png", icon_size, description="Remove mask")
self.load_btn = QPushButton("Load mask")
self.save_btn = QPushButton("Save mask")
self.mask_save_in_modules_cb = QCheckBox("Save mask in modules")
self._exclusive_btns = {self.erase_mask_btn, self.draw_mask_btn}
self._non_reconfigurable_widgets = [
self.save_btn,
self.load_btn,
self.mask_save_in_modules_cb,
]
self.initUI()
self.initConnections()
def initUI(self):
"""Override."""
layout = QGridLayout()
AR = Qt.AlignRight
row = 0
layout.addWidget(QLabel("Threshold mask: "), row, 0, AR)
layout.addWidget(self.threshold_mask_le, row, 1)
if config["MASK_TILE_EDGE"]:
row += 1
layout.addWidget(self.mask_tile_cb, row, 0, AR)
if config["MASK_ASIC_EDGE"]:
row += 1
layout.addWidget(self.mask_asic_cb, row, 0, AR)
row += 1
sub_layout = QHBoxLayout()
sub_layout.addWidget(self.draw_mask_btn)
sub_layout.addWidget(self.erase_mask_btn)
sub_layout.addWidget(self.remove_btn)
layout.addLayout(sub_layout, row, 0)
row += 1
layout.addWidget(self.load_btn, row, 0)
layout.addWidget(self.save_btn, row, 1)
if self._require_geometry:
row += 1
layout.addWidget(self.mask_save_in_modules_cb, row, 0, 1, 2, AR)
layout.setVerticalSpacing(20)
self.setLayout(layout)
def initConnections(self):
"""Override."""
mediator = self._mediator
self.threshold_mask_le.value_changed_sgn.connect(
mediator.onImageThresholdMaskChange)
self.mask_tile_cb.toggled.connect(
mediator.onImageMaskTileEdgeChange)
self.mask_asic_cb.toggled.connect(
mediator.onImageMaskAsicEdgeChange)
mediator.assembler_change_sgn.connect(self._onAssemblerChange)
self.erase_mask_btn.toggled.connect(self._updateExclusiveBtns)
self.draw_mask_btn.toggled.connect(self._updateExclusiveBtns)
self.remove_btn.clicked.connect(
lambda: self._updateExclusiveBtns(True))
# required for loading metadata
self.mask_save_in_modules_cb.toggled.connect(
mediator.onImageMaskSaveInModulesToggled)
@pyqtSlot(object)
def _onAssemblerChange(self, assembler):
if assembler == GeomAssembler.EXTRA_GEOM:
self.mask_tile_cb.setChecked(False)
self.mask_tile_cb.setEnabled(False)
self.mask_asic_cb.setChecked(False)
self.mask_asic_cb.setEnabled(False)
self.mask_save_in_modules_cb.setChecked(False)
self.mask_save_in_modules_cb.setEnabled(False)
else:
self.mask_tile_cb.setEnabled(True)
self.mask_asic_cb.setEnabled(True)
self.mask_save_in_modules_cb.setEnabled(True)
def updateMetaData(self):
"""Override."""
self.threshold_mask_le.returnPressed.emit()
self.mask_tile_cb.toggled.emit(self.mask_tile_cb.isChecked())
self.mask_asic_cb.toggled.emit(self.mask_asic_cb.isChecked())
self.mask_save_in_modules_cb.toggled.emit(
self.mask_save_in_modules_cb.isChecked())
return True
def loadMetaData(self):
"""Override."""
cfg = self._meta.hget_all(mt.IMAGE_PROC)
self.threshold_mask_le.setText(cfg["threshold_mask"][1:-1])
if config["MASK_TILE_EDGE"]:
self.mask_tile_cb.setChecked(cfg["mask_tile"] == 'True')
if config["MASK_ASIC_EDGE"]:
self.mask_asic_cb.setChecked(cfg["mask_asic"] == 'True')
if self._require_geometry:
self.mask_save_in_modules_cb.setChecked(
cfg["mask_save_in_modules"] == 'True')
@pyqtSlot(bool)
def _updateExclusiveBtns(self, checked):
if checked:
for at in self._exclusive_btns:
if at != self.sender():
at.setChecked(False)
def setInteractiveButtonsEnabled(self, state):
self.draw_mask_btn.setEnabled(state)
self.erase_mask_btn.setEnabled(state)
|
#!/usr/bin/py
# Head ends here
def lonelyinteger(b):
b = list(b)
for item in b:
if b.count(item) == 1:
return item
return None
# Tail starts here
if __name__ == '__main__':
a = int(input())
b = map(int, input().strip().split(" "))
print(lonelyinteger(b))
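# A constant-space alternative (sketch; 'values' stands for the parsed input
# list): XOR-ing all values cancels the pairs and leaves the lonely integer,
# assuming every other value occurs exactly twice.
# from functools import reduce
# print(reduce(lambda x, y: x ^ y, values, 0))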
|
import subprocess
# pass the command as an argument list so it also works on non-Windows
# platforms without shell=True
subprocess.call(['python', 'setup.py', 'sdist'])
subprocess.call(['python', 'setup.py', 'sdist', 'bdist_wheel', 'upload'])
|
def divide(a, b):
try:
result = a / b
return result
except ZeroDivisionError:
return "Cannot divide by zero brother"
print(divide(1, 0)) |
import sys
import argparse
import numpy as np
import pandas as pd
import csv
import json
from numpy import percentile
from sequence_model.estimate_gru_ae import LSTMAutoEncoder
from sklearn.utils import shuffle
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score, roc_auc_score
from unsupervised.AutoEncoder_torch import AutoEncoder
WINDOW_SIZE = 20
EMBEDDING_SIZE=97
FILTER_CLASS_NAME=['', 'DoS Hulk', 'DDoS', 'PortScan', 'DoS GoldenEye', 'DoS Slowhttptest', 'DoS slowloris', 'FTP-Patator', 'SSH-Patator']
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('inputpath', type=str, help='Path of the feature matrix to load')
parser.add_argument('output_path', type=str, help='Path of the result to save')
parser.add_argument('--filter_class', type=int,
help='id of filter class', default=0)
parser.add_argument('--epoch', type=int,
help='number of the training epochs', default=10)
parser.add_argument('--batch_size', type=int,
help='training batch size', default=128)
parser.add_argument('--theta', type=int,
help='theta for confidence estimate', default=50)
parser.add_argument('--seed', type=int,
help='random seed', default=1310)
parser.add_argument('--ratio_label', type=float,
help='ratio of labeled training data', default=0.01)
parser.add_argument('--device', type=str,
help='type of gpu device', default='cpu')
parser.add_argument('--contam', type=float,
help='the percent of the outliers.', default=0.2)
return parser.parse_args(argv)
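# Example invocation (illustrative; the script and file names are placeholders):
#   python run_model.py features.csv results/ --epoch 20 --batch_size 256 --device cuda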
def split_dataset_horizontal(dataset, rate=0.2, is_split=True):
num_train = int(len(dataset) * rate)
if is_split:
return dataset[:num_train], dataset[num_train:]
else:
return np.copy(dataset[:num_train]), dataset
def clear_specific_class(data, class_name):
data = list(filter(lambda p: p[-1] != class_name, data))
return data
def output_score(scores, name):
f = open(name,"w",newline='')
writer=csv.writer(f,dialect='excel')
for i in range(len(scores[0])):
p = []
for h in scores:
p.append(h[i])
writer.writerow(p)
f.close()
def scale_data(data, scalar=None):
# data = MinMaxScaler(feature_range=(0, 1)).fit_transform(data)
if scalar is None:
scalar = MinMaxScaler(feature_range=(0, 1)).fit(data)
data = scalar.transform(data)
return data, scalar
def shuffle_data(data, seed=1310):
data = shuffle(data, random_state=seed)
return data
def transform(data_list):
features = []
labels = []
raw_labels = []
seq_length = []
for data in data_list:
feature = []
for i in range(min(len(data) - 2, WINDOW_SIZE)):
dst_port, hlength, pllength, delta_ts, TCP_winsize = [int(ele) for ele in data[i].split('|')]
# emb_src_port = big_unpackbits(src_port, 2)
emb_dst_port = big_unpackbits(dst_port, 2)
emb_hlength = big_unpackbits(hlength, 4)
emb_pllength = big_unpackbits(pllength, 4)
emb_TCP_winsize = big_unpackbits(TCP_winsize, 4)
emb_delta_ts = np.array([delta_ts])
# emb_dst_port = np.array([dst_port])
# emb_hlength = np.array([hlength])
emb_pllength = np.array([pllength])  # overrides the unpacked-bits encoding above
# emb_TCP_winsize = np.array([TCP_winsize])
h = np.concatenate((emb_dst_port, emb_TCP_winsize,emb_hlength, emb_pllength, emb_delta_ts))
feature.append(h)
if len(data) - 2 < WINDOW_SIZE:
for i in range(WINDOW_SIZE + 2 - len(data)):
feature.append(np.array([0] * len(h)))
features.append(np.stack(feature))
labels.append(int(data[-2]))
raw_labels.append(data[-1])
seq_length.append(min(len(data) - 2, WINDOW_SIZE))
return (np.stack(features).astype(np.double).reshape(len(features), -1),
np.array(labels, dtype=int), np.array(seq_length, dtype=int),
np.array(raw_labels))
def big_unpackbits(mynum, max_block=1):
# return np.array([mynum])
cutted_num = np.array([(mynum>>i*8)&0xff for i in range(max_block)], dtype=np.uint8)
return np.unpackbits(cutted_num)
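# Illustrative example: big_unpackbits(258, 2) splits 258 into the two
# little-endian bytes [2, 1] and unpacks each into 8 bits, yielding a
# 16-element 0/1 vector.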
def read_data(path):
with open(path, 'r') as f:
reader = csv.reader(f, delimiter=',')
data_list = list(reader)
return data_list
def eval_data(label, predicted_label, raw_label):
ifR = pd.crosstab(label, predicted_label)
ifR = pd.DataFrame(ifR)
print(ifR)
rawifR = pd.crosstab(raw_label, predicted_label)
print(pd.DataFrame(rawifR))
f1 = f1_score(label, predicted_label, average='binary', pos_label=1)
precision = precision_score(label, predicted_label, average='binary', pos_label=1)
recall = recall_score(label, predicted_label, average='binary', pos_label=1)
accuracy = accuracy_score(label, predicted_label)
return precision, recall, f1, accuracy
def get_label_n(predicted_score, contam):
threshold = percentile(predicted_score, 100 * (1 - contam))
predicted_label = (predicted_score > threshold).astype('int')
return predicted_label
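# For example, with contam=0.2 the threshold is the 80th percentile of the
# scores, so roughly the top 20% of samples are labeled as outliers (1).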
def exec_lstm_autoencoder(train_labeled_data, train_unlabeled_data, test_data, epoch, save_name, device, batch_size, theta):
print("now execute the model LSTM AutoEncoder by Pytorch!")
lstmAutoencoder = LSTMAutoEncoder(train_unlabeled_data[0].shape[2], train_unlabeled_data[0].shape[1], device,save_name, theta=theta)
lstmAutoencoder.train_model(train_labeled_data, train_unlabeled_data, test_data, epoch=epoch, batch_size=batch_size)
predicted_label, predicted_score, classify_score, confidence_score = lstmAutoencoder.evaluate_model(test_data)
# predicted_label = get_label_n(predicted_score, contam)
roc=roc_auc_score(test_data[1], predicted_score)
print("roc auc= %.6lf" %(roc))
roc=roc_auc_score(test_data[1], classify_score)
print("roc auc classify= %.6lf" %(roc))
output_score((classify_score, predicted_score, test_data[1]), 'lstmAutoencoder.csv')
# use a local name that does not shadow the imported f1_score function
precision, recall, f1, accuracy = eval_data(test_data[1], predicted_label, test_data[3])
print("precision = %.6lf\nrecall = %.6lf\nf1_score = %.6lf\naccuracy = %.6lf"
%(precision, recall, f1, accuracy))
new_name = "{}_{:.4f}.json".format(save_name, roc)
dicts = {}
dicts['test_auc'] = roc
dicts['f1_score'] = f1
dicts['test_scores'] = list(predicted_score)
dicts['conf_scores'] = list(confidence_score)
dicts['test_label'] = list(test_data[1].astype(float))
dicts['test_raw_label'] = list(test_data[3])
with open(new_name,"w") as f:
json.dump(dicts,f)
f.close()
def main(args):
dataset = read_data(args.inputpath)
print("Reading Data done......")
dataset = shuffle_data(dataset, seed=args.seed)
train_data, test_data = split_dataset_horizontal(dataset, 0.6, True)
train_data = clear_specific_class(train_data, FILTER_CLASS_NAME[args.filter_class])
train_labeled_data, train_unlabeled_data = split_dataset_horizontal(train_data, args.ratio_label, False)
train_labeled_feature, train_label, train_labeled_seqlen, train_raw_label = transform(train_labeled_data)
train_unlabeled_feature, _, train_unlabeled_seqlen, _ = transform(train_unlabeled_data)
test_feature, test_label, test_seqlen, test_raw_label = transform(test_data)
# p, scalar = scale_data(train_unlabeled_feature[:,:,-4:].reshape(-1, 4))
# train_unlabeled_feature[:,:,-4:] = p.reshape(-1, WINDOW_SIZE, 4)
# p, _ = scale_data(train_labeled_feature[:, :, -4:].reshape(-1, 4), scalar)
# train_labeled_feature[:,:,-4:] = p.reshape(-1, WINDOW_SIZE, 4)
# p, _ = scale_data(test_feature[:, :, -4:].reshape(-1, 4), scalar)
# test_feature[:,:,-4:] = p.reshape(-1, WINDOW_SIZE, 4)
train_unlabeled_feature, scalar = scale_data(train_unlabeled_feature)
train_labeled_feature, _ = scale_data(train_labeled_feature, scalar)
test_feature, _ = scale_data(test_feature, scalar)
train_unlabeled_feature = train_unlabeled_feature.reshape(len(train_unlabeled_feature), WINDOW_SIZE, -1)
train_labeled_feature = train_labeled_feature.reshape(len(train_labeled_feature), WINDOW_SIZE, -1)
test_feature = test_feature.reshape(len(test_feature), WINDOW_SIZE, -1)
print("Preprocessing Data done......")
save_name = "{}_{:.2f}_{}".format('ue-ssgru', args.ratio_label, FILTER_CLASS_NAME[args.filter_class])
save_name = args.output_path + '/' + save_name
train_labeled_data = (train_labeled_feature, train_label, train_labeled_seqlen)
train_unlabeled_data = (train_unlabeled_feature, train_unlabeled_seqlen)
test_data = (test_feature, test_label, test_seqlen, test_raw_label)
exec_lstm_autoencoder(train_labeled_data, train_unlabeled_data, test_data, args.epoch, save_name, args.device, args.batch_size, args.theta)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
|
from . import files
from . import error_logs
from . import runtime
modules = [files, error_logs, runtime]
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
class JobsSpiderPipeline(object):
def __init__(self):
self.conn = pymysql.connect(host='192.168.33.10', user='root', passwd='root', db='spider', charset='utf8')
self.cur = self.conn.cursor()
def open_spider(self, spider):
print('spider start')
def process_item(self, item, spider):
import time
# Format the timestamp as 2016-03-20 11:45:39
now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
# Insert the row; query parameters avoid SQL injection from scraped values
sql = ("insert ignore into jobs(job_name,job_area,job_price,job_company,"
"job_url,flag,create_time) values(%s,%s,%s,%s,%s,%s,%s)")
params = (item['job_name'], item['job_area'], item['job_price'],
item['job_company'], item['job_url'], item['flag'], now)
#print(sql)
reCount = self.cur.execute(sql, params)
self.conn.commit()
return item
def close_spider(self, spider):
self.cur.close()
self.conn.close() |
import cx_Oracle
connection = cx_Oracle.connect('xndb', 'L6vz5vFwcWur', '10.15.14.89:1521/xndev')
# pprint(connection)
apply_id = '30260'
cursor = connection.cursor()
# bind the id as a parameter instead of concatenating it into the SQL
cursor.execute(
"SELECT * FROM HOUSE_COMMON_LOAN_INFO WHERE APPLY_ID = :apply_id",
apply_id=apply_id
)
# cursor.execute(
# "UPDATE house_common_loan_info t SET t.pay_date=sysdate, t.status='LOAN_PASS' WHERE t.apply_id= '39117'",
# )
# apply_code = 'GZ20180330C14'
#
# sql = "UPDATE house_common_loan_info t SET t.pay_date=sysdate, t.status='' \
# WHERE t.apply_id= (SELECT t.apply_id FROM house_apply_info t \
# WHERE t.apply_code =" + "'" + apply_code + "'" + ")"
# cursor.execute(sql)
# connection.commit()
# print(cursor.fetchall()[0])
# pprint(cursor.fetchall())
# Fetch the whole result set at once
res = cursor.fetchall()
for r in res:
print(r)
print(cursor.description)
# Fetch one row at a time
# row = cursor.fetchone()
# print(row)
# for i in cursor:
# pprint(i)
# pprint(cursor.description)
cursor.close()
connection.close()
|
import re
import math
def check_float(text):
match = re.fullmatch(r'[-+]?(?:\d+(?:\.\d*)?|\.\d+)', text)
return bool(match)
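# Illustrative: check_float('-3.14') -> True, check_float('1e5') -> False
# (the pattern accepts an optional sign and a decimal point, but no exponent).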
def func(x):
return math.cos(x) - x
def autofill(filename):
file = open(filename, 'w')
for i in range(-10, 10, 1):
file.write(str(i/10) + " " + str(func(i/10)) + "\n")
file.close()
def print_table(table):
print('\n', '{:^10}{:^10}'.format('x', 'y(x)'))
for i in range(len(table[0])):
print('{:^10.3f}{:^10.3f}'.format(table[0][i], table[1][i]))
def find_place(table, x):
for i in range(len(table[0])):
if table[0][i] > x:
return i
return len(table[0]) - 1
def splines(table, x):
length = len(table[0])
position = find_place(table, x)
h = [0 for i in range(length)]
A = [0 for i in range(length)]
B = [0 for i in range(length)]
D = [0 for i in range(length)]
F = [0 for i in range(length)]
a = [0 for i in range(length)]
b = [0 for i in range(length)]
c = [0 for i in range(length + 1)]
d = [0 for i in range(length)]
ksi = [0 for i in range(length + 1)]
eta = [0 for i in range(length + 1)]
for i in range(1, length):
h[i] = table[0][i] - table[0][i - 1]
for i in range(2, length):
A[i] = h[i - 1]
B[i] = -2 * (h[i - 1] + h[i])
D[i] = h[i]
F[i] = -3 * ((table[1][i] - table[1][i - 1]) / h[i] -
(table[1][i - 1] - table[1][i - 2]) / h[i - 1])
for i in range(2, length):
ksi[i + 1] = D[i] / (B[i] - A[i] * ksi[i])
eta[i + 1] = (A[i] * eta[i] + F[i]) / (B[i] - A[i] * ksi[i])
for i in range(length - 2, -1, -1):
c[i] = ksi[i + 1] * c[i + 1] + eta[i + 1]
for i in range(1, length):
a[i] = table[1][i - 1]
b[i] = (table[1][i] - table[1][i - 1]) / h[i] - h[i] / 3 * \
(c[i + 1] + 2 * c[i])
d[i] = (c[i + 1] - c[i]) / (3 * h[i])
# print(table[0][position], a, b, c, d, sep='\n')
res = (a[position] + b[position] * (x - table[0][position - 1]) +
c[position] * ((x - table[0][position - 1]) ** 2) +
d[position] * ((x - table[0][position - 1]) ** 3))
return res
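# A minimal usage sketch (illustrative): build a table for cos(x) - x on
# [-1.0, 0.9] with the [xs, ys] layout used above, then evaluate the cubic
# spline at an off-grid point.
xs = [i / 10 for i in range(-10, 10)]
table = [xs, [func(x) for x in xs]]
print(splines(table, 0.55))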
|
import paho.mqtt.client as mqtt
# get the localhost IP by using "hostname -I" in terminal
broker_ip = "10.128.0.3"
# 1883 is a default port that is unencrypted
broker_port = 1883
def imitation_bme():
# placeholder for real BME280 sensor readings; returns an empty payload
bme_data = ""
return bme_data
if __name__ == '__main__':
client = mqtt.Client()
client.connect(broker_ip, broker_port)
# run the network loop so the QoS 1 publish can complete before exiting
client.loop_start()
info = client.publish(topic="OpenAgBloom/Air/BME", payload=imitation_bme(), qos=1,
retain=False)
info.wait_for_publish()
client.loop_stop()
client.disconnect()
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
# Register your models here.
from .models import Registration
from .models import Student
from .models import Parent
from .models import Staff
from .models import Staff_Position
from .models import Grade
from .models import Fee
from .models import Subject
from .models import Payment
from .models import Salarie
from .models import Payroll
from .models import AcademicYear
from .models import StudentMark
from .models import ClassSchedule
from .models import StudentAttendance
from .models import UserLog
class RegistrationModelAdmin(admin.ModelAdmin):
list_display = ["lastName", "middleName", "firstName", "grade", "gender",
"phone_number", "email", "address", "city", "county",
"nationality", "dateOfBirth", "placeOfBirth", "regDate",
"country", "emergency", "emergency_phone", "transcript",
"created", "modified"]
list_display_links = ["lastName"]
list_filter = ["lastName", "middleName", "firstName"]
list_editable = ["firstName"]
search_fields = ["lastName", "firstName"]
class Meta:
model = Registration
admin.site.register(Registration, RegistrationModelAdmin)
class StudentModelAdmin(admin.ModelAdmin):
list_display = ["studentID", "registration", "student_photo",
"previous_school", "previous_school_address",
"last_year_attendance", "level", "enrollment_status",
"enrollment_Date", "created", "modified"]
list_display_links = ["registration"]
list_filter = ["enrollment_status", "previous_school"]
search_fields = ["previous_school", "enrollment_Date"]
class Meta:
model = Student
admin.site.register(Student, StudentModelAdmin)
class ParentModelAdmin(admin.ModelAdmin):
list_display = ["parentID", "student", "lastName", "middleName",
"firstName", "gender", "address", "phone_number", "email",
"nationality", "occupation", "entity_name",
"entity_address", "salary_range", "created", "modified"]
list_display_links = ["parentID"]
list_filter = ["lastName", "middleName", "firstName"]
list_editable = ["firstName"]
search_fields = ["lastName", "student"]
class Meta:
model = Parent
admin.site.register(Parent, ParentModelAdmin)
class StaffModelAdmin(admin.ModelAdmin):
list_display = ["staffID", "staff_photo", "lastName", "middleName",
"firstName", "gender", "phone_number", "email",
"dateOfBirth", "placeOfBirth", "nationality",
"numberOfSubject", "qualification", "experience",
"licence", "age", "created", "modified"]
list_display_links = ["staffID"]
list_filter = ["lastName", "middleName", "firstName"]
list_editable = ["firstName"]
search_fields = ["lastName", "numberOfSubject"]
class Meta:
model = Staff
admin.site.register(Staff, StaffModelAdmin)
class Staff_PositionModelAdmin(admin.ModelAdmin):
list_display = ["staff", "departmentName", "created"]
list_display_links = ["staff"]
list_filter = ["departmentName", "staff"]
search_fields = ["departmentName"]
class Meta:
model = Staff_Position
admin.site.register(Staff_Position, Staff_PositionModelAdmin)
class GradeModelAdmin(admin.ModelAdmin):
list_display = ["student", "grade", "sponsor", "totalNumberOfStudents",
"created"]
list_display_links = ["grade"]
list_filter = ["sponsor", "grade"]
search_fields = ["sponsor", "grade"]
class Meta:
model = Grade
admin.site.register(Grade, GradeModelAdmin)
class SubjectModelAdmin(admin.ModelAdmin):
list_display = ["staff", "subject", "created"]
list_display_links = ["subject"]
list_filter = ["staff", "subject"]
search_fields = ["subject"]
class Meta:
model = Subject
admin.site.register(Subject, SubjectModelAdmin)
class FeeModelAdmin(admin.ModelAdmin):
list_display = ["grade", "totalannualfee", "created", "modified"]
list_display_links = ["grade"]
list_filter = ["totalannualfee", "grade"]
search_fields = ["grade"]
class Meta:
model = Fee
admin.site.register(Fee, FeeModelAdmin)
class PaymentModelAdmin(admin.ModelAdmin):
list_display = ["ReceiptNo", "student", "fee", "installment",
"amount", "balance",
"digitalSignature",
"transaction_Date", "created", "modified"]
list_display_links = ["student"]
list_filter = ["installment", "amount"]
search_fields = ["ReceiptNo"]
class Meta:
model = Payment
admin.site.register(Payment, PaymentModelAdmin)
class SalarieModelAdmin(admin.ModelAdmin):
list_display = ["staff", "grossmonthlysalary", "monthlyincometax",
"contractperiod", "grossannualsalary", "accountnumber",
"taxDeduct", "yearstart",
"yearend", "created", "modified"]
list_display_links = ["staff"]
list_filter = ["contractperiod", "grossmonthlysalary"]
search_fields = ["staff"]
class Meta:
model = Salarie
admin.site.register(Salarie, SalarieModelAdmin)
class PayrollModelAdmin(admin.ModelAdmin):
list_display = ["staff", "monthly",
"netmonthlysalary", "month", "salaryDate", "created"]
list_display_links = ["staff"]
list_filter = ["monthly"]
search_fields = ["staff"]
class Meta:
model = Payroll
admin.site.register(Payroll, PayrollModelAdmin)
class AcademicYearModelAdmin(admin.ModelAdmin):
list_display = ["yearstart", "yearend", "semester", "student",
"isactive", "created"]
list_display_links = ["student"]
list_filter = ["yearstart"]
search_fields = ["student"]
class Meta:
model = AcademicYear
admin.site.register(AcademicYear, AcademicYearModelAdmin)
class StudentMarkModelAdmin(admin.ModelAdmin):
list_display = ["student", "semester", "subject", "peroid",
"gradeScore", "studenRank", "status", "created"]
list_display_links = ["student"]
list_filter = ["subject", "peroid"]
search_fields = ["subject", "gradeScore"]
class Meta:
model = StudentMark
admin.site.register(StudentMark, StudentMarkModelAdmin)
class ClassScheduleModelAdmin(admin.ModelAdmin):
list_display = ["yearstart", "yearend", "grade", "day", "classTimein",
"classTimeout", "semester", "subject", "staff",
"created", "modified"]
list_display_links = ["grade"]
list_filter = ["day", "classTimein"]
search_fields = ["staff", "subject"]
class Meta:
model = ClassSchedule
admin.site.register(ClassSchedule, ClassScheduleModelAdmin)
class StudentAttendanceModelAdmin(admin.ModelAdmin):
list_display = ["student", "semester", "grade", "day",
"reasonAbsent", "absentDate", "created"]
list_display_links = ["student"]
list_filter = ["grade", "student"]
search_fields = ["absent", "reasonAbsent"]
class Meta:
model = StudentAttendance
admin.site.register(StudentAttendance, StudentAttendanceModelAdmin)
class ContactInline(admin.StackedInline):
model = UserLog
can_delete = False
verbose_name_plural = 'user'
# Define a new User admin
class UserAdmin(BaseUserAdmin):
inlines = (ContactInline, )
# Re-register UserAdmin
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
|
import numpy as np
cubes = list(input())
cubes = np.array(list(map(int, cubes)))
zeroNum = 0
oneNum = 0
for i in range(0, len(cubes)):
if cubes[i] == 0:
zeroNum += 1
else:
oneNum += 1
print(min(zeroNum, oneNum) * 2)
|
# Generated by Django 3.0.2 on 2020-04-17 02:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0038_auto_20200416_2145'),
]
operations = [
migrations.AddField(
model_name='statements',
name='Net_Profit',
field=models.DecimalField(decimal_places=2, default=0, max_digits=10),
preserve_default=False,
),
]
|
class Solution:
def romanToInt(self, s: str) -> int:
conv = {'I':1,'V':5,'X':10,'L':50,'C':100,'D':500,'M':1000}
nums = 0
for i in range(len(s)-1):
if conv[s[i]] < conv[s[i + 1]]:
nums -= conv[s[i]]
else:
nums += conv[s[i]]
nums += conv[s[-1]]
return nums |
'''
File name: reconstructImg.py
Author:
Date created:
'''
import numpy as np
def reconstructImg(indexes, red, green, blue, targetImg):
# indexes holds 1-based positions into the red/green/blue channel arrays;
# pixels whose index is 0 are left unchanged. Note the result is written
# into targetImg in place.
resultImg = targetImg
for j in range(0,targetImg.shape[0]):
for i in range(0,targetImg.shape[1]):
if indexes[j][i]:
idx = indexes[j][i]
resultImg[j][i][0] = red[int(idx-1)]
resultImg[j][i][1] = green[int(idx-1)]
resultImg[j][i][2] = blue[int(idx-1)]
return resultImg
|
# -*- coding: UTF-8 -*-
# The source code contained in this file is licensed under the MIT license.
# See LICENSE.txt in the main project directory, for more information.
# For the exact contribution history, see the git revision log.
import math
import re
import warnings
from libkne.controlrecord import ControlRecord
from libkne.custom_info_record import CustomInfoRecord
from libkne.data_line import DataLine
from libkne.accountingline import AccountingLine
from libkne.util import assert_match, assert_true, parse_short_date, \
_short_date, parse_number, parse_string, APPLICATION_NUMBER_TRANSACTION_DATA
__all__ = ['DataFile']
class DataFile(object):
def __init__(self, config, version_identifier=None):
self.config = config
self.binary_info = None
transaction_code = str(APPLICATION_NUMBER_TRANSACTION_DATA)
app_nr = str(config.get('application_number', transaction_code))
config['application_number'] = app_nr
is_transaction_data = (app_nr == transaction_code)
self.contains_transaction_lines = is_transaction_data
self.version_identifier = version_identifier
self.lines = []
self.cr = None
self.open_for_additions = True
self.number_of_blocks = None
def _client_total(self):
client_sum_total = 0
for line in self.lines:
client_sum_total += line.transaction_volume
if client_sum_total > 0:
bin_total = 'x'
else:
bin_total = 'w'
int_total = abs(int(100 * client_sum_total))
bin_total += '%014d' % int_total
bin_total += 'y' + 'z'
return bin_total
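# Illustrative: a positive client total of 123.45 is encoded as
# 'x' + '00000000012345' + 'y' + 'z' ('w' marks a negative total).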
def _get_complete_feed_line(self):
start_feed_line = '\x1d'
new_feed = '\x18'
bin_line = start_feed_line + new_feed
bin_line += self.config['version_complete_feed_line']
bin_line += '%03d' % self.config['data_carrier_number']
bin_line += self.config['application_number']
bin_line += self.config['name_abbreviation']
bin_line += self.config['advisor_number']
bin_line += self.config['client_number']
year2k = str(self.config['accounting_year'])[2:]
bin_line += self.config['accounting_number'] + year2k
bin_line += _short_date(self.config['date_start'])
bin_line += _short_date(self.config['date_end'])
bin_line += self.config['prima_nota_page']
bin_line += self.config['password']
bin_line += self.config['application_info']
bin_line += self.config['input_info']
bin_line += 'y'
assert len(bin_line) == 80
return bin_line
def _get_short_feed_line(self):
start_feed_line = '\x1d'
new_feed = '\x18'
bin_line = start_feed_line + new_feed
bin_line += self.config['version_complete_feed_line']
bin_line += '%03d' % self.config['data_carrier_number']
bin_line += self.config['application_number']
bin_line += self.config['name_abbreviation']
bin_line += self.config['advisor_number']
bin_line += self.config['client_number']
accounting_nr = '%04d' % int(self.config['accounting_number'])
bin_line += accounting_nr + str(self.config['accounting_year'])[2:]
bin_line += self.config['password']
bin_line += self.config['application_info']
bin_line += self.config['input_info']
bin_line += 'y'
assert len(bin_line) == 65
return bin_line
def _get_number_of_blocks(self):
number_of_bytes = len(self.binary_info)
number_of_blocks = int(math.ceil(float(number_of_bytes) / 256))
assert len(str(number_of_blocks)) <= 5
return number_of_blocks
def _get_versioninfo_for_transaction_data(self, version_identifier):
bin_versioninfo = '\xb5'
# TODO: handle the general ledger account number length separately?
bin_versioninfo += version_identifier
bin_versioninfo += '\x1c' + 'y'
assert len(bin_versioninfo) == 13
return bin_versioninfo
def _get_versioninfo_for_masterdata(self, version_identifier):
bin_versioninfo = '\xb6'
# TODO: handle the general ledger account number length separately?
bin_versioninfo += version_identifier
bin_versioninfo += '\x1c' + 'y'
assert len(bin_versioninfo) == 13
return bin_versioninfo
def append_line(self, line):
'''Append a new line to this file (only if to_binary() was not called
before on this file!). Return True if the line was appended
successfully else False. If False, no more lines can be appended to
this file.
If this file contains transaction data and the new line belongs to
another financial year, a ValueError is raised.'''
assert_true(self.open_for_additions)
if self.contains_transaction_data():
assert_true(self.may_add_transactions_for(line.date))
date_start = self.config['date_start']
if (date_start is None) or (date_start > line.date):
self.config['date_start'] = line.date
date_end = self.config['date_end']
if (date_end is None) or (date_end < line.date):
self.config['date_end'] = line.date
# Short feed line must contain the same year as the transaction data
self.config['accounting_year'] = line.date.year
self.lines.append(line)
return True
def build_control_record(self, nr_files):
cr = ControlRecord()
cr.file_no = nr_files + 1
cr.application_number = self.config['application_number']
cr.name_abbreviation = self.config['name_abbreviation']
cr.advisor_number = self.config['advisor_number']
cr.client_number = self.config['client_number']
cr.accounting_number = self.config['accounting_number']
cr.accounting_year = self.config['accounting_year']
cr.date_start = self.config['date_start']
cr.date_end = self.config['date_end']
cr.prima_nota_page = self.config['prima_nota_page']
cr.password = self.config['password']
cr.number_of_blocks = self.number_of_blocks
self.cr = cr
return cr
def contains_transaction_data(self):
result = self.contains_transaction_lines
if hasattr(self, 'cr') and self.cr is not None:
result = self.cr.describes_transaction_data()
return result
def _compute_number_of_fill_bytes(self):
number_of_bytes = len(self.binary_info)
missing_bytes = 256 - (number_of_bytes % 256)
return missing_bytes
def _add_fill_bytes_at_file_end(self):
missing_bytes = self._compute_number_of_fill_bytes()
if missing_bytes > 0:
self.binary_info += '\x00' * missing_bytes
def _insert_fill_bytes(self, binary_line):
number_fill_bytes = 6
blocks_used = len(self.binary_info) / 256 + 1
end_block_index = blocks_used * 256
free_bytes_in_block = end_block_index - number_fill_bytes \
- len(self.binary_info)
if free_bytes_in_block < len(binary_line):
binary_line = binary_line[:free_bytes_in_block] + \
('\x00' * number_fill_bytes) + \
binary_line[free_bytes_in_block:]
return binary_line
def finish(self):
if self.open_for_additions:
for line in self.lines:
for posting_line in line.to_binary():
self.binary_info += self._insert_fill_bytes(posting_line)
if self.contains_transaction_data():
self.binary_info += self._client_total()
else:
self.binary_info += 'z'
self._add_fill_bytes_at_file_end()
self.open_for_additions = False
def _remove_fill_bytes(self, binary_data, number_data_blocks):
filtered_data = binary_data
for i in range(number_data_blocks, 0, - 1):
end_index = 256 * i - 1
assert ('\x00' * 6) == filtered_data[end_index-5:end_index+1]
start_index = end_index
while filtered_data[start_index] == '\x00':
start_index -= 1
filtered_data = filtered_data[:start_index+1] + filtered_data[end_index+1:]
return filtered_data
def _check_feed_line(self, metadata, binary_data):
assert len(binary_data) == 80
assert binary_data[0] == '\x1d', repr(binary_data[0])
assert binary_data[1] == '\x18'
assert binary_data[2] == '1'
data_carrier_number = self.config['data_carrier_number']
err_msg = ('%d != %s' % (data_carrier_number, repr(binary_data[3:6])))
assert data_carrier_number == int(binary_data[3:6]), err_msg
application_number = int(binary_data[6:8])
application_number_cr = self.cr.parsed_data()['application_number']
assert application_number == application_number_cr, application_number
assert metadata['name_abbreviation'] == binary_data[8:10]
assert metadata['advisor_number'] == int(binary_data[10:17])
assert metadata['client_number'] == int(binary_data[17:22])
assert metadata['accounting_number'] == int(binary_data[22:26])
assert metadata['accounting_year'] == int(binary_data[26:28])
if self.cr.describes_transaction_data():
date_start = parse_short_date(binary_data[28:34])
assert metadata['date_start'] == date_start
date_end = parse_short_date(binary_data[34:40])
assert metadata['date_end'] == date_end
assert metadata['prima_nota_page'] == int(binary_data[40:43])
index = 43
else:
index = 28
assert metadata['password'] == binary_data[index:index+4]
metadata['application_info'] = binary_data[index+4:index+20].strip()
metadata['input_info'] = binary_data[index+20:index+36].strip()
assert 'y' == binary_data[index+36]
return index + 36
def _read_version_record(self, metadata, binary_data):
if self.cr.describes_transaction_data():
assert '\xb5' == binary_data[0], repr(binary_data[0])
else:
assert '\xb6' == binary_data[0], repr(binary_data[0])
assert '1' == binary_data[1]
assert ',' == binary_data[2]
used_general_ledger_account_no_length = int(binary_data[3])
assert used_general_ledger_account_no_length >= 4
assert used_general_ledger_account_no_length <= 8
metadata['used_general_ledger_account_no_length'] = \
used_general_ledger_account_no_length
assert ',' == binary_data[4]
stored_general_ledger_account_no_length = int(binary_data[5])
assert stored_general_ledger_account_no_length >= 4
assert stored_general_ledger_account_no_length <= 8
metadata['stored_general_ledger_account_no_length'] = \
stored_general_ledger_account_no_length
# DATEV SELF says in 5.3.2 (p. 152) that the used account number
# length must not be smaller than the stored account number length, but
# DATEV Rechnungswesen does export such files. The SELF Prüfprogramm warns
# that the account numbers will be cut from the right, so this algorithm is
# used here, too.
if used_general_ledger_account_no_length > stored_general_ledger_account_no_length:
msg_template = "Used general ledger account number in data file " + \
"is greater than the stored general ledger " + \
"account number (%d vs. %d). Account numbers " + \
"will be cut starting from right."
msg = msg_template % (used_general_ledger_account_no_length, stored_general_ledger_account_no_length)
warnings.warn(msg, UserWarning, stacklevel=0)
assert_match(',', binary_data[6])
# DATEV SELF specification says that the product abbreviation is 4 bytes
# long so the version info for transaction data is 13 bytes in total
# (5.3.2, p. 152).
# Unfortunately, DATEV Rechnungswesen may produce files with additional
# spaces after their 'REWE' identification...
# Furthermore 'Kanzlei Rechnungswesen' uses 6 Bytes for their product
# abbreviation (KAREWE).
product_abbreviation, end_index = parse_string(binary_data, 7)
if len(product_abbreviation) > 4:
msg_template = 'Product abbreviation is longer than 4 bytes: %s'
warnings.warn(msg_template % repr(product_abbreviation), UserWarning)
product_abbreviation = product_abbreviation.strip()
assert_true(re.match(r'^[\w0-9\-]{4,}$', product_abbreviation) is not None,
product_abbreviation)
self.config['product_abbreviation'] = product_abbreviation
assert_match('\x1c', binary_data[end_index], binary_data)
assert_match('y', binary_data[end_index + 1])
return end_index + 1
def may_add_transactions_for(self, date_for_new_line):
'''Returns True if transactions for this date may be added to this data
file, otherwise False. Transactions in one data file must all belong to
the same financial year. Currently it is assumed that the financial
year is the same as the calendar year.
Raises a ValueError if this file does not contain transaction data.'''
assert_true(self.contains_transaction_data())
same_financial_year = True
if len(self.lines) > 0:
year_for_new_line = date_for_new_line.year
same_calendar_year = (self.lines[0].date.year == year_for_new_line)
same_financial_year = same_calendar_year
return same_financial_year
def more_posting_lines(self, binary_data, end_index):
if (end_index < len(binary_data)) and \
(binary_data[end_index] not in ['x', 'w']):
return True
return False
def more_custom_info_records(self, binary_data, start_index):
"""Return True if there are custom info records available in
binary_data at the offset start_index."""
if start_index < len(binary_data) and binary_data[start_index] == '\xb7':
return True
return False
def _parse_custom_info_records(self, binary_data, start_index, line):
end_index = start_index
while self.more_custom_info_records(binary_data, end_index):
record, end_index = \
CustomInfoRecord.from_binary(binary_data, end_index)
line.custom_info_records.append(record)
end_index += 1
# In the last round of the loop we increment the counter although the
# loop condition fails afterwards, so we need to decrement end_index.
# This also holds if we did not parse any data, because then we need to
# fake end_index so that the character at start_index is passed on to
# the next function.
end_index -= 1
return end_index
def _check_client_total(self, binary_data, start_index):
nr_start_index = start_index
nr_max_end_index = start_index+1+14-1
client_total, end_index = parse_number(binary_data, nr_start_index, nr_max_end_index)
if binary_data[start_index] == 'w':
client_total *= -1
end_index += 1
assert 'y' == binary_data[end_index], repr(binary_data[end_index])
return end_index + 1
def _parse_transactions(self, binary_data, start_index, metadata):
end_index = start_index - 1
while True:
# There can be multiple subtotals between the lines so we must
# break if we really reached 'client total'
while self.more_posting_lines(binary_data, start_index):
line, end_index = AccountingLine.from_binary(binary_data, start_index, metadata)
self.lines.append(line)
end_index = self._parse_custom_info_records(binary_data, end_index+1, line)
start_index = end_index + 1
end_index = self._check_client_total(binary_data, end_index+2)
if binary_data[end_index] != 'z':
start_index = end_index
else:
break
return end_index
def more_master_data_lines(self, binary_data, start_index):
if start_index < len(binary_data) and binary_data[start_index] == 't':
return True
return False
def _parse_master_data(self, binary_data, start_index):
while self.more_master_data_lines(binary_data, start_index):
line, end_index = DataLine.from_binary(binary_data, start_index)
self.lines.append(line)
start_index = end_index + 1
assert 'z' == binary_data[start_index]
return start_index
def from_binary(self, binary_control_record, data_fp):
'''Takes a binary control record and a file-like object which contains
the data and parses them.'''
self.open_for_additions = False
cr = ControlRecord()
cr.from_binary(binary_control_record)
self.cr = cr
binary_data = data_fp.read()
metadata = self.get_metadata()
number_data_blocks = metadata['number_data_blocks']
assert_true(number_data_blocks > 0, number_data_blocks)
assert_match(256 * number_data_blocks, len(binary_data))
binary_data = self._remove_fill_bytes(binary_data, number_data_blocks)
end_index = self._check_feed_line(metadata, binary_data[:80])
relative_end_index = self._read_version_record(metadata, binary_data[end_index+1:])
end_index += relative_end_index + 1
start_index = end_index + 1
if self.contains_transaction_data():
end_index = self._parse_transactions(binary_data, start_index, metadata)
else:
end_index = self._parse_master_data(binary_data, start_index)
err_msg = ('%d + 1 != %d' % (end_index, len(binary_data)))
assert end_index + 1 == len(binary_data), err_msg
def get_metadata(self):
assert self.cr is not None
return self.cr.parsed_data()
def get_posting_lines(self):
assert_true(self.contains_transaction_data())
assert_true(not self.open_for_additions)
return self.lines
def get_master_data_lines(self):
assert not self.contains_transaction_data()
assert not self.open_for_additions
return self.lines
def to_binary(self):
if self.version_identifier is not None:
if self.contains_transaction_data():
feedline = self._get_complete_feed_line()
vid = self._get_versioninfo_for_transaction_data(self.version_identifier)
else:
feedline = self._get_short_feed_line()
vid = self._get_versioninfo_for_masterdata(self.version_identifier)
self.binary_info = feedline + vid
self.finish()
self.number_of_blocks = self._get_number_of_blocks()
return self.binary_info
|
import scrapy
import json
import random
import re
import time
import os
import pymongo
from urllib import parse
AGENTS = [
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.75 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.2 Safari/605.1.15',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362'
]
HEADERS = {
'method': 'GET',
'scheme': 'https',
'Referer': 'https://www.zhihu.com/',
'Origin': 'https://www.zhihu.com/',
'cache-control': 'no-cache',
'Connection': 'close',
}
META = {
}
PROXIES_Down = []
if os.path.isfile('./ipDown.txt'):
with open('./ipDown.txt', 'r', encoding='utf-8') as f:
ip = f.readline().strip()
while ip:
PROXIES_Down.append(ip)
ip = f.readline().strip()
PROXIES = []
if os.path.isfile('./ip.txt'):
with open('./ip.txt', 'r', encoding='utf-8') as f:
ip = f.readline().strip()
while ip:
if ip not in PROXIES_Down:
PROXIES.append(ip)
ip = f.readline().strip()
file_UFTSpiderOk = 'User_Follow_Topic_OK.txt'
file_UIdFrom = 'User_Follow_OK.txt'
class ZHPeopleFollowQuestionSpider(scrapy.Spider):
name = 'zhPFT'
custom_settings = {
"DOWNLOAD_DELAY": 5,
}
def start_requests(self):
userNO = self.findUserFollowTopicNotSpider()
HEADERS['User-Agent'] = random.choice(AGENTS)
if len(userNO) > 0:
print('\nCrawling topics followed by user (%s)\n' % userNO)
url = self.getPageUrl(userNO)
print(META)
yield scrapy.Request(url=url, headers=HEADERS, meta=META, callback=self.parse)
else:
print('All users have been crawled')
return None
def parse(self, response):
if 'proxy' in response.meta:
META['proxy'] = (response.meta)['proxy']
print('proxy : ' + str((response.meta)['proxy']))
regex_1 = r'\/people\/.*\/following/topics'
regex_2 = r'\/members\/.*\/following-topic-contributions?'
r_htmlEle = r'(<[\d\w\s\;\:\'\"\,\.\/\?\!\@\#\$\%\^\&\*\(\)\-\_\=\+]+\/*>)'
r_onlyNum = r'[^\d]'
followLimit = 100
follow = []
# Crawl the page
if re.search(regex_1, response.url) is not None:
print('Analyzing %s ...\n' % response.url)
UId = response.url.split('www.zhihu.com/people/')[-1].split('/following/topics')[0]
for item in response.css('div.List-item'):
try:
divT = item.css('div.ContentItem h2.ContentItem-title')
topic = {
'id': str(divT.css('a::attr(href)').get()).split('/topic/')[-1],
'title': str(divT.css('a div.Popover div::text').get()),
}
follow.append(topic)
except Exception as e:
print(e)
count = self.getUserFollowTopicCountFromMongoDB(UId)
count = self.writeFollowTopicToMongoDB(UId, follow, count)
if count < followLimit:
nextUrl = self.getAPIUrl(UId, count)
yield scrapy.Request(nextUrl, headers=HEADERS, meta=META, callback=self.parse)
elif re.search(regex_2, response.url) is not None:
print('Analyzing %s ...\n' % response.url)
res = json.loads(response.body_as_unicode())
UId = response.url.split('/members/')[-1].split('/following-topic-contributions?')[0]
offset = int(response.url.split('&offset=')[-1].split('&')[0])
limit = int(response.url.split('&limit=')[-1].split('&')[0])
is_end = True
try:
is_end = res['paging']['is_end']
except Exception as e:
print('KeyError : ', end=' ')
print(e)
print('Finished crawling user %s' % UId)
count = self.getUserFollowTopicCountFromMongoDB(UId)
with open(file_UFTSpiderOk, 'a', encoding='utf-8') as f:
f.write(UId + ':::' + str(count) + '\n')
userNO = self.findUserFollowTopicNotSpider()
if len(userNO) > 0:
print('\nCrawling topics followed by user (%s)\n' % userNO)
url = self.getPageUrl(userNO)
yield scrapy.Request(url=url, headers=HEADERS, meta=META, callback=self.parse)
return None
for data in res['data']:
try:
topic = {
'id': data['topic']['id'],
'title': data['topic']['name']
}
follow.append(topic)
except Exception as e:
print(str(data))
print(e)
count = self.getUserFollowTopicCountFromMongoDB(UId)
count = self.writeFollowTopicToMongoDB(UId, follow, count)
if not is_end and count < followLimit:
offset += limit
nextUrl = self.getAPIUrl(UId, offset)
yield scrapy.Request(nextUrl, headers=HEADERS, meta=META, callback=self.parse)
else:
with open(file_UFTSpiderOk, 'a', encoding='utf-8') as f:
f.write(UId + ':::' + str(count) + '\n')
userNO = self.findUserFollowTopicNotSpider()
if len(userNO) > 0:
print('\nCrawling topics followed by user (%s)\n' % userNO)
url = self.getPageUrl(userNO)
yield scrapy.Request(url=url, headers=HEADERS, meta=META, callback=self.parse)
else:
print('All users have been crawled')
return None
@staticmethod
def writeFollowTopicToMongoDB(UId, follow, count):
print('Analysis done, writing to MongoDB')
myClient = pymongo.MongoClient(host='127.0.0.1', port=27017)
mydb = myClient['CloudComputing']
mycol = mydb['UserFollow']
if len(follow) > 0:
print('follow topic ======')
# Update MongoDB
res = mycol.find_one({'urlToken': UId})
if res and 'urlToken' in res:
if 'topic' in res:
for topic in res['topic']:
if topic not in follow:
follow.append(topic)
mycol.update_one({'_id': res['_id']}, {'$set': {'topic': follow}})
else:
info = {
'urlToken': UId,
'topic': follow
}
mycol.insert_one(info)
count = len(follow)
print('Write complete; user %s now has %d followed topics stored' % (UId, count))
return count
@staticmethod
def getUserFollowTopicCountFromMongoDB(UId):
count = 0
myClient = pymongo.MongoClient(host='127.0.0.1', port=27017)
mydb = myClient['CloudComputing']
mycol = mydb['UserFollow']
res = mycol.find_one({'urlToken': UId})
if res and 'topic' in res:
count = len(res['topic'])
return count
@staticmethod
def findUserFollowTopicNotSpider():
userOKs = []
if os.path.isfile(file_UFTSpiderOk):
with open(file_UFTSpiderOk, 'r', encoding='utf-8') as f:
userOK = f.readline().strip()
while userOK:
userOKs.append(userOK.split(':::')[0])
userOK = f.readline().strip()
if os.path.isfile(file_UIdFrom):
with open(file_UIdFrom, 'r', encoding='utf-8') as f:
user = f.readline().strip()
while user:
if user.find(':::ERROR') == -1:
user = user.split(':::')[0]
if user not in userOKs:
return user
else:
user = f.readline().strip()
else:
user = f.readline().strip()
return ''
@staticmethod
def getPageUrl(UId):
return 'https://www.zhihu.com/people/' + UId + '/following/topics'
@staticmethod
def getAPIUrl(UId, offset):
return 'https://www.zhihu.com/api/v4/members/' + UId + '/following-topic-contributions?' \
+ 'include=data[*].topic.introduction' \
+ '&offset=' + str(offset) + '&limit=20' |
#!/usr/bin/env python
# coding: utf-8
import cgi
form = cgi.FieldStorage()
html_body = u"""
<html><head>
<meta http-equiv="content-type" content ="text/html;charset=utf-8">
</head>
<body>
%s
</body>
</html>"""
body_line = []
body = form.getvalue('body','N/A')
body = unicode(body,'utf-8','ignore')
for cnt in range(0,len(body),10):
line = body[:10]
line += ''.join([u'□' for i in range(len(line),10)])
body_line.append(line)
body = body[10:]
body_line_v = [u' '.join(reversed(x)) for x in zip(*body_line)]
print "Content-type: text/html¥n"
print (html_body % '<br />'.join(body_line_v)).encode('utf-8')
|
# -*- coding: utf-8 -*-
import io
import os
from jinja2 import Environment, FileSystemLoader, select_autoescape
import html_checker
from html_checker.export.render import ExporterRenderer
from html_checker.export.jinja_filters import highlight_html_filter
class JinjaExport(ExporterRenderer):
"""
Exporter with Jinja to produce an HTML report.
Keyword Arguments:
template_dir (string): Path to directory which contains template files.
Default to ``templates`` application directory.
Attributes:
TEMPLATES (dict): Each item is an available template where item key is
the document kind (as given in render context in 'modelize_***'
methods) and item value the template relative path from template
directory.
"""
klassname = __qualname__ # noqa: F821
FORMAT_NAME = "html"
DEFAULT_TEMLATE = "basic.html"
TEMPLATES = {
"stylesheet": "main.css",
"audit": "audit.html",
"summary": "summary.html",
"report": "report.html",
}
DOCUMENT_FILENAMES = {
"stylesheet": "main.css",
"audit": "index.html",
"summary": "index.html",
"report": "path-{}.html",
}
def __init__(self, *args, **kwargs):
template_dir = os.path.abspath(
os.path.join(
os.path.dirname(html_checker.__file__),
"templates",
)
)
self.template_dir = kwargs.pop("template_dir", None) or template_dir
self.jinja_env = self.get_jinjaenv()
super().__init__(*args, **kwargs)
def validate(self):
"""
Ensure template directory is valid.
Returns:
string: Returns an error message if any, else ``False``.
"""
# Directory exists
if not os.path.exists(self.template_dir):
msg = "Given template directory does not exists: {}"
return msg.format(self.template_dir)
# All required templates exist
missing_files = []
for name in sorted(self.TEMPLATES.keys()):
path = self.TEMPLATES[name]
if not os.path.exists(os.path.join(self.template_dir, path)):
missing_files.append(path)
if len(missing_files) > 0:
msg = "Some required files are missing from template directory: {}"
return msg.format(", ".join(missing_files))
return False
def get_jinjaenv(self):
"""
Start Jinja environment.
Returns:
jinja2.Environment: Initialized Jinja environment.
"""
env = Environment(
loader=FileSystemLoader(self.template_dir),
autoescape=select_autoescape(["html", "xml"])
)
env.filters["highlight_html"] = highlight_html_filter
return env
def get_template(self, filepath):
"""
Load and return Jinja template.
Arguments:
filepath (string): Filepath to the template from its registered
location in Jinja environment.
Returns:
jinja2.Template: Template ready to render.
"""
return self.jinja_env.get_template(filepath)
def render(self, context):
"""
Render document to HTML.
Rendered document is stored as an HTML string inside ``content``
item in document dict.
Arguments:
context (dict): Document context as returned from
``modelize_***`` methods.
Returns:
dict: The document ``context`` with its serialization inside
``content`` item.
"""
template_name = self.TEMPLATES[context["context"]["kind"]]
document = self.get_template(template_name)
return {
"document": context["document"],
"content": document.render(**{"export": context["context"]}),
}
def release(self, *args, **kwargs):
"""
Override original method to include 'stylesheet' document which is the
CSS stylesheet used from templates.
"""
documents = super().release(*args, **kwargs)
stylesheet_path = os.path.join(self.template_dir, self.TEMPLATES["stylesheet"])
with io.open(stylesheet_path, "r") as fp:
stylesheet = fp.read()
documents.append({
"document": self.DOCUMENT_FILENAMES["stylesheet"],
"content": stylesheet,
})
return documents
|
import os
xml_version = "3.8"
results_path = "" # Location of analysed results
db_path = "" # Point to final location of Excel database
db_name = "" # Set to final name for Excel database
xsd = "" # Set to where on shared drives this will be
xml_location = "" # Set to base location on shared drives for sending xml
pdf_location = "" # Set to base location on shared drives for storing pdf
# Global variables
test_method = "19"
# List of fields that do not require data for the xml and can contain no data
can_be_null = ["local_patient_id_2", "comments"]
allowed_authorisers = [""]
sample_status = {"w": "withdrawn", "f": "failed", "s": "sequenced"} |
#!/usr/bin/python3
"""0-lookup.py
"""
def lookup(obj):
"""eturns the list of available attributes and methods of an objec
Args:
obj: Object
"""
return dir(obj)
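# Illustrative: lookup([]) returns the attribute and method names of a list
# instance, e.g. 'append', 'count', 'sort', ...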
|
import math
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.utils import executor
import requests
from bs4 import BeautifulSoup
import re
last_film = {}
imdb_links = {}
TOKEN = '633998206:AAG_wQi0DWwUJIGwrZmg-XOubPXu707Z3Dk'
bot = Bot(token=TOKEN)
dp = Dispatcher(bot)
def get_href(film_name):
words = film_name.split()
url = 'https://www.google.ru/search?q='
for word in words:
url = url + word + '%20'
headers = {'User-Agent': 'Chrome/70.0.3538.77 Safari/537.36'}
url = url + 'смотреть%20онлайн%20в%20хорошем%20качестве'
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, 'html')
successor_urls = soup.findAll('div', class_='g')
links = []
k = 0
i = 0
while k < 5 and i < len(successor_urls):  # guard against fewer than 5 results
href = successor_urls[i].find('a').get('href')
if ("kinokrad" not in href) and ("kinobar" not in href) and ("smotri-filmi" not in href) \
and ("hdrezka.ag" not in href) and ("kinogo" not in href) and ("youtube" not in href) and ("gidonline" not in href):
match = re.search('&sa=U', href)
another_match = re.search("https?://", href)
n = match.start()
m = another_match.start()
links.append(successor_urls[i].find('a').get('href')[m:n])
k += 1
i += 1
return links
def get_wiki_href(film_name):
words = film_name.split()
url = 'https://www.google.ru/search?q='
for word in words:
url = url + word + '%20'
headers = {'User-Agent': 'Chrome/70.0.3538.77 Safari/537.36'}
url = url + 'фильм%20википедия'
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, 'html')
k = 0
for i in soup.findAll('cite'):
k += 1
if k == 5:
break
if 'wikipedia' in i.text:
return i.text
def get_wiki_poster(url):
headers = {'User-Agent': 'Chrome/70.0.3538.77 Safari/537.36'}
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, 'html')
return soup.find('table', class_='infobox').find('img').get('src')[2:]
def get_imdb_link(film_name):
url = "https://www.imdb.com/find?ref_=nv_sr_fn&q="
words = film_name.split()
for word in words:
url = url + word + '+'
url = url + "&s=all"
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html')
film_url = soup.find('td', class_='result_text').find('a').get('href')
new_url = "https://www.imdb.com" + film_url
return new_url
def get_poster(url):
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html')
return soup.find('div', class_='poster').find('img').get('src')
def get_rating(url):
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html')
return soup.find('div', class_='imdbRating').strong.get('title')
def get_info(url):
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html')
return soup.find('div', class_='plot_summary').find(
'div', class_='summary_text').text.replace('\n', ' ').strip()
@dp.message_handler(commands=['start'], commands_prefix='!/')
async def process_start_command(message: types.Message):
await message.reply("Привет!\nНапиши мне название фильма!")
@dp.message_handler(commands=['help'], commands_prefix='!/')
async def process_help_command(message: types.Message):
await bot.send_message(message.from_user.id, "Напиши название фильма и я выдам о нем информацию \n \
/poster -- постер данного фильма \n \
/watch -- еще места, где посмотреть фильм \n \
/rating -- рейтинг фильма по версии imdb \n \
/wiki -- данный фильм на википедии \n \
/imdb -- данный фильм на imdb")
@dp.message_handler(commands=['rating'], commands_prefix='!/')
async def process_rating_command(message: types.Message):
    global last_film
    try:
        film_name = last_film[message.chat.id]
    except KeyError:
        await message.reply("You haven't sent me a movie title yet :(")
        return
    try:
        imdb_link = get_imdb_link(film_name)
    except Exception:
        await bot.send_message(message.from_user.id, "Sorry, I couldn't find this movie in the database")
        return
    rating = get_rating(imdb_link)
    await bot.send_message(message.from_user.id, rating)
@dp.message_handler(commands=['imdb'], commands_prefix='!/')
async def process_imdb_command(message: types.Message):
    global last_film
    try:
        film_name = last_film[message.chat.id]
    except KeyError:
        await message.reply("You haven't sent me a movie title yet :(")
        return
    try:
        imdb_link = get_imdb_link(film_name)
    except Exception:
        await bot.send_message(message.from_user.id, "Sorry, I couldn't find this movie in the database")
        return
    await bot.send_message(message.from_user.id, imdb_link)
@dp.message_handler(commands=['wiki'], commands_prefix='!/')
async def process_wiki_command(message: types.Message):
    global last_film
    try:
        film_name = last_film[message.chat.id]
    except KeyError:
        await message.reply("You haven't sent me a movie title yet :(")
        return
    try:
        wiki_link = get_wiki_href(film_name)
    except Exception:
        await bot.send_message(message.from_user.id, "Sorry, I couldn't find this movie in the database")
        return
    await bot.send_message(message.from_user.id, wiki_link)
@dp.message_handler(commands=['poster'], commands_prefix='!/')
async def process_poster_command(message: types.Message):
    global last_film
    try:
        film_name = last_film[message.chat.id]
    except KeyError:
        await message.reply("You haven't sent me a movie title yet :(")
        return
    try:
        wiki_link = get_wiki_href(film_name)
        poster_url = "https://" + get_wiki_poster(wiki_link)
    except Exception:
        await bot.send_message(message.from_user.id, "Sorry, I couldn't find this movie in the database")
        return
    try:
        await bot.send_photo(message.chat.id, types.InputFile.from_url(poster_url))
    except Exception:
        # fall back to sending the raw poster URL if the upload fails
        await bot.send_message(message.from_user.id, poster_url)
@dp.message_handler(commands=['watch'], commands_prefix='!/')
async def process_watch_command(message: types.Message):
    try:
        film_name = last_film[message.chat.id]
    except KeyError:
        await message.reply("You haven't sent me a movie title yet :(")
        return
    try:
        answer = get_href(film_name)
        await bot.send_message(message.from_user.id, "In case the first link didn't work, you can also try these")
        new_films = ''
        for href in answer[1:]:
            new_films = new_films + href + '\n'
        await bot.send_message(message.from_user.id, new_films)
    except Exception:
        await bot.send_message(message.from_user.id, "Sorry, I can't find where to watch this movie")
@dp.message_handler()
async def film_info(msg: types.Message):
    # remember the last title each chat asked about
    global last_film
    film_name = msg.text
    last_film[msg.chat.id] = film_name
    # where to watch it
    try:
        answer = get_href(film_name)
        await bot.send_message(msg.from_user.id, 'You can watch the movie here')
        await bot.send_message(msg.from_user.id, answer[0])
    except Exception:
        await bot.send_message(msg.from_user.id, "Sorry, I can't find where to watch this movie")
    try:
        imdb_link = get_imdb_link(film_name)
    except Exception:
        await bot.send_message(msg.from_user.id, "Sorry, I couldn't find this movie in the database")
        return
    # and the plot summary
    info = get_info(imdb_link)
    await bot.send_message(msg.from_user.id, info)
if __name__ == '__main__':
executor.start_polling(dp)
|
import boto3
import os
s3 = boto3.resource(
service_name='s3',
region_name='us-east-1',
aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
aws_secret_access_key=os.environ['AWS_SECRET_KEY_ID']
)
bucket = s3.Bucket('pkxd-gsn')
for obj in bucket.objects.filter(Prefix='posts/images'):
    # recreate the key's directory structure locally before downloading
    os.makedirs(os.path.dirname(obj.key), exist_ok=True)
    bucket.download_file(obj.key, obj.key)
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2020 James
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Based on https://github.com/Rapptz/discord.py/blob/master/discord/file.py
"""
import hashlib
import imghdr
import io
import struct
from time import time
from typing import TYPE_CHECKING, Union
__all__ = ("Image",)
if TYPE_CHECKING:
from os import PathLike
AnyPath = Union[str, bytes, "PathLike[str]", "PathLike[bytes]"]
class Image:
"""A wrapper around common image files. Used for :meth:`steam.User.send`.
Parameters
----------
fp: Union[:class:`io.BufferedIOBase`, :class:`str`]
An image or path-like to pass to :func:`open`.
spoiler: :class:`bool`
Whether or not to mark the image as a spoiler.
.. note::
Currently supported image types include:
- PNG
- JPG/JPEG
- GIF
"""
# TODO add support for "webm", "mpg", "mp4", "mpeg", "ogv"
__slots__ = ("fp", "spoiler", "name", "width", "height", "type", "hash")
def __init__(self, fp: Union[io.IOBase, AnyPath], *, spoiler: bool = False):
if isinstance(fp, io.IOBase):
self.fp = fp
else:
self.fp: io.BufferedReader = open(fp, "rb")
if not (self.fp.seekable() and self.fp.readable()):
raise ValueError(f"File buffer {fp!r} must be seekable and readable")
if len(self) > 10485760:
raise ValueError("file is too large to upload")
# from https://stackoverflow.com/questions/8032642
head = self.fp.read(24)
if len(head) != 24:
raise ValueError("Opened file has no headers")
self.type = imghdr.what(None, head)
if self.type == "png":
check = struct.unpack(">i", head[4:8])[0]
if check != 0x0D0A1A0A:
raise ValueError("Opened file's headers do not match a standard PNG's headers")
width, height = struct.unpack(">ii", head[16:24])
elif self.type == "gif":
width, height = struct.unpack("<HH", head[6:10])
elif self.type == "jpeg":
try:
self.fp.seek(0) # read 0xff next
size = 2
ftype = 0
while not 0xC0 <= ftype <= 0xCF or ftype in (0xC4, 0xC8, 0xCC):
self.fp.seek(size, 1)
byte = self.fp.read(1)
while ord(byte) == 0xFF:
byte = self.fp.read(1)
ftype = ord(byte)
size = struct.unpack(">H", self.fp.read(2))[0] - 2
# we are at a SOFn block
self.fp.seek(1, 1) # skip `precision' byte.
height, width = struct.unpack(">HH", self.fp.read(4))
except Exception as exc:
raise ValueError from exc
else:
raise TypeError("Unsupported file type passed")
self.spoiler = spoiler
self.width = width
self.height = height
self.hash = hashlib.sha1(self.read()).hexdigest()
self.name = f'{int(time())}_{getattr(self.fp, "name", f"image.{self.type}")}'
def __len__(self):
return len(self.read())
def read(self) -> bytes:
self.fp.seek(0)
read = self.fp.read()
self.fp.seek(0)
return read
def test_jpeg(h, _):  # adds support for more header types
    # note: h is bytes, so the signatures must be bytes literals
    # SOI APP2 + ICC_PROFILE
    if h[0:4] == b"\xff\xd8\xff\xe2" and h[6:17] == b"ICC_PROFILE":
        return "jpeg"
    # SOI APP14 + Adobe
    if h[0:4] == b"\xff\xd8\xff\xee" and h[6:11] == b"Adobe":
        return "jpeg"
    # SOI DQT
    if h[0:4] == b"\xff\xd8\xff\xdb":
        return "jpeg"
imghdr.tests.append(test_jpeg)
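# Usage sketch (illustrative; assumes a local "avatar.png" and a client that
# accepts Image instances, e.g. steam.User.send as mentioned in the docstring):
#     image = Image("avatar.png", spoiler=False)
#     print(image.type, image.width, image.height, image.hash)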
|
import autocomplete_light
from django import forms
from apps.posto import models
class PostoCreateForm(autocomplete_light.ModelForm):
class Meta:
model = models.Posto
fields = '__all__'
class PostoUpdateForm(forms.ModelForm):
class Meta:
model = models.Posto
fields = '__all__'
|
from flask import Flask, request, redirect, render_template, session, flash, url_for
from mysqlconnection import MySQLConnector
import re
app = Flask(__name__)
mysql = MySQLConnector(app, 'email')
app.secret_key = 'dfkndf.cdfsd.sd.dsv.sdv.sd.d.ds.v.v.v.!!'
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
@app.route('/')
def index():
query = "SELECT email FROM `email`.`emails`;"
emails = mysql.query_db(query)
return render_template('index.html', emails=emails)
@app.route('/add_email', methods=['POST'])
def add_email():
email = request.form['email']
valid = True
if email == "":
valid = False
flash("You need to enter something in the email field", 'danger')
    if not EMAIL_REGEX.match(email):
        valid = False
        flash("Email is not valid", 'danger')
    if not valid:
        return redirect('/')
    else:
query1 = "SELECT * FROM `email`.`emails`;"
query2 = "INSERT INTO `email`.`emails`(`email`) VALUES (:email);"
data = {
"email": request.form['email']
}
emails = mysql.query_db(query1)
if len(email) != 0:
for i in emails:
if i['email'] == request.form['email']:
flash("Email already exists!", 'danger')
return redirect('/')
mysql.query_db(query2, data)
flash("Email Successfully Added", 'success')
return redirect('/')
if __name__ == '__main__':
    app.run(debug=True)
|
import types
from graphene.types.field import Field
from graphene.types.unmountedtype import UnmountedType
from .hints import OptimizationHints
def field(field_type, *args, **kwargs):
if isinstance(field_type, UnmountedType):
field_type = Field.mounted(field_type)
optimization_hints = OptimizationHints(*args, **kwargs)
wrap_resolve = field_type.wrap_resolve
def get_optimized_resolver(self, parent_resolver):
resolver = wrap_resolve(parent_resolver)
resolver.optimization_hints = optimization_hints
return resolver
field_type.wrap_resolve = types.MethodType(get_optimized_resolver, field_type)
return field_type
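# Usage sketch (hypothetical; the accepted keyword arguments are whatever
# OptimizationHints in .hints defines -- this only shows the wrapping):
#     import graphene
#     class Query(graphene.ObjectType):
#         name = field(graphene.String())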
|
from . import utils
import torch
def tensor_shape(tsr, words=None):
if words is not None:
print(words)
print(utils.tensor_shape(tsr))
def count_tensor(tsr, words=None):
if words is not None:
print(words)
print(utils.count_tensor(tsr))
def peek_tensor(tsr):
assert isinstance(tsr, torch.Tensor)
out_str = 'size:{}, dtype:{}, device:{}'\
.format(tsr.shape, tsr.dtype, tsr.device)
print(out_str)
def vis_2d(tsr, trans=lambda x:x):
row, col = tsr.shape[:2]
tab = []
for i in range(row):
rows = []
for j in range(col):
rows.append(str(trans(tsr[i, j])))
tab.append(rows)
lines = [' '.join(x) for x in tab]
tab_str = '\n'.join(lines)
print(tab_str)
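# Minimal demo of the helpers above (commented; assumes torch is available):
#     t = torch.arange(6).reshape(2, 3)
#     peek_tensor(t)                     # prints size/dtype/device
#     vis_2d(t, trans=lambda x: int(x))  # prints the 2x3 grid of values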
|
class Device:
    def __init__(self, name, connected_by):
        self.name = name
        self.connected_by = connected_by
        self.connected = True
    def __str__(self):
        return f"Device {self.name} ({self.connected_by})"
    def disconnect(self):
        self.connected = False
        print("Disconnected.")
class Printer(Device):
    def __init__(self, name, connected_by, capacity):
        super().__init__(name, connected_by)
        self.capacity = capacity
        self.remaining_pages = capacity
    def __str__(self):
        return f"{super().__str__()} ({self.remaining_pages} pages remaining)"
    def print_pages(self, pages):
        if not self.connected:
            print("Your printer is not connected!")
            return
        print(f"Printing {pages} pages.")
        self.remaining_pages -= pages
printer = Device("Printer", "USB")
print(printer)
printer.disconnect()
printerLaze = Printer("PrinterWifi", "USB", 500)
printerLaze.disconnect()
printerLaze.print_pages(22) |
from django.db import models
from userprofile.models import Perfil
from reservas.models import ReservaArticulo
from reservas.models import ReservaEspacio
class PrestamoArticulo(models.Model):
reserva = models.ForeignKey(ReservaArticulo, on_delete=models.CASCADE)
administrador = models.ForeignKey(Perfil, on_delete=models.CASCADE)
ESTADOS_PRES = ((1, 'Vigente'), (2, 'Caducado'), (3, 'Perdido'))
estado_prestamo = models.IntegerField(choices=ESTADOS_PRES, default=1)
class PrestamoEspacio(models.Model):
reserva = models.ForeignKey(ReservaEspacio, on_delete=models.CASCADE)
administrador = models.ForeignKey(Perfil, on_delete=models.CASCADE)
ESTADOS_PRES = ((1, 'Vigente'), (2, 'Caducado'), (3, 'Perdido'))
estado_prestamo = models.IntegerField(choices=ESTADOS_PRES, default=1) |
from django.http import HttpResponseRedirect
from django.shortcuts import (
    render,
    redirect,
)
from members.forms import (
RegistrationForm,
EditProfileForm,
)
from django.urls import reverse
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth import update_session_auth_hash
def register(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('home:home'))
else:
print(form.errors)
else:
form = RegistrationForm()
args = {'form': form}
return render(request, 'members/reg_form.html', args)
def view_profile(request):
args = {'user': request.user}
return render(request, 'members/profile.html', args)
def edit_profile(request):
if request.method == 'POST':
form = EditProfileForm(request.POST, instance=request.user)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('members:view_profile'))
else:
print(form.errors)
else:
form = EditProfileForm(instance=request.user)
args = {'form': form}
return render(request, 'members/edit_profile.html', args)
def change_password(request):
if request.method =='POST':
form = PasswordChangeForm(data=request.POST, user=request.user)
if form.is_valid():
form.save()
update_session_auth_hash(request, form.user)
return HttpResponseRedirect(reverse('members:view_profile'))
else:
return redirect(reverse('members:change_password'))
else:
form = PasswordChangeForm(user=request.user)
args = {'form': form}
return render(request, 'members/change_password.html', args)
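# A matching urls.py sketch (assumed paths, for illustration; the view names
# mirror the reverse() calls above):
#     from django.urls import path
#     from members import views
#     app_name = 'members'
#     urlpatterns = [
#         path('register/', views.register, name='register'),
#         path('profile/', views.view_profile, name='view_profile'),
#         path('profile/edit/', views.edit_profile, name='edit_profile'),
#         path('change-password/', views.change_password, name='change_password'),
#     ]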
|
"""To run the following script, do the following
1) python
2) %run corrsin.py
"""
from __future__ import division, print_function
import numpy as np
pi = np.pi
from scipy.integrate import quadrature
from scipy.integrate import quad
from scipy.interpolate import splrep, splev
import matplotlib as mpl
mpl.rcParams['lines.linewidth'] = 3
#font = {'family' : 'serif', 'weight' : 'bold', 'size' : 24}
#rc('font', **font)
N = 256
kmax = N/2
nu = 1
epsilon = 1
eta = ((nu**3)/epsilon)**0.25
lower_limit = eta
upper_limit = 5
dns_lower_limit = eta
dns_upper_limit = eta*kmax
L = 2*np.pi
print "nu = ", nu
print "Grid size N = ", N
print "Epsilon (energy supply rate) = ", epsilon
print "dns_upper_limit = ", dns_upper_limit
print "Based on input_energy supply rate: eta, kmaxeta = ", eta, kmax*eta
print
print
# Formula from Pope page 232
beta = 5.2
ceta = 0.4
cL = 6.78
p0 = 2
CKolm = 1.6
def fL(keta, cL, p0):
# kL = keta*L/eta
cLprime = cL*(eta/L)
return (keta/np.sqrt(keta**2 + cLprime))**(5.0/3+p0)
def feta(keta, beta, ceta):
return np.exp(-beta*(((keta)**4+ceta**4)**0.25-ceta))
def E(keta, cL, p0, beta, ceta):
return keta**(-5.0/3)*fL(keta,cL, p0)*feta(keta, beta, ceta)
def kE(keta, cL, p0, beta, ceta):
return keta*E(keta, cL, p0, beta, ceta)
def D(keta, cL, p0, beta, ceta):
return keta**(1.0/3)*E(keta, cL, p0, beta, ceta)
tolerance = 1e-6
max_iter = 100
Etot = quad(E, lower_limit, upper_limit, args=(cL, p0, beta, ceta,))
Dtot = quad(D, lower_limit, upper_limit, args=(cL, p0, beta, ceta,))
etastar = (2*CKolm*Dtot[0])**(3.0/4)
# eta = etastar(nu/epsilon**3)^(1/4)
print "eta_star = ", etastar
Etotal = CKolm*np.sqrt(epsilon*eta)*etastar**(2.0/3)*Etot[0]
# division by eta due integral wrt keta
print "Etotal = ", Etotal
# print "Error_ Etot = ", Etot[1]
print
Dtotal = 2*CKolm*epsilon*etastar**(-4.0/3)*Dtot[0]
print "Dtotal = ", Dtotal
# print "Error_Dtot = ", Dtot[1]/eta
print
dns_Etot = quad(E, dns_lower_limit, dns_upper_limit, args=(cL, p0, beta, ceta,))
dns_Dtot = quad(D, dns_lower_limit, dns_upper_limit, args=(cL, p0, beta, ceta,))
dns_kEtot = quad(kE, dns_lower_limit, dns_upper_limit, args=(cL, p0, beta, ceta,))
dns_Etotal = CKolm*np.sqrt(epsilon*eta)*etastar**(2.0/3)*dns_Etot[0]
# division by eta due to the integral being w.r.t. keta
print("dns_Etotal = ", dns_Etotal)
# print("Error_dns_Etot = ", dns_Etot[1])
print()
dns_Dtotal = 2*CKolm*epsilon*etastar**(-4.0/3)*dns_Dtot[0]
print("dns_Dtotal = ", dns_Dtotal)
print()
eta1 = ((nu**3)/dns_Dtotal)**0.25
print("Based on DNS integral estimate: eta, kmaxeta = ", eta1, kmax*eta1)
Urms = (2.0*Etotal/3.0)**0.5
lam = (15.0*nu*Urms**2/Etotal)**0.5
Re = Urms*L/nu
Rlambda = Urms*lam/nu
avg_k = dns_kEtot[0]/dns_Etot[0]
print "Urms = ", Urms
print "lambda = ", lam
print "Re = ", Re
print "Rlambda = ", Rlambda
print "avg_k = ", avg_k
dx = 2*np.pi/N
print "CFL_dt (Courant no=0.5) = ", 0.5*dx/Urms
keta = np.arange(lower_limit, upper_limit, 0.1*eta)
k = keta/eta
ek = E(keta,cL, p0, beta, ceta)
fL_val = fL(keta, cL, p0)
feta_val = feta(keta, beta, ceta)
d_val = D(keta, cL, p0, beta, ceta)
#loglog(k, ek)
#show()
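# For reference, the model implemented above is Pope's model spectrum
# (see "Formula from Pope page 232" above): in dimensionless form, with
# x = k*eta,
#     E(x) = x**(-5.0/3) * fL(x) * feta(x)
# where fL shapes the energy-containing range and feta the dissipation
# range; Etotal and Dtotal are (up to the Kolmogorov-constant prefactors
# applied above) its energy and dissipation integrals.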
|
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2021 Philippe Faist
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Internal module. Internal API may move, disappear or otherwise change at any
# time and without notice.
from __future__ import print_function, unicode_literals
from ._parsingstatedelta import ParsingStateDelta
class LatexWalkerParsingStateEventHandler(object):
r"""
A LatexWalker parsing state event handler.
The LatexWalker instance will call methods on this object to determine how
to update the parsing state upon certain events, such as entering or exiting
math mode.
Events:
- enter math mode
- exit math mode
.. versionadded:: 3.0
The :py:class:`LatexWalkerParsingStateEventHandler` class was added in
`pylatexenc 3.0`.
"""
def enter_math_mode(self, math_mode_delimiter=None, trigger_token=None):
return ParsingStateDelta(
set_attributes=dict(
in_math_mode=True,
math_mode_delimiter=math_mode_delimiter
)
)
def leave_math_mode(self, trigger_token=None):
return ParsingStateDelta(
set_attributes=dict(
in_math_mode=False,
math_mode_delimiter=None
)
)
_default_parsing_state_event_handler = LatexWalkerParsingStateEventHandler()
class LatexWalkerBase(object):
r"""
Base class for a latex-walker. Essentially, this is all that the
classes and methods in the :py:mod:`latexnodes` module need to know about
what a LatexWalker does.
See also :py:class:`latexwalker.LatexWalker`.
.. versionadded:: 3.0
The :py:class:`LatexWalkerBase` class was added in `pylatexenc 3.0`.
"""
def parsing_state_event_handler(self):
r"""
Doc......
"""
return _default_parsing_state_event_handler
def parse_content(self, parser, token_reader=None, parsing_state=None,
open_context=None, **kwargs):
r"""
Doc......
"""
raise RuntimeError("LatexWalkerBase subclasses must reimplement parse_content()")
def make_node(self, node_class, **kwargs):
r"""
Doc......
"""
raise RuntimeError("LatexWalkerBase subclasses must reimplement make_node()")
def make_nodelist(self, nodelist, **kwargs):
r"""
Doc......
"""
raise RuntimeError("LatexWalkerBase subclasses must reimplement make_nodelist()")
def make_nodes_collector(self,
token_reader,
parsing_state,
**kwargs):
r"""
Doc......
"""
raise RuntimeError(
"LatexWalkerBase subclasses must reimplement make_nodes_collector()")
def make_latex_group_parser(self, delimiters):
r"""
Doc......
"""
raise RuntimeError(
"LatexWalkerBase subclasses must reimplement make_latex_group_parser()")
def make_latex_math_parser(self, math_mode_delimiters):
r"""
Doc......
"""
raise RuntimeError(
"LatexWalkerBase subclasses must reimplement make_latex_math_parser()")
def check_tolerant_parsing_ignore_error(self, exc):
r"""
You can inspect the exception object `exc` and decide whether or not to
attempt to recover from the exception (if you want to be tolerant to
parsing errors).
Return the exception object if it should be raised, or return None if
recovery should be attempted.
"""
return exc
def format_node_pos(self, node):
r"""
Doc......
"""
return 'character position '+repr(node.pos)
|
from flask import Flask, render_template, request
import pickle
import pandas as pd
app = Flask(__name__)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/analysis")
def analysis():
return render_template("analysis.html")
@app.route("/severity")
def severity():
return render_template("severity.html")
@app.route("/links")
def links():
return render_template("links.html")
@app.route("/faqs")
def faqs():
return render_template("faqs.html")
@app.route("/consultation")
def consultation():
return render_template("consultation.html")
@app.route("/knowmore")
def knowmore():
return render_template("knowmore.html")
@app.route("/place")
def place():
return render_template("place.html")
@app.route('/placefind',methods = ['POST','GET'])
def placefind():
if request.method == 'POST':
s = request.form['cities']
df = pd.read_excel('input/Book1.xlsx')
dis = []
for i,j in df.iterrows():
if s== j['HOSPITAL_LOCATION']:
m = j['HOSPITAL_NAME'],j['HOSPITAL_ADDRESS'],j['HOSPITAL_PHONENO'],j['HOSPITAL_LOCATION']
dis.append(m)
return render_template("list.html",u=dis)
@app.route('/predict',methods = ['POST'])
def predict():
    if request.method == 'POST':
        # collect the 15 questionnaire answers q1..q15 into one feature row
        ui = [int(request.form['q%d' % k]) for k in range(1, 16)]
        print(ui)
        l1 = [ui]
        print(l1)
filename=r'C:\Users\ananda2\OneDrive - UHG\Hackathon\model\rf_train_model.sav'
rfc=pickle.load(open(filename, 'rb'))
result = rfc.predict(l1)
print(result)
if result==1:
res="The child may have Autism Spectrum Disorder"
else:
res="The child doesn't have Autism Spectrum Disorder"
return render_template('result.html',u=res,re=result)
if __name__ == '__main__':
app.run(debug=True)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 15 15:02:48 2019
@author: S80240
"""
import telemetroly
from tqdm import tqdm
import datetime
def last_day_of_month(any_day):
next_month = any_day.replace(day=28) + datetime.timedelta(days=4) # this will never fail
return next_month - datetime.timedelta(days=next_month.day)
months=[]
for month in range(1, 13):
month_str=str(last_day_of_month(datetime.date(2009, month, 1)))
months.append(month_str[-2:]+'-'+month_str[5:7]+'-'+month_str[:4])
#for month in months[4:]:
for month in months[-2:-1]:
    month = '31-01-2009'
    query = 'afp'
    minDate = '01' + month[2:]
    maxDate = month
    page = 12
    #q=next(search_query)
    #print(q.bib['title'].replace(' ','_'))
    search_query = telemetroly.search_pubs_query(query, minDate, maxDate, page)
    f = open("..//"+query+maxDate+".txt", "a+")  # ,errors = 'ignore'
    for q in tqdm(search_query):
        try:
            f.write(q.bib['title']+"|"+q.bib['kicker']+"|"+q.bib['date']+"|"+q.bib['link']+"|"+q.bib['summary']+"|"+q.bib['body']+"\n")
        except Exception:
            f_e = open("..//"+query+maxDate+"_exception.txt", "a+")
            f_e.write(q.bib['title']+"|"+q.bib['kicker']+"|"+q.bib['date']+"|"+q.bib['link']+"\n")
            f_e.close()
    f.close()
#####
month='30-11-2010'
query='afp'
minDate='01'+month[2:]
maxDate=month
page=11
#q=next(search_query)
#print(q.bib['title'].replace(' ','_'))
search_query = telemetroly.search_pubs_query(query,minDate,maxDate,page)
f= open("..//"+query+maxDate+".txt","a+")#,errors = 'ignore'
for q in tqdm(search_query):
try:
f.write(q.bib['title']+"|"+q.bib['kicker']+"|"+q.bib['date']+"|"+q.bib['link']+"|"+q.bib['summary']+"|"+q.bib['body']+"\n")
    except Exception:
f_e= open("..//"+query+maxDate+"_exception.txt","a+")
f_e.write(q.bib['title']+"|"+q.bib['kicker']+"|"+q.bib['date']+"|"+q.bib['link']+"\n")
f_e.close()
f.close()
##################################
q1= next(search_query)
print(q1)
query='Thiago Alcántara tendrá que volver a operarse tras recaer de su lesión'
minDate='01-10-2014'
maxDate='31-10-2014'
bad_chars='[(){}<>,.@;:"\'?¿!¡/|]'
#q=next(search_query)
#print(q.bib['title'].replace(' ','_'))
search_query = telemetroly.search_pubs_query(query,minDate,maxDate)
f= open("1.txt","a+")#,errors = 'ignore'
for q in tqdm(search_query):
f.write(q.bib['title']+"|"+q.bib['kicker']+"|"+q.bib['date']+"|"+q.bib['link']+"|"+q.bib['summary']+"|"+q.bib['body']+"\n")
f.close()
|
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class userSkillProgressPlan(Base):
__tablename__ = 'userSkillProgressPlan'
# here we'll define what skill sets a particular person wants to work on,
# who they want to work on them with, and what their goal is.
userId = Column(Integer, primary_key=True, default=None)
skillSetId = Column(Integer, primary_key=True, default=None)
collabSetId = Column(Integer, primary_key=True, default=None)
goalSetId = Column(Integer, primary_key=True, default=None)
goalDeadlineInWeeks = Column(Integer, primary_key=True, default=None)
class users(Base):
__tablename__ = 'users'
userId = Column(Integer, primary_key=True)
userFullName = Column(String, nullable=False)
userName = Column(String, nullable=False, unique=True)
class skills(Base):
__tablename__ = 'skills'
skillId = Column(Integer, primary_key=True)
skillName = Column(String, nullable=False, unique=True)
skillDescription = Column(String, nullable=False)
class skillsMappedToSkillSet(Base):
__tablename__ = 'skillsMappedToSkillSet'
# Here we'll define which skills are in a skill set.
skillSetId = Column(Integer, primary_key=True)
skillIds = Column(String, nullable=False)
## This should be linked to what you want to work on. i.e. work life, a project, etc.
skillSetName = Column(String, nullable=False, unique=True)
class collaboratorMappedToCollabSet(Base):
__tablename__ = 'collaboratorMappedToCollabSet'
# Here we'll define which collaborators are in a collab set.
collabSetId = Column(Integer, primary_key=True)
collaboratorId = Column(Integer, primary_key=True, nullable=False, default=None)
collabAssocSkillSetId = Column(Integer, primary_key=True)
collabSetName = Column(String, nullable=False) ## this will need a foreign key dependency on skillSetName
class goalsMappedToGoalSets(Base):
__tablename__ = 'goalsMappedToGoalSets'
# Here we'll define what goals are in the goal set.
goalSetId = Column(Integer, primary_key=True)
goalId = Column(Integer, primary_key=True, nullable=False, default=None)
goalSetName = Column(String, nullable=False) ## this will need a foreign key dependency on skillSetName
class goals(Base):
__tablename__ = 'goals'
goalId = Column(Integer, primary_key=True)
skillId = Column(Integer, primary_key=True, nullable=False, default=None)
skillLevel = Column(Integer, nullable=False, default=None)
class weeklySkillSetFeedBack(Base):
__tablename__ = 'weeklySkillSetFeedBack'
# here we'll define what skill sets a particular person wants to work on,
# who they want to work on them with, and what their goal is.
userId = Column(Integer, primary_key=True, default=None)
weekId = Column(Integer, primary_key=True)
skillSetId = Column(Integer, primary_key=True, nullable=False, default=None)
skillId = Column(Integer, primary_key=True, nullable=False, default=None)
collaboratorId = Column(Integer, primary_key=True, nullable=False, default=None)
skillLevel = Column(Integer, nullable=False, default=None)
# Create an engine that stores data in the local directory's
# sqlalchemy_example.db file.
engine = create_engine('sqlite:///userProgressPlans.db')
# Create all tables in the engine. This is equivalent to "Create Table"
# statements in raw SQL.
Base.metadata.create_all(engine) |
#!/usr/bin/env python
import sys
import os
curdir = os.path.dirname(os.path.realpath(__file__))
sdir = os.path.join(curdir,"../")
sys.path.append(sdir)
from multiprocessing import Pool
from fastqtools.fastqReader.fastqReader import fastqReader
from fastqtools.fastqReader.fastqWriter import fastqWriter
from fastqtools.readprocess.readprocess import readprocess
from fastqtools.fastqSplit.fastqSplit import fastqSplit
from fastqtools.fastqSplit.fastqMerge import fastqMerge
from fastqtools.fastqStat.fastqStat import fastqStat
import uuid
def readclean(read,qual,qual_percent,head1,tail1,head2,tail2,n_percent,autoadapt,umis,min_length):
    r = readprocess(read)
    r.qual(qual,qual_percent)
    r.trim(head1,tail1,head2,tail2)
    r.nbase(n_percent)
    r.autoadaptremove(autoadapt)
    r.umi(umis)
    r.length(min_length)
    return r
def clean(r1,r2,prefix,qual,qual_percent,head1,tail1,head2,tail2,n_percent,autoadapt,umis,min_length):
    reads = fastqReader(r1,r2)
    tmp_name = "tmp.%s" % uuid.uuid1()
for read in reads:
        read = readclean(read,qual,qual_percent,head1,tail1,head2,tail2,n_percent,autoadapt,umis,min_length)
fastqWriter(read,tmp_name)
fq1 = "%s_R1.clean.fastq" % prefix
fq2 = "%s_R2.clean.fastq" % prefix
cmd = "mv %s_R1.fastq %s" % (tmp_name,fq1)
os.system(cmd)
cmd = "mv %s_R2.fastq %s" % (tmp_name,fq2)
os.system(cmd)
return (fq1,fq2)
def main(r1,r2,prefix,qual,qual_percent,head1,tail1,head2,tail2,n_percent,autoadapt,umis,min_length,threads):
    if threads == 1:
        clean(r1,r2,prefix,qual,qual_percent,head1,tail1,head2,tail2,n_percent,autoadapt,umis,min_length)
return
fqs = fastqSplit(r1,r2,threads,prefix)
pools = Pool(threads)
ps = []
todel = []
for idx,item in enumerate(fqs.items()):
r1,r2 = item[1]
todel.append(r1)
todel.append(r2)
prex = prefix + "-" + str(idx)
        p = pools.apply_async(clean,(r1,r2,prex,qual,qual_percent,head1,tail1,head2,tail2,n_percent,autoadapt,umis,min_length))
ps.append(p)
pools.close()
pools.join()
filts = []
for p in ps:
r1,r2 = p.get()
todel.append(r1)
todel.append(r2)
filts.append([r1,r2])
fq1,fq2 = fastqMerge(filts,prefix+".clean")
for d in todel:
os.remove(d)
return fq1,fq2
if __name__ == "__main__":
from docopt import docopt
usage = """
Usage:
fastqClean.py [options] -1 <fastq1> -2 <fastq2> -o <prefix>
Options:
#basic arguments
-1,--r1=<fastq> R1 fastq
-2,--r2=<fastq> R2 fastq
  -o,--prefix=<prefix> output prefix
-t,--threads=<int> threads used [default: 1]
#quality filtering
--min-qual=<quality> minimum base quality [default: 15]
--min-qual-max-percent=<num> minimum base quality max percentage [default: 0.5]
--max-nbase-percent=<num> max N-base percentage [default: 0.1]
#length filtering
--min-length=<num> minimum read length [default: 30]
#trimming
--trim1-head=<num> trim r1 num base from R1 head [default: 0]
--trim1-tail=<num> trim r1 num base from R1 tail [default: 0]
--trim2-head=<num> trim r2 num base from R2 head [default: 0]
--trim2-tail=<num> trim r2 num base from R2 tail [default: 0]
#adapter
  --auto-adaptor-trim automatically trim adapters via r1/r2 overlap (recommended)
#umi format
  --umi=<length|file> UMI length, or a file containing UMI barcodes.
"""
args = docopt(usage)
q1 = args["--r1"]
q2 = args["--r2"]
prefix = args["--prefix"]
qual = int(args["--min-qual"])
threads = int(args["--threads"])
qual_percent = float(args["--min-qual-max-percent"])
n_percent = float(args["--max-nbase-percent"])
min_length = int(args["--min-length"])
head1 = int(args["--trim1-head"])
head2 = int(args["--trim2-head"])
tail1 = int(args["--trim1-tail"])
tail2 = int(args["--trim2-tail"])
auto_adapt = args["--auto-adaptor-trim"]
umi = args["--umi"]
    main(q1,q2,prefix,qual,qual_percent,head1,tail1,head2,tail2,n_percent,auto_adapt,umi,min_length,threads)
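    # Example invocation (hypothetical file names):
    #   python fastqClean.py -1 sample_R1.fastq -2 sample_R2.fastq -o sample -t 4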
|
# coding:utf-8
import theano
import theano.tensor as T
import numpy as np
class Optimizer_SGD_my(object):
def __init__(self, lr=0.1, momentum=0.5, decay=0.01, nesterov=False):
super(Optimizer_SGD_my, self).__init__()
self.lr = theano.shared(lr)
self.momentum = theano.shared(momentum)
self.decay = theano.shared(decay)
self.nesterov = nesterov
self.updates = []
self.momentum_weights = None
def get_updates(self, weights_list, grads_list, loss, constraints):
        # compute the gradients
grads = T.grad(loss, weights_list)
        # momentum buffers for the weight updates
self.momentum_weights = [theano.shared(np.zeros(p.get_value().shape)) for p in weights_list]
for p, g, m in zip(weights_list, grads, self.momentum_weights):
v = self.momentum * m - self.lr * g
            # update the momentum term
self.updates.append((m, v))
            # update the weights; note: this Nesterov variant has not been verified against the textbook algorithm
if self.nesterov:
new_p = p + self.momentum * v - self.lr * g
else:
new_p = p + v
self.updates.append((p, new_p))
        # copy the gradient values out so they can be displayed or inspected
for gl, g in zip(grads_list, grads):
self.updates.append((gl, g))
return self.updates
    def update_lr(self):
        # use set_value so the shared variable itself is updated in place
        self.lr.set_value(self.lr.get_value() / (1 + self.decay.get_value()))
class Optimizer_Adagrad_my(object):
def __init__(self, lr=0.01, epsilon=1e-6):
super(Optimizer_Adagrad_my, self).__init__()
self.lr = theano.shared(lr)
self.epsilon = theano.shared(epsilon)
self.accumulators = None
self.updates = []
def get_updates(self, weights_list, grads_list, loss, constraints):
        # compute the gradients
grads = T.grad(loss, weights_list)
# accumulators
self.accumulators = [theano.shared(np.zeros(p.get_value().shape)) for p in weights_list]
for p, g, a in zip(weights_list, grads, self.accumulators):
new_a = a + T.sqr(g) # update accumulator
self.updates.append((a, new_a))
new_p = p - self.lr * g / T.sqrt(new_a + self.epsilon)
self.updates.append((p, new_p))
        # copy the gradient values out so they can be displayed or inspected
for gl, g in zip(grads_list, grads):
self.updates.append((gl, g))
return self.updates
class Optimizer_Adadelta_my(object):
def __init__(self, lr=1.0, rho=0.95, epsilon=1e-6):
super(Optimizer_Adadelta_my, self).__init__()
self.lr = theano.shared(lr)
self.rho = theano.shared(rho)
self.epsilon = theano.shared(epsilon)
self.weights = None
self.updates = []
def get_updates(self, weights_list, grads_list, loss, constraints):
        # compute the gradients
grads = T.grad(loss, weights_list)
accumulators = [theano.shared(np.zeros(p.get_value().shape)) for p in weights_list]
delta_accumulators = [theano.shared(np.zeros(p.get_value().shape)) for p in weights_list]
self.weights = accumulators + delta_accumulators
for p, g, a, d_a in zip(weights_list, grads, accumulators, delta_accumulators):
# update accumulator
new_a = self.rho * a + (1. - self.rho) * T.sqr(g)
self.updates.append((a, new_a))
# use the new accumulator and the *old* delta_accumulator
update = g * T.sqrt(d_a + self.epsilon) / T.sqrt(new_a + self.epsilon)
new_p = p - self.lr * update
self.updates.append((p, new_p))
# update delta_accumulator
new_d_a = self.rho * d_a + (1 - self.rho) * T.sqr(update)
self.updates.append((d_a, new_d_a))
        # copy the gradient values out so they can be displayed or inspected
for gl, g in zip(grads_list, grads):
self.updates.append((gl, g))
return self.updates
class Optimizer_RMSprop_my(object):
def __init__(self, lr=0.001, rho=0.9, epsilon=1e-6):
super(Optimizer_RMSprop_my, self).__init__()
self.lr = theano.shared(lr)
self.rho = theano.shared(rho)
self.epsilon = epsilon
self.weights = None
self.updates = []
def get_updates(self, weights_list, grads_list, loss, constraints):
        # compute the gradients
grads = T.grad(loss, weights_list)
# accumulators
self.weights = [theano.shared(np.zeros(p.get_value().shape)) for p in weights_list]
for p, g, a in zip(weights_list, grads, self.weights):
# update accumulator
new_a = self.rho * a + (1. - self.rho) * T.sqr(g)
self.updates.append((a, new_a))
new_p = p - self.lr * g / T.sqrt(new_a + self.epsilon)
self.updates.append((p, new_p))
        # copy the gradient values out so they can be displayed or inspected
for gl, g in zip(grads_list, grads):
self.updates.append((gl, g))
return self.updates
class Optimizer_Adam_my(object):
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8):
super(Optimizer_Adam_my, self).__init__()
self.iterations = theano.shared(0.)
self.lr = theano.shared(lr)
self.beta_1 = theano.shared(beta_1)
self.beta_2 = theano.shared(beta_2)
self.epsilon = epsilon
self.updates = []
self.weights = None
def get_updates(self, weights_list, grads_list, loss, constraints):
        # compute the gradients
grads = T.grad(loss, weights_list)
self.updates = [(self.iterations, self.iterations + 1.)]
t = self.iterations + 1
lr_t = self.lr * T.sqrt(1. - T.pow(self.beta_2, t)) / (1. - T.pow(self.beta_1, t))
ms = [theano.shared(np.zeros(p.get_value().shape)) for p in weights_list]
vs = [theano.shared(np.zeros(p.get_value().shape)) for p in weights_list]
self.weights = ms + vs
for p, g, m, v in zip(weights_list, grads, ms, vs):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
v_t = (self.beta_2 * v) + (1. - self.beta_2) * T.sqr(g)
p_t = p - lr_t * m_t / (T.sqrt(v_t) + self.epsilon)
self.updates.append((m, m_t))
self.updates.append((v, v_t))
new_p = p_t
self.updates.append((p, new_p))
        # copy the gradient values out so they can be displayed or inspected
for gl, g in zip(grads_list, grads):
self.updates.append((gl, g))
return self.updates
class Optimizer_Adamax_my(object):
def __init__(self, lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-8):
super(Optimizer_Adamax_my, self).__init__()
self.iterations = theano.shared(0.)
self.lr = theano.shared(lr)
self.beta_1 = theano.shared(beta_1)
self.beta_2 = theano.shared(beta_2)
self.epsilon = epsilon
self.updates = []
self.weights = None
def get_updates(self, weights_list, grads_list, loss, constraints):
        # compute the gradients
grads = T.grad(loss, weights_list)
self.updates = [(self.iterations, self.iterations + 1.)]
t = self.iterations + 1
lr_t = self.lr / (1. - T.pow(self.beta_1, t))
# zero init of 1st moment
ms = [theano.shared(np.zeros(p.get_value().shape)) for p in weights_list]
# zero init of exponentially weighted infinity norm
us = [theano.shared(np.zeros(p.get_value().shape)) for p in weights_list]
self.weights = ms + us
for p, g, m, u in zip(weights_list, grads, ms, us):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
u_t = T.maximum(self.beta_2 * u, T.abs_(g))
p_t = p - lr_t * m_t / (u_t + self.epsilon)
self.updates.append((m, m_t))
self.updates.append((u, u_t))
new_p = p_t
self.updates.append((p, new_p))
        # copy the gradient values out so they can be displayed or inspected
for gl, g in zip(grads_list, grads):
self.updates.append((gl, g))
return self.updates
class Optimizer_Nadam_my(object):
def __init__(self, lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-8, schedule_decay=0.004):
super(Optimizer_Nadam_my, self).__init__()
self.iterations = theano.shared(0.)
self.m_schedule = theano.shared(1.)
self.lr = theano.shared(lr)
self.beta_1 = theano.shared(beta_1)
self.beta_2 = theano.shared(beta_2)
self.schedule_decay = schedule_decay
self.epsilon = epsilon
self.updates = []
self.weights = None
def get_updates(self, weights_list, grads_list, loss, constraints):
        # compute the gradients
grads = T.grad(loss, weights_list)
# self.updates = [K.update_add(self.iterations, 1)]
self.updates = [(self.iterations, self.iterations + 1.)]
t = self.iterations + 1
# Due to the recommendations in [2], i.e. warming momentum schedule
momentum_cache_t = self.beta_1 * (1. - 0.5 * (T.pow(0.96, t * self.schedule_decay)))
momentum_cache_t_1 = self.beta_1 * (1. - 0.5 * (T.pow(0.96, (t + 1) * self.schedule_decay)))
m_schedule_new = self.m_schedule * momentum_cache_t
m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
self.updates.append((self.m_schedule, m_schedule_new))
# shapes = [K.get_variable_shape(p) for p in weights_list]
# ms = [K.zeros(shape) for shape in shapes]
# vs = [K.zeros(shape) for shape in shapes]
ms = [theano.shared(np.zeros(p.get_value().shape)) for p in weights_list]
vs = [theano.shared(np.zeros(p.get_value().shape)) for p in weights_list]
self.weights = [self.iterations] + ms + vs
for p, g, m, v in zip(weights_list, grads, ms, vs):
# the following equations given in [1]
g_prime = g / (1. - m_schedule_new)
m_t = self.beta_1 * m + (1. - self.beta_1) * g
m_t_prime = m_t / (1. - m_schedule_next)
v_t = self.beta_2 * v + (1. - self.beta_2) * T.sqr(g)
v_t_prime = v_t / (1. - T.pow(self.beta_2, t))
m_t_bar = (1. - momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime
self.updates.append((m, m_t))
self.updates.append((v, v_t))
p_t = p - self.lr * m_t_bar / (T.sqrt(v_t_prime) + self.epsilon)
new_p = p_t
self.updates.append((p, new_p))
        # copy the gradient values out so they can be displayed or inspected
for gl, g in zip(grads_list, grads):
self.updates.append((gl, g))
return self.updates
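# Usage sketch (hypothetical variable names; not part of the classes above):
# given a Theano scalar `loss` built from shared weights `weights_list`, and
# per-weight shared buffers `grads_list` for inspecting gradients, the
# updates plug straight into theano.function:
#
#     opt = Optimizer_Adam_my(lr=0.001)
#     updates = opt.get_updates(weights_list, grads_list, loss, constraints=None)
#     train_step = theano.function(inputs=[x, y], outputs=loss, updates=updates)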
|
from flask import jsonify
from app import create_app, socket_io
from app.config import Config
from Exceptions import NotFound, MethodNotAllowed, \
Forbiden, InternalServerError, ExistingResource,\
BadRequest, AuthError, UnAuthorized
config = Config()
app = create_app(config)
@app.errorhandler(NotFound)
@app.errorhandler(Forbiden)
@app.errorhandler(MethodNotAllowed)
@app.errorhandler(InternalServerError)
@app.errorhandler(ExistingResource)
@app.errorhandler(UnAuthorized)
@app.errorhandler(BadRequest)
def api_error(error):
payload = dict(error.payload or ())
payload['code'] = error.status_code
payload['message'] = error.message
payload['success'] = error.success
return jsonify(payload), error.status_code
if __name__ == "__main__":
socket_io.run(app)
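# For reference, api_error only relies on four attributes of each exception;
# a minimal compatible class (a sketch -- the real ones live in the project's
# Exceptions module) would look like:
#
#     class NotFound(Exception):
#         def __init__(self, message, payload=None):
#             super().__init__(message)
#             self.message = message
#             self.payload = payload
#             self.status_code = 404
#             self.success = False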
|
# 3.4.2 Computing a three-layer neural network
# a 2 => 3 => 2 network
import numpy as np
def sigmoid(x):
return 1 / (1 + np.exp(-x))
print('------Layer 1----------------------------------')
X = np.array([1.0, 0.5])
W1 = np.array( [[ 0.1, 0.3, 0.5 ], [ 0.2, 0.4, 0.6 ]] ) # shape (2, 3)
B1 = np.array( [0.1, 0.2, 0.3] )
print( W1.shape )
print( X.shape )
print( B1.shape )
A1 = np.dot(X, W1) + B1
print(A1)
Z1 = sigmoid(A1)
print(Z1)
print('------Layer 2----------------------------------')
W2 = np.array( [[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]] ) # shape (3, 2)
B2 = np.array( [0.1, 0.2] )
A2 = np.dot( Z1, W2 ) + B2
print(A2)
Z2 = sigmoid(A2)
print( Z2 )
print('------Layer 3----------------------------------')
W3 = np.array( [ [0.1, 0.3], [0.2, 0.4] ] )
B3 = [0.1,0.2]
A3 = np.dot(Z2, W3) + B3
print(A3)
Z3 = sigmoid(A3)
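# The three layers above follow one pattern; a compact helper that reproduces
# the same computation (sigmoid activation on every layer, as in this script):
def forward(x, layers):
    for W, b in layers:
        x = sigmoid(np.dot(x, W) + b)
    return x
assert np.allclose(forward(X, [(W1, B1), (W2, B2), (W3, B3)]), Z3)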
print(Z3) |
# -*- coding: utf-8 -*-
'''
* @Author : bpf
* @Date : 2020-09-20 18:51:37
* @Description : generate arithmetic expressions
* @LastEditTime : 2020-09-21 09:35:06
'''
import datetime
import os
from formula import OPT, GeneralFormular, ComputeFormular
def getInput():
    '''
    * Read the generation parameters from stdin
    '''
    print("{:^18} | {:^5} | {:^8}".format("Parameter", "Range", "Input"))
    print("{0:-<21}+{0:-<11}+{0:-<12}".format('-'))
    n = input("{:>14} | {:9} | ".format("number of expressions", "[>=1]"))
    while True:
        try:
            n = abs(int(n))
            break
        except Exception as e:
            print("[Error]: Input illegal! Please input again... ", end="")
            n = input()
    up_limit = input("{:>16} | {:9} | ".format("value upper limit", "[>=10]"))
    while True:
        try:
            up_limit = abs(int(up_limit))
            break
        except Exception as e:
            print("[Error]: Input illegal! Please input again... ", end="")
            up_limit = input()
    oper_num = input("{:>15} | {:9} | ".format("operand count", "[>=2]"))
    while True:
        try:
            oper_num = abs(int(oper_num))
            if oper_num < 2:
                oper_num = 2
                print("[Error]: Input illegal! Use default value 2.")
            break
        except Exception as e:
            print("[Error]: Input illegal! Please input again... ", end="")
            oper_num = input()
    oper_variety = input("{:>15} | {:9} | ".format("operator varieties", "[1~4]"))
    while True:
        try:
            oper_variety = abs(int(oper_variety))
            if oper_variety < 1 or oper_variety > 4:
                oper_variety = 4
                print("[Error]: Input illegal! Use default value 4.")
            break
        except Exception as e:
            print("[Error]: Input illegal! Please input again... ", end="")
            oper_variety = input()
    has_fraction = input("{:>14} | {:9} | ".format("include fractions?", "[0, 1]"))
    while True:
        try:
            has_fraction = abs(int(has_fraction))
            if has_fraction != 0 and has_fraction != 1:
                has_fraction = 0
                print("[Error]: Input illegal! Use default value 0.")
            break
        except Exception as e:
            print("[Error]: Input illegal! Please input again... ", end="")
            has_fraction = input()
    print("{0:-<46}".format('-'))
    return int(n), int(up_limit), int(oper_num), int(oper_variety), int(has_fraction)
def getOutputMode():
    '''
    * Choose the output mode for the generated expressions
    '''
    print(" generating expressions ...")
    print("{0:-<46}".format('-'))
    print("{:^37}".format("Please choose an output mode"))
    print(" 1. plain mode")
    print(" 2. quiz mode")
    print(" 3. file mode")
    mode = input(" choice: ")
    print("{0:-<46}".format('-'))
    try:
        mode = abs(int(mode))
    except Exception as e:
        mode = 1
    return mode
def displayFormular(mode, formulars, results):
    '''
    * Print the expressions in the chosen mode
    '''
    count = 0
    if mode == 1:
        for i in range(len(formulars)):
            print(formulars[i], results[i])
    elif mode == 2:
        start = datetime.datetime.now()
        for i in range(len(formulars)):
            result = input("Question {}: ".format(i+1) + formulars[i])
            if result == results[i]:
                count += 1
                flag = "correct ✔"
            else:
                flag = "wrong ✗"
            print(" correct answer: {}  your answer is {}".format(results[i], flag))
        print("\naccuracy: {}/{} ({:.2f}%)".format(count, len(formulars), 100*float(count/len(formulars))))
        print("time taken: {}".format(datetime.datetime.now() - start))
    else:
        filepath = input(" output file path: ")
        if not filepath.endswith(".txt"):
            portion = os.path.splitext(filepath)
            filepath = portion[0] + ".txt"
            print("[Error]: Input illegal! Use default value <{}>.".format(filepath))
        try:
            with open(filepath, 'w+') as f:
                for i in range(len(formulars)):
                    f.write(formulars[i] + results[i] + "\n")
            print(" file written successfully")
            print("{0:-<46}".format('-'))
        except Exception as e:
            print("Error: ", e)
            print("{0:-<46}".format('-'))
if __name__ == "__main__":
n, up_limit, oper_num, oper_variety, has_fraction = getInput()
# n, up_limit, oper_num, oper_variety, has_fraction = 5, 20, 4, 4, 0
opt = OPT(up_limit, oper_num, oper_variety, has_fraction)
gf = GeneralFormular(opt)
cf = ComputeFormular()
formulars = []
results = []
for i in range(int(n)):
f = gf.solve()
formulars.append(" ".join(i for i in f) + " = ")
results.append(cf.solve(f))
mode = getOutputMode()
displayFormular(mode, formulars, results)
|
# Read an integer from input and print "Fizz" if it is divisible by 3. Otherwise, print the number itself.
n = int(input("Enter an integer: "))
if n % 3 == 0:
print("Fizz")
else:
print(n) |
import scipy as sp
import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import spsolve
from scipy.linalg import det, eigh, eigvalsh
import time
# python 3 imports
from suftware.src import utils
from suftware.src import supplements
from suftware.src import maxent
# Import error handling
from suftware.src.utils import ControlledError
# Put hard bounds on how big or small t can be. T_MIN especially seems to help convergence
T_MAX = 40
T_MIN = -40
PHI_MAX = utils.PHI_MAX
PHI_MIN = utils.PHI_MIN
MAX_DS = -1E-3
PHI_STD_REG = utils.PHI_STD_REG
class Results():
pass
# Represents a point along the MAP curve
class MAP_curve_point:
def __init__(self, t, phi, Q, log_E, log_Z_correction, sample_mean, sample_mean_std_dev, details=False):
self.t = t
self.phi = phi
self.Q = Q
self.log_E = log_E
self.log_Z_correction = log_Z_correction
self.sample_mean = sample_mean
self.sample_mean_std_dev = sample_mean_std_dev
# self.details = details
# Represents the MAP curve
class MAP_curve:
def __init__(self):
self.points = []
self._is_sorted = False
def add_point(self, t, phi, Q, log_E, log_Z_correction, sample_mean, sample_mean_std_dev, details=False):
point = MAP_curve_point(t, phi, Q, log_E, log_Z_correction, sample_mean, sample_mean_std_dev, details)
self.points.append(point)
self._is_sorted = False
def sort(self):
self.points.sort(key=lambda x: x.t)
self._is_sorted = True
# Use this to get actual points along the MAP curve. This ensures that points are sorted
def get_points(self):
if not self._is_sorted:
self.sort()
return self.points
def get_maxent_point(self):
if not self._is_sorted:
self.sort()
p = self.points[0]
if not (p.t == -sp.Inf):
raise ControlledError('/MAP_curve/ Not getting MaxEnt point: t = %f' % p.t)
return p
def get_histogram_point(self):
if not self._is_sorted:
self.sort()
p = self.points[-1]
if not (p.t == sp.Inf):
raise ControlledError('/MAP_curve/ Not getting histogram point: t = %f' % p.t)
return p
def get_log_evidence_ratios(self, finite=True):
log_Es = sp.array([p.log_E for p in self.points])
ts = sp.array([p.t for p in self.points])
if finite:
indices = (log_Es > -np.Inf) * (ts > -np.Inf) * (ts < np.Inf)
return log_Es[indices], ts[indices]
else:
return log_Es, ts
#
# Convention: action, gradient, and hessian are G/N * the actual. This provides for more robust numerics
#
# Evaluate the action of a field given smoothness criteria
def action(phi, R, Delta, t, N, phi_in_kernel=False, regularized=False):
# Make sure phi is valid
if not all(np.isreal(phi)):
raise ControlledError('/action/ phi is not real: phi = %s' % phi)
if not all(np.isfinite(phi)):
raise ControlledError('/action/ phi is not finite: phi = %s' % phi)
# Make sure t is valid
if not np.isreal(t):
raise ControlledError('/action/ t is not real: t = %s' % t)
# if not np.isfinite(t):
# raise ControlledError('/action/ t is not finite: t = %s' % t)
# Make sure phi_in_kernel is valid
if not isinstance(phi_in_kernel, bool):
raise ControlledError('/action/ phi_in_kernel must be a boolean: phi_in_kernel = %s' % type(phi_in_kernel))
# Make sure regularized is valid
if not isinstance(regularized, bool):
raise ControlledError('/action/ regularized must be a boolean: regularized = %s' % type(regularized))
G = 1. * len(R)
quasiQ = utils.field_to_quasiprob(phi)
quasiQ_col = sp.mat(quasiQ).T
Delta_sparse = Delta.get_sparse_matrix()
phi_col = sp.mat(phi).T
R_col = sp.mat(R).T
ones_col = sp.mat(sp.ones(int(G))).T
if phi_in_kernel:
S_mat = G * R_col.T * phi_col + G * ones_col.T * quasiQ_col
else:
S_mat = 0.5 * sp.exp(
-t) * phi_col.T * Delta_sparse * phi_col + G * R_col.T * phi_col + G * ones_col.T * quasiQ_col
if regularized:
S_mat += 0.5 * (phi_col.T * phi_col) / (N * PHI_STD_REG ** 2)
S = S_mat[0, 0]
# Make sure S is valid
if not np.isreal(S):
raise ControlledError('/action/ S is not real at t = %s: S = %s' % (t, S))
if not np.isfinite(S):
raise ControlledError('/action/ S is not finite at t = %s: S = %s' % (t, S))
return S
# Evaluate action gradient w.r.t. a field given smoothness criteria
def gradient(phi, R, Delta, t, N, regularized=False):
# Make sure phi is valid
if not all(np.isreal(phi)):
raise ControlledError('/gradient/ phi is not real: phi = %s' % phi)
if not all(np.isfinite(phi)):
raise ControlledError('/gradient/ phi is not finite: phi = %s' % phi)
# Make sure t is valid
if not np.isreal(t):
raise ControlledError('/gradient/ t is not real: t = %s' % t)
if not np.isfinite(t):
raise ControlledError('/gradient/ t is not finite: t = %s' % t)
# Make sure regularized is valid
if not isinstance(regularized, bool):
raise ControlledError('/gradient/ regularized must be a boolean: regularized = %s' % type(regularized))
G = 1. * len(R)
quasiQ = utils.field_to_quasiprob(phi)
quasiQ_col = sp.mat(quasiQ).T
Delta_sparse = Delta.get_sparse_matrix()
phi_col = sp.mat(phi).T
R_col = sp.mat(R).T
grad_col = sp.exp(-t) * Delta_sparse * phi_col + G * R_col - G * quasiQ_col
if regularized:
grad_col += phi_col / (N * PHI_STD_REG ** 2)
grad = sp.array(grad_col).ravel()
# Make sure grad is valid
if not all(np.isreal(grad)):
raise ControlledError('/gradient/ grad is not real at t = %s: grad = %s' % (t, grad))
if not all(np.isfinite(grad)):
raise ControlledError('/gradient/ grad is not finite at t = %s: grad = %s' % (t, grad))
return grad
# Evaluate action hessian w.r.t. a field given smoothness criteria. NOTE: returns sparse matrix, not dense matrix!
def hessian(phi, R, Delta, t, N, regularized=False):
# Make sure phi is valid
if not all(np.isreal(phi)):
raise ControlledError('/hessian/ phi is not real: phi = %s' % phi)
if not all(np.isfinite(phi)):
raise ControlledError('/hessian/ phi is not finite: phi = %s' % phi)
# Make sure t is valid
if not np.isreal(t):
raise ControlledError('/hessian/ t is not real: t = %s' % t)
if not np.isfinite(t):
raise ControlledError('/hessian/ t is not finite: t = %s' % t)
# Make sure regularized is valid
if not isinstance(regularized, bool):
raise ControlledError('/hessian/ regularized must be a boolean: regularized = %s' % type(regularized))
G = 1. * len(R)
quasiQ = utils.field_to_quasiprob(phi)
Delta_sparse = Delta.get_sparse_matrix()
H = sp.exp(-t) * Delta_sparse + G * diags(quasiQ, 0)
if regularized:
H += diags(np.ones(int(G)), 0) / (N * PHI_STD_REG ** 2)
# Make sure H is valid ?
return H
# Compute the log of ptgd at maxent
def log_ptgd_at_maxent(phi_M, R, Delta, N, Z_eval, num_Z_samples):
# Make sure phi_M is valid
if not all(np.isreal(phi_M)):
raise ControlledError('/log_ptgd_at_maxent/ phi_M is not real: phi_M = %s' % phi_M)
if not all(np.isfinite(phi_M)):
raise ControlledError('/log_ptgd_at_maxent/ phi_M is not finite: phi_M = %s' % phi_M)
kernel_dim = Delta._kernel_dim
M = utils.field_to_prob(phi_M)
M_on_kernel = sp.zeros([kernel_dim, kernel_dim])
kernel_basis = Delta._kernel_basis
lambdas = Delta._eigenvalues
for a in range(int(kernel_dim)):
for b in range(int(kernel_dim)):
psi_a = sp.ravel(kernel_basis[:, a])
psi_b = sp.ravel(kernel_basis[:, b])
M_on_kernel[a, b] = sp.sum(psi_a * psi_b * M)
# Compute log Occam factor at infinity
log_Occam_at_infty = - 0.5 * sp.log(det(M_on_kernel)) - 0.5 * sp.sum(sp.log(lambdas[kernel_dim:]))
# Make sure log_Occam_at_infty is valid
if not np.isreal(log_Occam_at_infty):
raise ControlledError('/log_ptgd_at_maxent/ log_Occam_at_infty is not real: log_Occam_at_infty = %s' %
log_Occam_at_infty)
if not np.isfinite(log_Occam_at_infty):
raise ControlledError('/log_ptgd_at_maxent/ log_Occam_at_infty is not finite: log_Occam_at_infty = %s' %
log_Occam_at_infty)
# Compute the log likelihood at infinity
log_likelihood_at_infty = - N * sp.sum(phi_M * R) - N
# Make sure log_likelihood_at_infty is valid
if not np.isreal(log_likelihood_at_infty):
raise ControlledError('/log_ptgd_at_maxent/ log_likelihood_at_infty is not real: log_likelihood_at_infty = %s' %
log_likelihood_at_infty)
if not np.isfinite(log_likelihood_at_infty):
raise ControlledError('/log_ptgd_at_maxent/ log_likelihood_at_infty is not finite: log_likelihood_at_infty = %s' %
log_likelihood_at_infty)
# Compute the log posterior (not sure this is right)
log_ptgd_at_maxent = log_likelihood_at_infty + log_Occam_at_infty
# If requested, incorporate corrections to the partition function
t = -np.inf
num_samples = num_Z_samples
    if Z_eval == 'Lap':
        log_Z_correction, w_sample_mean, w_sample_mean_std = \
            0.0, 1.0, 0.0
    elif Z_eval == 'Lap+Imp':
        log_Z_correction, w_sample_mean, w_sample_mean_std = \
            supplements.Laplace_approach(phi_M, R, Delta, t, N, num_samples, go_parallel=False)
    elif Z_eval == 'Lap+Imp+P':
        log_Z_correction, w_sample_mean, w_sample_mean_std = \
            supplements.Laplace_approach(phi_M, R, Delta, t, N, num_samples, go_parallel=True)
    elif Z_eval == 'GLap':
        log_Z_correction, w_sample_mean, w_sample_mean_std = \
            supplements.GLaplace_approach(phi_M, R, Delta, t, N, num_samples, go_parallel=False, sampling=False)
    elif Z_eval == 'GLap+P':
        log_Z_correction, w_sample_mean, w_sample_mean_std = \
            supplements.GLaplace_approach(phi_M, R, Delta, t, N, num_samples, go_parallel=True, sampling=False)
    elif Z_eval == 'GLap+Sam':
        log_Z_correction, w_sample_mean, w_sample_mean_std = \
            supplements.GLaplace_approach(phi_M, R, Delta, t, N, num_samples, go_parallel=False, sampling=True)
    elif Z_eval == 'GLap+Sam+P':
        log_Z_correction, w_sample_mean, w_sample_mean_std = \
            supplements.GLaplace_approach(phi_M, R, Delta, t, N, num_samples, go_parallel=True, sampling=True)
    elif Z_eval == 'Lap+Fey':
        log_Z_correction, w_sample_mean, w_sample_mean_std = \
            supplements.Feynman_diagrams(phi_M, R, Delta, t, N)
    else:
        raise ControlledError('/log_ptgd_at_maxent/ unrecognized Z_eval: Z_eval = %s' % Z_eval)
    # Make sure log_Z_correction is valid
    if not np.isreal(log_Z_correction):
        raise ControlledError('/log_ptgd_at_maxent/ log_Z_correction is not real: log_Z_correction = %s' % log_Z_correction)
    if not np.isfinite(log_Z_correction):
        raise ControlledError('/log_ptgd_at_maxent/ log_Z_correction is not finite: log_Z_correction = %s' % log_Z_correction)
log_ptgd_at_maxent += log_Z_correction
return log_ptgd_at_maxent, log_Z_correction, w_sample_mean, w_sample_mean_std
# Computes the log of ptgd at t
def log_ptgd(phi, R, Delta, t, N, Z_eval, num_Z_samples):
# Make sure phi is valid
if not all(np.isreal(phi)):
raise ControlledError('/log_ptgd/ phi is not real: phi = %s' % phi)
if not all(np.isfinite(phi)):
raise ControlledError('/log_ptgd/ phi is not finite: phi = %s' % phi)
# Make sure t is valid
if not np.isreal(t):
raise ControlledError('/log_ptgd/ t is not real: t = %s' % t)
if not np.isfinite(t):
raise ControlledError('/log_ptgd/ t is not finite: t = %s' % t)
G = 1. * len(phi)
alpha = 1. * Delta._alpha
kernel_dim = 1. * Delta._kernel_dim
H = hessian(phi, R, Delta, t, N)
H_prime = H.todense() * sp.exp(t)
S = action(phi, R, Delta, t, N)
# First try computing log determinant straight away
log_det = sp.log(det(H_prime))
# If failed, try computing the sum of eigenvalues, forcing the eigenvalues to be real and non-negative
if not (np.isreal(log_det) and np.isfinite(log_det)):
lambdas = abs(eigvalsh(H_prime))
log_det = sp.sum(sp.log(lambdas))
# Make sure log_det is valid
if not np.isreal(log_det):
raise ControlledError('/log_ptgd/ log_det is not real at t = %s: log_det = %s' % (t, log_det))
if not np.isfinite(log_det):
raise ControlledError('/log_ptgd/ log_det is not finite at t = %s: log_det = %s' % (t, log_det))
# Compute contribution from finite t
log_ptgd = -(N / G) * S + 0.5 * kernel_dim * t - 0.5 * log_det
# Make sure log_ptgd is valid
if not np.isreal(log_ptgd):
raise ControlledError('/log_ptgd/ log_ptgd is not real at t = %s: log_ptgd = %s' % (t, log_ptgd))
if not np.isfinite(log_ptgd):
raise ControlledError('/log_ptgd/ log_ptgd is not finite at t = %s: log_ptgd = %s' % (t, log_ptgd))
# If requested, incorporate corrections to the partition function
num_samples = num_Z_samples
    if Z_eval == 'Lap':
        log_Z_correction, w_sample_mean, w_sample_mean_std = \
            0.0, 1.0, 0.0
    elif Z_eval == 'Lap+Imp':
        log_Z_correction, w_sample_mean, w_sample_mean_std = \
            supplements.Laplace_approach(phi, R, Delta, t, N, num_samples, go_parallel=False)
    elif Z_eval == 'Lap+Imp+P':
        log_Z_correction, w_sample_mean, w_sample_mean_std = \
            supplements.Laplace_approach(phi, R, Delta, t, N, num_samples, go_parallel=True)
    elif Z_eval == 'GLap':
        log_Z_correction, w_sample_mean, w_sample_mean_std = \
            supplements.GLaplace_approach(phi, R, Delta, t, N, num_samples, go_parallel=False, sampling=False)
    elif Z_eval == 'GLap+P':
        log_Z_correction, w_sample_mean, w_sample_mean_std = \
            supplements.GLaplace_approach(phi, R, Delta, t, N, num_samples, go_parallel=True, sampling=False)
    elif Z_eval == 'GLap+Sam':
        log_Z_correction, w_sample_mean, w_sample_mean_std = \
            supplements.GLaplace_approach(phi, R, Delta, t, N, num_samples, go_parallel=False, sampling=True)
    elif Z_eval == 'GLap+Sam+P':
        log_Z_correction, w_sample_mean, w_sample_mean_std = \
            supplements.GLaplace_approach(phi, R, Delta, t, N, num_samples, go_parallel=True, sampling=True)
    elif Z_eval == 'Lap+Fey':
        log_Z_correction, w_sample_mean, w_sample_mean_std = \
            supplements.Feynman_diagrams(phi, R, Delta, t, N)
    else:
        raise ControlledError('/log_ptgd/ unrecognized Z_eval: Z_eval = %s' % Z_eval)
    # Make sure log_Z_correction is valid
    if not np.isreal(log_Z_correction):
        raise ControlledError('/log_ptgd/ log_Z_correction is not real at t = %s: log_Z_correction = %s' % (t, log_Z_correction))
    if not np.isfinite(log_Z_correction):
        raise ControlledError('/log_ptgd/ log_Z_correction is not finite at t = %s: log_Z_correction = %s' % (t, log_Z_correction))
log_ptgd += log_Z_correction
    # Collect run details (populated for debugging; note that they are not returned)
    details = Results()
details.S = S
details.N = N
details.G = G
details.kernel_dim = kernel_dim
details.t = t
details.log_det = log_det
details.phi = phi
return log_ptgd, log_Z_correction, w_sample_mean, w_sample_mean_std
# Computes predictor step
def compute_predictor_step(phi, R, Delta, t, N, direction, resolution, DT_MAX):
# Make sure phi is valid
if not all(np.isreal(phi)):
raise ControlledError('/compute_predictor_step/ phi is not real: phi = %s' % phi)
if not all(np.isfinite(phi)):
raise ControlledError('/compute_predictor_step/ phi is not finite: phi = %s' % phi)
# Make sure t is valid
if not np.isreal(t):
raise ControlledError('/compute_predictor_step/ t is not real: t = %s' % t)
if not np.isfinite(t):
raise ControlledError('/compute_predictor_step/ t is not finite: t = %s' % t)
# Make sure direction is valid
if not ((direction == 1) or (direction == -1)):
raise ControlledError('/compute_predictor_step/ direction must be just a sign: direction = %s' % direction)
# Get current probability distribution
Q = utils.field_to_prob(phi)
G = 1. * len(Q)
# Get hessian
H = hessian(phi, R, Delta, t, N)
# Compute rho, which indicates direction of step
rho = G * spsolve(H, Q - R)
# Make sure rho is valid
if not all(np.isreal(rho)):
raise ControlledError('/compute_predictor_step/ rho is not real at t = %s: rho = %s' % (t, rho))
if not all(np.isfinite(rho)):
raise ControlledError('/compute_predictor_step/ rho is not finite at t = %s: rho = %s' % (t, rho))
denom = sp.sqrt(sp.sum(rho * Q * rho))
# Make sure denom is valid
if not np.isreal(denom):
raise ControlledError('/compute_predictor_step/ denom is not real at t = %s: denom = %s' % (t, denom))
if not np.isfinite(denom):
raise ControlledError('/compute_predictor_step/ denom is not finite at t = %s: denom = %s' % (t, denom))
if not (denom > 0):
raise ControlledError('/compute_predictor_step/ denom is not positive at t = %s: denom = %s' % (t, denom))
# Compute dt based on value of epsilon (the resolution)
dt = direction * resolution / denom
while abs(dt) > DT_MAX:
dt /= 2.0
    # Return phi_new and t_new. Note: phi_new may still lie outside [PHI_MIN, PHI_MAX]; the caller checks for that
phi_new = phi + rho * dt
t_new = t + dt
# Make sure phi_new is valid
if not all(np.isreal(phi_new)):
raise ControlledError('/compute_predictor_step/ phi_new is not real at t_new = %s: phi_new = %s' % (t_new, phi_new))
if not all(np.isfinite(phi_new)):
raise ControlledError('/compute_predictor_step/ phi_new is not finite at t_new = %s: phi_new = %s' % (t_new, phi_new))
# Make sure t_new is valid
if not np.isreal(t_new):
raise ControlledError('/compute_predictor_step/ t_new is not real: t_new = %s' % t_new)
if not np.isfinite(t_new):
raise ControlledError('/compute_predictor_step/ t_new is not finite: t_new = %s' % t_new)
return phi_new, t_new
# Computes corrector step
def compute_corrector_step(phi, R, Delta, t, N, tolerance, report_num_steps=False):
# Make sure phi is valid
if not all(np.isreal(phi)):
raise ControlledError('/compute_corrector_step/ phi is not real: phi = %s' % phi)
if not all(np.isfinite(phi)):
raise ControlledError('/compute_corrector_step/ phi is not finite: phi = %s' % phi)
# Make sure t is valid
if not np.isreal(t):
raise ControlledError('/compute_corrector_step/ t is not real: t = %s' % t)
if not np.isfinite(t):
raise ControlledError('/compute_corrector_step/ t is not finite: t = %s' % t)
# Make sure report_num_steps is valid
if not isinstance(report_num_steps, bool):
raise ControlledError('/compute_corrector_step/ report_num_steps must be a boolean: report_num_steps = %s' %
type(report_num_steps))
# Evaluate the probability distribution
Q = utils.field_to_prob(phi)
# Evaluate action
S = action(phi, R, Delta, t, N)
# Perform corrector steps until phi converges
num_corrector_steps = 0
num_backtracks = 0
while True:
# Compute the gradient
v = gradient(phi, R, Delta, t, N)
# Compute the hessian
H = hessian(phi, R, Delta, t, N)
# Solve linear equation to get change in field
dphi = -spsolve(H, v)
# Make sure dphi is valid
if not all(np.isreal(dphi)):
raise ControlledError('/compute_corrector_step/ dphi is not real at t = %s: dphi = %s' % (t, dphi))
if not all(np.isfinite(dphi)):
raise ControlledError('/compute_corrector_step/ dphi is not finite at t = %s: dphi = %s' % (t, dphi))
# Compute corresponding change in action
dS = sp.sum(dphi * v)
# If we're already very close to the max, then dS will be close to zero. In this case, we're done already
if dS > MAX_DS:
break
# Reduce step size until in linear regime
beta = 1.0
while True:
# Make sure beta is valid
if beta < 1E-50:
raise ControlledError('/compute_corrector_step/ phi is not converging at t = %s: beta = %s' % (t, beta))
# Compute new phi
phi_new = phi + beta * dphi
# If new phi is insane, decrease beta
if any(phi_new < PHI_MIN) or any(phi_new > PHI_MAX):
num_backtracks += 1
beta *= 0.5
continue
# Compute new action
S_new = action(phi_new, R, Delta, t, N)
# Check for linear regime
if S_new - S <= 0.5 * beta * dS:
break
# If not in linear regime, backtrack value of beta
else:
num_backtracks += 1
beta *= 0.5
continue
# Make sure phi_new is valid
if not all(np.isreal(phi_new)):
raise ControlledError('/compute_corrector_step/ phi_new is not real at t = %s: phi_new = %s' % (t, phi_new))
if not all(np.isfinite(phi_new)):
raise ControlledError('/compute_corrector_step/ phi_new is not finite at t = %s: phi_new = %s' % (t, phi_new))
# Compute new Q
Q_new = utils.field_to_prob(phi_new)
# Break out of loop if Q_new is close enough to Q
gd = utils.geo_dist(Q_new, Q)
        if gd < tolerance:
break
        # S_new should never exceed S at this point; if it does, the corrector
        # step has failed, so terminate with an error
        elif S_new - S > 0:
            raise ControlledError('/compute_corrector_step/ S_new > S at t = %s: terminating corrector steps' % t)
# Otherwise, continue with corrector step
else:
# New phi, Q, and S values have already been computed
phi = phi_new
Q = Q_new
S = S_new
num_corrector_steps += 1
# After corrector loop has finished, return field
if report_num_steps:
return phi, num_corrector_steps, num_backtracks
else:
return phi
# The core algorithm of DEFT, used for both 1D and 2D density estimation
def compute_map_curve(N, R, Delta, Z_eval, num_Z_samples, t_start, DT_MAX, print_t, tolerance, resolution, max_log_evidence_ratio_drop):
""" Traces the map curve in both directions
Args:
R (numpy.narray):
The data histogram
Delta (Smoothness_operator instance):
Effectiely defines smoothness
resolution (float):
Specifies max distance between neighboring points on the
MAP curve
Returns:
map_curve (list): A list of MAP_curve_points
"""
# Get number of gridpoints and kernel dimension from smoothness operator
G = Delta.get_G()
alpha = Delta._alpha
kernel_basis = Delta.get_kernel_basis()
kernel_dim = Delta.get_kernel_dim()
# Initialize MAP curve
map_curve = MAP_curve()
#
# First compute histogram stuff
#
# Get normalized histogram and corresponding field
R = R / sum(R)
phi_R = utils.prob_to_field(R)
log_E_R = -np.Inf
t_R = np.Inf
log_Z_correction_R = 0.0
w_sample_mean_R = 1.0
w_sample_mean_std_R = 0.0
map_curve.add_point(t_R, phi_R, R, log_E_R, log_Z_correction_R, w_sample_mean_R, w_sample_mean_std_R)
#
# Then compute maxent stuff
#
# Compute the maxent field and density
phi_infty, success = maxent.compute_maxent_field(R, kernel_basis)
# Convert maxent field to probability distribution
M = utils.field_to_prob(phi_infty)
# Compute the maxent log_ptgd. Important to keep this around to compute log_E at finite t
log_ptgd_M, log_Z_correction_M, w_sample_mean_M, w_sample_mean_std_M = \
log_ptgd_at_maxent(phi_infty, R, Delta, N, Z_eval, num_Z_samples)
# This corresponds to a log_E of zero
log_E_M = 0
t_M = -sp.Inf
map_curve.add_point(t_M, phi_infty, M, log_E_M, log_Z_correction_M, w_sample_mean_M, w_sample_mean_std_M)
# Set maximum log evidence ratio so far encountered
log_E_max = -np.Inf
#
# Now compute starting point
#
# Compute phi_start by executing a corrector step starting at maxent dist
    phi_start = compute_corrector_step(phi_infty, R, Delta, t_start, N, tolerance)
# Convert starting field to probability distribution
Q_start = utils.field_to_prob(phi_start)
# Compute log ptgd
log_ptgd_start, log_Z_correction_start, w_sample_mean_start, w_sample_mean_std_start = \
log_ptgd(phi_start, R, Delta, t_start, N, Z_eval, num_Z_samples)
# Compute corresponding evidence ratio
log_E_start = log_ptgd_start - log_ptgd_M
# Adjust max log evidence ratio
log_E_max = log_E_start if (log_E_start > log_E_max) else log_E_max
# Set start as first MAP curve point
if print_t:
print('t = %.2f' % t_start)
map_curve.add_point(t_start, phi_start, Q_start, log_E_start, log_Z_correction_start, w_sample_mean_start, w_sample_mean_std_start)
#
# Finally trace along the MAP curve
#
    # Record how the iteration in t terminates in each direction
    break_t_loop = [True, True]  # [toward Q_M, toward Q_R]; True = stopped via geo_dist, False = via log_E drop
# Trace MAP curve in both directions
for direction in [-1, +1]:
# Start iteration from central point
phi = phi_start
t = t_start
Q = Q_start
log_E = log_E_start
log_Z_correction = log_Z_correction_start
w_sample_mean = w_sample_mean_start
        w_sample_mean_std = w_sample_mean_std_start
if direction == -1:
Q_end = M
else:
Q_end = R
log_ptgd0 = log_ptgd_start
        slope = 0.0  # sign of the running trend in log_ptgd
# Keep stepping in direction until reach the specified endpoint
while True:
            # Stop when the current distribution is within resolution of the endpoint
            if utils.geo_dist(Q_end, Q) <= resolution:
                break
# Take predictor step
phi_pre, t_new = compute_predictor_step(phi, R, Delta, t, N, direction, resolution, DT_MAX)
# If phi_pre is insane, start iterating from phi instead
if any(phi_pre > PHI_MAX) or any(phi_pre < PHI_MIN):
phi_pre = phi
# Perform corrector steps to get new phi
            phi_new = compute_corrector_step(phi_pre, R, Delta, t_new, N, tolerance)
# Compute new distribution
Q_new = utils.field_to_prob(phi_new)
# Compute log ptgd
log_ptgd_new, log_Z_correction_new, w_sample_mean_new, w_sample_mean_std_new = \
log_ptgd(phi_new, R, Delta, t_new, N, Z_eval, num_Z_samples)
# Compute corresponding evidence ratio
log_E_new = log_ptgd_new - log_ptgd_M
# Take step
t = t_new
Q = Q_new
phi = phi_new
log_E = log_E_new
log_Z_correction = log_Z_correction_new
w_sample_mean = w_sample_mean_new
w_sample_mean_std = w_sample_mean_std_new
# Adjust max log evidence ratio
log_E_max = log_E if (log_E > log_E_max) else log_E_max
# Terminate if log_E is too small. But don't count the t=-inf endpoint when computing log_E_max
if log_E_new < log_E_max - max_log_evidence_ratio_drop:
if direction == -1:
#print('Q_end = M: log_E (%.2f) < log_E_max (%.2f) - max_log_evidence_ratio_drop (%.2f)' %
# (log_E_new, log_E_max, max_log_evidence_ratio_drop))
break_t_loop[0] = False
else:
#print('Q_end = R: log_E (%.2f) < log_E_max (%.2f) - max_log_evidence_ratio_drop (%.2f)' %
# (log_E_new, log_E_max, max_log_evidence_ratio_drop))
break_t_loop[1] = False
# Add new point to map curve
if print_t:
print('t = %.2f' % t)
map_curve.add_point(t, phi, Q, log_E, log_Z_correction, w_sample_mean, w_sample_mean_std)
break
slope_new = np.sign(log_ptgd_new - log_ptgd0)
# Terminate if t is too negative or too positive
if t < T_MIN:
#print('Q_end = M: t (%.2f) < T_MIN (%.2f)' % (t, T_MIN))
break_t_loop[0] = False
break
elif t > T_MAX:
#print('Q_end = R: t (%.2f) > T_MAX (%.2f)' % (t, T_MAX))
break_t_loop[1] = False
break
elif (direction == +1) and (t > 0) and (np.sign(slope_new * slope) < 0) and (log_ptgd_new > log_ptgd0):
#print('Q_end = R: t (%.2f) > 0 and log_ptgd_new (%.2f) > log_ptgd (%.2f) wrongly' %
# (t, log_ptgd_new, log_ptgd0))
break_t_loop[1] = False
break
elif (direction == +1) and (np.sign(slope_new * slope) < 0) and (log_ptgd_new > log_ptgd0 + max_log_evidence_ratio_drop):
#print('Q_end = R: log_ptgd_new (%.2f) > log_ptgd (%.2f) + max_log_evidence_ratio_drop (%.2f) at t = %.2f' %
# (log_ptgd_new, log_ptgd0, max_log_evidence_ratio_drop, t))
break_t_loop[1] = False
break
log_ptgd0 = log_ptgd_new
slope = slope_new
# Add new point to MAP curve
if print_t:
print('t = %.2f' % t)
map_curve.add_point(t, phi, Q, log_E, log_Z_correction, w_sample_mean, w_sample_mean_std)
# Sort points along the MAP curve
map_curve.sort()
map_curve.t_start = t_start
map_curve.break_t_loop = break_t_loop
# Return the MAP curve to the user
return map_curve
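# A hedged sketch of how a traced MAP curve might be inspected. It relies only
# on attributes used elsewhere in this module (points, .t, .log_E); the helper
# name itself is an illustrative assumption, not part of the package API.
def _print_map_curve_summary(map_curve):
    # Print the evidence-ratio profile along the curve
    for p in map_curve.points:
        print('t = %8.2f   log_E = %8.2f' % (p.t, p.log_E))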
#
# Compute the K coefficient (Kinney 2015 PRE, Eq. 12)
#
def _compute_K_coeff(res):
# Compute the spectrum of Delta
Delta = res.Delta.get_dense_matrix()
alpha = int(-Delta[0, 1])
lambdas, psis = eigh(Delta) # Columns of psi are eigenvectors
original_psis = sp.array(psis)
R = res.R
M = res.M
N = res.N
G = len(R)
# Get normalized M and R, with unit grid spacing
M = sp.array(M / sp.sum(M)).T
R = sp.array(R / sp.sum(R)).T
    # Diagonalize the first alpha psis with respect to diag_M, i.e. make the
    # kernel basis orthonormal under the M-weighted inner product
    diag_M_mat = sp.mat(sp.diag(M))
psis_ker_mat = sp.mat(original_psis[:, :alpha])
diag_M_ker = psis_ker_mat.T * diag_M_mat * psis_ker_mat
omegas, psis_ker_coeffs = eigh(diag_M_ker)
psis = original_psis.copy()
psis[:, :alpha] = psis_ker_mat * psis_ker_coeffs
# Now compute relevant coefficients
# i: range(G)
# j,k: range(alpha)
v_is = sp.array([sp.sum((M - R) * psis[:, i]) for i in range(G)])
z_iis = sp.array([sp.sum(M * psis[:, i] * psis[:, i]) for i in range(G)])
z_ijs = sp.array(
[[sp.sum(M * psis[:, i] * psis[:, j]) for j in range(alpha)] for i in
range(G)])
z_ijks = sp.array([[[sp.sum(M * psis[:, i] * psis[:, j] * psis[:, k]) for j
in range(alpha)] for k in range(alpha)] for i in
range(G)])
K_pos_terms = sp.array(
[(N * v_is[i] ** 2) / (2 * lambdas[i]) for i in range(alpha, G)])
K_neg_terms = sp.array(
[(-z_iis[i]) / (2 * lambdas[i]) for i in range(alpha, G)])
K_ker1_terms = sp.array([sum(
[z_ijs[i, j] ** 2 / (2 * lambdas[i] * omegas[j]) for j in range(alpha)])
for i in range(alpha, G)])
K_ker2_terms = sp.array([sum(
[v_is[i] * z_ijks[i, j, j] / (2 * lambdas[i] * omegas[j]) for j in
range(alpha)]) for i in range(alpha, G)])
K_ker3_terms = sp.array([sum([sum([-v_is[i] * z_ijs[i, j] * z_ijks[
j, k, k] / (2 * lambdas[i] * omegas[k] * omegas[j]) for j in
range(alpha)]) for k in range(alpha)])
for i in range(alpha, G)])
    # Sum all contributions to obtain the K coefficient
K_coeff = K_pos_terms.sum() + K_neg_terms.sum() + K_ker1_terms.sum() + K_ker2_terms.sum() + K_ker3_terms.sum()
# Return the K coefficient
return K_coeff
#
# Core DEFT algorithm
#
def run(counts_array, Delta, Z_eval, num_Z_samples, t_start, DT_MAX, print_t,
        tolerance, resolution, num_pt_samples, fix_t_at_t_star,
        max_log_evidence_ratio_drop, compute_K_coeff, details=False):
"""
The core algorithm of DEFT, used for both 1D and 2D density estmation.
Args:
counts_array (numpy.ndarray):
A scipy array of counts. All counts must be nonnegative.
Delta (Smoothness_operator instance):
An operator providing the definition of 'smoothness' used by DEFT
"""
# Make sure details is valid
if not isinstance(details, bool):
raise ControlledError('/deft_core._run/ details must be a boolean: details = %s' % type(details))
# Get number of gridpoints and kernel dimension from smoothness operator
G = Delta.get_G()
kernel_dim = Delta.get_kernel_dim()
# Make sure counts_array is valid
    if not (len(counts_array) == G):
        raise ControlledError('/deft_core.run/ counts_array must have length %d: len(counts_array) = %d' %
                              (G, len(counts_array)))
    if not all(counts_array >= 0):
        raise ControlledError('/deft_core.run/ counts_array is not non-negative: counts_array = %s' % counts_array)
    if not (sum(counts_array > 0) > kernel_dim):
        raise ControlledError('/deft_core.run/ Only %d elements of counts_array contain data; more than the kernel dimension %d are required' %
                              (sum(counts_array > 0), kernel_dim))
# Get number of data points and normalize histogram
N = sum(counts_array)
R = 1.0 * counts_array / N
#
# Compute the MAP curve
#
start_time = time.time()
    map_curve = compute_map_curve(N, R, Delta, Z_eval, num_Z_samples, t_start, DT_MAX, print_t, tolerance, resolution, max_log_evidence_ratio_drop)
end_time = time.time()
map_curve_compute_time = end_time - start_time
if print_t:
print('MAP curve computation took %.2f sec' % map_curve_compute_time)
# Identify the optimal density estimate
points = map_curve.points
log_Es = sp.array([p.log_E for p in points])
log_E_max = log_Es.max()
ibest = log_Es.argmax()
star = points[ibest]
Q_star = np.copy(star.Q)
t_star = star.t
phi_star = np.copy(star.phi)
map_curve.i_star = ibest
#
# Do posterior sampling
#
    if num_pt_samples != 0:
Q_samples, phi_samples, phi_weights = \
supplements.posterior_sampling(points, R, Delta, N, G,
num_pt_samples, fix_t_at_t_star)
#
# Package results
#
# Create a container
results = Results()
# Fill in info that's guaranteed to be there
results.Delta = Delta
results.phi_star = phi_star
results.Q_star = Q_star
results.R = R
results.map_curve = map_curve
results.map_curve_compute_time = map_curve_compute_time
results.G = G
results.N = N
results.t_star = t_star
results.i_star = ibest
results.counts = counts_array
    results.tolerance = tolerance
results.resolution = resolution
results.points = points
# Get maxent point
maxent_point = results.map_curve.get_maxent_point()
results.M = maxent_point.Q / np.sum(maxent_point.Q)
# Compute K coefficient if requested
if compute_K_coeff:
results.K_coeff = _compute_K_coeff(results)
else:
results.K_coeff = None
# Include posterior sampling info if any sampling was performed
    if num_pt_samples != 0:
results.num_pt_samples = num_pt_samples
results.Q_samples = Q_samples
results.phi_samples = phi_samples
results.phi_weights = phi_weights
# Return density estimate along with histogram on which it is based
return results
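# A hedged usage sketch of run() above. The parameter values are illustrative
# assumptions chosen from the signature, not verified defaults of this package;
# Delta must be a Smoothness_operator instance built elsewhere.
def _example_deft_run(counts_array, Delta):
    return run(counts_array, Delta,
               Z_eval='Lap',                   # plain Laplace approximation of Z
               num_Z_samples=0,
               t_start=0.0,
               DT_MAX=1.0,
               print_t=False,
               tolerance=1e-3,                 # corrector-step convergence threshold
               resolution=1e-1,                # max geo-distance between MAP-curve points
               num_pt_samples=0,               # skip posterior sampling
               fix_t_at_t_star=True,
               max_log_evidence_ratio_drop=20.0,
               compute_K_coeff=False)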
|
import numpy as np
import pandas as pd
from viterbi import viterbi
def dishonest_casino():
    # Hidden states and observable symbols
S = np.array(["F", "L"])
SY = np.arange(1,7)
    # Transition matrix
M = pd.DataFrame([[0.95, 0.05], [0.1, 0.9]], columns = S, index = S)
    # Emission probability matrix
E = pd.DataFrame([[1/6] * 6, [1/10] * 5 + [1/2]], columns = SY, index = S)
    # Initial probabilities
pinizio = pd.DataFrame([[1., 0.]], columns = S)
    # Observed sequence of die rolls
o = np.array([1,2,1,2,2,3,4,5,3,4,6,6,5,4,2,1,3,4,6,6,1,6,6,6,6,2,6,5,1,3,2,2,1])
path = viterbi(M, E, S, pinizio, o)
print(path)
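# A hedged sketch of what the imported viterbi() is assumed to look like:
# log-space dynamic programming with backpointers. The helper name and the
# exact return format (a list of state labels) are assumptions, not the
# original module's API.
def _viterbi_sketch(M, E, S, pinizio, o):
    with np.errstate(divide='ignore'):  # log(0) -> -inf is intended here
        logM = np.log(M.values)                 # (K, K) transition log-probs
        logE = np.log(E.values)                 # (K, 6) emission log-probs
        logp0 = np.log(pinizio.values.ravel())  # (K,) initial log-probs
    K, T = len(S), len(o)
    V = np.full((K, T), -np.inf)  # V[k, t]: best log-prob of a path ending in state k at time t
    back = np.zeros((K, T), dtype=int)
    V[:, 0] = logp0 + logE[:, o[0] - 1]
    for t in range(1, T):
        for k in range(K):
            scores = V[:, t - 1] + logM[:, k]
            back[k, t] = int(np.argmax(scores))
            V[k, t] = scores[back[k, t]] + logE[k, o[t] - 1]
    # Trace the most probable state sequence backwards
    states = [int(np.argmax(V[:, T - 1]))]
    for t in range(T - 1, 0, -1):
        states.append(back[states[-1], t])
    return [S[k] for k in reversed(states)]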
if __name__ == "__main__":
dishonest_casino()
|
import requests
from PythonApi.jotihunt.Base import Response, NIEUWS, OPDRACHT, NIEUWSLIJST,\
HINTS, HINT, OPDRACHTEN, SCORELIJST, VOSSEN
_base_url = "http://www.jotihunt.net/api/1.0/"
def get_nieuws(nieuws_id):
url = _base_url + "nieuws/" + str(nieuws_id)
r = requests.get(url)
json = r.json()
return Response(json, NIEUWS)
def get_opdracht(opdracht_id):
url = _base_url + "opdracht/" + str(opdracht_id)
r = requests.get(url)
return Response(r.json(), OPDRACHT)
def get_hint(hint_id):
url = _base_url + "hint/" + str(hint_id)
r = requests.get(url)
return Response(r.json(), HINT)
def get_nieuws_lijst():
url = _base_url + "nieuws"
r = requests.get(url)
json = r.json()
return Response(json, NIEUWSLIJST)
def get_opdrachten():
url = _base_url + "opdracht"
r = requests.get(url)
return Response(r.json(), OPDRACHTEN)
def get_hints():
url = _base_url + "hint"
r = requests.get(url)
return Response(r.json(), HINTS)
def get_scorelijst():
url = _base_url + "scorelijst"
r = requests.get(url)
return Response(r.json(), SCORELIJST)
def get_vossen():
url = _base_url + "vossen"
r = requests.get(url)
return Response(r.json(), VOSSEN)
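# A hedged usage sketch. The news id is hypothetical and the live API may no
# longer be reachable; this only illustrates the call pattern.
if __name__ == "__main__":
    nieuws_lijst = get_nieuws_lijst()  # all news items
    eerste_nieuws = get_nieuws(1)      # a single news item, id assumed
    print(nieuws_lijst, eerste_nieuws)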
|
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.autograd import Variable
from torch import optim
import torch.nn as nn
# Build the model (a Keras-style wrapper around nn.Module)
class Sequential(nn.Module):
batch_size = 1
num_workers = 1
shuffle = True
layer_num = 0
def __init__(self):
super(Sequential, self).__init__()
if torch.cuda.is_available():
self.cuda()
else:
self.cpu()
self.non_liner_sequential = nn.Sequential()
self.liner_sequential = nn.Sequential()
    def add(self, layer=None, activation=None):
        # Route linear layers into liner_sequential and everything else into non_liner_sequential
        self.layer_num = self.layer_num + 1
        name = 'layer' + str(self.layer_num)
        if type(layer) is nn.Linear:
            self.liner_sequential.add_module(name=name, module=layer)
        else:
            self.non_liner_sequential.add_module(name=name, module=layer)
def forward(self, x):
out = self.non_liner_sequential(x)
out = out.view(out.size(0), -1)
out = self.liner_sequential(out)
return out
def compile(self, lr):
        # Define the loss function
        self.criterion = nn.CrossEntropyLoss(size_average=False)
        # Define the optimizer (stochastic gradient descent)
self.optimizer = optim.SGD(self.parameters(), lr=lr)
    def fit(self, x, y, epochs=2, batch_size=1, num_workers=1, shuffle=True):
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.shuffle = shuffle
        # Convert the input arrays to tensors once, before the epoch loop
        # (torch.from_numpy() only accepts numpy arrays, so converting inside
        # the loop would fail from the second epoch onwards)
        x = torch.from_numpy(x)
        y = torch.from_numpy(y)
        dataset = TensorDataset(data_tensor=x, target_tensor=y)
        dataloader = DataLoader(dataset, batch_size=self.batch_size, shuffle=self.shuffle, num_workers=self.num_workers)
        # Train the model
        self.train()
        for i in range(epochs):
            running_loss = 0.0
            running_acc = 0.0
for (img, label) in dataloader:
                # If running on CPU
                if not torch.cuda.is_available():
                    img = Variable(img).cpu()
                    label = Variable(label).cpu()
                # If running on GPU
                else:
                    img = Variable(img).cuda()
                    label = Variable(label).cuda()
                # Zero the accumulated gradients
self.optimizer.zero_grad()
output = self(img)
loss = self.criterion(output, label)
                # Backpropagation and parameter update
loss.backward()
self.optimizer.step()
running_loss += loss.data[0]
_, predict = torch.max(output, 1)
correct_num = (predict == label).sum()
running_acc += correct_num.data[0]
running_loss /= len(x)
running_acc /= len(x)
print('[%d/%d] Loss: %.5f, Acc: %.2f' % (i + 1, epochs, running_loss, running_acc * 100))
    def evaluate(self, x, y):
        self.eval()
        testloss = 0.0
        testacc = 0.0
        # Convert the input arrays to tensors
        x = torch.from_numpy(x)
        y = torch.from_numpy(y)
        dataset = TensorDataset(data_tensor=x, target_tensor=y)
        dataloader = DataLoader(dataset, batch_size=self.batch_size, shuffle=self.shuffle, num_workers=self.num_workers)
for (img, label) in dataloader:
            # If running on CPU
            if not torch.cuda.is_available():
                img = Variable(img).cpu()
                label = Variable(label).cpu()
            # If running on GPU
            else:
                img = Variable(img).cuda()
                label = Variable(label).cuda()
output = self(img)
            loss = self.criterion(output, label)
            # Accumulate loss over the test set
            testloss += loss.data[0]
_, predict = torch.max(output, 1)
correct_num = (predict == label).sum()
testacc += correct_num.data[0]
testloss /= len(x)
testacc /= len(x)
print('Loss: %.5f, Acc: %.2f' % (testloss, testacc * 100))
    def predict(self, x):
        # Forward pass in eval mode; returns the raw network outputs
        self.eval()
        x = Variable(torch.from_numpy(x))
        x = x.cuda() if torch.cuda.is_available() else x.cpu()
        return self(x)
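# A hedged usage sketch (legacy PyTorch < 0.4 API, matching the wrapper above).
# The layer sizes and the random data are illustrative assumptions; it assumes
# CPU execution, since the wrapper's __init__-time cuda() call predates the
# layers added afterwards.
if __name__ == '__main__':
    import numpy as np
    model = Sequential()
    model.add(nn.Conv2d(1, 8, kernel_size=3, padding=1))     # non-linear branch
    model.add(nn.ReLU())
    model.add(nn.Linear(8 * 28 * 28, 10))                    # linear branch
    model.compile(lr=0.01)
    x = np.random.randn(16, 1, 28, 28).astype(np.float32)    # 16 fake images
    y = np.random.randint(0, 10, size=16).astype(np.int64)   # 16 fake labels
    model.fit(x, y, epochs=2, batch_size=4)
    model.evaluate(x, y)
 |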