text stringlengths 38 1.54M |
|---|
from django.db import models
from django.contrib.auth import get_user_model
# Create your models here.
class Appointment(models.Model):
    """A scheduled appointment belonging to a user, with an optional map location."""
    title = models.CharField(max_length=30)
    description = models.CharField(max_length=40)
    name_of_location = models.CharField(max_length=40)
    # Coordinates are optional; 6 decimal places is roughly 0.1 m of precision.
    latitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
    longitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
    # NOTE(review): DateField stores a calendar date only; the name "due_time"
    # suggests a time of day was intended -- confirm whether DateTimeField fits.
    due_time = models.DateField()
    # Deleting the user cascades and removes their appointments.
    member = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from dateutil.parser import parse
from datetime import timedelta
import re
def parseTIME(string):
    """Parse a date/time string into a datetime using dateutil's flexible parser."""
    return parse(string)
def parseTIMEDELTA(string):
    """Parse a duration string of the form "H:M:S.f" into a timedelta.

    The fractional part is interpreted as whole milliseconds, e.g.
    "1:30:15.250" -> 1h 30m 15s 250ms.

    Raises:
        AttributeError: if *string* does not match the expected format
            (re.match returns None and .groupdict() fails).
    """
    # Raw string fixes the invalid "\:" escape (SyntaxWarning on modern
    # Python); a colon needs no escaping in a regex.
    result = re.match(
        r"(?P<hours>.+):(?P<minutes>.+):(?P<seconds>.+)\.(?P<milliseconds>.+)", string)
    # Read the named groups from a single groupdict() call.
    parts = result.groupdict()
    return timedelta(hours=int(parts["hours"]),
                     minutes=int(parts["minutes"]),
                     seconds=int(parts["seconds"]),
                     milliseconds=int(parts["milliseconds"]))
|
from turtle import *  # also exposes module-level functions driving an implicit default turtle
# Two named turtles drawing from the shared origin.
t1 = Turtle()
t2 = Turtle()
# t1: straight 100px line to the right.
t1.forward(100)
# t2: red 100px line angled 20 degrees clockwise.
t2.pencolor("red")
t2.right(20)
t2.forward(100)
# Bare goto() moves the module-level default turtle, NOT t1 or t2 --
# three pens are drawing in this window in total.
goto(20, 30)
t2.goto(20,30)
goto(20,50)
|
import requests
import cPickle  # Python 2 C-accelerated pickle (merged into pickle on Python 3)
def api_call(params, port):
    """POST pickled *params* to the remote API and return the unpickled reply.

    Args:
        params: any picklable object; serialized with pickle protocol 2.
        port: integer TCP port of the API server at the hard-coded host.
    """
    params = cPickle.dumps(params, protocol=2)
    response = requests.post(url='http://142.0.203.36:%d/api' % port, data=params)
    # SECURITY: unpickling bytes received over plain HTTP executes arbitrary
    # code if the server or network path is compromised -- trusted peers only.
    return cPickle.loads(response.content)
|
"""import library"""
from tqdm import tqdm
import time, urllib.request, requests, os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
"""start coding"""
driver = webdriver.Chrome('C:\chromedriver/chromedriver.exe')
url = 'https://shopee.com.my/search?keyword=basketball'
driver.get(url)
click_on_english = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="modal"]/div[1]/div[1]/div/div[3]/div[1]/button'))).click()
time.sleep(5)
# scroll to bottom
"""
pause_time = 5
# get scroll height
last_height = driver.execute_script("return document.body.scrollHeight")
while True:
# scroll down to bottom
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# calculate new scroll height and compare with the last scroll height
new_height = driver.execute_script("return document.body.scrollHeight")
if new_height == last_height:
break
last_height = new_height
"""
"""
def __scroll_down_page(self, speed=8):
current_scroll_position, new_height= 0, 1
while current_scroll_position <= new_height:
current_scroll_position += speed
self.execute_script("window.scrollTo(0, {});".format(current_scroll_position))
new_height = self.execute_script("return document.body.scrollHeight")
"""
current_scroll_position, new_height= 0, 1
while current_scroll_position <= new_height:
current_scroll_position += speed
self.execute_script("window.scrollTo(0, {});".format(current_scroll_position))
new_height = self.execute_script("return document.body.scrollHeight")
href_list = []
raw = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="main"]/div/div[2]/div[2]/div[2]/div[2]/div[2]')))
__scroll_down_page(driver)
CONDITION = True
while CONDITION:
items = raw.find_elements_by_tag_name('a')
for i in items:
href = i.get_attribute('href')
href_list.append(href)
CONDITION = False
print(href_list)
print(len(href_list))
|
# Trace exercise: follow how a and b change across the iterations.
a = 1
b = 3 # Make sure you keep note of the values per variable
for i in range(3):
    a = b # indentation = 4 spaces
    b = a + 1
# Fixed: the original final line carried a stray "|" artifact, which is a
# syntax error.
print('What are the values of a and b?')
import logging
import base64
import boto3
import uuid
import time
import json
import requests
from io import BytesIO
from PIL import Image, ImageFont, ImageDraw, ImageEnhance
from chalice import Chalice, Response, BadRequestError
from chalicelib import get_stage
# from chalicelib.db.models import DetectedPeopleModel
from chalicelib.db.models_mysql import DetectedPeopleModel
app = Chalice(app_name='jogo')
# Debug mode surfaces stack traces in responses; verbose logging for development.
app.debug = True
app.log.setLevel(logging.DEBUG)
# One selfie bucket per deployment stage (dev/prod/...).
PICTURE_S3_BUCKET = 'sa-jogo-pictures-{}'.format(get_stage())
@app.route('/')
def index():
    """Return this deployment's outbound IP as seen by httpbin (connectivity check)."""
    response = requests.get('http://httpbin.org/ip')
    return response.json()
@app.route('/hi')
def hi():
    """Trivial liveness endpoint."""
    return {'hello': 'world'}
@app.route('/stage')
def check_stage():
    """Report which deployment stage this instance is serving."""
    return {'stage': get_stage()}
@app.route('/event/{event_id}')
def get_aggregated_event(event_id):
    """Return every detected-people row for *event_id* as a JSON string.

    The default=lambda serializer dumps model objects via their attribute
    dicts; note the route returns a JSON *string*, not a dict.
    """
    data = DetectedPeopleModel.query(event_id)
    return json.dumps(list(data), default=lambda o: o.__dict__)
@app.route('/upload/{event_id}', methods=['POST'])
def upload_picture(event_id):
    """Store a base64 selfie in S3, index its faces with Rekognition and
    persist one DetectedPeopleModel row per detected face.

    Request JSON body: {"image": <base64 JPEG>, "uuid": <optional picture id>}.
    Returns {'event_id', 'object_id', 'total_rek_people'}.
    Raises BadRequestError when Rekognition finds no usable face.
    """
    # Use the caller-supplied picture id when present, otherwise mint one.
    if app.current_request.json_body.get('uuid'):
        pic_uuid = app.current_request.json_body['uuid']
    else:
        pic_uuid = str(uuid.uuid4())
    selfie = app.current_request.json_body['image']
    s3 = boto3.resource('s3')
    try:
        # move to CloudFormation template or app load
        s3.create_bucket(
            Bucket=PICTURE_S3_BUCKET,
        )
    except Exception:
        # Bucket most likely exists already; a genuine S3 failure will still
        # surface on the put_object call below. (Narrowed from a bare except.)
        pass
    rekognition = boto3.client('rekognition')
    s3.Bucket(PICTURE_S3_BUCKET).put_object(
        Key='selfies/{}.jpg'.format(pic_uuid),
        Body=base64.b64decode(selfie),
    )
    collection_id = 'sa-jogo-people-{}'.format(get_stage())
    try:
        rekognition.create_collection(
            CollectionId=collection_id,
        )
    # todo: define exact exception (botocore.errorfactory.ResourceAlreadyExistsException)
    except Exception:
        pass
    response = rekognition.index_faces(
        CollectionId=collection_id,
        Image={
            'S3Object': {
                'Bucket': PICTURE_S3_BUCKET,
                'Name': 'selfies/{}.jpg'.format(pic_uuid),
            },
        },
        DetectionAttributes=[
            'ALL'
        ]
    )
    ## parse rekognition response
    if not len(response['FaceRecords']) > 0:
        raise BadRequestError('Could not find valid faces')
    total_people = 0
    now = int(time.time())
    # get image with PIL
    image = Image.open(BytesIO(base64.b64decode(selfie))).convert("RGBA")
    image_width, image_height = image.size
    for face in response['FaceRecords']:
        rekognition_face_id = face['Face']['FaceId']
        age_high = face['FaceDetail']['AgeRange']['High']
        age_low = face['FaceDetail']['AgeRange']['Low']
        gender = face['FaceDetail']['Gender']['Value']
        gender_score = face['FaceDetail']['Gender']['Confidence']
        smile = face['FaceDetail']['Smile']['Value']
        smile_score = face['FaceDetail']['Smile']['Confidence']
        # Keep the emotion Rekognition is most confident about.
        dom_emotion_score = 0
        dom_emotion = None
        for emotion in face['FaceDetail']['Emotions']:
            if emotion['Confidence'] > dom_emotion_score:
                dom_emotion_score = emotion['Confidence']
                dom_emotion = emotion['Type']
        detected = DetectedPeopleModel(
            event_id=event_id,
            object_id=pic_uuid,
            face_id=rekognition_face_id,
            timestamp=now,
            dominant_emotion=dom_emotion,
            dominant_emotion_score=dom_emotion_score,
            smile=smile,
            smile_score=smile_score,
            age_low=age_low,
            age_high=age_high,
            gender=gender,
            gender_score=gender_score
        )
        detected.save()
        total_people = total_people + 1
        # FIXME saving image in local file system
        # what happens when running in the cloud?
        # BoundingBox values are ratios of the full image dimensions.
        width = image_width * face['FaceDetail']['BoundingBox']['Width']
        height = image_height * face['FaceDetail']['BoundingBox']['Height']
        left = image_width * face['FaceDetail']['BoundingBox']['Left']
        top = image_height * face['FaceDetail']['BoundingBox']['Top']
        draw = ImageDraw.Draw(image)
        # BUGFIX: width/height were swapped here, skewing boxes for
        # non-square faces.
        draw.rectangle(((left, top), (left + width, top + height)), outline="red")
        # FIXME font path to change image size in picture
        # what happens when running in the cloud?
        # TODO set font size based on image size
        # if image is too big, font size needs to be bigger
        font_path = "/Library/Fonts/Arial.ttf"
        font = ImageFont.truetype(font_path, 16)
        draw.text((left + 10, top - 10), dom_emotion, fill="yellow", font=font)
    # export image
    # TODO save image in S3
    # BUGFIX: JPEG cannot store an alpha channel; Pillow raises on saving an
    # RGBA image as JPEG, so flatten to RGB first.
    image.convert("RGB").save("/Users/sletic/Pictures/JoGoOut/" + pic_uuid + ".jpg", "JPEG")
    return {
        'event_id': event_id,
        'object_id': pic_uuid,
        'total_rek_people': total_people
    }
|
from PIL import Image, ImageDraw, ImageFont
import operator
class ViewModel:
    """Base class for 128x64 monochrome display views."""
    SIZE = (128,64)
    FONT = 'consola.ttf'
    def generateView(self):
        """Subclasses must render and return a PIL image for the display."""
        raise NotImplementedError( "Should have implemented this" )
    def drawInversedText(self, draw, xy, text, font):
        """Draw *text* at *xy* in inverse video (black text on a white box)."""
        w, h = font.getsize(text)
        x, y = xy
        box = [xy, (x + w, y + h)]
        # white background rectangle first, then black text on top
        draw.rectangle(box, fill="white", outline="white")
        draw.text(xy, text, font=font, fill="black")
class StatusViewModel(ViewModel):
    """Main status screen: measured temperatures, set-point, agitation and time."""
    #isTemperatures: list of float measured temperatures
    #setTemperature: float set-point
    #agitation: string label
    #time: string "HH:MM"
    def __init__(self, isTemperatures = None, setTemperature = 0, agitation = "unknown", time = "00:00"):
        # BUGFIX: the default used to be the mutable literal [0], which is a
        # single list shared by every instance created without an explicit
        # argument -- mutating one instance's readings mutated them all.
        self.isTemperatures = [0] if isTemperatures is None else isTemperatures
        self.setTemperature = setTemperature
        self.agitation = agitation
        self.time = time
    def generateView(self):
        """Render the title bar and four status rows onto a fresh 1-bit image."""
        im = Image.new("1",self.SIZE,0)
        draw = ImageDraw.Draw(im)
        fnt = ImageFont.truetype(self.FONT, 12)
        row = 2
        col = 2
        #Idea: 2px space at top and left
        # Inverse-video title.
        self.drawInversedText(draw, (col,row), "BoJo", fnt)
        row += 12
        # One line listing every measured temperature.
        tempString = "Temp: "
        for item in self.isTemperatures:
            tempString += "{:.2f}".format(item)
            tempString += " "
        draw.text((col,row), tempString, font=fnt, fill="white")
        row += 12
        #display target Temperature
        draw.text((col,row), "Set : {:.2f}".format(self.setTemperature), font=fnt, fill="white")
        row += 12
        #Display Agitation
        draw.text((col,row), "Move: " + self.agitation, font=fnt, fill="white")
        row += 12
        #Display Time
        draw.text((col,row), "Time: " + self.time, font=fnt, fill="white")
        del draw
        del fnt
        return im
class SetTemperatureViewModel(ViewModel):
    """Editor view for the target temperature set-point."""
    TEXT = "Target Temp"
    def __init__(self, setTemperature = 30.0, step = 0.05):
        self.setTemperature = setTemperature
        self.step = step
    def increase(self):
        """Raise the target by one step."""
        self.setTemperature += self.step
    def decrease(self):
        """Lower the target by one step."""
        self.setTemperature -= self.step
    def generateView(self):
        """Render the caption line and the large temperature read-out, centred."""
        canvas = Image.new("1",self.SIZE,0)
        painter = ImageDraw.Draw(canvas)
        label_font = ImageFont.truetype(self.FONT, 12)
        value_font = ImageFont.truetype(self.FONT, 32)
        y = 6
        # Centre the small caption horizontally.
        painter.text(((self.SIZE[0]-label_font.getsize(self.TEXT)[0])/2, y), self.TEXT, font=label_font, fill="white")
        y += 18
        reading = "{:.2f}".format(self.setTemperature) + "ยฐC"
        # Centre the big read-out the same way.
        painter.text(((self.SIZE[0]-value_font.getsize(reading)[0])/2, y), reading, font=value_font, fill="white")
        del painter
        del label_font
        del value_font
        return canvas
class MenuViewModel(ViewModel):
    """Vertical selection menu for the display."""
    def __init__(self, menuItems):
        self.menuItems = menuItems
        self.selectedMenuItem = 0
    def next(self):
        """Move the highlight down one entry, wrapping to the top."""
        self.selectedMenuItem = (self.selectedMenuItem + 1) % len(self.menuItems)
    def prev(self):
        """Move the highlight up one entry, wrapping to the bottom."""
        self.selectedMenuItem = (self.selectedMenuItem - 1) % len(self.menuItems)
    # currently maximum 5 entries, because of display limitation
    # TODO Scrolling View
    def generateView(self):
        """Render all entries; the selected one is drawn in inverse video."""
        canvas = Image.new("1",self.SIZE,0)
        painter = ImageDraw.Draw(canvas)
        menu_font = ImageFont.truetype(self.FONT, 12)
        x, y = 8, 2
        for idx, entry in enumerate(self.menuItems):
            if idx == self.selectedMenuItem:
                self.drawInversedText(painter, (x, y), entry, menu_font)
            else:
                painter.text((x, y), entry, font=menu_font, fill="white")
            y += 12
        del painter
        del menu_font
        return canvas
|
import bs4
import requests
import csv
# Scrape current temperatures for Singapore towns and save them to data.csv.
requestObj = requests.get("http://www.weather.gov.sg/weather-currentobservations-temperature")
requestObj.raise_for_status()
soup = bs4.BeautifulSoup(requestObj.text, 'html.parser')
# The region popover holds one <span> per town; the town name is embedded in a
# <strong> tag inside the span's data-content attribute, the reading in its text.
data = soup.find("div", {"id": "sg_region_popover"})
children = data.findChildren("span" , recursive=False)
towns = []
# "with" guarantees the CSV file is closed even if parsing fails part-way
# (and we no longer create an empty file before the request succeeds).
with open("data.csv", "w", newline='') as writerFileHandle:
    writer1 = csv.writer(writerFileHandle)
    for i in children:
        tmp = i["data-content"]
        marker1 = tmp.find("<strong>")
        marker2 = tmp.find("</strong>")
        location = tmp[marker1 + 8:marker2]  # 8 == len("<strong>")
        writer1.writerow([location, i.text])
        towns.append([location, i.text])
# Removed the dead "y = {}" that was assigned and never used.
for (k,v) in towns:
    print ("Key:" + k + " " + "Value:" + v)
"""
Divide two integers without using multiplication, division and mod operator.
If it is overflow, return MAX_INT.
"""
class Solution(object):
    """LeetCode 29: divide two integers without *, / or %."""
    def divide(self, dividend, divisor):
        """
        :type dividend: int
        :type divisor: int
        :rtype: int
        """
        MAX_INT = 2147483647
        MIN_INT = -2147483648
        # Division by zero and the single overflowing case both clamp to MAX_INT.
        if (divisor == 0) or (divisor == -1 and dividend == MIN_INT):
            return MAX_INT
        # The result is negative exactly when the operands differ in sign.
        negative = (dividend < 0) != (divisor < 0)
        dividend, divisor = abs(dividend), abs(divisor)
        # Collect divisor, 2*divisor, 4*divisor, ... largest first.
        doublings = []
        while dividend >= divisor:
            doublings.insert(0, divisor)
            divisor += divisor
        quotient = 0
        # Greedily subtract the largest chunk that still fits; each chunk at
        # position *index* is worth 2**(len-1-index) units of the quotient.
        for index, chunk in enumerate(doublings):
            if dividend >= chunk:
                dividend -= chunk
                quotient += 2**(len(doublings)-1-index)
        return -quotient if negative else quotient
# Demo: 19 // 3 == 6.
s = Solution()
# BUGFIX: "print s.divide(...)" is Python 2 statement syntax and a
# SyntaxError on Python 3; the call form works on both.
print(s.divide(19, 3))
|
from flask.blueprints import Blueprint
from flask import render_template
from flask import request
from managers.dbService import DatabaseManager
from extensions import db
# Single shared database-manager facade over the SQLAlchemy handle.
db_manager = DatabaseManager(db)
# Blueprint bundling the add-group pages; registered by the application elsewhere.
addGroup = Blueprint('addGroup', __name__,
                     template_folder='templates',
                     static_folder='static')
# NOTE(review): this rule overlaps the GET handling of addGroupRoute below
# (same path, both accept GET) -- confirm which handler is intended for GET.
@addGroup.route('/addGroup')
def getGroupTemplate():
    """Serve the empty add-group form."""
    return render_template('addGroup.html')
@addGroup.route('/addGroup', methods=['post', 'get'])
def addGroupRoute():
    """Handle the add-group form: validate, persist, and re-render with feedback.

    BUGFIX: *message* is now initialised before the method check -- the
    original only assigned it inside the POST branch, so a plain GET raised
    UnboundLocalError at the render_template call.
    """
    message = None
    if request.method == 'POST':
        name = request.form.get('name')
        if name:
            message = "Correct data"
            db_manager.add_group(name=name)
        else:
            message = "Wrong data"
    return render_template('addGroup.html', message=message)
|
import requests
import os.path
from unrar import rarfile
from clint.textui import progress
# Direct download link for the FIAS (Russian federal address DB) export archive.
fias_url = 'https://fias-file.nalog.ru/ExportDownloads?file=5158f5b0-3e7a-44a4-acf9-efaddee71fe2'
# Local cache filename for the downloaded archive.
fias_file = 'fias_db.rar'
def extract_addrob(file_path):
    """Extract every ADDROB* member (address-object tables) from the rar archive.

    Files are extracted into the current working directory.
    """
    r_file = rarfile.RarFile(file_path)
    for f in r_file.infolist():
        if f.filename.startswith('ADDROB'):
            #print (f.filename, f.file_size)
            r_file.extract(f)
# Download the archive only once; re-runs reuse the cached local copy.
if not os.path.isfile(fias_file):
    r = requests.get(fias_url, allow_redirects=True, stream=True)
    # Use the fias_file constant instead of repeating the literal
    # "fias_db.rar" -- the two could silently drift apart.
    with open(fias_file, "wb") as Pypdf:
        # content-length drives the progress bar's expected chunk count.
        total_length = int(r.headers.get('content-length'))
        for ch in progress.bar(r.iter_content(chunk_size = 1024), expected_size=(total_length/1024) + 1):
            if ch:
                Pypdf.write(ch)
extract_addrob(fias_file)
|
import fileinput
# input is .txt list of coordinates in form: x,y
# output is .txt list of code to paste on arduino IDE
X = []
Y = []
every_n = 2 # remove every other pixel to increase refresh rate
i = 0
for line in fileinput.input():
    i += 1
    # Keep only every every_n-th point (the first line, i == 1, is dropped).
    if i % every_n != 0: continue
    x, y = line.strip().split(",")
    X.append(x)
    Y.append(y)
# Emit C array initializers ready to paste into the Arduino sketch.
print("const unsigned long x_points[NUM_POINTS] = {%s};" % ','.join(X))
print("const unsigned long y_points[NUM_POINTS] = {%s};" % ','.join(Y))
# Number of kept points -- use this value as NUM_POINTS in the sketch.
print(len(X))
#call python trimpoints.py inputlist.txt > arduino_list.txt
|
#!/usr/bin/env python3
# Resilience
#Problem 243
#A positive fraction whose numerator is less than its denominator is called a proper fraction.
#For any denominator, d, there will be dโ1 proper fractions; for example, with dโ=โ12:
#1/12 , 2/12 , 3/12 , 4/12 , 5/12 , 6/12 , 7/12 , 8/12 , 9/12 , 10/12 , 11/12 .
#We shall call a fraction that cannot be cancelled down a resilient fraction.
#Furthermore we shall define the resilience of a denominator, R(d), to be the ratio of its proper fractions that are resilient; for example, R(12) = 4/11 .
#In fact, dโ=โ12 is the smallest denominator having a resilience R(d) < 4/10 .
#Find the smallest denominator d, having a resilience R(d) < 15499/94744 .
# this needs euliers totient function
import math
import random
import time
# Wall-clock start for the run-time report printed at the end of __main__.
s1=time.time()
#perform a Modular exponentiation
def modular_pow(base, exponent, modulus):
    """Compute (base ** exponent) % modulus by right-to-left binary exponentiation."""
    result = 1
    while exponent > 0:
        # Multiply in the current power of base whenever the low bit is set.
        if exponent % 2 == 1:
            result = (result * base) % modulus
        exponent >>= 1
        base = (base * base) % modulus
    return result
#Miller-Rabin primality test
def checkMillerRabin(n,k):
if n==2: return True
if n==1 or n%2==0: return False
#find s and d, with d odd
s=0
d=n-1
while(d%2==0):
d/=2
s+=1
assert (2**s*d==n-1)
#witness loop
composite=1
for i in range(k):
a=random.randint(2,n-1)
x=modular_pow(a,d,n)
if x==1 or x==n-1: continue
for j in range(s-1):
composite=1
x=modular_pow(x,2,n)
if x==1: return False #is composite
if x==n-1:
composite=0
break
if composite==1:
return False #is composite
return True #is probably prime
def findPrimes(n): #generate a list of primes, using the sieve of eratosthenes
    """Return all primes p with 2 <= p <= n, ascending."""
    is_prime = [True] * (n + 2)
    for candidate in range(2, int(math.sqrt(n)) + 1):
        if is_prime[candidate]:
            # Strike out multiples starting at candidate**2 (smaller ones
            # were already removed by smaller factors).
            for multiple in range(candidate**2, n + 1, candidate):
                is_prime[multiple] = False
    return [p for p in range(2, n + 1) if is_prime[p]]
def primeFactorization(n,primes): #find the factors of a number
    """Return the prime factorization of n (with multiplicity, ascending).

    *primes* must be an ascending list containing every prime factor of n;
    otherwise the loop runs off the end of the list (IndexError).
    """
    factors=[]
    i=0
    while(n!=1):
        if(n%primes[i]==0):
            factors.append(primes[i])
            # BUGFIX: floor division keeps n an int -- "n /= p" drifted into
            # floats on Python 3 and loses precision for large n.
            n//=primes[i]
        else:
            i+=1
    return factors
def phi(n,primes):
    """Euler's totient of n.

    *primes* must cover every prime factor of n (see primeFactorization).
    """
    # phi(p) = p - 1 for prime p; Miller-Rabin is much faster than factorizing.
    if (checkMillerRabin(n,10)==True): #fast prime check
        return n-1
    factors=primeFactorization(n,primes) #prime factors
    distinctive_prime_factors=set(factors)
    totient=n
    # phi(n) = n * prod(1 - 1/p) over distinct primes p. Done in integer
    # arithmetic (each distinct p divides the running product exactly) --
    # the original float form 1 - 1.0/p accumulates rounding error for
    # large n.
    for f in distinctive_prime_factors:
        totient = totient // f * (f - 1)
    return totient
if __name__ == '__main__':
    s=0
    N=165975
    # N=430000
    # One sieve up-front; phi() and primeFactorization() reuse this list.
    primes=findPrimes(N) #upper bound for the number of primes
    # Target resilience from the problem statement: R(d) < 15499/94744.
    limit = 15499/94744
    for i in range(1,N):
        a=phi(i,primes)
        s+=a
        # NOTE(review): R(d) = phi(d)/(d-1), but this compares
        # (i - phi(i) - 1)/i against the limit -- confirm the intended formula.
        if (i-a-1)/i < limit:
            print(i, i-a-1)
    print("Sum =",s )
    #limit = 15499/94744
    #a=True
    #i=12
    #while a:
    # #for i in range(len(a)):
    # s = phi(i)
    # if (i-phi(i)-1)/i < limit:
    # print(i-phi(i)-1,i); a=False
    # i+=1
    # Total wall-clock run time (s1 is set at import, near the top of the file).
    print("{}s".format(time.time() - s1))
#
|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline,make_pipeline
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectKBest
from sklearn import model_selection, metrics
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
from pandas.core.frame import DataFrame
from sklearn.model_selection import KFold
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
train = pd.read_csv('/Users/calvin/python/crime project/test_100.csv')
# -- initial setup -------------------------------------------------------------
train.dropna(inplace=True)
# Target column, captured before it is dropped from the feature frame.
train_y=train['Event count']
#all_data = pd.concat([train, test], ignore_index = True)
# BUGFIX: DataFrame.drop/dropna return NEW frames -- the original discarded
# all three results, so the date column AND the target leaked into the
# features fed to the network.
train = train.drop(columns=["COPLNT_DAY"])
train_x = train.drop(columns=["Event count"])
train_x = train_x.dropna(axis=0, how='any')
train_x = pd.get_dummies(train_x)
#seperate the dataset----------------------------------------------------------
#case 1
# 50/50 train/validation split.
train_x, valid_x, train_y, valid_y = train_test_split(train_x, train_y, test_size=0.5)
#training set
# Convert to plain numpy arrays; targets become column vectors.
train_x=train_x.values
train_y=train_y.values
train_y=train_y.reshape(-1,1)
valid_x=valid_x.values
valid_y=valid_y.values
valid_y=valid_y.reshape(-1,1)
# Standardisation: fit on the training split only, then apply to both.
scaler = StandardScaler()
scaler.fit(train_x)
train_x = scaler.transform(train_x)
valid_x = scaler.transform(valid_x)
#test = scaler.transform(test)
#PCA---------------------------------------------------------------------------
pca_num=0
delta=0.1
# Keep components explaining 99.9999% of variance (effectively all of it).
pca=PCA(n_components = 0.999999)
train_x=pca.fit_transform(train_x)
valid_x=pca.transform(valid_x)
#test=pca.transform(test)
# Network input width after PCA.
dimention=train_x.shape[1]
#Standarisation for whitening--------------------------------------------------
# to avoid roudoff error
# log-sum-exp trick----------------------------------------------------------
#case1
#getmin = np.min(train_y)
#getmax = np.max(train_y)
#train_yn = (train_y - getmin) / (getmax - getmin)
#valid_yn = (valid_y - getmin) / (getmax - getmin)
## case2
#train_y=np.log(train_y)
#valid_y=np.log(valid_y)
#DEEP LEARNING STRUCTURE-------------------------------------------------------
print("DNN start")
#learning_rate_setting=[0.1,0.01,0.001,0.0001,0.00001,0.000001,0.0000001]
# Training hyper-parameters.
learning_rate = 0.00001
training_epochs = 400
display_step = 20
batch_size=1024
# Hidden layer widths, input side first; the final 10 is the output width.
layer=[300,250,200,180,150,150,100,50,10]
n_samples = train_x.shape[0]
def get_batch(data_x, data_y, batch_size):
    """Yield successive (x, y) mini-batches; a trailing partial batch is dropped."""
    n_batches = len(data_x) // batch_size
    for b in range(n_batches):
        lo = b * batch_size
        hi = lo + batch_size
        yield data_x[lo:hi], data_y[lo:hi]
def neural_net_model(X_data,input_dim):
    """Build a 9-layer fully connected network and its training cost.

    Args:
        X_data: feature placeholder of shape (None, input_dim).
        input_dim: number of input features after PCA.
    Returns:
        (cost, prediction): softmax cross-entropy cost tensor and the raw
        logits of the final layer (layer[-1] == 10 units).
    """
    epsilon = 0.001
    ema = tf.train.ExponentialMovingAverage(decay=0.5)
    def mean_var_with_update():
        # Track running batch statistics via EMA. NOTE(review): the returned
        # (mean, var) pair is never fed to batch_normalization below -- the
        # raw batch stats are used directly; confirm whether the EMA values
        # were meant to be used at inference time.
        ema_apply_op = ema.apply([fc_mean, fc_var])
        with tf.control_dependencies([ema_apply_op]):
            return tf.identity(fc_mean), tf.identity(fc_var)
    # Layer 1: input -> layer[0]; weights scaled by 1/sqrt(fan_in), ReLU.
    W_1 = tf.Variable(tf.random_uniform([input_dim,layer[0]])*np.sqrt(1/input_dim))
    b_1 = tf.Variable(tf.zeros([layer[0]]))
    layer_1 = tf.add(tf.matmul(X_data,W_1), b_1)
    layer_1 = tf.nn.relu(layer_1)
    # Layer 2, with batch normalisation before the ReLU.
    W_2 = tf.Variable(tf.random_uniform([layer[0],layer[1]])*np.sqrt(1/layer[0]))
    b_2 = tf.Variable(tf.zeros([layer[1]]))
    layer_2 = tf.add(tf.matmul(layer_1,W_2), b_2)
    fc_mean, fc_var = tf.nn.moments(layer_2,axes=[0])
    scale_2 = tf.Variable(tf.ones([layer[1]]))
    shift_2 = tf.Variable(tf.zeros([layer[1]]))
    mean, var = mean_var_with_update()
    layer_2 = tf.nn.batch_normalization(layer_2, fc_mean, fc_var, shift_2, scale_2, epsilon)
    layer_2 = tf.nn.relu(layer_2)
    # Layer 3, with batch normalisation before the ReLU.
    W_3 = tf.Variable(tf.random_uniform([layer[1],layer[2]])*np.sqrt(1/layer[1]))
    b_3 = tf.Variable(tf.zeros([layer[2]]))
    layer_3 = tf.add(tf.matmul(layer_2,W_3), b_3)
    fc_mean, fc_var = tf.nn.moments(layer_3,axes=[0])
    scale_3 = tf.Variable(tf.ones([layer[2]]))
    shift_3 = tf.Variable(tf.zeros([layer[2]]))
    mean, var = mean_var_with_update()
    layer_3 = tf.nn.batch_normalization(layer_3, fc_mean, fc_var, shift_3, scale_3, epsilon)
    layer_3 = tf.nn.relu(layer_3)
    # Layers 4-8: plain dense + ReLU.
    W_4 = tf.Variable(tf.random_uniform([layer[2],layer[3]])*np.sqrt(1/layer[2]))
    b_4 = tf.Variable(tf.zeros([layer[3]]))
    layer_4 = tf.nn.relu(tf.add(tf.matmul(layer_3,W_4), b_4))
    W_5 = tf.Variable(tf.random_uniform([layer[3],layer[4]])*np.sqrt(1/layer[3]))
    b_5 = tf.Variable(tf.zeros([layer[4]]))
    layer_5 = tf.nn.relu(tf.add(tf.matmul(layer_4,W_5), b_5))
    W_6 = tf.Variable(tf.random_uniform([layer[4],layer[5]])*np.sqrt(1/layer[4]))
    b_6 = tf.Variable(tf.zeros([layer[5]]))
    layer_6 = tf.nn.relu(tf.add(tf.matmul(layer_5,W_6), b_6))
    W_7 = tf.Variable(tf.random_uniform([layer[5],layer[6]])*np.sqrt(1/layer[5]))
    b_7 = tf.Variable(tf.zeros([layer[6]]))
    layer_7 = tf.nn.relu(tf.add(tf.matmul(layer_6,W_7), b_7))
    W_8 = tf.Variable(tf.random_uniform([layer[6],layer[7]])*np.sqrt(1/layer[6]))
    b_8 = tf.Variable(tf.zeros([layer[7]]))
    layer_8 = tf.nn.relu(tf.add(tf.matmul(layer_7,W_8), b_8))
    # Output layer: raw logits, no activation.
    W_9 = tf.Variable(tf.random_uniform([layer[7],layer[8]])*np.sqrt(1/layer[7]))
    b_9 = tf.Variable(tf.zeros([layer[8]]))
    prediction = tf.add(tf.matmul(layer_8,W_9), b_9)
    # BUGFIX: labels must be the module-level placeholder Y; the original
    # referenced an undefined lowercase "y", raising NameError at graph build.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction,labels=Y))
    return cost , prediction
cost_history = np.empty(shape=[1],dtype=float)
cost_history_plot=[]
X = tf.placeholder("float32",[None, dimention],name="my_x")
Y = tf.placeholder("float32",name="my_y")
# our mean squared error cost function
# Gradinent Descent optimiztion just discussed above for updating weights and biases
cost,prediction = neural_net_model(X,dimention)
correct = tf.equal(tf.argmax(prediction,1),tf.argmax(Y,1)) #get the max reaction and decide to the prediction class.
accuracy = tf.reduce_mean(tf.cast(correct,'float'))# cast ่กจ็คบๅฐๅไพ็data่ฝๆ็บๅ
ถไปtype
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
with tf.Session() as sess:
    # Run the initializer
    init = tf.global_variables_initializer()
    sess.run(init)
    # Fit all training data, one mini-batch at a time.
    for epoch in range(training_epochs):
        for (x, y) in get_batch(train_x,train_y,batch_size):
            sess.run(optimizer, feed_dict={X: x, Y: y})
        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            c = sess.run(cost, feed_dict={X: train_x, Y:train_y})
            pred_valid = sess.run(cost,feed_dict={X:valid_x,Y:valid_y})
            pred_train = sess.run(cost,feed_dict={X:train_x,Y:train_y})
            print('Number: %d epoch' % (epoch+1),'\n','valid cost: ' , pred_valid)
            print('Number: %d epoch' % (epoch+1),'\n','train cost: ' , pred_train)
            # BUGFIX: feed the placeholders X/Y -- the original used the
            # lowercase batch-loop variables x/y (numpy arrays) as feed_dict
            # keys, which is not a valid feed and crashes.
            accuracy_valid = sess.run(accuracy,feed_dict={X:valid_x,Y:valid_y})
            accuracy_train = sess.run(accuracy,feed_dict={X:train_x,Y:train_y})
            print('valid Acc: ' , accuracy_valid)
            print('Train Acc: ' , accuracy_train)
        cost_history = np.append(cost_history,sess.run(cost,feed_dict={X:train_x,Y:train_y}))
    print("Optimization Finished!")
    # Keep a copy of the cost trace for the learning-rate plots below.
    cost_history_plot=np.append(cost_history_plot,cost_history)
    cost_history_forLR=cost_history_plot
    # show final accuracy
    pred_valid = sess.run(cost,feed_dict={X:valid_x,Y:valid_y})
    pred_train = sess.run(cost,feed_dict={X:train_x,Y:train_y})
    print('Number: %d epoch' % (epoch+1),'\n','valid cost: ' , pred_valid)
    print('Number: %d epoch' % (epoch+1),'\n','train cost: ' , pred_train)
    # BUGFIX: same placeholder-key fix as above for the final report.
    accuracy_valid = sess.run(accuracy,feed_dict={X:valid_x,Y:valid_y})
    accuracy_train = sess.run(accuracy,feed_dict={X:train_x,Y:train_y})
    print('valid Acc: ' , accuracy_valid)
    print('Train Acc: ' , accuracy_train)
    # Persist the trained weights next to the script.
    oSaver = tf.train.Saver()
    oSess = sess
    oSaver.save(oSess,"./crime_model")
#plot different learning rates figure
#cost_history_plot=np.array(cost_history_plot)
#ja.Plot(np.arange(len(cost_history_plot)),cost_history_plot)
# ็ซๅ้ฉ่ญcost, epoch, optimal point============================================
#=============================================================================
plt.figure(4)
# Plot the per-epoch training cost and report the epoch with the minimum.
fig, ax = plt.subplots()
ax.plot(cost_history,'r')
ax.set_xlabel('epoch')
ax.set_ylabel('Cost')
A=np.array(cost_history)
best_epoch=np.argmin(A)
print('best_cost:',min(cost_history),'achieved at epoch:',best_epoch)
plt.show()
plt.pause(0.1)
# =============================================================================
# start for testing set
# =============================================================================
#print("start to test the data")
#test=np.array(test, dtype=np.float64)
#saver = tf.train.Saver()
#
#with tf.Session() as sess:
# sess.run(tf.global_variables_initializer())
# saver.restore(sess, "./house_test_01_model")
#
# test_y = sess.run(pred, feed_dict={X: test})
# test_y=test_y.flatten()
# Id=np.array(Id)
## test_y=pd.DataFrame({"SalePrice":test_y})
#
## #้ๅPCA
## test_y=pca.inverse_transform(test_y)
## #้ๅๆจๆบๅ
## test_y=scaler.inverse_transform(test_y)
#
# submission = pd.DataFrame(data={"Id":Id,"SalePrice":test_y}, index=[np.arange(1459)])
# submission.to_csv("submission_house_result.csv", index=False)
|
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.contrib.auth.views import (
password_reset, password_reset_done, password_reset_confirm,
password_reset_complete, password_change, password_change_done,
)
from django.test import RequestFactory, TestCase
from django.test import override_settings
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode
@skipIfCustomUser
@override_settings(
    # Fast, deterministic password hashing plus the contrib.auth test URLconf.
    PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
    ROOT_URLCONF='django.contrib.auth.tests.urls',
)
class AuthTemplateTests(TestCase):
    """Smoke-test the <title> and <h1> rendered by each auth view template."""
    def test_titles(self):
        """Walk every password reset/change view and check its heading markup."""
        rf = RequestFactory()
        user = User.objects.create_user('jsmith', 'jsmith@example.com', 'pass')
        user = authenticate(username=user.username, password='pass')
        request = rf.get('/somepath/')
        request.user = user
        response = password_reset(request, post_reset_redirect='dummy/')
        self.assertContains(response, '<title>Password reset</title>')
        self.assertContains(response, '<h1>Password reset</h1>')
        response = password_reset_done(request)
        self.assertContains(response, '<title>Password reset sent</title>')
        self.assertContains(response, '<h1>Password reset sent</h1>')
        # password_reset_confirm invalid token
        response = password_reset_confirm(request, uidb64='Bad', token='Bad', post_reset_redirect='dummy/')
        self.assertContains(response, '<title>Password reset unsuccessful</title>')
        self.assertContains(response, '<h1>Password reset unsuccessful</h1>')
        # password_reset_confirm valid token
        default_token_generator = PasswordResetTokenGenerator()
        token = default_token_generator.make_token(user)
        uidb64 = force_text(urlsafe_base64_encode(force_bytes(user.pk)))
        response = password_reset_confirm(request, uidb64, token, post_reset_redirect='dummy/')
        self.assertContains(response, '<title>Enter new password</title>')
        self.assertContains(response, '<h1>Enter new password</h1>')
        response = password_reset_complete(request)
        self.assertContains(response, '<title>Password reset complete</title>')
        self.assertContains(response, '<h1>Password reset complete</h1>')
        response = password_change(request, post_change_redirect='dummy/')
        self.assertContains(response, '<title>Password change</title>')
        self.assertContains(response, '<h1>Password change</h1>')
        response = password_change_done(request)
        self.assertContains(response, '<title>Password change successful</title>')
        self.assertContains(response, '<h1>Password change successful</h1>')
|
#Find the sum of the series 2 +22 + 222 + 2222 + .. n terms
n = int(input('Enter the iteration number:'))
total = 0  # renamed from "sum" to avoid shadowing the builtin sum()
for i in range(1, n + 1):
    # The i-th term is the digit 2 repeated i times: 2, 22, 222, ...
    total += int('2' * i)
print(total)
mystr = "Rehan is a good man"
print(len(mystr))
print(mystr[0:5])
print(mystr[::-1])
print(mystr[11:-4])
print(mystr.isalnum())
print(mystr.endswith("man"))
print(mystr.endswith("manw"))
print(mystr.capitalize())
print(mystr.upper())
print(mystr.lower())
print(mystr.replace("Rehan", "Reho"))
print(mystr.count("n"))
|
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.gridlayout import GridLayout
from kivy.app import App
import firebase
url = "https://elainejomane.firebaseio.com/" # URL to Firebase database
token = "D68F25AuafW6vMWNHGx4iYTzL68rJw2AAiJ9QCOI" # unique token used for authentication
firebase = firebase.FirebaseApplication(url, token)
class MainScreen(Screen):
    """Landing screen: navigation buttons to each booking area, plus quit."""
    def __init__(self, **kwargs):
        Screen.__init__(self, **kwargs)
        self.layout = BoxLayout()
        # (caption, handler) pairs, added left-to-right in this order.
        for caption, handler in (
            ("Classrooms", self.changeToClass),
            ("Meeting Rooms", self.changeToMeeting),
            ("Library", self.changeToLibrary),
            ("Quit", self.quitApp),
        ):
            self.layout.add_widget(Button(text=caption, on_press=handler, halign='left'))
        self.add_widget(self.layout)
    def changeToClass(self, value):
        """Slide left to the classrooms screen."""
        self.manager.transition.direction = 'left'
        self.manager.current = 'classrooms'
    def changeToMeeting(self, value):
        """Slide right to the meeting-rooms screen."""
        self.manager.transition.direction = 'right'
        self.manager.current = 'meetingrooms'
    def changeToLibrary(self, value):
        """Slide down to the library screen."""
        self.manager.transition.direction = 'down'
        self.manager.current = 'library'
    def quitApp(self, value):
        """Stop the running Kivy application."""
        App.get_running_app().stop()
class ClassroomScreen(Screen):
    """Screen listing the two classrooms with per-room Book buttons.

    Booking state per room is mirrored in Firebase under /clr1 and /clr2:
    0 = free, 1 and 2 = booked (button disabled); any other value is treated
    as free and normalised back to 0. Only the 0 -> 2 transition is written
    back to Firebase.
    NOTE(review): the distinct meanings of states 1 and 2 are not visible
    here -- confirm against the rest of the system.
    """

    def __init__(self, **kwargs):
        Screen.__init__(self, **kwargs)
        self.layout = GridLayout(cols=2)
        # One label + Book button per classroom.
        self.cr1t = Label(text="Classroom 1", halign='left')
        self.layout.add_widget(self.cr1t)
        self.cr1b = Button(text="Book", on_press=self.book1, halign='left')
        self.layout.add_widget(self.cr1b)
        self.cr2t = Label(text="Classroom 2", halign='left')
        self.layout.add_widget(self.cr2t)
        self.cr2b = Button(text="Book", on_press=self.book2, halign='left')
        self.layout.add_widget(self.cr2b)
        # Navigation buttons.
        bmn = Button(text="Back to Main", on_press=self.changeToMain, halign='left')
        self.layout.add_widget(bmn)
        bmet = Button(text="Meeting Rooms", on_press=self.changeToMeeting, halign='left')
        self.layout.add_widget(bmet)
        blib = Button(text="Library", on_press=self.changeToLibrary, halign='left')
        self.layout.add_widget(blib)
        # Pull current booking states from Firebase and reflect them in the UI.
        self.clr1 = self._sync_state(firebase.get('/clr1'), self.cr1b)
        self.clr2 = self._sync_state(firebase.get('/clr2'), self.cr2b)
        btn = Button(text="Quit", on_press=self.quitApp, halign='left')
        self.layout.add_widget(btn)
        self.add_widget(self.layout)

    def _sync_state(self, state, button):
        """Enable/disable *button* for *state*; return the normalised state.

        Replaces the duplicated if/elif ladders of the original (identical
        logic, written once).
        """
        if state in (1, 2):
            button.disabled = True
            return state
        button.disabled = False
        return 0

    def _book(self, state, button, key):
        """Handle a Book press: claim a free room (state 0 -> 2, persisted to
        Firebase under *key*); otherwise just re-sync the button state."""
        if state == 0:
            state = 2
            button.disabled = True
            firebase.put('/', key, state)
            return state
        return self._sync_state(state, button)

    def book1(self, value):
        self.clr1 = self._book(self.clr1, self.cr1b, 'clr1')

    def book2(self, value):
        # BUG FIX: the original disabled cr1b (classroom 1's button) when
        # classroom 2 was in state 2; all updates now target cr2b.
        self.clr2 = self._book(self.clr2, self.cr2b, 'clr2')

    def changeToMain(self, value):
        self.manager.transition.direction = 'right'
        self.manager.current = 'main'

    def changeToMeeting(self, value):
        self.manager.transition.direction = 'right'
        self.manager.current = 'meetingrooms'

    def changeToLibrary(self, value):
        self.manager.transition.direction = 'down'
        self.manager.current = 'library'

    def changeToSetting(self, value):
        # NOTE(review): no 'settings' screen is registered in BookingApp.build,
        # so this handler appears unreachable -- confirm before removing.
        self.manager.transition.direction = 'left'
        self.manager.current = 'settings'

    def quitApp(self, value):
        App.get_running_app().stop()
class MeetingroomScreen(Screen):
    """Meeting-room screen: navigation buttons only (no booking yet)."""

    def __init__(self, **kwargs):
        Screen.__init__(self, **kwargs)
        self.layout = BoxLayout()
        # Same buttons as the original, built from a table.
        for caption, handler in (
            ("Back to Main", self.changeToMain),
            ("Classrooms", self.changeToClass),
            ("Library", self.changeToLibrary),
            ("Quit", self.quitApp),
        ):
            self.layout.add_widget(
                Button(text=caption, on_press=handler, halign='left'))
        self.add_widget(self.layout)

    def _go(self, direction, screen_name):
        # Shared helper: set the slide direction, then switch screens.
        self.manager.transition.direction = direction
        self.manager.current = screen_name

    def changeToMain(self, value):
        self._go('right', 'main')

    def changeToClass(self, value):
        self._go('right', 'classrooms')

    def changeToLibrary(self, value):
        self._go('down', 'library')

    def quitApp(self, value):
        App.get_running_app().stop()
class LibraryScreen(Screen):
    """Library screen: navigation buttons only (no booking yet)."""

    def __init__(self, **kwargs):
        Screen.__init__(self, **kwargs)
        self.layout = BoxLayout()
        # Same buttons as the original, built from a table.
        for caption, handler in (
            ("Back to Main", self.changeToMain),
            ("Classrooms", self.changeToClass),
            ("Meeting Rooms", self.changeToMeeting),
            ("Quit", self.quitApp),
        ):
            self.layout.add_widget(
                Button(text=caption, on_press=handler, halign='left'))
        self.add_widget(self.layout)

    def _go(self, direction, screen_name):
        # Shared helper: set the slide direction, then switch screens.
        self.manager.transition.direction = direction
        self.manager.current = screen_name

    def changeToMain(self, value):
        self._go('right', 'main')

    def changeToClass(self, value):
        self._go('right', 'classrooms')

    def changeToMeeting(self, value):
        self._go('down', 'meetingrooms')

    def quitApp(self, value):
        App.get_running_app().stop()
class BookingApp(App):
    """App entry point: wires all four screens into one ScreenManager."""

    def build(self):
        manager = ScreenManager()
        # Screen names here must match the targets used by the _go/changeTo*
        # handlers on each screen.
        manager.add_widget(MainScreen(name='main'))
        manager.add_widget(ClassroomScreen(name='classrooms'))
        manager.add_widget(MeetingroomScreen(name='meetingrooms'))
        manager.add_widget(LibraryScreen(name='library'))
        manager.current = 'main'
        return manager


if __name__ == '__main__':
    BookingApp().run()
import re
from django.db.models import Max
from django.test import TestCase
from django.core.urlresolvers import reverse
from ..models import Mineral
from ..forms import MineralSearchForm
from ..templatetags.mineral_extras import GROUPS, COLOURS, ALPHABET
class GlobalsTests(TestCase):
    """Sanity checks on the template-tag module constants."""

    def test_groups_list(self):
        """assert that the GROUPS list has groups in it"""
        self.assertGreater(len(GROUPS), 0)

    def test_colours_list(self):
        """assert that the COLOURS list has colours in it"""
        self.assertGreater(len(COLOURS), 0)

    def test_alpha_list(self):
        """assert that the ALPHABET list has all letters in it"""
        self.assertEqual(len(ALPHABET), 26)

    def test_alpha_list_valid(self):
        """assert that the ALPHABET list has valid single letters in it"""
        # compile once outside the loop; same pattern as the original
        single_lowercase = re.compile(r'^[a-z]$')
        for char in ALPHABET:
            self.assertRegex(char, single_lowercase)
class DetailViewTests(TestCase):
    """Tests for the mineral detail view (/detail/<pk>)."""
    # NOTE(review): assumes a Mineral with pk=1 exists in test_data.json.
    fixtures = ['test_data.json']
    def test_hard_url_with_arg(self):
        # Hard-coded URL form must resolve directly (no redirect expected).
        resp = self.client.get('/detail/1')
        self.assertEqual(resp.status_code, 200)
    def test_hard_url_without_arg(self):
        # The route requires a pk; without one the URL must 404.
        resp = self.client.get('/detail/')
        self.assertEqual(resp.status_code, 404)
    def test_named_url(self):
        # The named URL 'mineralsearch:detail' must reverse and resolve too.
        resp = self.client.get(
            reverse('mineralsearch:detail', kwargs={'pk': 1}))
        self.assertEqual(resp.status_code, 200)
    def test_template_used(self):
        resp = self.client.get(
            reverse('mineralsearch:detail', kwargs={'pk': 1}))
        self.assertTemplateUsed(resp, 'mineralsearchapp/detail.html')
    def test_single_mineral_is_retrieved(self):
        """This asserts a class not a queryset so we know that the count
        is one
        """
        resp = self.client.get(
            reverse('mineralsearch:detail', kwargs={'pk': 1}))
        self.assertIsInstance(resp.context['mineral'], Mineral)
class RandomViewTests(TestCase):
    """Tests for the random-mineral view (/random/)."""
    fixtures = ['test_data.json']
    def test_mineral_count(self):
        # NOTE(review): uses Max('id') as a proxy for "fixtures are loaded";
        # it also assumes the row with the maximum id exists (true for the
        # fixture data, not guaranteed in general).
        minerals = Mineral.objects.aggregate(
            number_of_minerals=Max('id')
        )
        number = minerals['number_of_minerals']
        self.assertGreater(number, 0)
        mineral = Mineral.objects.get(
            id=number
        )
        self.assertIsInstance(mineral, Mineral)
    def test_hard_url_with_arg(self):
        resp = self.client.get('/random/')
        self.assertEqual(resp.status_code, 200)
    def test_named_url(self):
        resp = self.client.get(reverse('mineralsearch:random'))
        self.assertEqual(resp.status_code, 200)
    def test_template_used(self):
        # The random view renders the same detail template as DetailView.
        resp = self.client.get(reverse('mineralsearch:random'))
        self.assertTemplateUsed(resp, 'mineralsearchapp/detail.html')
    def test_single_mineral_is_retrieved(self):
        """This asserts a class not a queryset so we know that the count
        is one
        """
        resp = self.client.get(reverse('mineralsearch:random'))
        self.assertIsInstance(resp.context['mineral'], Mineral)
class LetterViewTests(TestCase):
    """Tests for the by-first-letter listing view (/letter/<letter>)."""
    fixtures = ['test_data.json']
    def test_hard_url_with_arg(self):
        resp = self.client.get('/letter/z')
        self.assertEqual(resp.status_code, 200)
    def test_hard_url_without_arg(self):
        # Route requires exactly one letter; none -> 404.
        resp = self.client.get('/letter/')
        self.assertEqual(resp.status_code, 404)
    def test_hard_url_without_doublearg(self):
        # Two letters must not match the single-letter URL pattern.
        resp = self.client.get('/letter/ff')
        self.assertEqual(resp.status_code, 404)
    def test_named_url(self):
        resp = self.client.get(
            reverse('mineralsearch:letter', kwargs={'letter': 'z'}))
        self.assertEqual(resp.status_code, 200)
    def test_template_used(self):
        resp = self.client.get(
            reverse('mineralsearch:letter', kwargs={'letter': 'z'}))
        self.assertTemplateUsed(resp, 'mineralsearchapp/index.html')
    # Expected result set: every fixture mineral whose name starts with 'c',
    # lower-cased. Sorted in place below so the order assertion in
    # test_alpha_order checks alphabetical ordering of the view's queryset.
    c_elements = ['cacoxenite', 'cadmoindite', 'cafarsite', 'cahnite',
        'calaverite', 'calcite', 'calderite', 'caledonite', 'calumetite',
        'cancrinite', 'canfieldite', 'carletonite', 'carlsbergite', 'carminite',
        'carnallite', 'carnotite', 'carpathite', 'carpholite', 'carrollite',
        'caryopilite', 'cassiterite', 'cavansite', 'celadonite', 'celestine',
        'celsian', 'cerite', 'cerussite', 'cervantite', 'chabazite',
        'chalcanthite', 'chalcocite', 'chalcophyllite', 'chalcopyrite',
        'chambersite', 'chamosite', 'chapmanite', 'charoite', 'chesterite',
        'childrenite', 'chlorargyrite', 'chlorite group', 'chloritoid',
        'chlormayenite', 'chloroxiphite', 'chondrodite', 'chromite', 'chrysoberyl',
        'chrysocolla', 'chrysotile', 'cinnabar', 'claudetite', 'clausthalite',
        'clinoclase', 'clinodehrite', 'clinohumite', 'clinoptilolite',
        'clinozoisite', 'clintonite', 'cobaltite', 'coccinite', 'coesite',
        'coffinite', 'colemanite', 'collinsite', 'coloradoite', 'columbite-(fe)',
        'conichalcite', 'connellite', 'copiapite', 'copper', 'corderoite',
        'cordierite', 'cornubite', 'cornwallite', 'corundum', 'cotunnite',
        'covellite', 'creedite', 'cristobalite', 'crocoite', 'cronstedtite',
        'crossite', 'cryolite', 'cryptomelane', 'cubanite', 'cummingtonite',
        'cuprite', 'cuprosklodowskite', 'curite', 'cyanotrichite', 'cylindrite',
        'cyrilovite'
    ]
    c_elements.sort()
    def test_alpha_order(self):
        resp = self.client.get(
            reverse('mineralsearch:letter', kwargs={'letter': 'c'})
        )
        context = [x['name'] for x in resp.context['minerals']]
        elements = self.c_elements
        self.assertSequenceEqual(context, elements)
    def test_content_contains_context(self):
        # Rendered HTML must contain a detail link for a known 'c' mineral.
        resp = self.client.get(
            reverse('mineralsearch:letter', kwargs={'letter': 'c'})
        )
        self.assertInHTML(
            '<a class="minerals__anchor" href="/detail/148">Cacoxenite</a>',
            resp.content.decode('utf-8')
        )
class SearchViewTests(TestCase):
    """Tests for the free-text search view (/search/?q=...)."""
    fixtures = ['test_data.json']
    def test_hard_url_with_arg(self):
        resp = self.client.get('/search/', data={'q': 'gold'})
        self.assertEqual(resp.status_code, 200)
    def test_hard_url_without_arg(self):
        """An empty q in form is ok, it will just return all minerals
        """
        resp = self.client.get('/search/', data={'q': ''})
        self.assertEqual(resp.status_code, 200)
    def test_named_url(self):
        resp = self.client.get(reverse('mineralsearch:search'),
                               data={'q': 'gold'})
        self.assertEqual(resp.status_code, 200)
    def test_template_used(self):
        resp = self.client.get(reverse('mineralsearch:search'),
                               data={'q': 'gold'})
        self.assertTemplateUsed(resp, 'mineralsearchapp/index.html')
    def test_one_mineral_is_retrieved(self):
        """We know there is only one mineral with the q of Zunyite
        """
        # NOTE(review): count depends on test_data.json contents.
        resp = self.client.get(reverse('mineralsearch:search'),
                               data={'q': 'Zunyite'})
        self.assertEqual(len(resp.context['minerals']), 1)
class GroupViewTests(TestCase):
    """Tests for the by-group listing view (/group/<group>)."""
    fixtures = ['test_data.json']
    def test_hard_url_with_arg(self):
        resp = self.client.get('/group/Oxides')
        self.assertEqual(resp.status_code, 200)
    def test_hard_url_without_arg(self):
        # Route requires a group name; none -> 404.
        resp = self.client.get('/group/')
        self.assertEqual(resp.status_code, 404)
    def test_named_url(self):
        resp = self.client.get(
            reverse('mineralsearch:group', kwargs={'group': 'Oxides'}))
        self.assertEqual(resp.status_code, 200)
    def test_template_used(self):
        resp = self.client.get(
            reverse('mineralsearch:group', kwargs={'group': 'Oxides'}))
        self.assertTemplateUsed(resp, 'mineralsearchapp/index.html')
    def test_6_minerals_retrieved_by_group_filter(self):
        """There are only 6 minerals in the Native Elements group
        """
        # NOTE(review): count depends on test_data.json contents.
        resp = self.client.get(
            reverse('mineralsearch:group',
                    kwargs={'group': 'Native Elements'})
        )
        self.assertEqual(len(resp.context['minerals']), 6)
class ColourViewTests(TestCase):
    """Tests for the by-colour listing view (/colour/<colour>)."""
    fixtures = ['test_data.json']
    def test_hard_url_with_arg(self):
        resp = self.client.get('/colour/gold')
        self.assertEqual(resp.status_code, 200)
    def test_hard_url_without_arg(self):
        # Route requires a colour; none -> 404.
        resp = self.client.get('/colour/')
        self.assertEqual(resp.status_code, 404)
    def test_named_url(self):
        resp = self.client.get(
            reverse('mineralsearch:colour', kwargs={'colour': 'gold'}))
        self.assertEqual(resp.status_code, 200)
    def test_template_used(self):
        resp = self.client.get(
            reverse('mineralsearch:colour', kwargs={'colour': 'gold'}))
        self.assertTemplateUsed(resp, 'mineralsearchapp/index.html')
    def test_correct_minerals_are_retrieved(self):
        # NOTE(review): docstring below was copy-pasted from the detail tests;
        # this test actually asserts the fixture count of gold minerals (9).
        """This asserts a class not a queryset so we know that the count
        is one
        """
        resp = self.client.get(
            reverse('mineralsearch:colour', kwargs={'colour': 'gold'}))
        self.assertEqual(len(resp.context['minerals']), 9)
|
#!/usr/bin/env python
def generate_plane():
    """Return (row_indices, column_indices) for a 128-row x 8-column plane.

    Rows are 0..127, columns 0..7 (AoC 2020 day 5 seat layout).
    """
    # list(range(n)) replaces the identity comprehensions of the original
    return list(range(128)), list(range(8))
def split_plane(seat_values, seat_range):
    """Binary-space-partition `seat_range` using the characters of `seat_values`.

    'F'/'L' keep the lower half, 'B'/'R' keep the upper half; any other
    character leaves the range unchanged. Returns the first remaining value.
    """
    remaining = seat_range
    for letter in seat_values:
        half = len(remaining) // 2
        if letter in ("F", "L"):
            remaining = remaining[:half]
        elif letter in ("B", "R"):
            remaining = remaining[half:]
    return remaining[0]
def find_seat_max(inputfile):
    """Return the highest seat ID in `inputfile` (AoC 2020 day 5, part 1).

    Each line is a 10-char boarding pass: 7 F/B row characters followed by
    3 L/R column characters; seat ID = row * 8 + column.
    """
    plane_length, plane_width = generate_plane()
    max_seat_id = 0
    # `with` guarantees the file handle is closed (the original leaked it)
    with open(inputfile) as f:
        for seat in f.read().splitlines():
            row = split_plane(seat[:7], plane_length)
            column = split_plane(seat[7:], plane_width)
            # row/column are already ints; the original's int() casts were no-ops
            seat_id = row * 8 + column
            max_seat_id = max(max_seat_id, seat_id)
    return max_seat_id


if __name__ == "__main__":
    print(find_seat_max("day5input.txt"))
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
import pyaudio
import oscilators
# Amplitude envelope: a(t) moves from `start` to `final` following a power
# curve of exponent `rate` over the full span of the time axis `t`.
def envelope(t, start, final, rate):
    """Return amplitude values a(t) for each time sample in `t` (numpy array)."""
    duration = t[t.size - 1]
    curve = np.power(t / duration, rate)
    return start + (final - start) * curve
# Frequency Modulation (FM) Synthesis
def fm(amp_mod, amp_carr, f_mod, f_carr, t, phase_mod=0, phase_carr=0):
    """Carrier sine whose angular frequency is offset by a sine modulator.

    BUG FIX: the original body called an undefined helper ``sin(amp_mod,
    f_mod, t, phase_mod)`` and raised NameError when invoked. It is
    reconstructed here as a standard sine oscillator
    ``amp * sin(2*pi*f*t + phase)``; the surrounding structure
    ``(2*pi*f_carr + modulator) * t + phase_carr`` is preserved unchanged.
    NOTE(review): confirm the intended modulator shape against the helper
    that now lives in oscilators.py.
    """
    modulator = amp_mod * np.sin(2 * np.pi * f_mod * t + phase_mod)
    return amp_carr * np.sin((2 * np.pi * f_carr + modulator) * t + phase_carr)
def write(audio, sps, out):
    """Write `audio` (numpy sample array) to the WAV file `out` at `sps` Hz."""
    wavfile.write(out, sps, audio)
def play(audio, sps):
    """Play a mono float32 numpy buffer through the default output device.

    Args:
        audio: 1-D numpy array of float32 samples.
        sps: sample rate in samples per second.
    """
    p = pyaudio.PyAudio()
    stream = None
    try:
        stream = p.open(format=pyaudio.paFloat32,
                        channels=1,
                        rate=sps,
                        output=True)
        # tobytes() replaces the long-deprecated ndarray.tostring() alias
        stream.write(audio.tobytes())
        stream.stop_stream()
    finally:
        # always release PortAudio resources, even if playback fails mid-way
        if stream is not None:
            stream.close()
        p.terminate()
SPS = 44100 # 44.1 kHz or 44100 samples per second (48 kHz other alternative)
DURATION_S = 60
# Time axis in seconds: one entry per sample.
samples = np.arange(DURATION_S * SPS) / SPS
# Middle C (261.63 Hz) sine oscillator with a 15 Hz sine LFO applied.
# NOTE(review): oscilators.Sin signature assumed (amp, freq, t, lfo=...) --
# confirm against oscilators.py.
wave = oscilators.Sin(0.3,261.63,samples,lfo=oscilators.Sin(0.3,15,samples).generate())
# osc = lfo(0.3,0.3,261.63,samples,20)
# write(fm(0.3,0.3,amp,146.832,samples),SPS,'test.wav')
# write(triangle(0.3,261.63,samples),SPS,'test.wav')
write(wave.generate(),SPS,'test.wav')
# play(wave,SPS)
# plt.plot(samples,fm(0.3,0.3,5,100,samples))
# plt.show()
|
from PIL import Image
import os, glob
# Batch-resize every PNG frame in `from_dir` to image_size x image_size,
# saving the result under the same file name in `to_dir`.
image_size = 50
from_dir = "C:/Users/masho/Desktop/work/python/Python/lib/movie/20191114231101207027"  # path of the movie frames to edit
to_dir = "C:/Users/masho/Desktop/work/python/Python/lib/movie/aaaa/"  # destination path for the resized frames
for path in glob.glob(os.path.join(from_dir, '*.png')):
    img = Image.open(path)  # load the frame
    img = img.resize((image_size, image_size))  # resize
    # BUG FIX: the original called os.path.basename(img) on the Image object
    # (TypeError at runtime); the file name must come from the source path.
    basename = os.path.basename(path)
    img.save(os.path.join(to_dir, basename))
|
import numpy as np
from torch import optim
from .history import History
from .earlystopping import EarlyStopping
from ...utils.progress import Progress
class Trainer:
    """Drives a model's training loop over `loader`, recording one History
    per epoch on the model and honouring an optional early-stopping policy."""

    def __init__(self, model, loader, optimizer = None, keep_history = None,
                 early_stopping = EarlyStopping()):
        """
        Args:
            model (nn.Module)
            loader (torch.DataLoader)
            optimizer (torch.Optimier)
            early_stopping (EarlyStopping)
            keep_history (int): keep the last n-th epoch logs, `None` will keep all

        NOTE(review): `early_stopping = EarlyStopping()` is a mutable default
        evaluated once at class definition -- every Trainer constructed
        without an explicit early_stopping shares the same instance (and any
        state it caches). Confirm this is intended.
        """
        self.model = model
        self.loader = loader
        if optimizer is None:
            # default optimizer when the caller supplies none
            optimizer = optim.Adam(model.parameters(), lr = 1e-3)
        self.optimizer = optimizer
        self.early_stop = early_stopping
        from collections import deque
        # bounded history: with maxlen=keep_history the deque silently drops
        # the oldest epoch logs; maxlen=None keeps everything
        self.history = deque(maxlen = keep_history)

    def train(self, epoch = 10, callback = None):
        """
        Args:
            epoch (int): number of epoch for training loop
            callback (callable): callable function will be called every epoch
        """
        # init progress bar
        p = Progress(self.loader)
        for ep in range(epoch):
            p.prefix = f"Epoch:{ep}"
            # setup a new history for model in each epoch
            history = History()
            self.history.append(history)
            self.model._history = history
            loss = 0.
            for i, batch in enumerate(p, start = 1):
                # step fit
                l = self.model.fit_step(batch)
                # log loss
                self.model.log('loss', l)
                self.optimizer.zero_grad()
                l.backward()
                self.optimizer.step()
                # incremental running mean of the batch losses
                loss += (l.item() - loss) / i
                p.suffix = 'loss:{:.4f}'.format(loss)
            # a falsy early_stopping (e.g. None) disables the check entirely
            if self.early_stop and self.early_stop(self.model, history, epoch = ep):
                # set best state to model
                best_state = self.early_stop.get_best_state()
                self.model.load_state_dict(best_state)
                break
            if callable(callback):
                callback(ep, history)
        return self.model
|
# Iterable:ๅฏ่ฟญไปฃๅฏน่ฑก ่ฝๅค้่ฟforๅพช็ฏๆฅ้ๅ้้ข็ๅ
็ด ็ๅฏน่ฑก
# ๅฏไปฅ่ขซnext()ๅฝๆฐ่ฐ็จๅนถไธๆญ่ฟๅไธไธไธชๅผ็ๅฏน่ฑก็งฐไธบ่ฟญไปฃๅจ
# ไฝฟ็จisinstance()ๆนๆณๅคๆญไธไธชๅฏน่ฑกๆฏๅฆๆฏ่ฟญไปฃๅจ
from collections.abc import Iterable
from collections.abc import Iterator
a = {}
b = (1,)
c = []
def tesdt1(args):
    """Print (in Chinese) whether `args` is an iterable object."""
    verdict = '是可迭代对象' if isinstance(args, Iterable) else '不是可迭代对象'
    print(verdict)
# tesdt1(1)
def tesdt2(args):
    """Print (in Chinese) whether `args` is an iterator.

    BUG FIX: the original printed the same "可迭代对象" (iterable) wording as
    tesdt1 even though it tests Iterator; the message now correctly says
    迭代器 (iterator).
    """
    if isinstance(args, Iterator):
        print('是迭代器')
    else:
        print('不是迭代器')
# tesdt2((x for x in range(32)))
# ไฝฟ็จiter()ๅฐlist,dict,strๅไธบ่ฟญไปฃๅจ
tesdt2(iter(a)) |
import pytest
from Zimperium import Client, events_search, users_search, user_get_by_id, devices_search, device_get_by_id, \
devices_get_last_updated, app_classification_get, file_reputation, fetch_incidents, report_get
from test_data.response_constants import RESPONSE_SEARCH_EVENTS, RESPONSE_SEARCH_USERS, RESPONSE_USER_GET_BY_ID, \
RESPONSE_SEARCH_DEVICES, RESPONSE_DEVICE_GET_BY_ID, RESPONSE_APP_CLASSIFICATION_GET, \
RESPONSE_MULTIPLE_APP_CLASSIFICATION_GET, RESPONSE_GET_LAST_UPDATED_DEVICES, RESPONSE_REPORT_GET_ITUNES_ID, \
RESPONSE_MULTIPLE_EVENTS_FETCH
from test_data.result_constants import EXPECTED_SEARCH_EVENTS, EXPECTED_SEARCH_USERS, EXPECTED_USER_GET_BY_ID, \
EXPECTED_SEARCH_DEVICES, EXPECTED_DEVICE_GET_BY_ID, EXPECTED_GET_LAST_UPDATED_DEVICES, \
EXPECTED_APP_CLASSIFICATION_GET, EXPECTED_MULTIPLE_APP_CLASSIFICATION_GET, EXPECTED_REPORT_GET_ITUNESID
# Each row: (command function, demisto args, mocked raw API response,
# expected entry context). Responses/expectations live in test_data/.
@pytest.mark.parametrize('command, args, http_response, context', [
    (events_search, {'query': 'eventId==*', 'size': '10', 'page': '0', 'verbose': 'true'}, RESPONSE_SEARCH_EVENTS,
     EXPECTED_SEARCH_EVENTS),
    (users_search, {'query': 'objectId==*', 'size': '10', 'page': '0'}, RESPONSE_SEARCH_USERS, EXPECTED_SEARCH_USERS),
    (user_get_by_id, {'object_id': '1B9182C7-8C12-4499-ADF0-A338DEFDFC33'}, RESPONSE_USER_GET_BY_ID,
     EXPECTED_USER_GET_BY_ID),
    (devices_search, {'query': 'deviceId==*', 'size': '10', 'page': '0'}, RESPONSE_SEARCH_DEVICES,
     EXPECTED_SEARCH_DEVICES),
    (device_get_by_id, {'zdid': "87a587de-283f-48c9-9ff2-047c8b025b6d"}, RESPONSE_DEVICE_GET_BY_ID,
     EXPECTED_DEVICE_GET_BY_ID),
    (devices_get_last_updated, {'from_last_update': "5 days"}, RESPONSE_GET_LAST_UPDATED_DEVICES,
     EXPECTED_GET_LAST_UPDATED_DEVICES),
    (app_classification_get, {'app_hash': "aad9b2fd4606467f06931d72048ee1dff137cbc9b601860a88ad6a2c092"},
     RESPONSE_APP_CLASSIFICATION_GET, EXPECTED_APP_CLASSIFICATION_GET),
    (app_classification_get, {'app_name': "Duo"},
     RESPONSE_MULTIPLE_APP_CLASSIFICATION_GET, EXPECTED_MULTIPLE_APP_CLASSIFICATION_GET),
    (report_get, {'itunes_id': '331177714'}, RESPONSE_REPORT_GET_ITUNES_ID, EXPECTED_REPORT_GET_ITUNESID),
])
def test_zimperium_commands(command, args, http_response, context, mocker):
    """Unit test
    Given
    - demisto args
    - raw response of the http request
    When
    - mock the http request result
    Then
    - convert the result to human readable table
    - create the context
    - validate the expected_result and the created context
    """
    client = Client(base_url="https://domain.zimperium.com/", api_key="api_key", verify=False)
    # patch at the class level so every request the command makes is mocked
    mocker.patch.object(Client, '_http_request', return_value=http_response)
    command_results = command(client, args)
    assert command_results.outputs == context
def test_file_reputation(mocker):
    """Unit test
    Given
    - file reputation command
    - command args
    - command raw response
    When
    - mock the Client's http_request.
    Then
    - run the file reputation command using the Client
    Validate The contents of the outputs and indicator of the results
    """
    client = Client(base_url="https://domain.zimperium.com/", api_key="api_key", verify=False)
    mocker.patch.object(Client, '_http_request', return_value=RESPONSE_APP_CLASSIFICATION_GET)
    command_results_list = file_reputation(client,
                                           args={'file': "aad9b2fd4606467f06931d72048ee1dff137cbc9b601860a88ad6a2c092"})
    # dbot score 1 == Good, derived from the mocked classification response
    assert command_results_list[0].indicator.dbot_score.score == 1
def test_file_reputation_404(mocker):
    """Unit test
    Given
    - file reputation command
    - command args
    - command raw response
    When
    - Sending HTTP request and getting 404 status code (not found)
    Then
    - run the file reputation command using the Client
    - Ensure we set the file reputation as unknown
    """
    client = Client(base_url="https://domain.zimperium.com/", api_key="api_key", verify=False)
    def error_404_mock(message, error):
        # simulates the API raising on a hash that is not found
        raise Exception('Error in API call [404]')
    mocker.patch('Zimperium.Client.app_classification_get_request', side_effect=error_404_mock)
    command_results_list = file_reputation(client,
                                           args={'file': "aad9b2fd4606467f06931d72048ee1dff137cbc9b601860a88ad6a2c092"})
    # dbot score 0 == Unknown: the 404 must be swallowed, not propagated
    assert command_results_list[0].indicator.dbot_score.score == 0
def test_fetch_incidents(mocker):
    """Unit test
    Given
    - fetch incidents command
    - command args
    - command raw response
    When
    - mock the Client's http_request.
    Then
    - run the fetch incidents command using the Client
    Validate The length of the results and the incident name.
    """
    client = Client(base_url="https://domain.zimperium.com/", api_key="api_key", verify=False)
    mocker.patch.object(Client, '_http_request', return_value=RESPONSE_MULTIPLE_EVENTS_FETCH)
    # empty last_run -> first fetch; all 14 fixture events become incidents
    _, incidents = fetch_incidents(client, last_run={}, fetch_query='', first_fetch_time='3 days', max_fetch='50')
    assert len(incidents) == 14
    assert incidents[0].get('name') == "Detected network scan after connecting to Free Wi-Fi. No active attacks were" \
                                       " detected and this network will continue to be monitored. It is safe to" \
                                       " continue to use this network."
def test_fetch_incidents_last_event_ids(mocker):
    """Unit test
    Given
    - fetch incidents command
    - command args
    - command raw response
    When
    - mock the last_event_ids and time.
    - mock the Client's http_request.
    Then
    - run the fetch incidents command using the Client
    Validate that no incidents will be returned.
    """
    client = Client(base_url="https://domain.zimperium.com/", api_key="api_key", verify=False)
    mocker.patch.object(Client, '_http_request', return_value=RESPONSE_MULTIPLE_EVENTS_FETCH)
    # last_run already contains every event id in the mocked response, so
    # deduplication must filter all of them out.
    last_run = {
        'time': "whatever",
        'last_event_ids': [
            '421931cc-13bf-422a-890b-9958011e4926',
            '239be3f7-ead8-4157-b24c-35590811ca19',
            '102065eb-7ffa-4a70-b35f-bc8ca655f9ee',
            '431638cf-21fc-4fba-86b2-0e2a4850705b',
            'bef068eb-5482-469c-990a-5ea363e029a0',
            'c37d7379-589e-4976-8cf2-6f2876ba7e6a',
            '4f1a77cf-fb76-4753-b09b-422fa8a9e102',
            '4a688920-372d-45b6-934d-284d5ecacb29',
            '22b960e7-554a-413a-bcbf-2da75bbb2731',
            '5f9609a6-974c-4c0d-b007-7934ddf76cff',
            '461d1b55-53f2-4b89-b337-c24367b525ef',
            '55a43106-9c1c-47e2-9f9f-ce212304f4c0',
            '7dc89a3d-6fd0-4090-ac4c-f19e33402576',
            'e696ad05-32d5-43e8-95c3-5060b0ee468e',
        ]
    }
    _, incidents = fetch_incidents(client, last_run=last_run, fetch_query='', first_fetch_time='3 days', max_fetch='50')
    assert len(incidents) == 0
|
# Column names for a per-object feature table: centroid coordinates (z, y, x),
# intensity statistics, bounding-box volume/extent features (raw and log10),
# "ori" (original-volume) equivalents, and segmentation-derived shape metrics.
# NOTE(review): presumably produced by a 3-D segmentation pipeline -- confirm
# against the code that builds rows in this order.
feature_names = [
    "z",
    "y",
    "x",
    "Sum",
    "Mean",
    "Std",
    "Var",
    "bb_vol",
    "bb_vol_log10",
    "bb_vol_depth",
    "bb_vol_height",
    "bb_vol_width",
    "ori_vol",
    "ori_vol_log10",
    "ori_vol_depth",
    "ori_vol_height",
    "ori_vol_width",
    "seg_surface_area",
    "seg_volume",
    "seg_sphericity",
]
|
from datetime import date
def solution(mon: int, day: int, year: int = 2016) -> str:
    """Return the uppercase three-letter weekday name for a date.

    Args:
        mon: month (1-12).
        day: day of the month.
        year: calendar year; defaults to 2016 to preserve the original
            behaviour (the hard-coded year is now a parameter).

    Note: %a is locale-dependent; under the default C/English locale this
    yields MON..SUN.
    """
    return date(year, mon, day).strftime("%a").upper()
|
# Generated by Django 2.2 on 2019-05-13 20:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.2): adds a `free` boolean flag, default False,
    to the `lecture` model. Do not hand-edit the operations."""

    dependencies = [
        ('courses', '0017_auto_20190429_2238'),
    ]
    operations = [
        migrations.AddField(
            model_name='lecture',
            name='free',
            field=models.BooleanField(default=False),
        ),
    ]
|
class NotIterable:
    # Deliberately empty: defines neither __iter__ nor __getitem__, so
    # instances are not iterable.
    pass
no = NotIterable()
def iterate():
    # Tries to iterate the module-level instance; raises TypeError while
    # the class provides no iteration protocol.
    for i in no:
        print(i)
def assert_not_iterable():
    # Confirms iterate() fails with exactly the "not iterable" TypeError.
    try:
        iterate()
    except TypeError as e:
        assert e.args == ("'NotIterable' object is not iterable",)
    else:
        assert False, 'Should not be iterable'
assert_not_iterable()
# Demonstration: special methods are looked up on the TYPE, not the
# instance. Assigning __iter__ on the instance does NOT make it iterable...
no.__iter__ = lambda: iter(range(3))
assert_not_iterable()
# ...but assigning it on the class does, so iterate() now prints 0, 1, 2.
NotIterable.__iter__ = lambda self: iter(range(3))
iterate()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class StreamingJobsOperations(object):
    """StreamingJobsOperations operations.

    AutoRest-generated operation group -- edits are lost on regeneration.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~stream_analytics_management_client.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        # Generated wiring: keep references to the shared pipeline client,
        # the serializer/deserializer pair, and the client configuration.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def _create_or_replace_initial(
        self,
        resource_group_name,  # type: str
        job_name,  # type: str
        streaming_job,  # type: "models.StreamingJob"
        if_match=None,  # type: Optional[str]
        if_none_match=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.StreamingJob"
        """Initial (non-polling) PUT behind the create-or-replace LRO.

        AutoRest-generated -- edits are lost on regeneration. Builds the ARM
        request (URL, api-version query, optional If-Match / If-None-Match
        ETag preconditions), serializes ``streaming_job``, issues the PUT
        synchronously, and deserializes a StreamingJob from 200/201 responses
        (the response ETag is surfaced via ``cls`` response headers).
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.StreamingJob"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_replace_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'jobName': self._serialize.url("job_name", job_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers (ETag preconditions are only sent when provided)
        header_parameters = {}  # type: Dict[str, Any]
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if if_none_match is not None:
            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the job definition and run the request through the pipeline.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(streaming_job, 'StreamingJob')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # 200 (replaced) and 201 (created) both carry a StreamingJob + ETag.
        response_headers = {}
        if response.status_code == 200:
            response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
            deserialized = self._deserialize('StreamingJob', pipeline_response)

        if response.status_code == 201:
            response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
            deserialized = self._deserialize('StreamingJob', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)

        return deserialized
    _create_or_replace_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}'}  # type: ignore
def begin_create_or_replace(
    self,
    resource_group_name,  # type: str
    job_name,  # type: str
    streaming_job,  # type: "models.StreamingJob"
    if_match=None,  # type: Optional[str]
    if_none_match=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["models.StreamingJob"]
    """Creates a streaming job or replaces an already existing streaming job.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param job_name: The name of the streaming job.
    :type job_name: str
    :param streaming_job: The definition of the streaming job that will be used to create a new
     streaming job or replace the existing one.
    :type streaming_job: ~stream_analytics_management_client.models.StreamingJob
    :param if_match: The ETag of the streaming job. Omit this value to always overwrite the current
     record set. Specify the last-seen ETag value to prevent accidentally overwriting concurrent
     changes.
    :type if_match: str
    :param if_none_match: Set to '*' to allow a new streaming job to be created, but to prevent
     updating an existing record set. Other values will result in a 412 Pre-condition Failed
     response.
    :type if_none_match: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either StreamingJob or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~stream_analytics_management_client.models.StreamingJob]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["models.StreamingJob"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: issue the initial PUT.  The identity lambda
        # passed as ``cls`` preserves the raw pipeline response so the poller
        # can drive the long-running operation from it.
        raw_result = self._create_or_replace_initial(
            resource_group_name=resource_group_name,
            job_name=job_name,
            streaming_job=streaming_job,
            if_match=if_match,
            if_none_match=if_none_match,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs applied only to the initial request; remove them so they
    # are not forwarded to every subsequent polling request.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final StreamingJob (and its ETag header) once the
        # long-running operation has completed.
        response_headers = {}
        response = pipeline_response.http_response
        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
        deserialized = self._deserialize('StreamingJob', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'jobName': self._serialize.url("job_name", job_name, 'str'),
    }

    # ``polling`` may be the default ARM strategy, disabled, or a caller-supplied
    # PollingMethod instance.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_replace.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}'}  # type: ignore
def update(
    self,
    resource_group_name,  # type: str
    job_name,  # type: str
    streaming_job,  # type: "models.StreamingJob"
    if_match=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> "models.StreamingJob"
    """Updates an existing streaming job. This can be used to partially update (ie. update one or two
    properties) a streaming job without affecting the rest the job definition.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param job_name: The name of the streaming job.
    :type job_name: str
    :param streaming_job: A streaming job object. The properties specified here will overwrite the
     corresponding properties in the existing streaming job (ie. Those properties will be updated).
     Any properties that are set to null here will mean that the corresponding property in the
     existing input will remain the same and not change as a result of this PATCH operation.
    :type streaming_job: ~stream_analytics_management_client.models.StreamingJob
    :param if_match: The ETag of the streaming job. Omit this value to always overwrite the current
     record set. Specify the last-seen ETag value to prevent accidentally overwriting concurrent
     changes.
    :type if_match: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: StreamingJob, or the result of cls(response)
    :rtype: ~stream_analytics_management_client.models.StreamingJob
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.StreamingJob"]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Resolve the templated operation URL for this job.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'jobName': self._serialize.url("job_name", job_name, 'str'),
    }
    url = self._client.format_url(self.update.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]

    header_parameters = {}  # type: Dict[str, Any]
    if if_match is not None:
        # Optimistic-concurrency guard: only apply the PATCH if the ETag matches.
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content = self._serialize.body(streaming_job, 'StreamingJob')
    request = self._client.patch(url, query_parameters, header_parameters, content=body_content)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.Error, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # Surface the new ETag alongside the updated job definition.
    response_headers = {
        'ETag': self._deserialize('str', response.headers.get('ETag')),
    }
    deserialized = self._deserialize('StreamingJob', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, response_headers)
    return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}'}  # type: ignore
def _delete_initial(
    self,
    resource_group_name,  # type: str
    job_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    # Issues the initial DELETE request for the delete LRO; ``begin_delete``
    # wraps this with a poller.
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    # Resolve the templated operation URL for this job.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'jobName': self._serialize.url("job_name", job_name, 'str'),
    }
    url = self._client.format_url(self._delete_initial.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200/202 = accepted (possibly still in progress); 204 = already gone.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.Error, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}'}  # type: ignore
def begin_delete(
    self,
    resource_group_name,  # type: str
    job_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes a streaming job.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param job_name: The name of the streaming job.
    :type job_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: send the initial DELETE.  The identity lambda
        # keeps the raw pipeline response for the poller.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            job_name=job_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs applied only to the initial request; do not forward them
    # to the polling requests.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Delete returns no body; only invoke the custom deserializer if given.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'jobName': self._serialize.url("job_name", job_name, 'str'),
    }

    # ``polling`` may be the default ARM strategy, disabled, or a caller-supplied
    # PollingMethod instance.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}'}  # type: ignore
def get(
    self,
    resource_group_name,  # type: str
    job_name,  # type: str
    expand=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> "models.StreamingJob"
    """Gets details about the specified streaming job.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param job_name: The name of the streaming job.
    :type job_name: str
    :param expand: The $expand OData query parameter. This is a comma-separated list of additional
     streaming job properties to include in the response, beyond the default set returned when this
     parameter is absent. The default set is all streaming job properties other than 'inputs',
     'transformation', 'outputs', and 'functions'.
    :type expand: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: StreamingJob, or the result of cls(response)
    :rtype: ~stream_analytics_management_client.models.StreamingJob
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.StreamingJob"]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    # Resolve the templated operation URL for this job.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'jobName': self._serialize.url("job_name", job_name, 'str'),
    }
    url = self._client.format_url(self.get.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {}  # type: Dict[str, Any]
    if expand is not None:
        # Optional OData $expand to pull in inputs/outputs/etc.
        query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    header_parameters = {
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.Error, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # Return the job together with its current ETag header.
    response_headers = {
        'ETag': self._deserialize('str', response.headers.get('ETag')),
    }
    deserialized = self._deserialize('StreamingJob', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, response_headers)
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}'}  # type: ignore
def list_by_resource_group(
    self,
    resource_group_name,  # type: str
    expand=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["models.StreamingJobListResult"]
    """Lists all of the streaming jobs in the specified resource group.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param expand: The $expand OData query parameter. This is a comma-separated list of additional
     streaming job properties to include in the response, beyond the default set returned when this
     parameter is absent. The default set is all streaming job properties other than 'inputs',
     'transformation', 'outputs', and 'functions'.
    :type expand: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either StreamingJobListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~stream_analytics_management_client.models.StreamingJobListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.StreamingJobListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build either the first-page request (templated URL + query params)
        # or a follow-up request against the service-provided next_link.
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            url = self.list_by_resource_group.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            query_parameters = {}  # type: Dict[str, Any]
            if expand is not None:
                query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link already embeds all query parameters.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Turn one page response into (next_link, iterator-of-items).
        deserialized = self._deserialize('StreamingJobListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # Map well-known status codes (401/404/409) to their dedicated
            # exception types BEFORE deserializing the error body: a
            # malformed or absent body would otherwise raise a parsing error
            # that masks the mapped exception.  This also matches the error
            # handling order used by every other operation in this client.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs'}  # type: ignore
def list(
    self,
    expand=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["models.StreamingJobListResult"]
    """Lists all of the streaming jobs in the given subscription.

    :param expand: The $expand OData query parameter. This is a comma-separated list of additional
     streaming job properties to include in the response, beyond the default set returned when this
     parameter is absent. The default set is all streaming job properties other than 'inputs',
     'transformation', 'outputs', and 'functions'.
    :type expand: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either StreamingJobListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~stream_analytics_management_client.models.StreamingJobListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.StreamingJobListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build either the first-page request (templated URL + query params)
        # or a follow-up request against the service-provided next_link.
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            }
            url = self._client.format_url(url, **path_format_arguments)
            query_parameters = {}  # type: Dict[str, Any]
            if expand is not None:
                query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link already embeds all query parameters.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Turn one page response into (next_link, iterator-of-items).
        deserialized = self._deserialize('StreamingJobListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # Map well-known status codes (401/404/409) to their dedicated
            # exception types BEFORE deserializing the error body: a
            # malformed or absent body would otherwise raise a parsing error
            # that masks the mapped exception.  This also matches the error
            # handling order used by every other operation in this client.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StreamAnalytics/streamingjobs'}  # type: ignore
def _start_initial(
    self,
    resource_group_name,  # type: str
    job_name,  # type: str
    start_job_parameters=None,  # type: Optional["models.StartStreamingJobParameters"]
    **kwargs  # type: Any
):
    # type: (...) -> None
    # Issues the initial POST for the start LRO; ``begin_start`` wraps this
    # with a poller.
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Resolve the templated operation URL for this job.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'jobName': self._serialize.url("job_name", job_name, 'str'),
    }
    url = self._client.format_url(self._start_initial.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # The start parameters are optional; when omitted the POST carries no body.
    if start_job_parameters is not None:
        body_content = self._serialize.body(start_job_parameters, 'StartStreamingJobParameters')
    else:
        body_content = None

    request = self._client.post(url, query_parameters, header_parameters, content=body_content)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.Error, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/start'}  # type: ignore
def begin_start(
    self,
    resource_group_name,  # type: str
    job_name,  # type: str
    start_job_parameters=None,  # type: Optional["models.StartStreamingJobParameters"]
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Starts a streaming job. Once a job is started it will start processing input events and produce
    output.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param job_name: The name of the streaming job.
    :type job_name: str
    :param start_job_parameters: Parameters applicable to a start streaming job operation.
    :type start_job_parameters: ~stream_analytics_management_client.models.StartStreamingJobParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: send the initial start POST.  The identity
        # lambda keeps the raw pipeline response for the poller.
        raw_result = self._start_initial(
            resource_group_name=resource_group_name,
            job_name=job_name,
            start_job_parameters=start_job_parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs applied only to the initial request; do not forward them
    # to the polling requests.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Start returns no body; only invoke the custom deserializer if given.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'jobName': self._serialize.url("job_name", job_name, 'str'),
    }

    # ``polling`` may be the default ARM strategy, disabled, or a caller-supplied
    # PollingMethod instance.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/start'}  # type: ignore
def _stop_initial(
    self,
    resource_group_name,  # type: str
    job_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    # Issues the initial POST for the stop LRO; ``begin_stop`` wraps this
    # with a poller.
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    # Resolve the templated operation URL for this job.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'jobName': self._serialize.url("job_name", job_name, 'str'),
    }
    url = self._client.format_url(self._stop_initial.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.Error, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/stop'}  # type: ignore
def begin_stop(
    self,
    resource_group_name,  # type: str
    job_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Stops a running streaming job. This will cause a running streaming job to stop processing input
    events and producing output.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param job_name: The name of the streaming job.
    :type job_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: send the initial stop POST.  The identity
        # lambda keeps the raw pipeline response for the poller.
        raw_result = self._stop_initial(
            resource_group_name=resource_group_name,
            job_name=job_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs applied only to the initial request; do not forward them
    # to the polling requests.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Stop returns no body; only invoke the custom deserializer if given.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'jobName': self._serialize.url("job_name", job_name, 'str'),
    }

    # ``polling`` may be the default ARM strategy, disabled, or a caller-supplied
    # PollingMethod instance.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/stop'}  # type: ignore
def _scale_initial(
    self,
    resource_group_name,  # type: str
    job_name,  # type: str
    scale_job_parameters=None,  # type: Optional["models.ScaleStreamingJobParameters"]
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Raw initial call behind begin_scale: POST the scale request.

    The service replies 202 Accepted; the surrounding LRO poller drives
    the operation to completion. Any other status is raised as an error.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._scale_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'jobName': self._serialize.url("job_name", job_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # The request body is optional for this operation.
    body_content_kwargs = {}  # type: Dict[str, Any]
    if scale_job_parameters is not None:
        body_content = self._serialize.body(scale_job_parameters, 'ScaleStreamingJobParameters')
    else:
        body_content = None
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.Error, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_scale_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/scale'}  # type: ignore
def begin_scale(
    self,
    resource_group_name,  # type: str
    job_name,  # type: str
    scale_job_parameters=None,  # type: Optional["models.ScaleStreamingJobParameters"]
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Scales a streaming job when the job is running.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param job_name: The name of the streaming job.
    :type job_name: str
    :param scale_job_parameters: Parameters applicable to a scale streaming job operation.
    :type scale_job_parameters: ~stream_analytics_management_client.models.ScaleStreamingJobParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial request when not resuming from a saved token.
    if cont_token is None:
        raw_result = self._scale_initial(
            resource_group_name=resource_group_name,
            job_name=job_name,
            scale_job_parameters=scale_job_parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial call; drop them before polling.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # The operation has no response body; only invoke the custom cls hook.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'jobName': self._serialize.url("job_name", job_name, 'str'),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_scale.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/scale'}  # type: ignore
|
__author__ = 'dustinlee'

# BUG FIX: the remote URL was assigned and then immediately overwritten by
# the local one; only the value actually used is kept.
url = 'http://127.0.0.1/dokuwiki/'

import tkinter
import wiki

# SECURITY(review): credentials are hard-coded in source; move them to
# environment variables or a config file before sharing this script.
wiki.connect(url, 'dustinlee', 'sisa0822')
|
from PIL import Image
from torchvision import transforms
import os
import torch
from torch.autograd import Variable
from torch import nn
from torchvision import models
from torch import optim
import matplotlib.pyplot as plt
'''1. Image loading'''
# Helper for loading an image file as a tensor.
def load_img(img_path):
    """Load an RGB image, resize it to 200x200 and return it as a
    (1, 3, 200, 200) float tensor."""
    image = Image.open(img_path).convert('RGB')
    resized = image.resize((200, 200))
    tensor = transforms.ToTensor()(resized)
    return tensor.unsqueeze(0)
#ๅฎไนๅพๅๅฑ็คบๅฝๆฐ
def show_img(img):
    """Display a (1, C, H, W) image tensor in the default viewer."""
    pil_image = transforms.ToPILImage()(img.squeeze(0))
    pil_image.show()
# Load the content and style images and move them to the GPU.
path='C:/Users/T/Downloads/code-of-learn-deep-learning-with-pytorch-master/chapter9_Computer-Vision/neural-transfer/picture'
content_img=load_img(os.path.join(path,'dancing.jpg'))
content_img=Variable(content_img).cuda()
style_img=load_img(os.path.join(path,'style2.jpg'))
style_img=Variable(style_img).cuda()
# The optimization starts from a copy of the content image.
input_img = content_img.clone()
'''2. Loss functions'''
# Content-loss module definition.
class Content_Loss(nn.Module):
    """Content-loss layer.

    Records the MSE between its (weighted) input and a fixed target
    feature map, while passing the input through unchanged so it can be
    inserted transparently into a Sequential model.
    """

    def __init__(self, target, weight):
        super(Content_Loss, self).__init__()
        self.weight = weight
        # detach() so the target is treated as a constant, not a graph node.
        self.target = target.detach() * self.weight
        self.criterion = nn.MSELoss()

    def forward(self, input):
        self.loss = self.criterion(input * self.weight, self.target)
        out = input.clone()
        return out

    def backward(self):
        # BUG FIX: the `retain_variables` keyword was removed from PyTorch;
        # the correct keyword is `retain_graph`, which is needed because
        # backward() runs once per loss layer over the same graph.
        self.loss.backward(retain_graph=True)
        return self.loss
# Gram-matrix module used by the style loss.
class Gram(nn.Module):
    """Compute the normalized Gram matrix of a (B, C, H, W) feature map."""

    def __init__(self):
        super(Gram, self).__init__()

    def forward(self, input):
        b, c, h, w = input.size()
        # Flatten each channel into a row vector: (B*C, H*W).
        features = input.view(b * c, h * w)
        # Channel-by-channel inner products, normalized by element count.
        return torch.mm(features, features.t()) / (b * c * h * w)
# Style-loss module definition.
class Style_Loss(nn.Module):
    """Style-loss layer.

    Records the MSE between the Gram matrix of its input and a fixed
    target Gram matrix; the input passes through unchanged.
    """

    def __init__(self, target, weight):
        super(Style_Loss, self).__init__()
        self.weight = weight
        self.target = target.detach() * self.weight
        self.gram = Gram()
        self.criterion = nn.MSELoss()

    def forward(self, input):
        G = self.gram(input) * self.weight
        self.loss = self.criterion(G, self.target)
        out = input.clone()
        return out

    def backward(self, retain_variabels=True):
        # BUG FIX: PyTorch's keyword is `retain_graph` (`retain_variables`
        # was removed). The (misspelled) parameter name is kept so existing
        # callers are unaffected.
        self.loss.backward(retain_graph=retain_variabels)
        return self.loss
'''3. Model definition'''
# Use only the convolutional part of a pre-trained VGG19 network.
vgg=models.vgg19(pretrained=True).features
vgg=vgg.cuda()
# Layers at which content and style differences are measured.
content_layers_default=['conv_4']
style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']
# Model assembly.
def get_style_model_and_loss(style_img,content_img,cnn=vgg,style_weight=1000,content_weight=1):
    """Copy VGG feature layers into a new Sequential, inserting content/style
    loss layers after the configured conv layers.

    Returns (model, style_loss_list, content_loss_list).
    """
    # Loss layers collected in network order.
    content_loss_list = []
    style_loss_list = []
    # Empty model that the layers are copied into.
    model=nn.Sequential().cuda()
    # Gram-matrix helper for building style targets.
    gram=Gram().cuda()
    # Conv-layer counter used to name layers.
    i=1
    for layer in cnn:
        if isinstance(layer,nn.Conv2d):
            name = 'conv_' + str(i)
            model.add_module(name, layer)
            if name in content_layers_default:
                # Target features come from running the partial model built so far.
                target = model(content_img)
                content_loss = Content_Loss(target, content_weight)
                model.add_module('content_loss_' + str(i), content_loss)
                content_loss_list.append(content_loss)
            if name in style_layers_default:
                target = model(style_img)
                target = gram(target)
                style_loss = Style_Loss(target, style_weight)
                model.add_module('style_loss_' + str(i), style_loss)
                style_loss_list.append(style_loss)
            i+=1
        if isinstance(layer, nn.MaxPool2d):
            name = 'pool_' + str(i)
            model.add_module(name, layer)
        if isinstance(layer, nn.ReLU):
            # NOTE(review): named 'relu<i>' (no underscore), unlike the other
            # layers -- harmless but inconsistent.
            name = 'relu' + str(i)
            model.add_module(name, layer)
    return model, style_loss_list, content_loss_list
'''4. Training'''
# Optimize the pixels of the input image directly.
def get_input_param_optimier(input_img):
    """Wrap the input image as a trainable Parameter and build an LBFGS
    optimizer over it (LBFGS is what the original paper recommends)."""
    input_param = nn.Parameter(input_img.data)
    optimizer = optim.LBFGS([input_param])
    return input_param, optimizer
# Training loop.
def run_style_transfer(content_img, style_img, input_img, num_epoches=300):
    """Run the style-transfer optimization; return the resulting image data."""
    model, style_loss_list, content_loss_list = get_style_model_and_loss(
        style_img, content_img)
    input_param, optimizer = get_input_param_optimier(input_img)
    # Mutable one-element list so the closure can update the counter.
    epoch = [0]
    while epoch[0] < num_epoches:
        def closure():
            # Keep pixel values in the valid [0, 1] range.
            input_param.data.clamp_(0, 1)
            model(input_param)
            style_score = 0
            content_score = 0
            optimizer.zero_grad()
            for sl in style_loss_list:
                style_score += sl.backward()
            for cl in content_loss_list:
                content_score += cl.backward()
            epoch[0] += 1
            if epoch[0] % 50 == 0:
                print('run {}'.format(epoch))
                # NOTE(review): `.data[0]` was removed in modern PyTorch;
                # use `.item()` instead -- confirm the targeted torch version.
                print('Style Loss: {:.4f} Content Loss: {:.4f}'.format(
                    style_score.data[0], content_score.data[0])
                )
            return style_score + content_score
        optimizer.step(closure)
    input_param.data.clamp_(0, 1)
    return input_param.data
# Run the transfer, show and save the result.
out = run_style_transfer(content_img, style_img, input_img, num_epoches=200)
show_img(out.cpu())
save_pic = transforms.ToPILImage()(out.cpu().squeeze(0))
save_pic.save(os.path.join(path,'output.jpg'))
# Display content, style and output side by side.
fig=plt.figure()
fig.add_subplot(1,3,1)
plt.imshow(plt.imread(os.path.join(path,'dancing.jpg')))
plt.axis('off')
fig.add_subplot(1,3,2)
plt.imshow(plt.imread(os.path.join(path,'style2.jpg')))
plt.axis('off')
fig.add_subplot(1,3,3)
plt.imshow(plt.imread(os.path.join(path,'output.jpg')))
plt.axis('off')
|
# Generated by Django 3.1.5 on 2021-05-20 19:33
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename Hotel.created to Hotel.created_on."""

    dependencies = [
        ('hotel', '0003_hotel_created'),
    ]

    operations = [
        migrations.RenameField(
            model_name='hotel',
            old_name='created',
            new_name='created_on',
        ),
    ]
|
## Problem 6.
## Listing the primes in ascending order gives 2, 3, 5, 7, 11, 13, ...
## Find the 10,001st prime.
|
import secrets
class User():
    """A user record exactly as stored in the database."""

    __slots__ = ['name', 'password', 'crypt_key']

    def __init__(self, name, password, crypt_key):
        """Store all three fields on the instance."""
        self.name, self.password, self.crypt_key = name, password, crypt_key

    def db_data(self):
        """Return the fields as a tuple, in database column order."""
        return tuple(getattr(self, field) for field in self.__slots__)
def create_user(name, password):
    """ Return a User object from the parameters.

    A random encryption key for the user's account database
    is generated automatically.

    :param name: the user's name
    :param password: the user's password
    """
    # NOTE(review): `secrets` here must be a project-local module -- the
    # stdlib `secrets` has no `encrypt_field` / `random_fernet_key`.
    # Confirm the import resolves to the intended module.
    password = secrets.encrypt_field(password)
    crypt_key = secrets.encrypt_field(secrets.random_fernet_key())
    return User(name, password, crypt_key)
def unpack(us_tuple):
    """Build a User from the first three items of a database row tuple."""
    name, password, crypt_key = us_tuple[0], us_tuple[1], us_tuple[2]
    return User(name, password, crypt_key)
|
import os
import sqlite3
import pandas as pd
import psycopg2

# Create a database for local environment
# conn = sqlite3.connect('flow-ez.db')
# conn = sqlite3.connect('flow-ez.db', check_same_thread=False) ## Important
# cursor = conn.cursor()
# conn.row_factory = sqlite3.Row

# Create Heroku remote DB connection.
# SECURITY(review): database credentials are hard-coded in source; rotate
# them and load from environment variables instead.
connection = psycopg2.connect(user = "csefwzficaoouh",
                              password = "4bef0ab168c67e5aeebb8152e3de4995e5cb733268609c5b13d42348a51dd8f3",
                              host = "ec2-174-129-254-217.compute-1.amazonaws.com",
                              port = "5432",
                              database = "d30b3p3ckp94hl")

# The connection actually used by the helpers below comes from DATABASE_URL.
DATABASE_URL = os.environ['DATABASE_URL']
print('DB URL: ', DATABASE_URL)
conn = psycopg2.connect(DATABASE_URL)
# Make a convenience function for running SQL queries
def sql_query(query):
    """Run a SELECT on the shared connection; return rows as dicts keyed
    by the measurement columns."""
    cur = conn.cursor()
    cur.execute(query)
    rows = [dict(first_name=row[0], last_name=row[1], mea_date=row[2],
                 disp_date=row[3], time_1=row[4], dev_id=row[5],
                 qr_code=row[6], loc=row[7], res=row[8], prob=row[9])
            for row in cur.fetchall()]
    # BUG FIX: the original had `conn.commit()` after `return`, which never
    # executed; a read-only query needs no commit anyway.
    return rows
def sql_edit_insert(query, var):
    """Execute a parameterized INSERT/UPDATE and commit it."""
    cursor = conn.cursor()
    cursor.execute(query, var)
    conn.commit()
def sql_delete(query, var):
    """Execute a parameterized DELETE and commit it."""
    cursor = conn.cursor()
    cursor.execute(query, var)
    conn.commit()
def sql_query2(query, var):
    """Run a parameterized SELECT and return the raw row tuples."""
    cur = conn.cursor()
    cur.execute(query, var)
    rows = cur.fetchall()
    # BUG FIX: the original `conn.commit()` after `return` was unreachable;
    # reads do not need a commit.
    return rows
|
# Read two values and swap them.
# BUG FIX: the original `print("..."format(x))` calls were missing the dot
# before `.format` (a SyntaxError), the `x=5` / `y=10` assignments were dead
# (immediately overwritten by input()), and the second message wrongly said
# "before swapping". The temp-variable dance is replaced by tuple unpacking.
x = input("Enter value of x:")
y = input("Enter value of y:")

x, y = y, x
print("The value of x after swapping:{}".format(x))
print("The value of y after swapping:{}".format(y))
''' OFFLINE TIMER for future use'''
import atexit
import datetime
import os
import pickle
import time


def save():  # persist the deadline timestamp on interpreter exit
    with open('timersave.pkl', 'wb') as f:
        pickle.dump(stop, f)


atexit.register(save)
# print(stop) # test

if os.stat("timersave.pkl").st_size != 0:  # Load timersave if it exists
    with open('timersave.pkl', 'rb') as f:
        stop = pickle.load(f)
    check1 = str(stop)
    check2 = str(datetime.datetime.now())
    # NOTE(review): comparing datetimes via their string form works only
    # because the default str() format sorts lexicographically.
    if check1 < check2:
        print("TIME PASSED")
        # New deadline: now + 10 seconds (the 20*0 term is a disabled knob).
        stop = datetime.datetime.now() + datetime.timedelta(0,
                                                            20 * 0 + 10
                                                            * 1 + 0)
        # Formatting of the remaining time for printing.
        delta = datetime.timedelta()
        x = datetime.timedelta(delta.days,
                               delta.seconds)
    else:
        print("Your task is not completed yet")
else:
    # First run: ask for the working time and set the deadline.
    hours = int(input("How many hours would you like to spend at work? (1-8)"))
    stop = datetime.datetime.now() + datetime.timedelta(0,
                                                        60 * 60 * hours)
    # Formatting of the remaining time for printing.
    delta = datetime.timedelta()
    x = datetime.timedelta(delta.days,
                           delta.seconds)
# print(stop) # retest
def delting():
    """Recompute the remaining time; if any is left, print it in place."""
    global delta, x
    delta = stop - datetime.datetime.now()
    x = datetime.timedelta(delta.days, delta.seconds)
    if x > datetime.timedelta():
        print("\r{}".format(x), end="")
def delting_loop():
    """Tick up to five times, one second apart, until the deadline passes."""
    global x
    remaining_ticks = 5
    while x > datetime.timedelta() and remaining_ticks > 0:
        delting()
        time.sleep(1)
        remaining_ticks -= 1
    if x <= datetime.timedelta():
        print("\rTIME PASSED", end='')
# One immediate update, then the bounded countdown loop.
delting()
delting_loop()
'''print(stop)
print(delta)
print(x)'''
# save()
|
# Generated by Django 2.0.5 on 2018-09-12 18:31
import uuid
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds BarrierCompany/DatahubCompany models and
    created_*/modified_* audit fields across the barrier models."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("barriers", "0009_auto_20180911_2033"),
    ]

    operations = [
        migrations.CreateModel(
            name="BarrierCompany",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "created_on",
                    models.DateTimeField(auto_now_add=True, db_index=True, null=True),
                ),
                ("modified_on", models.DateTimeField(auto_now=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name="DatahubCompany",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4, primary_key=True, serialize=False
                    ),
                ),
                (
                    "name",
                    models.CharField(
                        blank=True, help_text="Trading name", max_length=255, null=True
                    ),
                ),
            ],
        ),
        migrations.AddField(
            model_name="barriercontributor",
            name="modified_by",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="+",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AddField(
            model_name="barriercontributor",
            name="modified_on",
            field=models.DateTimeField(auto_now=True, null=True),
        ),
        migrations.AddField(
            model_name="barrierinstance",
            name="modified_by",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="+",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AddField(
            model_name="barrierinstance",
            name="modified_on",
            field=models.DateTimeField(auto_now=True, null=True),
        ),
        migrations.AddField(
            model_name="barrierinteraction",
            name="modified_by",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="+",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AddField(
            model_name="barrierinteraction",
            name="modified_on",
            field=models.DateTimeField(auto_now=True, null=True),
        ),
        migrations.AddField(
            model_name="barrierreportstage",
            name="modified_by",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="+",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AddField(
            model_name="barrierreportstage",
            name="modified_on",
            field=models.DateTimeField(auto_now=True, null=True),
        ),
        migrations.AlterField(
            model_name="barriercontributor",
            name="created_by",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="+",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AlterField(
            model_name="barriercontributor",
            name="created_on",
            field=models.DateTimeField(auto_now_add=True, db_index=True, null=True),
        ),
        migrations.AlterField(
            model_name="barrierinstance",
            name="created_by",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="+",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AlterField(
            model_name="barrierinstance",
            name="created_on",
            field=models.DateTimeField(auto_now_add=True, db_index=True, null=True),
        ),
        migrations.AlterField(
            model_name="barrierinteraction",
            name="created_by",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="+",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AlterField(
            model_name="barrierinteraction",
            name="created_on",
            field=models.DateTimeField(auto_now_add=True, db_index=True, null=True),
        ),
        migrations.AlterField(
            model_name="barrierreportstage",
            name="created_by",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="+",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AlterField(
            model_name="barrierreportstage",
            name="created_on",
            field=models.DateTimeField(auto_now_add=True, db_index=True, null=True),
        ),
        migrations.AddField(
            model_name="barriercompany",
            name="barrier",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.PROTECT,
                related_name="companies_affected",
                to="barriers.BarrierInstance",
            ),
        ),
        migrations.AddField(
            model_name="barriercompany",
            name="company",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                related_name="companies_affected",
                to="barriers.DatahubCompany",
            ),
        ),
        migrations.AddField(
            model_name="barriercompany",
            name="created_by",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="+",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AddField(
            model_name="barriercompany",
            name="modified_by",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="+",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AddField(
            model_name="barrierinstance",
            name="companies",
            field=models.ManyToManyField(
                help_text="companies affected by barrier",
                related_name="companies",
                through="barriers.BarrierCompany",
                to="barriers.DatahubCompany",
            ),
        ),
        migrations.AlterUniqueTogether(
            name="barriercompany", unique_together={("barrier", "company")}
        ),
    ]
|
from django.http import response
from django.test import TestCase, client
from .models import Tweet
from django.contrib.auth.models import User
from rest_framework.test import APIClient
class TweetTestCase(TestCase):
    """API tests covering tweet list/detail/create/like/unlike/retweet/delete."""

    def setUp(self):
        self.user = User.objects.create_user(username="abc", password="password")
        self.user2 = User.objects.create_user(username="cad", password="password")
        Tweet.objects.create(content="my tweet", user=self.user)
        Tweet.objects.create(content="my second tweet", user=self.user2)

    def test_user_created(self):
        tweet = Tweet.objects.create(content="my third tweet", user=self.user)
        self.assertEqual(tweet.id, 3)
        self.assertEqual(tweet.user, self.user)

    def get_client(self):
        """Return an APIClient logged in as self.user."""
        client = APIClient()
        # BUG FIX: login() expects the username string, not the User object.
        client.login(username=self.user.username, password='password')
        return client

    def test_tweet_list(self):
        # BUG FIX: this method was defined twice; the duplicate shadowed the
        # first definition so one copy never ran. A single copy is kept.
        client = self.get_client()
        response = client.get('/api/tweets/')
        self.assertEqual(response.status_code, 200)

    def test_tweet_create(self):
        data = {"content": "This is my new tweet"}
        client = self.get_client()
        response = client.post('/api/tweets/create/', data)
        self.assertEqual(response.status_code, 201)

    def test_detail_view(self):
        client = self.get_client()
        response = client.get('/api/tweets/1/')
        self.assertEqual(response.status_code, 200)
        id = response.json().get("id")
        self.assertEqual(id, 1)

    def test_tweet_action_like(self):
        client = self.get_client()
        response = client.post('/api/tweets/action/', {"id": 1, "action": "like"})
        self.assertEqual(response.status_code, 200)
        like_count = response.json().get("likers")
        self.assertEqual(like_count, 1)

    def test_tweet_action_unlike(self):
        client = self.get_client()
        response = client.post('/api/tweets/action/', {"id": 1, "action": "like"})
        self.assertEqual(response.status_code, 200)
        like_count = response.json().get("likers")
        self.assertEqual(like_count, 1)
        response = client.post('/api/tweets/action/', {"id": 1, "action": "unlike"})
        self.assertEqual(response.status_code, 200)
        like_count = response.json().get("likers")
        self.assertEqual(like_count, 0)

    def test_tweet_action_retweet(self):
        client = self.get_client()
        response = client.post('/api/tweets/action/', {"id": 2, "action": "retweet"})
        self.assertEqual(response.status_code, 201)
        data = response.json()
        new_tweet_id = data.get("id")
        # A retweet creates a brand-new tweet, so its id must differ.
        self.assertNotEqual(2, new_tweet_id)

    def test_tweet_delete_api_view(self):
        client = self.get_client()
        response = client.delete("/api/tweets/1/delete/")
        self.assertEqual(response.status_code, 200)
        # Deleting again: the tweet is gone.
        response = client.delete("/api/tweets/1/delete/")
        self.assertEqual(response.status_code, 404)
        # Tweet 2 belongs to user2, so deletion by self.user is unauthorized.
        response_incorrect_owner = client.delete("/api/tweets/2/delete/")
        self.assertEqual(response_incorrect_owner.status_code, 401)
|
class Message(object):
    """Container tying a raw payload to the connection and stream it
    arrived on."""

    def __init__(self, data, conn, stream):
        self.data, self.conn, self.stream = data, conn, stream
|
from flask import Flask
from flask import jsonify
import json
import sqlite3
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Health-check / landing route."""
    return 'Hello World!'
@app.route('/api/v1/info')
def home_index():
    """Return all API release records from the local SQLite database.

    BUG FIX: the original passed the bare token ``jdbc:sqlite:identifier.sqlite``
    to sqlite3.connect (a SyntaxError -- and a JDBC URL besides) and used the
    undefined name ``api_version`` as a dict key; both must be string literals.
    """
    conn = sqlite3.connect('identifier.sqlite')
    print("Open DB successfully!")
    api_list = []
    cursor = conn.execute("Select buildtime,version,methods,links from apirelease")
    for row in cursor:
        a_dict = {}
        a_dict['version'] = row[1]
        a_dict['buildtime'] = row[0]
        a_dict['methods'] = row[2]
        a_dict['links'] = row[3]
        api_list.append(a_dict)
    conn.close()
    return jsonify({'api_version': api_list}), 200
if __name__ == '__main__':
    # Listen on all interfaces; debug mode is for development only.
    app.run(host='0.0.0.0',port=5000,debug=True)
|
class Scene(object):
    """Abstract Scene: concrete scenes must implement render, update and
    handle_events; the manager reference enables scene switching."""

    def __init__(self, scene_manager):
        self.manager = scene_manager

    def render(self, screen):
        # Subclasses draw themselves onto `screen`.
        raise NotImplementedError

    def update(self):
        # Subclasses advance their per-frame state here.
        raise NotImplementedError

    def handle_events(self, e):
        # Subclasses react to a pygame-style event object `e`.
        raise NotImplementedError
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 7 01:09:06 2020
@author: dd394
"""
import pygame
pygame.init()
class BUTTON:
    """Fixed-size (310x65) push button drawn with a beveled border and a
    red Arial caption."""

    def __init__(self,position,text):
        self.width = 310
        self.height = 65
        self.left, self.top = position
        self.text = text

    def draw(self,screen):
        """Draw the button onto `screen`; caches its rect for hit-testing."""
        # Light top/left edges and dark bottom/right edges give a 3D bevel.
        pygame.draw.line(screen,(150, 150, 150), (self.left, self.top), (self.left+self.width, self.top), 5)
        pygame.draw.line(screen,(150, 150, 150), (self.left, self.top-2), (self.left, self.top+self.height), 5)
        pygame.draw.line(screen,(50, 50, 50), (self.left, self.top+self.height), (self.left+self.width, self.top+self.height), 5)
        pygame.draw.line(screen,(50, 50, 50), (self.left+self.width, self.top+self.height), [self.left+self.width, self.top], 5)
        # NOTE(review): the face is drawn AFTER the border lines, so it
        # covers most of them -- confirm the intended draw order.
        self.rect = pygame.draw.rect(screen,(100, 100, 100),(self.left, self.top, self.width, self.height))
        font=pygame.font.SysFont("Arial",45)
        cont=font.render(self.text,1,( 255, 0, 0))
        screen.blit(cont,(self.left+50,self.top+5))
"""
back = pygame.image.load(r'startinterface.png')
back_rect = back.get_rect()
screen= pygame.display.set_mode((1060,546))
screen.blit(back,back_rect)
button1 = BUTTON(screen,(350,200)," cool")
button2 = BUTTON(screen,(350,300),"mingrixiang")
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
pygame.display.flip()
""" |
"""Web application for XFormTest
http://xform-test.pma2020.org
http://xform-test-docs.pma2020.org
"""
import json
from glob import glob
import os
import sys
from flask import render_template, jsonify, request, Blueprint
from werkzeug.utils import secure_filename
# noinspection PyProtectedMember
from .static_methods import _return_failing_result, _run_process
from .config import HEROKU_ERR_EVERY_TIME, XFORM_TEST_EXECUTABLE, LOGGING_ON, \
TEMP_DIR, PKG_NAME, settings, template, path_char, basedir
routes = Blueprint(PKG_NAME, __name__)


@routes.route('/')
def index():
    """Render the landing page with the shared template settings."""
    return render_template(template, **settings)
# TODO: does having "xform_test" here in front work?
@routes.route('/xform_test/<string:filename>')
def xform_test(filename):
    """Runs XFormTest CLI.

    XLSForm uploads are first converted to XForm XML via xls2xform, then
    the xform-test jar is run over the XML; the rendered page carries the
    success/warning/error output.
    """
    try:
        if filename.endswith('.xls') or filename.endswith('.xlsx'):
            xml = filename.replace('.xlsx', '.xml').replace('.xls', '.xml')
            command = 'xls2xform ' + TEMP_DIR + path_char + filename + ' ' + \
                TEMP_DIR + path_char + xml
            stdout, stderr = _run_process(command)
            # Heroku emits one known harmless stderr line; ignore it.
            stderr = '' if stderr == HEROKU_ERR_EVERY_TIME else stderr
            # err when converting to xml
            if stderr:
                return _return_failing_result(stderr, stdout)
        else:
            xml = filename
        command = 'java -jar ' + XFORM_TEST_EXECUTABLE + ' ' \
            + TEMP_DIR + path_char + xml
        stdout, stderr = _run_process(command)
        stderr = '' if stderr == HEROKU_ERR_EVERY_TIME else stderr
        # NOTE(review): hard-coded 'temp/*' here while uploads go to
        # TEMP_DIR -- confirm the two always name the same directory.
        for file in glob('temp/*'):
            os.remove(file)
        # err when running xform-test
        if stderr:
            return _return_failing_result(stderr, stdout)
        # passing result
        result = json.loads(stdout)
        success = result['successMsg']
        warnings = result['warningsMsg']
        return render_template(template, success=success,
                               warnings=warnings,
                               error=stderr if LOGGING_ON else '',
                               **settings)
    # unexpected err
    except Exception as err:
        print(str(err), file=sys.stderr)
        return render_template(template, error=str(err),
                               **settings)
@routes.route('/upload', methods=['POST'])
def upload():
    """Save an uploaded form file into the temp dir; return a JSON status."""
    try:
        file = request.files['file']
        # secure_filename strips path components and unsafe characters.
        filename = secure_filename(file.filename)
        upload_folder = basedir + path_char + TEMP_DIR
        file_path = os.path.join(upload_folder, filename)
        # Re-uploads replace any previous copy of the same file.
        if os.path.exists(file_path):
            os.remove(file_path)
        try:
            file.save(file_path)
        except FileNotFoundError:
            # Temp dir does not exist yet: create it and retry once.
            os.mkdir(upload_folder)
            file.save(file_path)
        return jsonify({'success': True, 'filename': filename})
    except Exception as err:
        msg = 'An unexpected error occurred:\n\n' + str(err)
        return jsonify({'success': False, 'message': msg})
|
import mnistDataLoader
from neural_network import NeuralNetwork
from config import *
import torch

# Train the MNIST classifier and save the final weights.
net = NeuralNetwork()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Using device: " + str(device))
net.to(device)
criterion = net.get_criterion()
optimizer = net.get_optimizer()
train_data = mnistDataLoader.get_trainloader()

for epoch in range(epochs):
    running_loss = 0.0
    for index, data in enumerate(train_data):
        # Zero the gradients first.
        optimizer.zero_grad()
        inputs, labels = data[0].to(device), data[1].to(device)
        outputs = net(inputs)
        # BUG FIX: the loss needs the targets too -- criterion(outputs)
        # would raise at runtime.
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # BUG FIX: accumulate (+=) so the printed 100-step average is real,
        # and reset the accumulator after each report.
        running_loss += loss.item()
        if index % 100 == 99:
            # BUG FIX: the format string used '&d' instead of '%d'.
            print("epoch: %d, step: %d, loss: %.3f" % (epoch + 1, index + 1, running_loss / 100))
            running_loss = 0.0

net = net.to('cpu')
torch.save(net.state_dict(), "model/nn")
print("Finished train")
import unittest
# class definition for Operation
class Operation(object):
    """Binary arithmetic over a fixed pair of operands."""

    def __init__(self, n1, n2):
        self.n1 = n1
        self.n2 = n2

    def add(self):
        """Return n1 + n2."""
        return self.n1 + self.n2

    def sub(self):
        """Return n1 - n2."""
        return self.n1 - self.n2

    def mul(self):
        """Return n1 * n2."""
        return self.n1 * self.n2

    def div(self):
        """Return n1 / n2 (true division)."""
        return self.n1 / self.n2
if __name__ == "__main__":
    # BUG FIX: the original used Python 2 print statements, which are a
    # SyntaxError under Python 3 (the target of the unittest code below).
    op = Operation(100, 20)
    print("Sum: ", op.add())
    print("Difference: ", op.sub())
    print("Product: ", op.mul())
    print("Quotient: ", op.div())
# Test Case
class OperationTestCase(unittest.TestCase):
    """Checks Operation's four operations with negative operands."""

    def test_both_minus(self):
        op = Operation(-100, -20)
        self.assertEqual(op.add(), -120)
        self.assertEqual(op.sub(), -80)
        self.assertEqual(op.mul(), 2000)
        self.assertEqual(op.div(), 5)

    def test_first_minus(self):
        op = Operation(-100, 20)
        self.assertEqual(op.add(), -80)
        self.assertEqual(op.sub(), -120)
        self.assertEqual(op.mul(), -2000)
        self.assertEqual(op.div(), -5)
|
import numpy as np
# Compute the inverse of A modulo 26 via the adjugate, given det(A).
def modular_inverse(A, detA):
    """Return the mod-26 inverse of square matrix A.

    NOTE(review): despite the original (Italian) comment, coprimality of
    detA with 26 is NOT checked here -- confirm callers validate it.
    """
    m = len(A)
    inverse = np.zeros(shape=(m, m))
    # Multiplicative inverse of the determinant mod 26.
    detminus1 = mulinv(detA, 26)
    for i in range(m):
        for j in range(m):
            # Cofactor: minor with row j / column i removed (transposed indexing).
            newA = getsubmatrix(A, j, i)
            det1 = np.linalg.det(newA)
            inverse[i][j] = ((-1) ** (i + j) * detminus1 * det1) % 26
    return inverse
# Extended Euclid: used to find multiplicative inverses modulo 26.
def xgcd(b, a):
    """Iterative extended Euclidean algorithm.

    Returns (g, x, y) such that b*x + a*y = g = gcd(b, a).
    """
    x0, y0 = 1, 0
    x1, y1 = 0, 1
    while a != 0:
        q = b // a
        b, a = a, b % a
        x0, x1 = x1, x0 - q * x1
        y0, y1 = y1, y0 - q * y1
    return b, x0, y0
def mulinv(b, n):
    """Return the multiplicative inverse of b modulo n.

    BUG FIX: the original silently returned None when gcd(b, n) != 1,
    which surfaced later as an unrelated TypeError; an explicit
    ValueError is raised instead. Valid (coprime) inputs are unaffected.
    """
    g, x, _ = xgcd(b, n)
    if g == 1:
        return x % n
    raise ValueError("%d has no multiplicative inverse modulo %d" % (b, n))
# Minor of A: drop row `noti` and column `notj`.
def getsubmatrix(A, noti, notj):
    """Return A with row ``noti`` and column ``notj`` deleted."""
    without_row = np.delete(A, noti, 0)
    return np.delete(without_row, notj, 1)
# Matrix-matrix (or matrix-vector, detected by the missing second
# dimension of B) multiplication modulo 26.
def modmatmul(A, B):
    """Return (A @ B) mod 26 as a float ndarray.

    B may be a 2-D matrix or a 1-D vector; a 1-D B yields a 1-D result.
    Entries are rounded to the nearest integer before/after the modulo,
    matching the original's handling of float dot products.
    """
    rows = A.shape[0]
    try:
        col = B.shape[1]
        res = np.zeros(shape=(rows, col))
    except IndexError:
        # B is 1-D: produce a vector result.
        col = 1
        res = np.zeros(shape=(rows,))
    if col == 1:
        for i in range(rows):
            res[i] = int(round(np.dot(A[i, :], B))) % 26
    else:
        for i in range(rows):
            for j in range(col):
                # CLEANUP: the original assigned unused temporaries a, b here.
                res[i][j] = int(round(np.dot(A[i, :], B[:, j]) % 26))
    return res
|
#!/usr/bin/env python
"""Launch the ffmpeg front-end with either the terminal or the Qt GUI."""
import sys
from optparse import OptionParser

p = OptionParser()
p.add_option("-g", "--gui", dest="gui", default="Term",
             help="Which gui to use, Term or QT")
p.add_option("-c", "--config", dest="configfile",
             help="Use this config file instead of the system ones.")
(options, args) = p.parse_args()

if options.gui.lower() == "qt":
    from FfmpegQtGui import FfmpegQtGui
    try:
        from PyQt4 import QtGui, QtCore
        #QtGui.QApplication
    except ImportError:
        # BUG FIX: narrowed the bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) to ImportError, and parenthesized
        # the print so the script is valid Python 3 as well as Python 2.
        print("PyQt4 is needed for this Gui")
    else:
        app = QtGui.QApplication(sys.argv)
        ff = FfmpegQtGui(args)
        ff.show()
        #ff.Main(args)
        sys.exit(app.exec_())

if options.gui.lower() == "term":
    from FfmpegTermGui import FfmpegTermGui
    ff = FfmpegTermGui()
    ff.Main(args)
|
from django.db import models
from cms.models.fields import PlaceholderField
class Message(models.Model):
    """A forum message whose body is a django-cms placeholder."""

    # Rich-text body managed by django-cms placeholders.
    message = PlaceholderField('message')

    def __str__(self):
        # NOTE(review): this returns the Placeholder object, not a str --
        # confirm whether str(self.message) was intended.
        return self.message

    class Meta:
        verbose_name = 'Message'
        verbose_name_plural = 'Messages'
class User(models.Model):
    """Forum user with unique nick/email and an optional latest message."""

    nick = models.CharField(
        verbose_name='Nick',
        unique=True,
        max_length=33)
    email = models.EmailField(
        unique=True,
        verbose_name='Email')
    # SECURITY(review): a short CharField named `password` suggests
    # plaintext storage -- confirm hashing is applied elsewhere.
    password = models.CharField(
        verbose_name='Password',
        max_length=33)
    # When the user is deleted, the FK is replaced by the literal
    # 'User deleted' sentinel instead of cascading.
    message = models.ForeignKey(
        Message,
        on_delete=models.SET('User deleted'),
        verbose_name='Message',
        null=True)

    def __str__(self):
        return self.nick

    class Meta:
        verbose_name = 'User'
        verbose_name_plural = 'Users'
class Topic(models.Model):
    """A discussion topic, optionally linked to a Message placeholder."""

    name = models.TextField(verbose_name='Topic')
    message = models.ForeignKey(
        Message,
        on_delete=models.CASCADE,
        null=True,
        verbose_name='Message')

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = 'Topic'
        verbose_name_plural = 'Topics'
|
"""
ไธไธชๅพๅฝข่ทๅ้ข็งฏ็ๆฅๅฃไธไธๆ ท๏ผๅฆๆๅฝข็ถๆ่ฟไธชๅฑๆง
"""
# from lib1 import Circle
# from lib2 import Triangle
# from lib3 import Rectangle
from operator import methodcaller
class Circle:
    """A circle defined by its radius ``r``."""

    def __init__(self, r):
        # Keep the radius for the later area computation.
        self.r = r

    def area(self):
        """Area using the coarse pi approximation 3.14."""
        return self.r **2 *3.14
class Triangle:
    """A triangle given by its three side lengths a, b, c."""

    def __init__(self, a, b, c):
        self.a, self.b, self.c = a, b, c

    def get_area(self):
        """Area by Heron's formula: sqrt(p(p-a)(p-b)(p-c)), p the semi-perimeter."""
        a, b, c = self.a, self.b, self.c
        p = (a + b + c) / 2
        # BUG FIX: the square root was written as `* 0.5` (halving the
        # product) instead of `** 0.5` (taking the square root), so the
        # returned value was not the area.
        return (p * (p - a) * (p - b) * (p - c)) ** 0.5
class Rectangle:
    """An axis-aligned rectangle with side lengths a and b."""

    def __init__(self, a, b):
        self.a = a
        self.b = b

    def getArea(self):
        """Return the rectangle's area (width times height)."""
        return self.a * self.b
def get_area(shape, method_name=('area', 'get_area', 'getArea')):
    """Return the shape's area via the first area-accessor name it provides.

    Tries each candidate method name in order and calls the first one the
    object has; returns None when no candidate matches.

    BUG FIX: the default was a mutable list — a shared object across all
    calls, the classic mutable-default pitfall; a tuple is safe and
    equivalent here. Also made the no-match `return None` explicit.
    """
    for name in method_name:
        if hasattr(shape, name):
            # methodcaller(name)(obj) is equivalent to getattr(obj, name)()
            return methodcaller(name)(shape)
    return None
# Demo: one instance of each shape type.
shape1 = Circle(1)
shape2 = Triangle(3, 4, 5)
shape3 = Rectangle(4, 6)
shape_list = [shape1, shape2, shape3]
# Collect every shape's area through the duck-typed dispatcher above.
area_list = list(map(get_area, shape_list))
print(area_list)
|
'''
Bitwise operation demo: applies OR, AND, XOR, NOT and shifts bit by bit.
example : int 1 = 00000001
          int 2 = 00000010
          int 9 = 00001001
'''
a = 8
b = 5
c = a | b
# Bitwise OR (|): a result bit is set when set in either operand
print ('=============OR============')
print (' int:',a,',binary:',format(a,'08b'))
print (' int:',b,',binary:',format(b,'08b'))
print ('-----------------------------(|)') # operation OR
print ('bitwise:',c,',binary:',format(c,'08b'))
# Bitwise AND (&): a result bit is set only when set in both operands
c = a & b
print ('=============AND============')
print (' int:',a,',binary:',format(a,'08b'))
print (' int:',b,',binary:',format(b,'08b'))
print ('-----------------------------(&)') # operation AND
print ('bitwise:',c,',binary:',format(c,'08b'))
# Bitwise XOR (^): a result bit is set when the operands' bits differ
c = a ^ b
print ('=============XOR============')
print (' int:',a,',binary:',format(a,'08b'))
print (' int:',b,',binary:',format(b,'08b'))
print ('-----------------------------(^)') # operation XOR
print ('bitwise:',c,',binary:',format(c,'08b'))
# Bitwise NOT (~): two's-complement inversion, so ~a == -(a + 1)
c = ~a
print ('=============NOT============')
print (' int:',a,',binary:',format(a,'08b'))
print ('-----------------------------(~)') # operation NOT
print (' int:',c,',binary:',format(c,'08b'))
d = 0b00001001 # int 9
e = 0b11111111 # all-ones mask: e ^ d flips the low 8 bits of 9
print (' int:',e^d,',binary:',format(e^d,'08b'))
# Right shift (>>): each step drops the lowest bit (integer halving)
print ('=============Shift Right============')
x1 = a >> 1
x2 = a >> 2
x3 = a >> 3
print (' int:',a,',binary:',format(a,'08b'))
print (' shift: 1',',binary:',format(x1,'08b'))
print (' shift: 2',',binary:',format(x2,'08b'))
print (' shift: 3',',binary:',format(x3,'08b'))
# Left shift (<<): each step doubles the value (comment fixed: was '>>')
print ('=============Shift Left============')
x1 = a << 1
x2 = a << 2
x3 = a << 3
print (' int:',a,',binary:',format(a,'08b'))
print (' shift: 1',',binary:',format(x1,'08b'))
print (' shift: 2',',binary:',format(x2,'08b'))
print (' shift: 3',',binary:',format(x3,'08b'))
|
import numpy as np
from .cykmeans import cy_ikmeans, cy_ikmeans_push, algorithm_type_ikmeans
def ikmeans(data, num_centers, algorithm="LLOYD", max_num_iterations=200,
            verbose=False):
    """
    Integer K-means.

    Parameters
    ----------
    data : [N, D] `uint8` `ndarray`
        Data to be clustered
    num_centers : `int`
        Number of clusters (leaves) per level; must be a positive integer
        no larger than the number of data points.
    algorithm : {'LLOYD', 'ELKAN'}, optional
        Algorithm to be used for clustering.
    max_num_iterations : `int`, optional
        Maximum number of iterations before giving up (the algorithm
        stops as soon as there is no change in the data to cluster
        associations).
    verbose : bool, optional
        If ``True``, be verbose.

    Returns
    -------
    (centers, assignments) : ([num_centers, D] `int32` `ndarray`, [N,] `uint32` `ndarray`)
        Computed centers of the clusters and their assignments

    Raises
    ------
    ValueError
        If any argument fails validation.
    """
    assert isinstance(data, np.ndarray)
    assert isinstance(num_centers, int)
    assert isinstance(verbose, bool)
    if data.ndim != 2:
        raise ValueError('Data should be a 2-D matrix')
    if data.dtype != np.uint8:
        raise ValueError('Data should be uint8')
    # BUG FIX: the error message promised a *positive* integer, but only the
    # upper bound was checked — num_centers <= 0 used to slip through to the
    # Cython layer.
    if num_centers <= 0 or num_centers > data.shape[0]:
        raise ValueError('num_centers should be a positive integer smaller '
                         'than the number of data points')
    algorithm_b = algorithm.encode()
    if algorithm_b not in algorithm_type_ikmeans:
        raise ValueError('algorithm field invalid')
    if (not isinstance(max_num_iterations, int)) or max_num_iterations <= 0:
        raise ValueError('max_num_iterations should be a positive integer')
    return cy_ikmeans(data, num_centers, algorithm_b, max_num_iterations,
                      verbose)
def ikmeans_push(data, centers):
    """
    Projects the data on the KMeans nearest elements (similar to
    kmeans_quantize but for integer data).

    Parameters
    ----------
    data : [N, D] `uint8` `ndarray`
        Data to be projected to the centers assignments
    centers : [K, D] `int32` `ndarray`
        Centers positions

    Returns
    -------
    assignments : [N,] `uint32` `ndarray`
        Assignments of the data points to their respective clusters indice.

    Raises
    ------
    ValueError
        If either array has the wrong rank or dtype.
    """
    assert isinstance(data, np.ndarray)
    # Consistency fix: validate centers is an ndarray too (as ikmeans does
    # for its array argument), so a non-array fails with a clear assertion
    # instead of an AttributeError on `.ndim`.
    assert isinstance(centers, np.ndarray)
    if data.ndim != 2:
        raise ValueError('Data should be a 2-D matrix')
    if data.dtype != np.uint8:
        raise ValueError('Data should be uint8')
    if centers.ndim != 2:
        raise ValueError('Centers should be a 2-D matrix')
    if centers.dtype != np.int32:
        raise ValueError('Centers should be int32')
    return cy_ikmeans_push(data, centers)
|
#python
# Load the 'Download' section from container.yaml.
import yaml  # PyYAML

with open("container.yaml", "r") as stream:
    try:
        # BUG FIX: `yaml_load` was undefined; use yaml.safe_load, which also
        # avoids arbitrary object construction on untrusted YAML input.
        yaml_data = yaml.safe_load(stream)
        download = yaml_data['Download']
    # BUG FIX: the exception class is yaml.YAMLError, not yaml.YAMLERROR
    # (the original name would raise AttributeError instead of handling).
    except yaml.YAMLError as exc:
        print(exc)
from PyQt5.Qt import *
from PyQt5 import QtGui
from Object_IQA_Software.resource.main_iqa_ui import Ui_MainWindow #่ฎฐๅพๆน๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ๏ผ
# from Object_IQA_Software.Batch_NR_Pane import BatchNRPane
from Object_IQA_Software.method.NR_IQA_method.NR_IQA_algorithm import *
from Object_IQA_Software.method.FR_IQA_method.FR_IQA_algorithm import FR_IQA_method
# from Mymatlabexe.method.Ours import *
import numpy as np
import matlab.engine
class MainIQAPane(QMainWindow, Ui_MainWindow):
start_a_batch_nr_pane_signal = pyqtSignal(str)
# nr_iqa_algorithm_signal = pyqtSignal(str, str)
    def __init__(self, parent=None, *args, **kwargs):
        """Build the main IQA window: widgets, algorithm lists, the MATLAB engine and worker threads."""
        super().__init__(parent, *args, **kwargs)
        # Enable stylesheet-driven background painting on this top-level widget.
        self.setAttribute(Qt.WA_StyledBackground, True)
        self.setupUi(self)
        # Hide the second photo label by default; the default tab is NR_IQA.
        self.img_show_lb_2.hide()
        self.tool_widget.setCurrentWidget(self.NR_IQA_tab)
        # Defaults: initial file-dialog directory and the selected photo paths.
        self.init_open_addr = r'C:\Users'
        self.nr_iqa_pho_addr = None
        self.fr_iqa_pho_addr_1 = None
        self.fr_iqa_pho_addr_2 = None
        # Algorithm lists for the two IQA families, fed into the comboboxes.
        nr_algorithm_list = ['BIQI', 'BRISQUE', 'NIQE', 'BLIINDS_2', 'DESIQUE', 'CPBD',
                             'FISH', 'FISH_bb', 'S3', 'LPC', 'DIIVINE', 'Martziliano', 'NJQA']
        self.algorithm_comboBox.addItems(nr_algorithm_list)
        fr_algorithm_list = ['MSE', 'RMSE', 'PSNR', 'SSIM', 'UQI', 'MS-SSIM', 'ERGAS', 'SCC', 'RASE', 'SAM', 'VIF_P']
        self.fr_algorithm_comboBox.addItems(fr_algorithm_list)
        # Display geometry: photo aspect ratio and zoom-scale tables
        # (pho_show_scale_single / _double apply to one resp. two photos shown).
        self.ratio_of_photo = 4/3
        self.pho_show_scale_single = np.round((np.arange(0.50, 0.76, 0.02)), 2).tolist()
        self.pho_show_scale_double = np.round((np.arange(0.33, 0.47, 0.01)), 2).tolist()
        self.curr_phot_show_scale = [0.70, 0.40]  # default scales: [single photo, two photos]
        self.current_photo_info = []
        self.isfist_readimg = True
        # Whether the tool pane is currently hidden.
        self.ishidden_tool_wid = False
        # Start a MATLAB engine for the algorithms implemented in MATLAB.
        self.eng = matlab.engine.start_matlab()
        # Scoring worker threads keep the GUI responsive while evaluating.
        self.nr_iqa_thread = My_NR_IQA_Thread()
        self.nr_iqa_thread.score_signal.connect(self.nr_iqa_score_callback)
        self.fr_iqa_thread = My_FR_IQA_Thread()
        self.fr_iqa_thread.fr_score_signal.connect(self.fr_iqa_score_callback)
        # Custom context menu on the photo label, used for quick zooming.
        self.img_show_lb.setContextMenuPolicy(Qt.CustomContextMenu)
        self.img_show_lb.customContextMenuRequested.connect(self.quick_change_pho_by_mouse)
        # Batch-processing menu actions.
        self.file_menu.triggered[QAction].connect(self.change_setting_of_file)
        self.is_batch_active = False
    # Slot for the batch-processing menu actions.
    def change_setting_of_file(self, action_name):
        """Forward the triggered menu action's text through the batch-pane signal."""
        action_name = str(action_name.text())
        self.start_a_batch_nr_pane_signal.emit(action_name)
# if action_name in ['BIQI', 'BRISQUE', 'NIQE', 'BLIINDS_2', 'CPBD', 'NJQA']:
# if self.batch_nr_pane == 1:
# #1 ไปฃ่กจ็ฎๅๆฒกๆไปปๅก
# self.batch_nr_pane = BatchNRPane(action_name)
# self.batch_nr_pane.show()
# NR_IQA็ธๅ
ณๆงฝๅฝๆฐๅ็บฟ็จ
    def nr_iqa_score_callback(self, i):
        """Slot: receive the NR-IQA score (as text) from the worker thread."""
        self.real_mark_lb.setText(i)
        # Re-enable scoring and restore the button caption ("start scoring").
        self.start_mark_btn.setEnabled(True)
        self.start_mark_btn.setText('ๅผๅง่ฏไปท')
def choose_pho_from_pc(self):
save_path_tuple = QFileDialog.getOpenFileNames(self,
"่ฏท้ๆฉไธๅผ ๆจๆณ่ฆๅNR_IQA็ๅพ็",
self.init_open_addr,
"JPG Files (*.jpg);;PNG Files (*.png);;BMP Files (*.bmp)")
# ๆๅฅฝ่ฟๆฏๅซ็จ all file "All Files (*);;JPG Files (*.jpg);;PNG Files (*.png);;BMP Files (*.bmp)"
# ้ฒๆญข็จๆท้ๅบ๏ผๆฒกๆ้ๆไปถ๏ผๆ
้ๅคๆญtuple็ฌฌไธไธชๅ
็ด ๏ผๅณๆไปถๅฐๅๆฏๅฆไธบ็ฉบ
if save_path_tuple[0] == []:
pass
else:
# ๆดๆฐๅฐๅๆพ็คบๆๆฌๆก
self.nr_iqa_pho_addr = save_path_tuple[0][0]
print(self.nr_iqa_pho_addr)
self.refresh_curr_pho_info(is_from_choose=True, pho_num=0)
# ๆดๆฐไธปๆพ็คบ็้ขๅ็ถๆๆ
self.show_nr_iqa_photo()
def pho_zoom_in(self):
# nr็
ง็ๆพๅคง
if self.pho_addr_show_le.text() != '':
next_index = self.pho_show_scale_single.index(self.curr_phot_show_scale[0])
print(next_index)
if self.pho_show_scale_single[next_index] == self.pho_show_scale_single[-1]:
pass
print('ๅฐ้กถไบ')
else:
next_index += 1
self.curr_phot_show_scale[0] = self.pho_show_scale_single[next_index]
self.show_nr_iqa_photo(curr_scale=self.curr_phot_show_scale)
def pho_zoom_out(self):
# nr็
ง็็ผฉๅฐ
if self.pho_addr_show_le.text() != '':
pre_index = self.pho_show_scale_single.index(self.curr_phot_show_scale[0])
print(pre_index)
if self.pho_show_scale_single[pre_index] == self.pho_show_scale_single[0]:
pass
print('ๅฐๅบไบ')
else:
pre_index -= 1
self.curr_phot_show_scale[0] = self.pho_show_scale_single[pre_index]
self.show_nr_iqa_photo(curr_scale=self.curr_phot_show_scale)
def pho_zoom_reset(self):
if self.pho_addr_show_le.text() != '':
# ๆดๆฐๅฝๅๅพ็ไฟกๆฏ ใstatusbarใๆดๆฐ้ขๆฟ
self.refresh_curr_pho_info(is_from_choose=False, pho_num=0)
self.curr_phot_show_scale = [0.7, 0.4]
self.show_nr_iqa_photo()
def start_mark(self):
if self.pho_addr_show_le.text() != '':
# ่ทๅๅฝๅ็็ฎๆณๅๅพ็ๅฐๅ
algorithm = self.algorithm_comboBox.currentText()
photo_addr = self.pho_addr_show_le.text()
print(algorithm, photo_addr)
# ๅๆถไฝฟ่ฝ ๅผๅง่ฏไปทๆ้ฎ ๅนถๆ็คบโๆญฃๅจ่ฟ่กโ
self.start_mark_btn.setEnabled(False)
self.start_mark_btn.setText('ๆญฃๅจ่ฏไปท')
# ๅผๅฏ็บฟ็จๅนถ่ฟ่ก
self.nr_iqa_thread.setting(algorithm, photo_addr, eng=self.eng)
self.nr_iqa_thread.start()
# self.nr_iqa_algorithm_signal.emit(algorithm, photo_addr)
else:
self.you_should_choose_pho_first = QMessageBox.warning(self,
'ๆธฉ้ฆจๆ็คบ',
'่ฏทๅ
ๅจๅณไธ่ง็กฎ่ฎคๆจๅทฒ้ๆฉไบๅพ็ๅ็ฎๆณ',
QMessageBox.Ok)
def ishide_toolmenu(self):
if self.ishidden_tool_wid == False:
self.tool_widget.hide()
self.ishidden_tool_wid = True
else:
self.tool_widget.show()
self.ishidden_tool_wid = False
def show_nr_iqa_photo(self, curr_scale=None):
# pho_show_scale๏ผ็จไบๆงๅถๆพ็คบๆฏไพ๏ผ่ฟไธชไธป่ฆๆฏไธคไธช็
ง็ๅๆถๆพ็คบ็ๆถๅๅๅๅผ ๆพ็คบ็ๅบๅซ
# ๅๅผ ไธ่ฌ0.7 ไธคๅผ ๅฐฑ0.4
if self.pho_addr_show_le.text() == '':
pass
else:
if curr_scale == None:
self.show_iqa_photo(dis_mode=1, algo_mode='nr')
else:
self.show_iqa_photo(dis_mode=1, algo_mode='nr', scale_of_pho=curr_scale)
# FR_IQA็็ธๅ
ณๆงฝๅฝๆฐ
    def fr_iqa_score_callback(self, i):
        """Slot: receive the FR-IQA score (as text) from the worker thread."""
        self.real_mark_lb_fr.setText(i)
        # Re-enable scoring and restore the button caption ("start scoring").
        self.start_mark_btn_fr.setEnabled(True)
        self.start_mark_btn_fr.setText('ๅผๅง่ฏไปท')
def choose_pho_1_fr(self):
save_path_tuple = QFileDialog.getOpenFileNames(self,
"่ฏท้ๆฉไธๅผ ๆจๆณ่ฆๅFR_IQA็ๅพ็(Ground Truth)",
self.init_open_addr,
"JPG Files (*.jpg);;PNG Files (*.png);;BMP Files (*.bmp)")
# ๆๅฅฝ่ฟๆฏๅซ็จ all file "All Files (*);;JPG Files (*.jpg);;PNG Files (*.png);;BMP Files (*.bmp)"
# ้ฒๆญข็จๆท้ๅบ๏ผๆฒกๆ้ๆไปถ๏ผๆ
้ๅคๆญtuple็ฌฌไธไธชๅ
็ด ๏ผๅณๆไปถๅฐๅๆฏๅฆไธบ็ฉบ
if save_path_tuple[0] == []:
pass
else:
# ๆดๆฐๅฐๅๆพ็คบๆๆฌๆก
self.fr_iqa_pho_addr_1 = save_path_tuple[0][0]
print(self.fr_iqa_pho_addr_1)
self.refresh_curr_pho_info(is_from_choose=True, pho_num=1)
# ๆดๆฐไธปๆพ็คบ็้ขๅ็ถๆๆ
self.show_fr_iqa_photo(shift_mode=1)
def choose_pho_2_fr(self):
save_path_tuple = QFileDialog.getOpenFileNames(self,
"่ฏท้ๆฉไธๅผ ๆจๆณ่ฆๅFR_IQA็ๅพ็(Distortion Photo)",
self.init_open_addr,
"JPG Files (*.jpg);;PNG Files (*.png);;BMP Files (*.bmp)")
# ๆๅฅฝ่ฟๆฏๅซ็จ all file "All Files (*);;JPG Files (*.jpg);;PNG Files (*.png);;BMP Files (*.bmp)"
# ้ฒๆญข็จๆท้ๅบ๏ผๆฒกๆ้ๆไปถ๏ผๆ
้ๅคๆญtuple็ฌฌไธไธชๅ
็ด ๏ผๅณๆไปถๅฐๅๆฏๅฆไธบ็ฉบ
if save_path_tuple[0] == []:
pass
else:
# ๆดๆฐๅฐๅๆพ็คบๆๆฌๆก
self.fr_iqa_pho_addr_2 = save_path_tuple[0][0]
print(self.fr_iqa_pho_addr_2)
self.refresh_curr_pho_info(is_from_choose=True, pho_num=2)
# ๆดๆฐไธปๆพ็คบ็้ขๅ็ถๆๆ
self.show_fr_iqa_photo(shift_mode=2)
def pho_zoom_in_fr(self):
curr_mode = None # ็ฎๅ็ๆพ็คบๆจกๅผ
if self.img_show_lb_2.isHidden():
# ๅ็
ง็ๆพ็คบ
if self.pho_addr_show_le_fr_1.text() != '' or self.pho_addr_show_le_fr_2.text() != '':
# ่ทๅๅฝๅๅพ็ๅฐๅไฟกๆฏ
curr_pho_addr = (self.current_photo_info[0].split(' ')[1]).split(';')[0]
if curr_pho_addr == self.pho_addr_show_le_fr_1.text() :
curr_mode = 1
if curr_pho_addr == self.pho_addr_show_le_fr_2.text() :
curr_mode = 2
next_index = self.pho_show_scale_single.index(self.curr_phot_show_scale[0])
print(next_index)
if self.pho_show_scale_single[next_index] == self.pho_show_scale_single[-1]:
pass
print('ๅฐ้กถไบ')
else:
next_index +=1
self.curr_phot_show_scale[0] = self.pho_show_scale_single[next_index]
self.show_fr_iqa_photo(shift_mode=curr_mode, curr_scale=self.curr_phot_show_scale)
else:
# ๅค็
ง็ๅๆถๆพ็คบ
# next_index = self.pho_show_scale_single.index(self.curr_phot_show_scale[0])
# print(next_index)
# if self.pho_show_scale_single[next_index] == self.pho_show_scale_single[-1]:
# pass
# print('ๅฐ้กถไบ')
# else:
# next_index += 1
# self.curr_phot_show_scale[0] = self.pho_show_scale_single[next_index]
# self.show_fr_iqa_photo(shift_mode=3, curr_scale=self.curr_phot_show_scale)
# ็ฎไบ่ฟๆฏๅซๅๅไบ๏ผไธคไธชlbๅจgbox้้ขๅ
ๆปกไบ๏ผๅไธไบ
pass
def pho_zoom_out_fr(self):
curr_mode = None
if self.img_show_lb_2.isHidden():
if self.pho_addr_show_le_fr_1.text() != '' or self.pho_addr_show_le_fr_2.text() != '':
# ่ทๅๅฝๅๅพ็ๅฐๅไฟกๆฏ
curr_pho_addr = (self.current_photo_info[0].split(' ')[1]).split(';')[0]
if curr_pho_addr == self.pho_addr_show_le_fr_1.text() :
curr_mode = 1
if curr_pho_addr == self.pho_addr_show_le_fr_2.text() :
curr_mode = 2
pre_index = self.pho_show_scale_single.index(self.curr_phot_show_scale[0])
print(pre_index)
if self.pho_show_scale_single[pre_index] == self.pho_show_scale_single[0]:
pass
print('ๅฐๅบไบ')
else:
pre_index -= 1
self.curr_phot_show_scale[0] = self.pho_show_scale_single[pre_index]
self.show_fr_iqa_photo(shift_mode=curr_mode, curr_scale=self.curr_phot_show_scale)
else:
pass
def pho_zoom_reset_fr(self):
if self.img_show_lb_2.isHidden():
if self.pho_addr_show_le_fr_1.text() == '' and self.pho_addr_show_le_fr_2.text() == '':
# ๆฒก้็
ง็
pass
else:
# ้ไบไธๅผ
curr_pho_addr = (self.current_photo_info[0].split(' ')[1]).split(';')[0]
self.curr_phot_show_scale = [0.7, 0.4]
if curr_pho_addr == self.pho_addr_show_le_fr_1.text():
self.show_fr_iqa_photo(shift_mode=1)
elif curr_pho_addr == self.pho_addr_show_le_fr_2.text():
self.show_fr_iqa_photo(shift_mode=2)
else:
self.show_fr_iqa_photo(shift_mode=1)
else:
pass
    def dis_only_p1_fr(self):
        """Show only FR photo 1 (the reference image)."""
        if self.pho_addr_show_le_fr_1.text() != '':
            # Refresh the current-photo info (status bar) and reset zoom scales.
            self.refresh_curr_pho_info(is_from_choose=False, pho_num=1)
            self.curr_phot_show_scale = [0.7, 0.4]
            self.show_fr_iqa_photo(shift_mode=1)
    def dis_only_p2_fr(self):
        """Show only FR photo 2 (the distorted image)."""
        if self.pho_addr_show_le_fr_2.text() != '':
            # Refresh the current-photo info (status bar) and reset zoom scales.
            self.refresh_curr_pho_info(is_from_choose=False, pho_num=2)
            self.curr_phot_show_scale = [0.7, 0.4]
            self.show_fr_iqa_photo(shift_mode=2)
def dis_all_fr(self):
# ๆดๆฐstatusbarใๆดๆฐ้ขๆฟ
if self.pho_addr_show_le_fr_2.text() != '' and self.pho_addr_show_le_fr_2.text() != '':
self.show_fr_iqa_photo(shift_mode=3)
def start_score_fr(self):
if self.pho_addr_show_le_fr_1.text() !='' and self.pho_addr_show_le_fr_2.text() !='':
img1 = QImage(self.pho_addr_show_le_fr_1.text())
img2 = QImage(self.pho_addr_show_le_fr_2.text())
if img1.height() == img2.height() and img1.width() == img2.width():
# ่ทๅๅฝๅ็็ฎๆณๅๅพ็ๅฐๅ
algorithm = self.fr_algorithm_comboBox.currentText()
photo_addr_1 = self.pho_addr_show_le_fr_1.text()
photo_addr_2 = self.pho_addr_show_le_fr_2.text()
print(algorithm, photo_addr_1, photo_addr_2)
# ๅๆถไฝฟ่ฝ ๅผๅง่ฏไปทๆ้ฎ ๅนถๆ็คบโๆญฃๅจ่ฟ่กโ
self.start_mark_btn_fr.setEnabled(False)
self.start_mark_btn.setText('ๆญฃๅจ่ฏไปท')
# ๅผๅฏ็บฟ็จๅนถ่ฟ่ก
self.fr_iqa_thread.setting(algorithm, photo_addr_1, photo_addr_2)
self.fr_iqa_thread.start()
else:
you_should_choose_same_pho = QMessageBox.warning(self,
'ๆธฉ้ฆจๆ็คบ',
'\t่ฏทๆจ็กฎ่ฎคไธค็
ง็็ๅฐบๅฏธ๏ผๅบไฟ่ฏไธค่
็ธๅใ',
QMessageBox.Ok)
else:
you_should_choose_pho_first = QMessageBox.warning(self,
'ๆธฉ้ฆจๆ็คบ',
'\t่ฏทๆจ็กฎไฟ๏ผๅจๅณไธ่ง็กฎ่ฎคๆจๅทฒ้ๆฉไบ็ฎๆณๅไธคๅผ ็ธๅๅฐบๅฏธ็ๅพ็ใ',
QMessageBox.Ok)
def show_fr_iqa_photo(self, shift_mode, curr_scale=None):
# ็จไบๆพ็คบfr_iqaๅพ็
# ่พๅ
ฅ curr_fr_pho๏ผ 1ไปฃ่กจไธบpho1๏ผ 2ไปฃ่กจpho2
# shift_mode๏ผ 1ไปฃ่กจๅชๅฑ็คบpho1 2ๅชๅฑ็คบpho2๏ผ 3ไปฃ่กจไธ่ตทๅฑ็คบ
# ๅคๆญไปไปฌๆๆฒกๆ้ๅพ็
if self.pho_addr_show_le_fr_1.text() == '' and self.pho_addr_show_le_fr_2.text() == '':
pass
else:
if shift_mode == 1:
if curr_scale == None:
self.show_iqa_photo(dis_mode=1, algo_mode='fr_1')
else:
self.show_iqa_photo(dis_mode=1, algo_mode='fr_1', scale_of_pho=curr_scale)
if shift_mode == 2:
if curr_scale == None:
self.show_iqa_photo(dis_mode=1, algo_mode='fr_2')
else:
self.show_iqa_photo(dis_mode=1, algo_mode='fr_2', scale_of_pho=curr_scale)
if shift_mode == 3:
if curr_scale == None:
self.show_iqa_photo(dis_mode=2, algo_mode='fr_all')
else:
self.show_iqa_photo(dis_mode=2, algo_mode='fr_all', scale_of_pho=curr_scale)
# ๅ
ฌๅ
ฑๅฝๆฐ
def resizeEvent(self, evt):
curr_pho_addr = None
if self.img_show_lb_2.isHidden():
if self.current_photo_info != []:
curr_pho_addr = (self.current_photo_info[0].split(' ')[1]).split(';')[0]
# ๅคๆญๆฏๅช้็็
ง็ NR OR FR
if curr_pho_addr == self.pho_addr_show_le.text():
self.show_nr_iqa_photo()
elif curr_pho_addr == self.pho_addr_show_le_fr_1.text():
self.show_fr_iqa_photo(shift_mode=1, curr_scale=self.curr_phot_show_scale)
else:
self.show_fr_iqa_photo(shift_mode=2, curr_scale=self.curr_phot_show_scale)
else:
self.show_fr_iqa_photo(shift_mode=3, curr_scale=self.curr_phot_show_scale)
def refresh_curr_pho_info(self, is_from_choose, pho_num):
# ๆ นๆฎNR FR IQA้ๆฉๅฐๅฐๅบๅปๆดๆฐๅชไธช
# is_choose ็จไบๅคๆญๆฏๅฆไธบ้่ฟ้ๆฉๅพ็ๆฅๆดๆฐ็; ไปๅฆๆๆฏไป้ๆฉ่ๆฅ๏ผ้ฃไน้่ฆๆดๆฐlineedit็ปไปถ
# pho_num: 0๏ผ nr_iqa็ๅพ็๏ผ 1๏ผfr_iqa็ๅพ็1๏ผ 2๏ผfr_iqa็ๅพ็2๏ผ
# ๆดๆฐๅฐๅๆพ็คบๆๆฌๆก
if pho_num == 0:
self.iqa_pho_addr = self.nr_iqa_pho_addr
if is_from_choose:
self.pho_addr_show_le.setText(str(self.nr_iqa_pho_addr))
elif pho_num == 1:
self.iqa_pho_addr = self.fr_iqa_pho_addr_1
if is_from_choose:
self.pho_addr_show_le_fr_1.setText(str(self.fr_iqa_pho_addr_1))
else:
self.iqa_pho_addr = self.fr_iqa_pho_addr_2
if is_from_choose:
self.pho_addr_show_le_fr_2.setText(str(self.fr_iqa_pho_addr_2))
# print(self.iqa_pho_addr)
if self.current_photo_info == []:
self.current_photo_info.append("ๅฝๅ็็
ง็ไธบ: " + str(self.iqa_pho_addr) + "; ")
else:
self.current_photo_info[0] = "ๅฝๅ็็
ง็ไธบ: " + str(self.iqa_pho_addr) + "; "
print('ใrefresh_curr_pho_infoใๅฝๅๅพ็ๅฐๅไฟกๆฏไธบ๏ผ', self.current_photo_info)
def show_iqa_photo(self, dis_mode, algo_mode, scale_of_pho=None):
# dis_mode ่กจ็คบๅฐๅบๆฏ่ฆๆพ็คบไธๅผ ่ฟๆฏๆพ็คบไธคๅผ
# 1 ไธๅผ 2 ไธคๅผ
# algo_mode ไปฃ่กจไธๅ็ฎๆณ
# โnrโ ๆ ๅ่ โfr_1โ ๅ
จๅ่1, 'fr_2' ๅ
จๅ่2
img = None
curr_pho_addr = None
# ๅฆๆๆฏๆชๆๅฎscaleๅฐฑ็จ้ป่ฎค็๏ผๅฆๅไปself.curr_phot_show_scaleๆพ
if scale_of_pho == None:
scale_of_pho = [0.7, 0.4]
else:
scale_of_pho = self.curr_phot_show_scale
# ๅคๆญๆฏๅชๅผ ๅพ
if algo_mode == 'nr':
img = QImage(self.nr_iqa_pho_addr)
curr_pho_addr = self.nr_iqa_pho_addr
if algo_mode == 'fr_1':
img = QImage(self.fr_iqa_pho_addr_1)
curr_pho_addr = self.fr_iqa_pho_addr_1
if algo_mode == 'fr_2':
img = QImage(self.fr_iqa_pho_addr_2)
curr_pho_addr = self.fr_iqa_pho_addr_2
if algo_mode == 'fr_all':
pass
# ๅคๆญๆฏๅช็งๆจกๅผ
if dis_mode == 1:
# ๅ
ๆ็ฌฌไบไธชlbๅ
ณๆ
self.img_show_lb_2.hide()
# scaleๅฎไนไธบsingle็scale๏ผ ้ป่ฎค0.7
pho_show_scale = scale_of_pho[0]
# ๆดๆฐ็ถๆๆ
if len(self.current_photo_info) <= 1:
self.current_photo_info.append("ๅพ็ๅฎ้
ๅคงๅฐไธบ: " + str(img.width()) + " โ " + str(img.height()) + "; ")
else:
self.current_photo_info[1] = "ๅพ็ๅฎ้
ๅคงๅฐไธบ: " + str(img.width()) + " โ " + str(img.height()) + "; "
print(self.current_photo_info)
self.statusbar.showMessage(''.join(self.current_photo_info))
# ๆ นๆฎๅพ็้ฟๅฎฝๆฏๆฅ่ฐๆดlabel็ๅฐบๅฏธ
if img.height() > img.width():
# ่ฎพ็ฝฎ4๏ผ3ๅฐบๅฏธ๏ผๅฆๆๅฎฝ้ซๆฏๆฏ3๏ผ4 ้ฃไนๅงlabelไนๅไธไธๅๅฑ็คบ
self.img_show_lb.setFixedSize((self.main_show_gbox.height() * pho_show_scale) / self.ratio_of_photo,
self.main_show_gbox.height() * pho_show_scale)
jpg = QtGui.QPixmap(curr_pho_addr).scaled(self.img_show_lb.width(),
self.img_show_lb.height(),
Qt.KeepAspectRatio,
# ไฟๆๅฎฝ้ฟๆฏ๏ผ็ถๅ็ผฉๆพๅไธ่ถ
่ฟๆ้ฟ่พน ๅฆๅคไธค็งไธบIgnoreAspectRatio KeepAspectRatioByExpanding
Qt.SmoothTransformation) # ๅ็บฟๆงๆๅผ ๅฆไธ็งไธบFastTransformation ไธไฝฟ็จๆๅผ ่ฏฆ่งhttps://www.cnblogs.com/qixianyu/p/6891054.html
self.img_show_lb.setPixmap(jpg)
else:
self.img_show_lb.setFixedSize(self.main_show_gbox.width() * pho_show_scale,
(self.main_show_gbox.width() * pho_show_scale) / self.ratio_of_photo)
jpg = QtGui.QPixmap(curr_pho_addr).scaled(self.img_show_lb.width(),
self.img_show_lb.height(),
Qt.KeepAspectRatio,
Qt.SmoothTransformation)
self.img_show_lb.setPixmap(jpg)
if dis_mode == 2:
# ๅ
ๆ็ฌฌไบไธชlbๆๅผ
self.img_show_lb_2.show()
# scaleๅฎไนไธบdouble็scale๏ผ ้ป่ฎค0.4
pho_show_scale = scale_of_pho[1]
img = QImage(self.fr_iqa_pho_addr_1)
img_2 = QImage(self.fr_iqa_pho_addr_2)
# ๆดๆฐ็ถๆๆ
self.statusbar.showMessage('ไธคๅผ ็
ง็ๅๅฏนๆฏ๏ผๅทฆๅพไธบ๏ผๅพ็1๏ผ ๅณๅพไธบ๏ผๅพ็2ใ')
# ๆ นๆฎๅพ็้ฟๅฎฝๆฏๆฅ่ฐๆดlabel็ๅฐบๅฏธ
if img.height() > img.width():
# ่ฎพ็ฝฎ4๏ผ3ๅฐบๅฏธ๏ผๅฆๆๅฎฝ้ซๆฏๆฏ3๏ผ4 ้ฃไนๅงlabelไนๅไธไธๅๅฑ็คบ
self.img_show_lb.setFixedSize((self.main_show_gbox.height() * pho_show_scale) / self.ratio_of_photo,
self.main_show_gbox.height() * pho_show_scale)
jpg = QtGui.QPixmap(self.fr_iqa_pho_addr_1).scaled(self.img_show_lb.width(),
self.img_show_lb.height(),
Qt.KeepAspectRatio,
# ไฟๆๅฎฝ้ฟๆฏ๏ผ็ถๅ็ผฉๆพๅไธ่ถ
่ฟๆ้ฟ่พน ๅฆๅคไธค็งไธบIgnoreAspectRatio KeepAspectRatioByExpanding
Qt.SmoothTransformation) # ๅ็บฟๆงๆๅผ ๅฆไธ็งไธบFastTransformation ไธไฝฟ็จๆๅผ ่ฏฆ่งhttps://www.cnblogs.com/qixianyu/p/6891054.html
self.img_show_lb.setPixmap(jpg)
else:
self.img_show_lb.setFixedSize(self.main_show_gbox.width() * pho_show_scale,
(self.main_show_gbox.width() * pho_show_scale) / self.ratio_of_photo)
jpg = QtGui.QPixmap(self.fr_iqa_pho_addr_1).scaled(self.img_show_lb.width(),
self.img_show_lb.height(),
Qt.KeepAspectRatio,
Qt.SmoothTransformation)
self.img_show_lb.setPixmap(jpg)
if img_2.height() > img_2.width():
# ่ฎพ็ฝฎ4๏ผ3ๅฐบๅฏธ๏ผๅฆๆๅฎฝ้ซๆฏๆฏ3๏ผ4 ้ฃไนๅงlabelไนๅไธไธๅๅฑ็คบ
self.img_show_lb_2.setFixedSize((self.main_show_gbox.height() * pho_show_scale) / self.ratio_of_photo,
self.main_show_gbox.height() * pho_show_scale)
jpg2 = QtGui.QPixmap(self.fr_iqa_pho_addr_2).scaled(self.img_show_lb_2.width(),
self.img_show_lb_2.height(),
Qt.KeepAspectRatio,
# ไฟๆๅฎฝ้ฟๆฏ๏ผ็ถๅ็ผฉๆพๅไธ่ถ
่ฟๆ้ฟ่พน ๅฆๅคไธค็งไธบIgnoreAspectRatio KeepAspectRatioByExpanding
Qt.SmoothTransformation) # ๅ็บฟๆงๆๅผ ๅฆไธ็งไธบFastTransformation ไธไฝฟ็จๆๅผ ่ฏฆ่งhttps://www.cnblogs.com/qixianyu/p/6891054.html
self.img_show_lb_2.setPixmap(jpg2)
else:
self.img_show_lb_2.setFixedSize(self.main_show_gbox.width() * pho_show_scale,
(self.main_show_gbox.width() * pho_show_scale) / self.ratio_of_photo)
jpg2 = QtGui.QPixmap(self.fr_iqa_pho_addr_2).scaled(self.img_show_lb_2.width(),
self.img_show_lb_2.height(),
Qt.KeepAspectRatio,
Qt.SmoothTransformation)
self.img_show_lb_2.setPixmap(jpg2)
def quick_change_pho_by_mouse(self):
if not (self.pho_addr_show_le.text() =='' and self.pho_addr_show_le_fr_1.text() == '' and self.pho_addr_show_le_fr_2.text() == ''):
quick_opt_Menu = QMenu()
quick_opt_Menu.addAction(QAction(u'ๆพๅคง', self))
quick_opt_Menu.addAction(QAction(u'็ผฉๅฐ', self))
quick_opt_Menu.triggered[QAction].connect(self.processtrigger)
quick_opt_Menu.exec_(QCursor.pos())
# ๅณ้ฎๆ้ฎไบไปถ
def processtrigger(self, q):
# ่พๅบ้ฃไธชQmenuๅฏน่ฑก่ขซ็นๅป
if q.text() == "ๆพๅคง":
if self.img_show_lb_2.isHidden():
curr_pho_addr = (self.current_photo_info[0].split(' ')[1]).split(';')[0]
if curr_pho_addr == self.pho_addr_show_le.text():
self.pho_zoom_in()
else:
self.pho_zoom_in_fr()
else:
pass
elif q.text() == "็ผฉๅฐ":
if self.img_show_lb_2.isHidden():
curr_pho_addr = (self.current_photo_info[0].split(' ')[1]).split(';')[0]
if curr_pho_addr == self.pho_addr_show_le.text():
self.pho_zoom_out()
else:
self.pho_zoom_out_fr()
else:
pass
# def mousePressEvent(self, event):
# if event.button() == Qt.LeftButton:
# # ๅคๆญๆฏๅฆ้ผ ๆ ๅจๆงไปถไธ ๅๆฅๅ็ฐๅ
ถๅฎไธ้่ฆ๏ผๅช้่ฆๆm_flagๅๅงๅ๏ผ
# # if not (self.start_button.underMouse() or self.exit_button.underMouse() or self.change_skin_button.underMouse() or self.get_info_button.underMouse() ):
# self.m_flag = True
# self.m_Position = event.globalPos() - self.pos() # ่ทๅ้ผ ๆ ็ธๅฏน็ชๅฃ็ไฝ็ฝฎ
# event.accept()
# self.setCursor(QCursor(Qt.ClosedHandCursor)) # ๆดๆน้ผ ๆ ๅพๆ
#
# def mouseMoveEvent(self, QMouseEvent):
# if Qt.LeftButton and self.m_flag:
# self.move(QMouseEvent.globalPos() - self.m_Position) # ๆดๆน็ชๅฃไฝ็ฝฎ
# QMouseEvent.accept()
#
# def mouseReleaseEvent(self, QMouseEvent):
# self.m_flag = False
# self.setCursor(QCursor(Qt.ArrowCursor))
# NR_IQA่ฏๅ็บฟ็จ
class My_NR_IQA_Thread(QThread):  # worker thread for NR-IQA scoring
    """Runs a no-reference IQA algorithm off the GUI thread and emits the score."""
    # Emits the computed score, formatted as a string.
    score_signal = pyqtSignal(str)
    def __init__(self):
        super(My_NR_IQA_Thread, self).__init__()
    def setting(self, algorithm, pho_path, eng):
        # Store the job parameters; must be called before start().
        self.algo = algorithm
        self.pho_path = pho_path
        self.eng = eng  # shared MATLAB engine instance
    def run(self):  # executed in the worker thread once start() is called
        algo = goto_nriqa()
        score = algo.run(algorithm_name=self.algo, photo_path=self.pho_path, eng=self.eng)
        # Normalize to a 4-decimal float, then hand back as text via the signal.
        score = np.round(float(str(score)), 4)
        print(score)
        self.score_signal.emit(str(score))
# FR_IQA่ฏๅ็บฟ็จ
class My_FR_IQA_Thread(QThread):  # worker thread for FR-IQA scoring
    """Runs a full-reference IQA algorithm off the GUI thread and emits the score."""
    # Emits the computed score, formatted as a string.
    fr_score_signal = pyqtSignal(str)
    def __init__(self):
        super(My_FR_IQA_Thread, self).__init__()
    def setting(self, algorithm, pho_path_1, pho_path_2):
        # Store the job parameters; must be called before start().
        self.algo = algorithm
        self.pho_path_1 = pho_path_1
        self.pho_path_2 = pho_path_2
    def run(self):  # executed in the worker thread once start() is called
        algo = FR_IQA_method()
        score = algo.get_score(self.algo, self.pho_path_1, self.pho_path_2)
        if self.algo == 'SSIM':
            # SSIM returns a pair of values; format both to 3 decimals.
            score = str(np.round(float(str(score[0])), 3)) + ',' + str(np.round(float(str(score[1])), 3))
        else:
            # Other metrics may return complex-typed results; keep the real part.
            score = np.round(float(str(np.real(score))), 4)
        print(score)
        self.fr_score_signal.emit(str(score))
if __name__ == '__main__':
    # Stand-alone launch of the main IQA window.
    import sys
    app = QApplication(sys.argv)
    window = MainIQAPane()
    window.show()
    sys.exit(app.exec_())
#! /usr/local/bin/python
#-*- coding: utf-8 -*-
__author__ = "Cedric Bonhomme"
__version__ = "$Revision: 0.1 $"
__date__ = "$Date: 2010/10/01 $"
from PIL import Image
def a2bits(chars):
    """
    Convert a string to its bits representation as a string of 0's and 1's.

    Each character contributes exactly eight bits, zero-padded on the left.
    """
    return "".join(format(ord(ch), "08b") for ch in chars)
def bs(s):
    """
    Convert an int to its bits representation as a string of 0's and 1's.

    No padding is applied; iterative rather than recursive.
    """
    if s <= 1:
        return str(s)
    digits = []
    while s > 1:
        digits.append(str(s & 1))
        s >>= 1
    digits.append(str(s))
    return "".join(reversed(digits))
def encode_image(img, message):
    """
    Hide a message (string) in an image with the
    LSB (Least Significant Bit) technique.

    Appends the sentinel '~~~' so the decoder knows where the message ends,
    then embeds three message bits per pixel (one per RGB channel LSB).
    NOTE(review): assumes `img.getpixel` returns a 3-tuple, i.e. an RGB
    image — confirm callers never pass RGBA/paletted images.
    """
    encoded = img.copy()
    width, height = img.size
    index = 0
    # Sentinel marking the end of the hidden message.
    message = message + '~~~'
    message_bits = a2bits(message)
    for row in range(height):
        for col in range(width):
            # Only touch pixels while a whole 3-bit group remains to embed.
            if index + 3 <= len(message_bits) :
                (r, g, b) = img.getpixel((col, row))
                # Convert each channel value to its bit string.
                r_bits = bs(r)
                g_bits = bs(g)
                b_bits = bs(b)
                # Replace (in a list) the least significant bit
                # by the bit of the message to hide.
                list_r_bits = list(r_bits)
                list_g_bits = list(g_bits)
                list_b_bits = list(b_bits)
                list_r_bits[-1] = message_bits[index]
                list_g_bits[-1] = message_bits[index + 1]
                list_b_bits[-1] = message_bits[index + 2]
                # Convert the lists back to bit strings.
                r_bits = "".join(list_r_bits)
                g_bits = "".join(list_g_bits)
                b_bits = "".join(list_b_bits)
                # Convert bit strings back to ints.
                r = int(r_bits, 2)
                g = int(g_bits, 2)
                b = int(b_bits, 2)
                # Save the new pixel.
                encoded.putpixel((col, row), (r, g , b))
                index += 3
    return encoded
def decode_image(img):
    """
    Find a message in an encoded image (with the LSB technique).

    Scans pixels from the bottom-right corner backwards, collecting the
    least significant bit of each RGB channel, and stops when the last
    eight collected bits spell the '~' sentinel byte; returns the decoded
    text without that sentinel, or "" when no sentinel is found.
    NOTE(review): Python 2 code — relies on `xrange` and integer `/`
    division (len(bits)/8); needs porting before running under Python 3.
    NOTE(review): unpacks 4-channel pixels (r, g, b, aux) — confirm the
    input image mode is RGBA.
    """
    width, height = img.size
    bits = ""
    index = 0  # NOTE(review): never used — looks like leftover state
    for row in xrange(height - 1, -1, -1):
        for col in xrange(width - 1, -1, -1):
            r, g, b, aux = img.getpixel((col, row))
            # Collect the LSB of each colour channel.
            bits += bs(r)[-1] + bs(g)[-1] + bs(b)[-1]
            if len(bits) >= 8:
                # Stop as soon as the trailing byte is the '~' sentinel.
                if chr(int(bits[-8:], 2)) == '~':
                    list_of_string_bits = ["".join(list(bits[i*8:(i*8)+8])) for i in range(0, len(bits)/8)]
                    list_of_character = [chr(int(elem, 2)) for elem in list_of_string_bits]
                    return "".join(list_of_character)[:-1]
    return ""
if __name__ == '__main__':
    # Quick manual test: decode the hidden message from map.png.
    img2 = Image.open('map.png')
    print(decode_image(img2))
|
# -*- coding:utf-8 -*-
"""
ะฃัะฐะผัััะปะปัะฝ ั
าฏัะฝัะณั
"""
from django.db import models
# from django.utils import timezone
from django.core.validators import MaxValueValidator, MinValueValidator
from django.urls import reverse_lazy
from src.core import constant as const
from src.core.validate import validate_nonzero
from src.warehouse.models import Warehouse
from src.product.models import Product
from src.customer.models import CustomerCategory, Customer
# ('าฎะฝะดััะฝ าฏะฝััั', True),
# ('ะฅัะผะดะฐััะฐะฝ าฏะฝััั', False),
# ('ะฅัััะณะถาฏาฏะปัั
', True),
# ('ะฅัััะณะถาฏาฏะปัั
ะณาฏะน', False),
# ('ะะฒะฐั
', True),
# ('ะะพะฝัั', False),
class Promotion(models.Model):
    """
    Promotion campaign (translated from the Mongolian original).

    NOTE(review): the Cyrillic verbose_name labels appear mojibake-garbled
    in this copy of the file; they are runtime strings and are therefore
    reproduced verbatim below.
    """
    # --- Basic campaign info: name, active window, ordering ---
    name = models.CharField(verbose_name="ะัั", max_length=256)
    start_date = models.DateTimeField(verbose_name="ะฃัะฐะผัััะปะฐะป ัั
ะปัั
ะพะณะฝะพะพ")
    end_date = models.DateTimeField(verbose_name="ะฃัะฐะผัััะปะฐะป ะดัััะฐั
ะพะณะฝะพะพ")
    # Calculation mode toggle (see const for how True/False is interpreted).
    calculation_type = models.BooleanField(verbose_name="ะขะพะพัะพะพะปะพั
ัำฉัำฉะป", default=True)
    # Application order; Meta.ordering sorts on this first.
    order = models.PositiveIntegerField(verbose_name="ะฅัััะณะถาฏาฏะปัั
ะดะฐัะฐะฐะปะฐะป")
    description = models.TextField(verbose_name="ะขะฐะนะปะฑะฐั", null=True, blank=True)
    ############################################################
    # Kind of promotion, from the shared constants module.
    promotion_type = models.IntegerField(
        verbose_name="ะฃัะฐะผัััะปะปัะฝ ัำฉัำฉะป", choices=const.PROMOTION_TYPE
    )
    ############################################################
    # Which product category the promotion applies to (optional).
    product_type = models.IntegerField(
        verbose_name="ะาฏัััะณะดัั
าฏาฏะฝะด ั
ัััะณะถะธั
", choices=const.PRODUCT_TYPE, null=True
    )
    ############################################################
    # Explicit product list the promotion applies to (optional).
    products = models.ManyToManyField(
        Product, verbose_name="ะาฏัััะณะดัั
าฏาฏะฝ", blank=True, related_name="promotions"
    )
    ############################################################
    # How the promotion is implemented (percent discount, fixed price, ...).
    promotion_implement_type = models.IntegerField(
        verbose_name="ะฃัะฐะผัััะปะฐะป ั
ัััะณะถะธั
ัำฉัำฉะป",
        choices=const.PROMOTION_IMPLEMENT_TYPE,
        null=True,
    )
    # Threshold: promotion triggers once the purchase amount exceeds this.
    above_the_price = models.PositiveIntegerField(
        verbose_name="าฎะฝะธะนะฝ ะดาฏะฝะณััั ะดััั",
        null=True,
        blank=True,
        help_text="ะขัั
ะฐะนะฝ าฏะฝััั ะดััั ั
ัะดะฐะปะดะฐะฝ ะฐะฒะฐะปั ั
ะธะนััะฝ าฏะตะด ััะฐะผัััะปะฐะป ั
ัััะณะถะธะฝั",
    )
    # Percentage discount, bounded to [0, 99.9].
    percent = models.FloatField(
        verbose_name="ะฅัะฒั",
        null=True,
        blank=True,
        validators=[MinValueValidator(0), MaxValueValidator(99.9)],
    )
    # Fixed promotional price (must be positive when used).
    price = models.FloatField(
        verbose_name="าฎะฝั", null=True, blank=True, validators=[MinValueValidator(0.1)],
    )
    # Threshold: promotion triggers above this item count.
    above_the_number = models.IntegerField(
        verbose_name="ะขะพะพะฝะพะพั ะดััั",
        null=True,
        blank=True,
        validators=[MinValueValidator(1)],
    )
    # Supplier customer backing the promotion (optional).
    supplier = models.ForeignKey(
        Customer,
        verbose_name="ะะธะนะปาฏาฏะปัะณั",
        on_delete=models.CASCADE,
        related_name="supplier_promotions",
        null=True,
    )
    # Total quantity cap across the supplier's products.
    quantity = models.PositiveIntegerField(
        verbose_name="ะะฐะณัะธะนะฝ ัะพะพ ั
ัะผะถัั",
        null=True,
        validators=[validate_nonzero],
        help_text="ะัััั
ะธะด ััะณะฐ ะพัััะปัะฝะฐะฐั ะฑะฐะณัะฐะด ั
ะฐะผะฐะฐัะฐั
ะฑาฏัััะณะดัั
าฏาฏะฝาฏาฏะดะธะนะฝ ะฝะธะนั ัะพะพ ั
ัะผะถัั ั
าฏััั
าฏะตะด ััะฐะผัััะปะฐะป ั
ัััะณะถะธะฝั",
    )
    ############################################################
    # Which customers the promotion is applied to (type/category/explicit).
    implement_type = models.IntegerField(
        verbose_name="ะฅะฐัะธะปัะฐะณัะธะด ั
ัััะณะถาฏาฏะปัั
ัำฉัำฉะป", choices=const.IMPLEMENT_TYPE
    )
    customer_categories = models.ManyToManyField(
        CustomerCategory, verbose_name="ะฅะฐัะธะปัะฐะณัะธะนะฝ ัำฉัำฉะป", related_name="promotions",
    )
    customers = models.ManyToManyField(
        Customer, verbose_name="ะฅะฐัะธะปัะฐะณัะธะด", related_name="promotions",
    )
    warehouses = models.ManyToManyField(
        Warehouse, verbose_name="ะะณััะปะฐั
", related_name="promotions",
    )
    # Whether the promotion is currently being applied.
    is_implement = models.BooleanField(
        verbose_name="ะฅัััะณะถาฏาฏะปะฝั/ะฅัััะณะถาฏาฏะปัั
ะณาฏะน", default=True
    )
    ############################################################
    # Standard bookkeeping flags/timestamps.
    is_active = models.BooleanField(verbose_name="ะะดัะฒั
ะธััะน", default=True)
    created_at = models.DateTimeField(verbose_name="าฎาฏัััะฝ ะพะณะฝะพะพ", auto_now_add=True)
    updated_at = models.DateTimeField(verbose_name="ะะฐััะฐะฝ ะพะณะฝะพะพ", auto_now=True)
    class Meta:
        verbose_name = "ะฃัะฐะผัััะปะฐะป"
        verbose_name_plural = "ะฃัะฐะผัััะปะฐะปะปััะด"
        ordering = ["order", "-id"]
    def __str__(self):
        return self.name
    def get_promotion_type(self):
        # Human-readable label for promotion_type (admin column helper).
        return self.get_promotion_type_display()
    get_promotion_type.short_description = "ะฃัะฐะผัััะปะปัะฝ ัำฉัำฉะป"
    def get_implement_type(self):
        # Human-readable label for implement_type (admin column helper).
        return self.get_implement_type_display()
    get_implement_type.short_description = "ะฅะฐัะธะปัะฐะณัะธะด ั
ัััะณะถาฏาฏะปัั
ัำฉัำฉะป"
    def get_date(self):
        # "YYYY-MM-DD - YYYY-MM-DD" span for list displays.
        start_date = self.start_date.strftime("%Y-%m-%d")
        end_date = self.end_date.strftime("%Y-%m-%d")
        return "%s - %s" % (start_date, end_date)
    get_date.short_description = "ะะณะฝะพะพ"
    def get_action(self):
        # Bootstrap dropdown with detail/history/update/delete links,
        # rendered in the admin-style listing; URLs resolved below.
        return """
        <div class = "dropdown">
            <button class = "btn btn-white btn-xs dropdown-toggle" type = "button"
            id = "dropdownMenuButton"
            data-toggle = "dropdown"
            aria-haspopup = "true"
            aria-expanded = "false" >
            <i data-feather = "settings"></i>
            ะขะพั
ะธัะณะพะพ
            </button>
            <div class = "dropdown-menu"
                aria-labelledby = "dropdownMenuButton">
                <a href="javascript:;" class="dropdown-item detailInformation" data-href="{0}">ะัะปะณัััะฝะณาฏะน</a>
                <a href="javascript:;" class="dropdown-item detailInformation" data-href="{1}">ำจำฉััะปำฉะปัะธะนะฝ ัาฏาฏั
</a>
                <a class="dropdown-item" href="{2}">ะะฐัะฐั
</a>
                <a class="dropdown-item" href="javascript:;" data-toggle="deleteAlert" data-href="{3}">ะฃััะณะฐั
</a>
            </div>
        </div>""".format(
            reverse_lazy("employee-promotion-detail", kwargs={"pk": self.pk}),
            reverse_lazy("employee-promotion-history", kwargs={"pk": self.pk}),
            reverse_lazy("employee-promotion-update", kwargs={"pk": self.pk}),
            reverse_lazy("employee-promotion-delete", kwargs={"pk": self.pk}),
        )
class PromotionProduct(models.Model):
    """
    A product attached to a promotion (translated from the Mongolian
    original).  Labels are reproduced verbatim; see Promotion for the
    mojibake note.
    """
    # Parent promotion; rows are removed together with the promotion.
    promotion = models.ForeignKey(
        Promotion,
        verbose_name="ะฃัะฐะผัััะปะฐะป",
        on_delete=models.CASCADE,
        related_name="promotion_products",
    )
    product = models.ForeignKey(
        Product,
        verbose_name="ะาฏัััะณะดัั
าฏาฏะฝ",
        on_delete=models.CASCADE,
        related_name="promotion_products",
    )
    # Quantity limit; validate_nonzero forbids 0 even though null is allowed.
    quantity = models.PositiveIntegerField(
        verbose_name="ะขะพะพ ั
ัะผะถัั", null=True, validators=[validate_nonzero], default=1
    )
    # True = purchased item, False = bonus item.
    is_not_bonus = models.BooleanField(verbose_name="ะะฒะฐั
/ำจะณำฉั
", default=True)
    class Meta:
        verbose_name = "ะฃัะฐะผัััะปะปัะฝ ะฑาฏัััะณะดัั
าฏาฏะฝ"
        verbose_name_plural = "ะฃัะฐะผัััะปะปัะฝ ะฑาฏัััะณะดัั
าฏาฏะฝาฏาฏะด"
    def __str__(self):
        # Display the parent promotion's name rather than the product's.
        return self.promotion.name
|
import sys
previousTries = []
listOfNumbers = sys.stdin.readline().strip().split("\t")
listOfNumbers = list(map(int, listOfNumbers))
controllerList = list(listOfNumbers)
previousTries.append(controllerList)
counter = 0
controller = True
currentList = list(listOfNumbers)
while controller:
maxVal = -1
for i in range(len(currentList)):
if currentList[i] > maxVal:
maxVal = currentList[i]
maxIndex = i
currentList[maxIndex] = 0
while maxVal > 0:
maxIndex = (maxIndex + 1) % len(currentList)
currentList[maxIndex] += 1
maxVal -= 1
counter += 1
for listItem in previousTries:
if currentList == listItem:
controller = False
previousTries.append(list(currentList))
print(counter)
#Task 2
import sys
previousTries = []
listOfNumbers = sys.stdin.readline().strip().split("\t")
listOfNumbers = list(map(int, listOfNumbers))
controllerList = list(listOfNumbers)
previousTries.append(controllerList)
counter = 0
controller = True
currentList = list(listOfNumbers)
while controller:
maxVal = -1
for i in range(len(currentList)):
if currentList[i] > maxVal:
maxVal = currentList[i]
maxIndex = i
currentList[maxIndex] = 0
while maxVal > 0:
maxIndex = (maxIndex + 1) % len(currentList)
currentList[maxIndex] += 1
maxVal -= 1
counter += 1
for listItem in previousTries:
if currentList == listItem:
controller = False
previousTries.append(list(currentList))
counter = 0
found = False
for listItem in previousTries:
if listItem == currentList:
found = True
if found:
counter += 1
print(counter)
|
def load_clean_descriptions(filename):
    """
    Load the image identifiers listed in *filename* and build their
    training captions wrapped in 'sos'/'eos' sequence tokens.

    Relies on the module-level ``load_doc`` helper and ``descriptions``
    dict populated earlier in the file.

    :return: (list of identifiers, dict identifier -> list of tagged captions)

    Fixed: blank lines (e.g. the file's trailing newline) previously
    produced a bogus '' identifier, inflating the dataset count by one
    (the "Dataset: 6001" symptom); duplicate identifiers also appended
    their captions repeatedly.
    """
    train_doc = load_doc(filename)
    train_text = list()
    for line in train_doc.split('\n'):
        if not line:
            continue  # skip empty lines instead of recording '' as an id
        train_text.append(line.split('.')[0])
    train_desc = dict()
    for txt in train_text:
        if txt in descriptions and txt not in train_desc:
            # wrap each description in start/end-of-sequence tokens
            train_desc[txt] = ['sos ' + desc + ' eos' for desc in descriptions[txt]]
    return train_text, train_desc
### Loading training image text file
# NOTE(review): path assumes a Google-Drive mount (Colab environment).
filename = '/content/drive/My Drive/Image Captioning Data/Text Data/Flickr_8k.trainImages.txt'
train_text, train_desc = load_clean_descriptions(filename)
print('Dataset: %d' % len(train_text))
#Dataset: 6001
# NOTE(review): 6001 vs the expected 6000 presumably comes from the blank
# trailing line of the split producing an extra '' identifier — verify.
def load_clean_descriptions_test(filename):
    """
    Load identifiers and their UNtagged captions for validation/test sets
    (unlike load_clean_descriptions, no 'sos'/'eos' wrapping).

    Relies on the module-level ``load_doc`` helper and ``descriptions`` dict.

    :return: (list of identifiers, dict identifier -> list of captions)

    Fixed: blank lines previously produced a bogus '' identifier and
    duplicate identifiers appended their captions repeatedly.
    """
    train_doc = load_doc(filename)
    train_text = list()
    for line in train_doc.split('\n'):
        if not line:
            continue  # skip empty lines instead of recording '' as an id
        train_text.append(line.split('.')[0])
    train_desc = dict()
    for txt in train_text:
        if txt in descriptions and txt not in train_desc:
            train_desc[txt] = list(descriptions[txt])
    return train_text, train_desc
# Loading validation descriptions
# Loading val_image text file
filename = '/content/drive/My Drive/Image Captioning Data/Text Data/Flickr_8k.devImages.txt'
val_text, val_desc = load_clean_descriptions_test(filename)
print('Dataset: %d' % len(val_text))
# Loading test descriptions
# Loading test_image text file
filename = '/content/drive/My Drive/Image Captioning Data/Text Data/Flickr_8k.testImages.txt'
test_text, test_desc = load_clean_descriptions_test(filename)
print('Dataset: %d' % len(test_text))
|
import os
from setuptools import setup, find_packages
# Read the long description from the README next to this setup.py.
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst')) as f:
    README = f.read()
# Package metadata for datashare-preview.
# NOTE(review): classifiers mention both FastAPI and a WSGI application, and
# entry_points uses 'paste.app_factory' — confirm which server stack is
# actually intended.
setup(name='datashare-preview',
      version='1.1.0',
      description="App to show document previews with a backend Elasticsearch",
      long_description=README,
      classifiers=[
        "Programming Language :: Python",
        "Framework :: FastAPI",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
        ],
      keywords='icij, elasticsearch, preview',
      author='Pierre Romera, Bruno Thomas',
      author_email='promera@icij.org, bthomas@icij.org',
      url='https://github.com/ICIJ/datashare-preview',
      license='LICENSE',
      packages=find_packages(exclude=("*.tests", "*.tests.*", "tests.*", "tests", "*.test_utils")),
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          'preview-generator==0.29',
          'pygelf==0.3.6',
          'fastapi',
          'pydantic',
          'aiofiles',
          'fastapi-utils',
          'httpx==0.23.0',
          'uvicorn[standard]',
      ],
      extras_require={
        'dev': [
            'bumpversion==0.5.3',
            'respx',
            'nose',
            'requests'
        ],
      },
      test_suite="nose.collector",
      entry_points={
        'paste.app_factory': [
            'main = dspreview.main:app',
        ],
      })
|
import pandas as pd
import tushare as ts
import datetime
import time
from datetime import date
from matplotlib.dates import drange
# Set up token
# only run this line for the 1st time or when needed:
# ts.set_token("a2ecd994e3833787987ca0fc216ee1cfe42e895fd37634c21b0b322b")
# NOTE(review): the API token above is committed in source — rotate it and
# load it from the environment instead of the repository.
# Save files to user-specified filepath
filepath = "D:\\Yangze_Investment\\Tushare_Pro_Data\\"
subpath = ["stock_series_by_date_ex\\",
"stock_series_by_date_adjust\\"
]
# Retrieve fundamental information on LISTED ("L") stocks
data_api = ts.pro_api()
stock_list_pro = data_api.stock_basic(exchange="", list_status="L",
fields="ts_code, symbol, name, area, industry, list_date")
stock_ts_code = stock_list_pro["ts_code"]
# code for all stocks
# create dates list
# count one day forward from today in order to get the dates list from start date up to "today"
end = datetime.date.today() + datetime.timedelta(days=1)
start = date(2019, 8, 12) # set date(YYYY, M, D) as the start date for data retrival
delta = datetime.timedelta(days=1) # set increment as one day
float_date_list = drange(start, end, delta)
date_list = []
for day in range(len(float_date_list)):
    # create a dates list with YYYYMMDD date format
    date_list.append(date.fromordinal(int(float_date_list[day])).strftime("%Y%m%d"))
time_elapse_list_ex = [] # runtime recorder
start_time_overall = datetime.datetime.now() # starting time for all stocks
# NOTE(review): the loop variable `date` below shadows the imported `date`
# class; this only works because date.fromordinal is not needed past this
# point — rename the variable if that changes.
for date in range(len(date_list)):
    daily_series_concat = pd.DataFrame()
    # NOTE(review): range(3) limits retrieval to the first 3 stocks —
    # presumably a debugging cap; confirm before a production run.
    for index in range(3):
        start_time_each = datetime.datetime.now() # starting time for individual stocks
        # api for daily prices
        daily_series = data_api.daily(ts_code=stock_ts_code[index], start_date=date_list[date], end_date=date_list[date])
        # append data from each stock together to generate data on all stocks for a given date
        daily_series_concat = pd.concat([daily_series_concat, daily_series])
        end_time_each = datetime.datetime.now() # end time for individual stocks
        print(f"{stock_ts_code[index]} " + f"{end_time_each - start_time_each}")
        # store runtime for individual stock
        time_elapse_list_ex.append(f"{stock_ts_code[index]} " + f"{end_time_each - start_time_each}")
        # KEY: the program will hit the retrieval restriction (200 times/minute) without this sleep time
        time.sleep(0.5)
    daily_series_df = pd.DataFrame(daily_series_concat)
    daily_series_df.to_csv(filepath + subpath[0] + date_list[date] + "_series_all_stocks_ex.csv",
    index=False, header=True)
    print("*" * 12 + " " + f"data for {date_list[date]}" + " " + "*" * 12)
    print("*" * 43)
end_time_overall = datetime.datetime.now()
print(f"Overall runtime for {len(stock_ts_code)} listed stocks from {date_list[0]} to {date_list[-1]} " + f"{end_time_overall - start_time_overall}")
# create a csv file recording runtime for individual and all stocks
time_elapse_list_ex.append(f"Overall runtime for {len(stock_ts_code)} listed stocks from {date_list[0]} to {date_list[-1]} " + f"{end_time_overall - start_time_overall}")
time_elapse_list_ex_df = pd.DataFrame(time_elapse_list_ex, columns=["runtime in seconds"])
time_elapse_list_ex_df.to_csv(filepath + subpath[0] + "time_elapse_daily_listed_by_date_ex.csv", header=True)
# Retrieve fundamental information on DELISTED ("D") stocks
# data_api = ts.pro_api()
# stock_list_pro = data_api.stock_basic(exchange="", list_status="D",
# fields="ts_code, symbol, name, area, industry, list_date")
# stock_ts_code = stock_list_pro["ts_code"]
|
# -*- coding: utf-8 -*-
"""
Created on 05 February, 2018 @ 10:42 PM
@author: Bryant Chhun
email: bchhun@gmail.com
Project: BayLabs
License:
"""
import numpy as np
from scipy.interpolate import LinearNDInterpolator as plinear
def scale_contour(x, y, z, space_x, space_y, space_z):
    """
    Rescale contour coordinates into voxel units by their axis spacings.

    The spacing values must be parsed out from the .mhd image files,
    NOT the .vtk meshes.

    :return: tuple of (x, y, z) divided element-wise by their spacings
    """
    scaled_x = x / space_x
    scaled_y = y / space_y
    scaled_z = z / space_z
    return scaled_x, scaled_y, scaled_z
def downsample_contour(x, y, z):
    '''
    Round each coordinate value to the nearest integer;
    necessary for assignment to a 3d array.

    Fixed: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24 —
    the builtin ``int`` (alias of the old np.int) is used instead.

    :param x: iterable of x coordinates
    :param y: iterable of y coordinates
    :param z: iterable of z coordinates
    :return: tuple of three integer numpy arrays
    '''
    x = np.asarray(list(map(round, x)), dtype=int)
    y = np.asarray(list(map(round, y)), dtype=int)
    z = np.asarray(list(map(round, z)), dtype=int)
    return x, y, z
def contour_to_mask(x, y, z, width, height, zdepth):
    '''
    Build a volume mask from contour points: 1 inside the convex hull of
    the points, 0 outside.  To be applied before downsample_contour.

    Uses scipy's LinearNDInterpolator over constant value 1 with fill
    value 0, so evaluating any voxel inside the hull yields 1.

    Fixed: the original evaluated the interpolator one voxel at a time in
    a Python triple loop (~20-30 min per 200^3 volume); the interpolator
    accepts an (N, 3) array of query points, so the whole grid is now
    evaluated in a single vectorized call.

    :param x: array of vtk x coords
    :param y: array of vtk y coords
    :param z: array of vtk z coords
    :param width: target image width
    :param height: target image height
    :param zdepth: target image zdepth
    :return: binary mask np volume of shape (zdepth, height, width)
    '''
    coords = np.array(list(zip(z, y, x)))
    values = np.ones(shape=(coords.shape[0]))
    vtk_lp = plinear(coords, values, 0)
    # Evaluate every voxel center (z, y, x) in one call.
    zz, yy, xx = np.meshgrid(np.arange(zdepth), np.arange(height),
                             np.arange(width), indexing='ij')
    grid_points = np.column_stack((zz.ravel(), yy.ravel(), xx.ravel()))
    coord_array = vtk_lp(grid_points).reshape(zdepth, height, width)
    print('new mask')
    return coord_array
# import pytest
from loadmatlab_workspace import load_mat

# Load the saved MATLAB workspace fixture and pull out the struct under test.
before=load_mat("before-updateseries-nopinone-unsure")
s=before['s']
def comparinginput(python_in):
    """Identity pass-through used by the self-comparison test below."""
    result = python_in
    return result
def test_answer():
    # Fixed: the original used a single '=' (assignment), which is a
    # SyntaxError inside an assert expression; equality needs '=='.
    # Sanity check: the loaded workspace struct compares equal to itself.
    assert comparinginput(s) == s
def add_total(n):
    """Return the sum of the integers 0..n inclusive (0 when n < 0)."""
    return sum(range(n + 1))
def mul_total(n):
    """Multiply the module-level accumulator ``g_mul`` by 1*2*...*n (n!)."""
    global g_mul
    for factor in range(1, n + 1):
        g_mul *= factor
# Read n from stdin, then print the 0..n sum and n! (accumulated in g_mul).
n=int(input())
g_mul=1
mul_total(n)
print("add_total():", add_total(n))
print("gMul:", g_mul)
from .resolver import Pushrod, pushrod_view
from .renderers import UnrenderedResponse
from . import renderers, resolver
|
# 1486. Janghun's Tall Shelf, SWEA D4 (comment translated from Korean)
# https://swexpertacademy.com/main/code/problem/problemDetail.do?contestProbId=AV2b7Yf6ABcBBASw&categoryId=AV2b7Yf6ABcBBASw&categoryType=CODE
# An explicit stack performs better here than enumerating binary subsets
# (comment translated from Korean).
for TC in range(1, int(input()) + 1):
    # n clerks with heights t; find the minimal stacked height >= shelf b.
    n, b = map(int, input().split())
    t = list(map(int, input().split()))
    a = [False] * n
    # Stack entries: (selection flags, next index to decide, height so far).
    stack = [(a[:], 1, 0)]
    a[0] = True
    stack.append((a, 1, t[0]))
    result = 100000
    while stack:
        flag, i, count = stack.pop()
        # Prune: already tall enough — keep the lowest qualifying height.
        if count >= b:
            if result > count:
                result = count
            continue
        if i == n:
            continue
        # Branch: exclude clerk i (copy flags) or include clerk i (in place).
        stack.append((flag[:], i + 1, count))
        flag[i] = True
        stack.append((flag, i + 1, count + t[i]))
    # Answer is the overshoot above the shelf height.
    print("#{} {}".format(TC, result-b))
# method : binary subset
# for TC in range(1, int(input()) + 1):
#     n, b = map(int, input().split())
#     t = list(map(int, input().split()))
#     result = 100000
#     for i in range(1 << n):
#         count = 0
#         for j in range(n):
#             if i & (1 << j):
#                 count += t[j]
#                 if count >= b:
#                     if result > count:
#                         result = count
#                     break
#     print("#{} {}".format(TC, result - b))
|
# Enable matplotlib interactive mode so figures update as the script runs.
pylab.ion()
def cumprobdist(ax, data, xmax=None, plotArgs=None):
    """
    Plot the empirical cumulative probability distribution of *data* on *ax*.

    :param ax: matplotlib axes to draw on
    :param data: 1-d array-like of sample values
    :param xmax: right edge of the step curve; defaults to max(data) and is
        clamped up to max(data) (with a warning) if given smaller than that
    :param plotArgs: optional dict of keyword arguments forwarded to ax.plot
    :return: the created matplotlib line object

    Fixed: the original used a mutable default argument (plotArgs={});
    a None sentinel avoids the shared-dict pitfall while keeping the
    call signature backward-compatible.
    """
    if plotArgs is None:
        plotArgs = {}
    if xmax is None:
        xmax = numpy.max(data)
    elif xmax < numpy.max(data):
        warnings.warn('value of xmax lower than maximum of data')
        xmax = numpy.max(data)
    num_points = len(data)
    # Duplicate each sample so the curve steps at every data point.
    X = numpy.concatenate(([0.0], data, data, [xmax]))
    X.sort()
    X = X[-1::-1]  # reversed: the curve is drawn right-to-left
    Y = numpy.concatenate(([0.0], numpy.arange(num_points),
                           numpy.arange(num_points) + 1, [num_points])) / num_points
    Y.sort()
    line = ax.plot(X, Y, **plotArgs)
    return line[0]
# Per-fly analysis/plots of heading-angle statistics.
# NOTE(review): `ada`, `ds`, `mva`, `flies` and `labels` must be defined
# earlier (not visible in this fragment) — presumably masked arrays of
# abs-diff angles, stop indicators and angle variances per fly/trial.
PLOTCOLORS = ['m','k','c','r','g','b']
fig1 = pylab.figure()
ax1 = fig1.add_subplot(111)
fig2 = pylab.figure()
ax2 = fig2.add_subplot(111)
fig3 = pylab.figure()
ax3 = fig3.add_subplot(111)
# NOTE(review): [[]]*len(flies) repeats the SAME list object; harmless here
# because every slot is reassigned below, but fragile if ever mutated.
meanAbsDiffAngles,meanVarianceAngles,numTrialsStopped = [[]]*len(flies),[[]]*len(flies),[[]]*len(flies)
for i, ad in enumerate(ada):
    # Mask out trials where the fly stopped (ds > 0).
    ad[ds[i]>0] = numpy.ma.masked
    #ad[mva[i]>.5] = numpy.ma.masked
    meanAbsDiffAngles[i] = ad.mean(axis=1)
    mva[i][ds[i]>0] = numpy.ma.masked
    #mva[i][mva[i]>.5] = numpy.ma.masked
    meanVarianceAngles[i] = mva[i].mean(axis=1)
    numTrialsStopped[i] = sum(ds[i].data>0,axis=1) #CHECK
    # Drop flies that stopped in more than 2 trials.
    meanAbsDiffAngles[i][numTrialsStopped[i]>2] = numpy.ma.masked
    meanVarianceAngles[i][numTrialsStopped[i]>2] = numpy.ma.masked
    if meanVarianceAngles[i].compressed().size > 0:
        asc = ax1.scatter(meanAbsDiffAngles[i],meanVarianceAngles[i],color=PLOTCOLORS[i])
        asc.set_label(labels[i])
        plotArgs = dict(color=PLOTCOLORS[i])
        line = cumprobdist(ax2,meanVarianceAngles[i].compressed(),1.4,plotArgs=plotArgs)
        line.set_label(labels[i])
        line = cumprobdist(ax3,meanAbsDiffAngles[i].compressed(),180,plotArgs=plotArgs)
        line.set_label(labels[i])
ax1.legend(loc='lower right')
ax1.set_ylabel('average anglular variance')
ax1.set_xlabel('abs diff angle')
ax2.legend(loc='upper right')
ax2.set_ylim((-.1,1.1))
ax2.set_xlabel('average anglular variance')
ax2.set_ylabel('fraction of flies')
ax3.legend(loc='upper right')
ax3.set_ylim((-.1,1.1))
ax3.set_xlabel('average difference in heading (pol unrotated - pol rotated) degrees')
ax3.set_ylabel('fraction of flies')
# Box plots of the same per-fly statistics.
fig4 = pylab.figure()
ax4 = fig4.add_subplot(111)
ax4.boxplot([mada.compressed() for mada in meanAbsDiffAngles])
ax4.set_xticklabels(labels)
ax4.set_ylabel('abs diff angle')
fig5 = pylab.figure()
ax5 = fig5.add_subplot(111)
ax5.boxplot([mVa.compressed() for mVa in meanVarianceAngles])
ax5.set_xticklabels(labels)
ax5.set_ylabel('var angle')
# Clear the masks applied above so the arrays can be reused downstream.
for i, ad in enumerate(ada):
    ad.mask = np.ma.nomask
    mva[i].mask = np.ma.nomask
|
# from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
import MalmoPython
import json
import logging
import os
import random
import sys
import time
from string import Template
class UserAgent(object):
    """User Agent for discrete state/action spaces.

    Wraps a Malmo agent_host: sends discrete movement commands and parses
    grid observations from the world state into a 3x3x3 block neighborhood.
    """
    def __init__(self):
        # agent_host is injected later by run().
        self.agent_host = None
        self.logger = logging.getLogger(__name__)
        if False: # True if you want to see more information
            self.logger.setLevel(logging.DEBUG)
        else:
            self.logger.setLevel(logging.INFO)
        # Replace any inherited handlers with a single stdout handler.
        self.logger.handlers = []
        self.logger.addHandler(logging.StreamHandler(sys.stdout))
        # The discrete action set available to the agent.
        self.actions = ["north", "south", "west", "east"]
    def move_direction(self, command):
        """moves the agent in the direction given"""
        d = {"north": "movenorth 1", "south": "movesouth 1", "west": "movewest 1", "east": "moveeast 1"}
        self.try_command(d[command])
    def position_change(self, command):
        """returns the coordinate position change for a given direction"""
        # Minecraft convention: north is -z, south is +z, west is -x, east is +x.
        d = {"north": [0, 0, -1], "south": [0, 0, 1], "west": [-1, 0, 0], "east": [1, 0, 0]}
        return d[command]
    def turn_right(self):
        # Rotate the agent 90 degrees clockwise.
        self.try_command("turn 1")
    def turn_left(self):
        # Rotate the agent 90 degrees counter-clockwise.
        self.try_command("turn -1")
    def move_north(self):
        self.try_command("movenorth 1")
    def move_south(self):
        self.try_command("movesouth 1")
    def move_west(self):
        self.try_command("movewest 1")
    def move_east(self):
        self.try_command("moveeast 1")
    def try_command(self, command):
        # Send a raw Malmo command, logging (not raising) on failure.
        try:
            self.agent_host.sendCommand(command)
        except RuntimeError as e:
            self.logger.error("Failed to send command: %s \n %s" % (command, e))
    def get_coordinates_from_state_info(self, info):
        # Truncate the float positions from the observation to block coords.
        return [int(info['XPos']), int(info['YPos']), int(info['ZPos'])]
    def take_action(self, position, world_info):
        # Hook for subclasses: decide an action given the agent position and
        # the parsed 3x3x3 block neighborhood.
        pass
    def act(self, world_state):
        """take 1 action in response to the current world state"""
        obs_text = world_state.observations[-1].text
        # print(obs_text)
        obs = json.loads(obs_text)
        self.logger.debug(obs)
        if not 'XPos' in obs or not 'ZPos' in obs:
            self.logger.error("Incomplete observation received: %s" % obs_text)
            return 0
        current_s = "%d:%d" % (int(obs['XPos']), int(obs['ZPos']))
        self.logger.debug("State: %s (x = %.2f, z = %.2f)" % (current_s, float(obs['XPos']), float(obs['ZPos'])))
        # Reshape the flat 27-element observation area into a 3x3x3 nested
        # list: block_list[layer][row][column].
        observation_list = obs["observationarea"]
        block_list = []
        for i in range(0, len(observation_list), 9):
            block_list.append([])
            for j in range(i, i + 9, 3):
                block_list[i // 9].append([])
                for k in range(j, j + 3):
                    # print(i // 9, (j % 9) // 3, k)
                    # print(block_list)
                    block_list[i // 9][(j % 9) // 3].append(observation_list[k])
        self.take_action(self.get_coordinates_from_state_info(obs), block_list)
        time.sleep(0.1)
        # return current_r
    def run(self, agent_host):
        """run the agent on the world"""
        self.agent_host = agent_host
        total_reward = 0
        # main loop:
        world_state = self.agent_host.getWorldState()
        while world_state.is_mission_running:
            time.sleep(0.1)
            # Only act when a non-empty observation has arrived.
            if len(world_state.observations) > 0 and not world_state.observations[-1].text=="{}":
                self.act(world_state)
            for reward in world_state.rewards:
                total_reward += reward.getValue()
            world_state = self.agent_host.getWorldState()
        for reward in world_state.rewards:
            total_reward += reward.getValue()
        # process final reward
        self.logger.debug("Final reward: %d" % total_reward)
        return total_reward
|
#!/usr/bin/env python3
# coding=utf-8
#
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from dataclasses import dataclass
from enum import Enum
__all__ = ["CaseResult", "SuiteResult", "ResultCode"]
class ResultCode(Enum):
    """Outcome codes reported for test cases and suites."""
    UNKNOWN = -1010
    SUCCESS = 0
    FAILED = 1
    SKIPPED = 2


@dataclass
class CaseResult:
    """Result of a single test case.

    Fixed: the original attributes had no type annotations, so @dataclass
    generated no fields at all — the values were plain, shared class
    attributes.  Annotated fields give each instance its own state and a
    proper generated __init__/__repr__/__eq__ (all defaults kept, so
    no-argument construction still works).
    """
    case_id: str = ""
    code: int = ResultCode.UNKNOWN.value
    test_name: "str | None" = None
    test_class: "str | None" = None
    stacktrace: str = ""
    run_time: int = 0
    is_completed: bool = False

    def is_running(self):
        """A case is running once it has a name but is not yet completed."""
        return self.test_name is not None and not self.is_completed


@dataclass
class SuiteResult:
    """Aggregated result of a test suite (same annotation fix as CaseResult)."""
    suite_id: str = ""
    code: int = ResultCode.UNKNOWN.value
    suite_name: "str | None" = None
    test_num: int = 0
    stacktrace: str = ""
    run_time: int = 0
    is_completed: bool = False
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-import unittest
import unittest
import json
from signature import MTSigner
class TestSignature(unittest.TestCase):
    """Checks MTSigner against the key/text/signature samples in sample.txt."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testSign(self):
        """Every sample mapping's stored 'sign' must match MTSigner's output."""
        # Fixed: use a context manager so the handle is closed even on a
        # read/parse error, and stop shadowing the builtin `str`.
        with open("../sample.txt", "r") as sample_file:
            sample = json.loads(sample_file.read())
        signer = MTSigner(sample['key'].encode('utf-8'))
        for _map in sample['maps']:
            self.assertEqual(_map['sign'], signer.sign(_map['text'].encode('utf-8')))
|
import numpy as np
import pandas as pd
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import DictVectorizer
# Data loading (comments translated from the garbled Chinese originals)
train = pd.read_csv('./train.csv')
test = pd.read_csv('./test.csv')
# Fill missing ages with the column mean
train['Age'].fillna(train['Age'].mean(), inplace=True)
test['Age'].fillna(test['Age'].mean(), inplace=True)
# Fill missing fares with the mean fare
test['Fare'].fillna(test['Fare'].mean(), inplace=True)
# Fill missing embarkation ports with the training set's most frequent port.
# Fixed: value_counts().reset_index()['index'][0] breaks on pandas >= 2.0
# (the reset column is no longer named 'index'); idxmax() returns the same
# most-frequent label version-independently.
train['Embarked'].fillna(train['Embarked'].value_counts().idxmax(), inplace=True)
test['Embarked'].fillna(train['Embarked'].value_counts().idxmax(), inplace=True)
# Feature selection
features = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
train_features = train[features]
test_features = test[features]
train_label = train['Survived']
# One-hot encode the categorical columns.
# Fixed: DataFrame.to_dict's orient must be 'records' (plural); 'record'
# raises ValueError on current pandas.
dvec = DictVectorizer(sparse=False)
train_features = dvec.fit_transform(train_features.to_dict(orient='records'))
test_features = dvec.transform(test_features.to_dict(orient='records'))
# Average CV score on the training set was: 0.8462620048961144
exported_pipeline = GradientBoostingClassifier(learning_rate=0.1, max_depth=5, max_features=0.55, min_samples_leaf=5, min_samples_split=3, n_estimators=100, subsample=0.7000000000000001)
exported_pipeline.fit(train_features, train_label)
results = exported_pipeline.predict(test_features)
import sys


def split_includes(lines):
    """Split source lines into include names and remaining code lines.

    A line of the exact form ``include <name>`` (two whitespace-separated
    tokens after stripping) names a library to inline; every other line is
    kept verbatim as code.

    :param lines: iterable of source lines (with their newlines)
    :return: (list of library names, list of code lines)
    """
    inc_libs = []
    code_lines = []
    for line in lines:
        tokens = line.strip().split()
        if len(tokens) == 2 and tokens[0] == "include":
            inc_libs.append(tokens[1])
        else:
            code_lines.append(line)
    return inc_libs, code_lines


def main():
    """Inline `include`d .plt library files into the target script.

    Reads the file named on the command line, backs up the original to
    <name>_tmp, and overwrites the file with the library code prepended.

    Fixed: all file handles are now closed via context managers (the
    original leaked them on any error) and the work no longer runs as a
    side effect of import.
    """
    path = sys.argv[1]
    with open(path) as src:
        source_lines = src.readlines()
    inc_libs, code_lines = split_includes(source_lines)
    # Gather all the library code to be attached.
    lib_code = []
    for lib in inc_libs:
        with open("library/" + lib + ".plt") as lib_file:
            lib_code += lib_file.readlines()
    # Save the original code in the .plt_tmp backup file.
    with open(path + '_tmp', 'w') as backup:
        backup.writelines(source_lines)
    # Overwrite the original with libraries + remaining code.
    with open(path, 'w') as out:
        out.writelines(lib_code + code_lines)


if __name__ == "__main__":
    main()
|
import enum
class ContainerStatus(enum.Enum):
    """Docker container lifecycle states (values match the Docker API strings)."""
    CREATED = 'created'
    RESTARTING = 'restarting'
    RUNNING = 'running'
    PAUSED = 'paused'
    EXITED = 'exited'
    DEAD = 'dead'

    @staticmethod
    def from_str(status):
        """Return the member for a Docker status string, or None if unknown.

        Replaces the hand-rolled if-chain with Enum's built-in value
        lookup; unknown strings still yield None (not ValueError) to keep
        the original contract.
        """
        try:
            return ContainerStatus(status)
        except ValueError:
            return None
class Operation(enum.Enum):
    """Operations that can be requested on a container."""
    START_CONTAINER = 'op_start_container'
    STOP_CONTAINER = 'op_stop_container'
class DockerEntity(enum.Enum):
    """Kinds of Docker objects the system manages."""
    IMAGE = 'image'
    CONTAINER = 'container'
    NETWORK = 'network'
|
import logging
from bunch import Bunch
from django.http import JsonResponse
from rest_framework.decorators import api_view
from fof.model.model import OfflineTaskModel
from fof.service import logic_processor, manager_service
from fof.service import offline_score_service
from util import uuid_util
from util.bus_const import TaskModel
from util.exception.biz_error_handler import Error
from util.sys_constants import LOGGER_NAME, OffLineView, convert_to_dict
from util.thread_tool import ThreadTool
logger = logging.getLogger(LOGGER_NAME)
@api_view(['POST'])
def compute_manager_product(request, format=None):
"""
่ฎก็ฎๅบ้็ป็็ฎก็็ไบงๅไฟกๆฏ
:param request:
:return:
"""
uuid = uuid_util.gen_uuid()
model = OfflineTaskModel(TaskModel.jinglichanpin, manager_service.compute_manager_product, request, uuid)
ThreadTool.pool.submit(logic_processor.doLogic, (model,))
view = OffLineView(uuid)
return JsonResponse(convert_to_dict(view))
@api_view(['POST'])
def equ_timing(request):
"""
ๅบ้็ป็่ก็ฅจๆฉๆถ่ฝๅ่ฏไปทๆจกๅ
่ฏทไบๆฏๅญฃ็ปๆๅ็็ฌฌไธไธชๆ็15ๆฅๅผๅง่ฟ่กๆฌ็จๅบ(ๅณๅบ้ๅญฃๆฅๅๅธ)๏ผๆๆฅๆดๆฐ๏ผ่ฟ่ก่ณ่ฏฅๆๆซ
ๅฆ1ๅญฃๅบฆ็ปๆๅ๏ผไบ4ๆ15ๆฅ~4ๆ30ๆฅๆฏๆฅๆดๆฐ่ฏฅๆฐๆฎ
:param request:
:return:
"""
uuid = uuid_util.gen_uuid()
model = OfflineTaskModel(TaskModel.jingliNengli, manager_service.equ_timing, request, uuid)
ThreadTool.pool.submit(logic_processor.doLogic, (model,))
view = OffLineView(uuid)
return JsonResponse(convert_to_dict(view))
@api_view(['POST'])
def industry_config_indust(request):
"""
็ฆป็บฟ่ฎก็ฎๅบ้็ป็่กไธ้
็ฝฎ่ฝๅ
:keyword ่กจ fof_fund_stock_industry
่ฏทไบๆฏๅญฃ็ปๆๅ็็ฌฌไธไธชๆ็15ๆฅๅผๅง่ฟ่กๆฌ็จๅบ(ๅณๅบ้ๅญฃๆฅๅๅธ)๏ผๆๆฅๆดๆฐ๏ผ่ฟ่ก่ณ่ฏฅๆๆซ
ๅฆ1ๅญฃๅบฆ็ปๆๅ๏ผไบ4ๆ15ๆฅ~4ๆ30ๆฅๆฏๆฅๆดๆฐ่ฏฅๆฐๆฎ
:param request:
:return:
"""
uuid = uuid_util.gen_uuid()
model = OfflineTaskModel(TaskModel.jingliPeizhiNengli_stock, manager_service.industry_config_indust, request, uuid)
ThreadTool.pool.submit(logic_processor.doLogic, (model,))
view = OffLineView(uuid)
return JsonResponse(convert_to_dict(view))
@api_view(['POST'])
def industry_config_score(request):
"""
็ฆป็บฟ่ฎก็ฎๅบ้็ป็่กไธ้
็ฝฎ่ฝๅ
:keyword ่กจ fof_fund_industry_score
่ฏทไบๆฏๅญฃ็ปๆๅ็็ฌฌไธไธชๆ็15ๆฅๅผๅง่ฟ่กๆฌ็จๅบ(ๅณๅบ้ๅญฃๆฅๅๅธ)๏ผๆๆฅๆดๆฐ๏ผ่ฟ่ก่ณ่ฏฅๆๆซ
ๅฆ1ๅญฃๅบฆ็ปๆๅ๏ผไบ4ๆ15ๆฅ~4ๆ30ๆฅๆฏๆฅๆดๆฐ่ฏฅๆฐๆฎ
:param request:
:return:
"""
uuid = uuid_util.gen_uuid()
model = OfflineTaskModel(TaskModel.jingliPeizhiNengli_score, manager_service.industry_config_score, request,
uuid)
ThreadTool.pool.submit(logic_processor.doLogic, (model,))
view = OffLineView(uuid)
return JsonResponse(convert_to_dict(view))
@api_view(['POST'])
def industry_config_avgscore(request):
"""
็ฆป็บฟ่ฎก็ฎๅบ้็ป็่กไธ้
็ฝฎ่ฝๅ
:keyword ่กจ fof_fund_industry_avgscore
่ฏทไบๆฏๅญฃ็ปๆๅ็็ฌฌไธไธชๆ็15ๆฅๅผๅง่ฟ่กๆฌ็จๅบ(ๅณๅบ้ๅญฃๆฅๅๅธ)๏ผๆๆฅๆดๆฐ๏ผ่ฟ่ก่ณ่ฏฅๆๆซ
ๅฆ1ๅญฃๅบฆ็ปๆๅ๏ผไบ4ๆ15ๆฅ~4ๆ30ๆฅๆฏๆฅๆดๆฐ่ฏฅๆฐๆฎ
:param request:
:return:
"""
uuid = uuid_util.gen_uuid()
model = OfflineTaskModel(TaskModel.jingliPeizhiNengli_avgscore, manager_service.industry_config_avgscore, request,
uuid)
ThreadTool.pool.submit(logic_processor.doLogic, (model,))
view = OffLineView(uuid)
return JsonResponse(convert_to_dict(view))
# ็ญ้่ฝๅ
@api_view(['POST'])
def return_total(request):
"""
ๅบ้็ป็่ก็ฅจ็ญ้่ฝๅ
table: fof_fund_excess_return_total
:param request:
:return:
"""
uuid = uuid_util.gen_uuid()
model = OfflineTaskModel(TaskModel.jingliShaixuanNengli_return_total, manager_service.return_total, request,
uuid)
ThreadTool.pool.submit(logic_processor.doLogic, (model,))
view = OffLineView(uuid)
return JsonResponse(convert_to_dict(view))
@api_view(['POST'])
def return_weight(request):
"""
ๅบ้็ป็่ก็ฅจ็ญ้่ฝๅ
fof_fund_excess_return_weight
:param request:
:return:
"""
uuid = uuid_util.gen_uuid()
model = OfflineTaskModel(TaskModel.jingliShaixuanNengli_return_weight, manager_service.return_weight, request,
uuid)
ThreadTool.pool.submit(logic_processor.doLogic, (model,))
view = OffLineView(uuid)
return JsonResponse(convert_to_dict(view))
@api_view(['POST'])
def return_(request):
"""
ๅบ้็ป็่ก็ฅจ็ญ้่ฝๅ
table: fof_fund_main_stock_return
:param request:
:return:
"""
uuid = uuid_util.gen_uuid()
model = OfflineTaskModel(TaskModel.jingliShaixuanNengli_return, manager_service.return_, request,
uuid)
ThreadTool.pool.submit(logic_processor.doLogic, (model,))
view = OffLineView(uuid)
return JsonResponse(convert_to_dict(view))
@api_view(['POST'])
def return_his(request):
"""
ๅบ้็ป็่ก็ฅจ็ญ้่ฝๅ
table: fof_fund_main_stock_return_his
:param request:
:return:
"""
uuid = uuid_util.gen_uuid()
model = OfflineTaskModel(TaskModel.jingliShaixuanNengli_return_his, manager_service.return_his, request,
uuid)
ThreadTool.pool.submit(logic_processor.doLogic, (model,))
view = OffLineView(uuid)
return JsonResponse(convert_to_dict(view))
@api_view(['POST'])
def net_value(request):
"""
ๅบ้ๅๅผ้ฃๆ ผๅๅ fof_fundnav_style
่่ๅฐๆๅกๅจ็ๆฟ่ฝฝ่ฝๅ๏ผ่ฏฅ็จๅบๅๆๅฏๆฏๅจๆดๆฐ๏ผๅ็ปญๆๅกๅจ่ฟ่ฝฝ่ฝๅๅ ๅคง๏ผๅฏๆนไธบๆฏๆฅๆดๆฐ
:param request:
:return:
"""
uuid = uuid_util.gen_uuid()
model = OfflineTaskModel(TaskModel.jinglifengge_profit_style, manager_service.net_value, request,
uuid)
ThreadTool.pool.submit(logic_processor.doLogic, (model,))
view = OffLineView(uuid)
return JsonResponse(convert_to_dict(view))
@api_view(['POST'])
def hand_turn_over(request):
    """Kick off the offline job that fills table fof_fund_stock_porfolio
    (holding concentration and turnover analysis).

    Scheduling note from the original author: intended to run twice a year
    (Mar 20-31 and Aug 20-31); may be set to daily if scheduling windows
    are inconvenient, since the job itself is cheap.

    :param request: incoming HTTP request, forwarded to the task.
    :return: JsonResponse wrapping an OffLineView carrying the task UUID.
    """
    task_id = uuid_util.gen_uuid()
    task = OfflineTaskModel(TaskModel.jinglifengge_hand_change_rate,
                            manager_service.hand_turn_over, request, task_id)
    ThreadTool.pool.submit(logic_processor.doLogic, (task,))
    return JsonResponse(convert_to_dict(OffLineView(task_id)))
@api_view(["POST"])
def holding_style_main(request):
"""
้ฃๆ ผๅๆ-ๆไป้ฃๆ ผ fof_fund_tentop_stock_style
้ไป่กๆฐ้ฃๆ ผๆด้ฒๆฐๆฎ๏ผ่ฏทไบๆฏๅญฃ็ปๆๅ็็ฌฌไธไธชๆ็15ๆฅๅผๅง่ฟ่กๆฌ็จๅบ๏ผๆๆฅๆดๆฐ๏ผ่ฟ่ก่ณ่ฏฅๆๆซ
ๅ
จ้จๆไปๆฐๆฎ๏ผ่ฏทไบๆฏๅนด็8ๆ21ๆฅ~8ๆ31ๆฅ๏ผไปฅๅ3ๆ21ๆฅ~3ๆ31ๆฅ่ฟ่ก
:param request:
:return:
"""
uuid = uuid_util.gen_uuid()
model = OfflineTaskModel(TaskModel.jinglifengge_holding_stype_main, manager_service.holding_style_main, request,
uuid)
ThreadTool.pool.submit(logic_processor.doLogic, (model,))
view = OffLineView(uuid)
return JsonResponse(convert_to_dict(view))
@api_view(["POST"])
def holding_style_all(request):
    """Kick off the offline job that fills table fof_fund_stock_style
    (holding-style analysis over all holdings).

    Fix: every sibling view in this module is wrapped with
    ``@api_view(['POST'])``; this one was missing the decorator, so DRF
    request parsing/content negotiation never applied to it.

    :param request: incoming HTTP request, forwarded to the task.
    :return: JsonResponse wrapping an OffLineView carrying the task UUID.
    """
    uuid = uuid_util.gen_uuid()
    model = OfflineTaskModel(TaskModel.jinglifengge_holding_stype_all, manager_service.holding_style_all, request,
                             uuid)
    ThreadTool.pool.submit(logic_processor.doLogic, (model,))
    view = OffLineView(uuid)
    return JsonResponse(convert_to_dict(view))
import xml.etree.ElementTree as ET
import re # regex
import numpy as np
import pandas as pd
def search(root, term):
    """Recursively collect elements under *root* whose tag matches *term*.

    :param root: ``xml.etree.ElementTree.Element`` to scan (included itself
        when it matches).
    :param term: regular-expression pattern applied with ``re.search`` to
        each element's lower-cased tag.
    :return: list of matching Element objects, parent before children,
        children in document order.
    """
    pattern = re.compile(term)
    matches = []
    if pattern.search(root.tag.lower()):
        matches.append(root)
    # Iterate children directly (Element is iterable); the original used
    # index loops, shadowed the builtin `list`, and wrapped the recursion
    # in a pointless try/except that could only hide real errors.
    for child in root:
        matches.extend(search(child, term))
    return matches
class Entity():
    """An entity in the BRIDGE model: a class, feature or attribute.

    Wraps an ``xml.etree.ElementTree.Element`` and exposes its display
    name, type and children, plus a bag-of-words ``dict`` list used for
    fuzzy matching in :meth:`is_fit`.
    """

    def __init__(self, root):
        """Wrap *root* (an Element) and derive its name and type."""
        self.entity = root
        self.dict = []       # bag of words consulted by is_fit()
        self.children = []   # Entity wrappers built by build_children()
        attrib = root.attrib
        # Prefer the explicit XMI 'name', fall back to 'id', else a
        # placeholder (replaces two nested bare try/excepts).
        if 'name' in attrib:
            self.name = attrib['name']
        elif 'id' in attrib:
            self.name = attrib['id']
        else:
            self.name = "Unknown"
        # Strip the "{omg.org/UML1.3}" namespace prefix from the tag.
        # (The original wrapped this in a try/except, but string slicing
        # never raises, so the fallback branch was dead code.)
        self.type = self.entity.tag[len('omg.org/UML1.3') + 2:]
        self.tag = self.entity.tag

    def build_children(self, term):
        """Wrap every child matching *term* and index its name in self.dict."""
        for child in self.get_term(term):
            child_entity = Entity(child)
            self.children.append(child_entity)
            self.dict.append(child_entity.name)

    def get_term(self, term):
        """Return the children of the first element whose tag ends with *term*,
        or an empty list when nothing matches."""
        found = search(self.entity, str(term) + "$")
        # list(elem) replaces Element.getchildren(), which was deprecated
        # and removed in Python 3.9.
        return list(found[0]) if found else []

    def get_features(self):
        """Children of the first '...feature' container element."""
        return self.get_term("feature")

    def get_attributes(self):
        """Children of the first '...attribute' container element."""
        return self.get_term("attribute")

    def is_fit(self, term, case_sensitive=False):
        """Return True when any word in self.dict occurs as a substring of
        *term* (both sides stripped).  *case_sensitive* is currently unused,
        kept for interface compatibility."""
        for word in self.dict:
            if term.strip().find(word.strip()) > -1:
                return True
        return False
class Bridge():
    """A Python instantiation of BRIDGE: the collection of entities
    (classes, features and attributes) parsed from an XMI export."""

    def __init__(self, path):
        """Parse the XMI file at *path* and index every UML class in it.

        Fix: the original hard-coded ``ET.parse('BRIDGE.xmi')`` and
        silently ignored the *path* argument.
        """
        self.tree = ET.parse(path)
        self.root = self.tree.getroot()
        self.classes = self.search(self.root, "class$")

    def search(self, root, term):
        """Recursively collect :class:`Entity` wrappers for elements whose
        lower-cased tag matches the regex *term*."""
        pattern = re.compile(term)
        matches = []
        if pattern.search(root.tag.lower()):
            matches.append(Entity(root))
        for child in root:
            matches.extend(self.search(child, term))
        return matches

    def build_dict(self, dataset):
        """Attach bags of words to classes.

        *dataset* is a DataFrame whose column 0 holds a class name and
        column 1 a comma-separated bag of words for that class.
        """
        for index in range(len(dataset)):
            entity = dataset.iloc[index][0]
            bag_of_words = dataset.iloc[index][1]
            cls = self.get_class(entity)
            for word in bag_of_words.split(','):
                cls.dict.append(word)

    def get_fit(self, term, case_sensitive=False):
        """Names of classes whose dictionary matches any whitespace-split
        word of *term* (a class may appear once per matching word)."""
        names = []
        for cls in self.classes:
            for word in term.split(" "):
                try:
                    if cls.is_fit(word, case_sensitive):
                        names.append(cls.name)
                except Exception:
                    # a malformed dictionary entry must not abort the scan
                    pass
        return names

    def get_class(self, name, case_sensitive=False):
        """Return the first class named *name*, or False when absent."""
        for cls in self.classes:
            if case_sensitive:
                if cls.name == name:
                    return cls
            elif cls.name.lower() == str(name).lower():
                return cls
        return False

    def search_class(self, term, case_sensitive=False):
        """Return every class whose name matches the regex *term*."""
        pattern = re.compile(term)
        matches = []
        for cls in self.classes:
            candidate = cls.name if case_sensitive else cls.name.lower()
            if pattern.search(candidate):
                matches.append(cls)
        return matches
# bridge = Bridge('BRIDGE.xmi')
# dataset = pd.read_csv('bridge_map.csv')
# bridge.build_dict(dataset)
#
# # examples
#
# # how to search a class in bridge
# for entity in bridge.search_class('bio'):
# print(entity.name)
#
# # how to print class features by name
# for feature in bridge.get_class("BiologicEntity").get_features():
# print(feature.attrib["name"])
#
# # find an entities that related to 'terribly patient death'
# bridge.get_fit('the terribly patient death')
|
# Demo of try/except/else: read a divisor from stdin and divide 20 by it.
# NOTE(review): the Chinese string literals below were mangled by a bad
# encoding pass (mojibake with stray line breaks); restore the original
# UTF-8 text before running -- as displayed the file will not parse.
s = input('่ฏท่พๅ
ฅ้คๆฐ๏ผ')
try:
    # int(s) raises ValueError for non-numeric input; dividing by 0 raises
    # ZeroDivisionError, a subclass of ArithmeticError
    result = 20 / int(s)
    print('20้คไปฅ%s็็ปๆๆฏ๏ผ%g' % (s, result))
except ValueError:
    print('ๅผ้่ฏฏ๏ผๅฟ
้กป่พๅ
ฅๆฐๅผ๏ผ')
except ArithmeticError:
    print('็ฎๆฏ้่ฏฏ๏ผไธ่ฝ่พๅ
ฅ0')
else:
    # runs only when the try body raised nothing
    print('ๆฒกๆๅบ็ฐๅผๅธธ')
|
#!/usr/bin/env python
#
# MagicaVoxel2MinecraftPi
#
from voxel_util import create_voxel, post_to_chat, ply_to_positions
from magicavoxel_axis import axis
from all_clear import clear
from time import sleep
# polygon file format exported from MagicaVoxel
ply_file = 'piyo.ply'
# Origin to create (Minecraft world coordinates)
x0 = 0
y0 = 5
z0 = 0
# Rotation degree around each MagicaVoxel axis
alpha = 0 # x-axis
beta = 0 # y-axis
gamma = 0 # z-axis
# Bundle origin and rotation so create_voxel receives one settings mapping.
model_settings = {
    'x0': x0,
    'y0': y0,
    'z0': z0,
    'alpha': alpha,
    'beta': beta,
    'gamma': gamma,
}
# Wipe the world, announce in chat, then place one block per PLY position.
clear()
post_to_chat('create polygon file format model')
box_positions = ply_to_positions(ply_file)
create_voxel(box_positions, model_settings)
|
import sqlite3
import os
import pandas as pd
# Resolve the csv and the sqlite db file relative to this script's folder.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
db_csv_file = os.path.join(BASE_DIR, 'buddymove_holidayiq.csv')
db_file = os.path.join(BASE_DIR, 'buddymove_holidayiq.sqlite3') #new db
def create_connection(db_file):
    """Create a database connection to SQLite specified by db_file.

    Returns the open connection, or None when connecting fails (the
    failure is reported on stdout).
    """
    try:
        return sqlite3.connect(db_file)
    except sqlite3.Error as err:
        print("Error in connection", err)
        return None
def load_data(CONN):
    """Read the buddymove csv, sanity-check it, and load it into the db.

    Replaces any previous 'review' table on the given connection.
    """
    frame = pd.read_csv(db_csv_file)
    # guard against a truncated or dirty download before persisting
    assert frame.shape == (249, 7)
    assert all(frame.notna())
    frame.to_sql(name='review', con=CONN, if_exists='replace')
def get_row_count(conn):
    """Fetch the number of rows in the review table.

    Fix: the original ran ``SELECT *`` and took ``len(cur.fetchall())``,
    materialising every row in Python just to count them; ``COUNT(*)``
    lets SQLite do the counting.
    """
    cur = conn.cursor()
    cur.execute("SELECT COUNT(*) FROM review")
    return cur.fetchone()[0]
def get_nature_shopper_count(conn):
    """Count users with at least 100 Nature reviews AND at least 100
    Shopping reviews.

    Bug fix: the original WHERE clause filtered on Nature only, so the
    Shopping condition described in this docstring was never applied.
    """
    cur = conn.cursor()
    cur.execute(
        """
        SELECT COUNT(*)
        FROM review
        WHERE Nature >= 100 AND Shopping >= 100
        """
    )
    return cur.fetchone()[0]
def main():
    """Print results from queries"""
    conn = create_connection(db_file)
    # populate the review table from the csv
    load_data(conn)
    # row count in the db should match the dataframe's
    row_counts = get_row_count(conn)
    print(f"There are {row_counts} rows in the data base")
    # relationship between Nature and Shopping reviewers
    ns_count = get_nature_shopper_count(conn)
    print(f"Total users who reviewed 100 Nature and Shopper locations: {ns_count}")
    conn.close()
if __name__ == "__main__":
main() |
def power(N, P):
    """Return N raised to the non-negative integer power P, recursively.

    Bug fix: the original returned N for P == 0, but any number to the
    power 0 is 1.
    """
    if P == 0:
        return 1
    if P == 1:
        return N
    return N * power(N, P - 1)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 30 19:35:54 2019
@author: Rizwan1
"""
import pandas as pd
import nltk
import string
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
porter = PorterStemmer()
data = pd.read_csv("D:\\typed_comments.csv", chunksize=1)
# Bug fix: DataFrame.append returns a NEW frame; the original discarded its
# result, so the output csv was always empty (and .append was removed in
# pandas 2.0).  Collect rows in a list and build the frame once at the end.
cleaned = []
# Hoist loop-invariant work: the punctuation table and stop-word set never
# change between comments.
table = str.maketrans('', '', string.punctuation)
stop_words = set(stopwords.words('english'))
for index, chunk in enumerate(data):
    # split the comment (column 19 of the single-row chunk) into words
    tokens = word_tokenize(chunk.iat[0, 19])
    # convert to lower case
    tokens = [w.lower() for w in tokens]
    # remove punctuation
    stripped = [w.translate(table) for w in tokens]
    # retain alphabetic elements only
    words = [word for word in stripped if word.isalpha()]
    # remove stop words
    words = [w for w in words if w not in stop_words]
    # stem and keep the cleaned token list
    cleaned.append({'comment': [porter.stem(word) for word in words]})
    if index + 1 == 1000:  # process at most the first 1000 comments
        break
df = pd.DataFrame(cleaned, columns=['comment'])
df.to_csv('out4.csv', mode='w')
|
# -*- coding: utf-8 -*-
""" Coaffect Visuals Module
Core Objects:
Visuals
"""
import datetime
from .visual import Visual
__all__ = ["Visual"]
__title__ = 'visuals'
__version__ = '0.1.0'
__license__ = 'MIT'
__copyright__ = 'Copyright %s Stanford Collective Emotion Team' % datetime.date.today().year
|
import pandas as pd
import pickle
import numpy as np
import sys
# Path to the csv of loan applications to score (first CLI argument).
predictfile_path = sys.argv[1]
predict_file = pd.read_csv(predictfile_path)
# Keep a handle on the original frame: the in-place fillna calls below
# mutate it too, but drop() rebinds predict_file to a new frame, so
# predict_file_og still carries Loan_ID for the submission join.
predict_file_og = predict_file
# Impute categorical gaps with the mode, LoanAmount with the median.
predict_file['Gender'].fillna(predict_file['Gender'].mode()[0], inplace=True)
predict_file['Self_Employed'].fillna(predict_file['Self_Employed'].mode()[0], inplace=True)
predict_file['Credit_History'].fillna(predict_file['Credit_History'].mode()[0], inplace=True)
predict_file['Loan_Amount_Term'].fillna(predict_file['Loan_Amount_Term'].mode()[0], inplace=True)
predict_file['Dependents'].fillna(predict_file['Dependents'].mode()[0], inplace=True)
predict_file['LoanAmount'].fillna(predict_file['LoanAmount'].median(),inplace=True)
# Engineered features expected by the trained model.
predict_file['LoanAmount_log'] = np.log(predict_file['LoanAmount'])
predict_file['Total_Income']=predict_file['ApplicantIncome']+predict_file['CoapplicantIncome']
predict_file['EMI']=predict_file['LoanAmount']/predict_file['Loan_Amount_Term']
predict_file['Balanced_Income']=predict_file['Total_Income']-predict_file['EMI']
# Drop raw columns superseded by the engineered ones, then one-hot encode.
predict_file = predict_file.drop(['Loan_ID','LoanAmount','ApplicantIncome','CoapplicantIncome','Loan_Amount_Term'],axis=1)
predict_file = pd.get_dummies(predict_file)
# Load the pickled logistic-regression model and score the rows.
model_path = 'Model/final_models/lr_model.sav'
model = pickle.load(open(model_path,'rb'))
prediction = model.predict(predict_file)
# Fill the sample submission with predictions mapped back to Y/N labels.
submission = pd.read_csv('Dataset/sample_submission_49d68Cx.csv')
submission['Loan_Status']=prediction
submission['Loan_ID']=predict_file_og['Loan_ID']
submission['Loan_Status'].replace(0,'N',inplace=True)
submission['Loan_Status'].replace(1,'Y',inplace=True)
# Emit the submission as two concatenated JSON halves merged into one array
# (the "][" seam between the halves is patched into ",").
mean_rows = int(submission.shape[0]/2)
predsplit_1 = submission.iloc[:mean_rows].to_json(orient='records')
predsplit_2 = submission.iloc[mean_rows:].to_json(orient='records')
complete_str = (predsplit_1+predsplit_2)
complete_str = complete_str.replace("][",",")
print(complete_str)
sys.stdout.flush()
import requests
import json
# query func
def webex_api(url, headers, params):
    """Issue a GET against the Webex Teams API and return the raw response.

    An empty params dict means "no query string".
    """
    if params == {}:
        return requests.get(url, headers=headers)
    return requests.get(url, headers=headers, params=params)
# PrettyPrinter
def webex_print(res):
    """Pretty-print status code, Link header and JSON body of a response."""
    status = res.status_code
    link = res.headers.get('Link')
    body = json.dumps(res.json(), indent=4)
    formatted_message = """
    Webex Teams API Response
    -------------------------------------
    Response Status Code : {}
    Response Link Header : {}
    Response Body : {}
    -------------------------------------
    """.format(status, link, body)
    print(formatted_message)
# input access token here
# SECURITY(review): a live-looking bearer token is hard-coded below --
# rotate it and load it from an environment variable, never from source.
access_token = "ODFkZTMxNTctMTc2Ny00MTYwLWJkNDItNzBiNDNjNmUxNDdhYzk5NzlhMzItNWEy_PF84_e271494a-7cc7-4aed-badb-78d7029ffc5e"
headers = {
    'Authorization': 'Bearer {}'.format(access_token),
    'Content-Type': 'application/json'
}
# Who am I?
url = 'https://api.ciscospark.com/v1/people/me'
res = webex_api(url, headers, params={})
# NOTE(review): this second GET repeats the call webex_api just made.
res = requests.get(url, headers=headers)
#print Auth info
print(json.dumps(res.json(), indent=4))
# print memberships (first 10 rooms)
url = 'https://api.ciscospark.com/v1/rooms'
params = {
    "max": 10
}
res = webex_api(url, headers, params)
webex_print(res)
#print(json.dumps(res.json(), indent=4))
# look up a person by e-mail address
url = 'https://api.ciscospark.com/v1/people'
params = {
    'email': 'andy.ford@ascenaretail.com'
}
res = webex_api(url, headers, params)
print(res.json())
|
"""
GREP Plugin for Logout and Browse cache management
NOTE: GREP plugins do NOT send traffic to the target and only grep the HTTP Transaction Log
"""
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Searches transaction DB for Cache snooping protections"
def run(PluginInfo):
    """Build the report section for cache-snooping protections.

    Only greps the recorded HTTP transaction log -- no traffic is sent
    to the target.
    """
    title = "This plugin looks for server-side protection headers and tags against cache snooping<br />"
    content = plugin_helper.HtmlString(title)
    content += plugin_helper.FindResponseHeaderMatchesForRegexpName(
        "HEADERS_FOR_CACHE_PROTECTION")
    content += plugin_helper.FindResponseBodyMatchesForRegexpName(
        "RESPONSE_REGEXP_FOR_CACHE_PROTECTION")
    return content
|
import discord
from discord.ext import commands
# NOTE(review): this bot uses discord.py 0.16-era APIs (pass_context,
# client.say), which were removed in discord.py 1.0+; pin the dependency.
description = 'Corp Bot made by ApparenticBubbles.'
bot_prefix = 'corp?'
client = commands.Bot(description=description, command_prefix=bot_prefix)
@client.event
async def on_ready():
    # fires once the gateway session is ready; log identity to the console
    print('Logged in')
    print('Name : {}'.format(client.user.name))
    print('ID : {}'.format(client.user.id))
    print(discord.__version__)
    print('======== Corp Console ========')
@client.command(pass_context=True)
async def ping(ctx):
    """Pong."""
    await client.say("""Pong""")
@client.command(pass_context=True)
async def info(ctx):
    """Information"""
    await client.say("""Corp Server: https://discord.gg/qDZBRxu If you are banned, we are not going to unban you unless you can actually prove yourself right and that you should be unbanned.""")
@client.command(pass_context=True)
async def developers(ctx):
    """Corp's Developers."""
    await client.say("""MAIN DEVELOPER: ApparenticBubbles
Developers:Train#1115, Sage#3568""")
@client.command(pass_context=True)
async def apparenticbubbles(ctx):
    """ApparenticBubbles"""
    await client.say("""ApparenticBubbles is Corp's main Developer working 24/7. ApparenticBubbles is hard working proberally right now.""")
@client.command(pass_context=True)
async def sage(ctx):
    """Sage"""
    await client.say("""Sage is one of Corp's Developers. He codes other bots too!""")
# Placeholder string, not a real token; supply the bot token here when running.
client.run('token(NOT SHOWEN TO PUBLIC)')
|
import pytest
import numpy as np
import audtorch as at
# Shorthand for marking parametrize cases as expected failures.
xfail = pytest.mark.xfail
@pytest.mark.parametrize('nested_list,expected_list', [
    ([1, 2, 3, [4], [], [[[[[[[[[5]]]]]]]]]], [1, 2, 3, 4, 5]),
    ([[1, 2], 3], [1, 2, 3]),
    ([1, 2, 3], [1, 2, 3]),
])
def test_flatten_list(nested_list, expected_list):
    # flatten_list must remove arbitrary nesting while preserving order
    flattened_list = at.utils.flatten_list(nested_list)
    assert flattened_list == expected_list
@pytest.mark.parametrize('input,tuple_len,expected_output', [
    ('aa', 2, ('a', 'a')),
    (2, 1, (2,)),
    (1, 3, (1, 1, 1)),
    ((1, (1, 2)), 2, (1, (1, 2))),
    ([1, 2], 2, (1, 2)),
    pytest.param([1], 2, [], marks=xfail(raises=ValueError)),
    pytest.param([], 2, [], marks=xfail(raises=ValueError)),
])
def test_to_tuple(input, tuple_len, expected_output):
    # scalars are repeated; sequences must already have tuple_len items
    output = at.utils.to_tuple(input, tuple_len=tuple_len)
    assert output == expected_output
@pytest.mark.parametrize('input,expected_output', [
    (np.array([[2, 2]]), 8),
])
def test_energy(input, expected_output):
    # energy = sum of squared samples
    output = at.utils.energy(input)
    assert output == expected_output
@pytest.mark.parametrize('input,expected_output', [
    (np.array([[2, 2]]), 4),
])
def test_power(input, expected_output):
    # power = mean of squared samples
    output = at.utils.power(input)
    assert output == expected_output
@pytest.mark.parametrize('n_workers,task_fun,params', [
    (3, lambda x, n: x ** n, [(2, n) for n in range(10)]),
])
def test_run_worker_threads(n_workers, task_fun, params):
    # threaded results must match a sequential run, in order
    list1 = at.utils.run_worker_threads(n_workers, task_fun, params)
    list2 = [task_fun(*p) for p in params]
    assert len(list1) == len(list2) and list1 == list2
|
import socket #Biblioteca responsรกvel por habilitar os sockets de redes do computador/S.O
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # TCP connection (SOCK_STREAM is TCP; the original comment said UDP, which was wrong)
ip_dominio = "192.168.0.18" # server IP / domain
serverPort = 3000 # server port
entrada = 'teste' # sample input to send, e.g. GET / HTTP/1.1\nhost: google.com\n\n
client.connect((ip_dominio, serverPort))
try:
    # NOTE(review): the loop closes the socket on its first iteration yet
    # keeps looping -- subsequent sends will raise on the closed socket.
    while (input("Digite 0 para fechar a conexรฃo: ") != '0'):
        client.send((input("Vocรช: ") + "\n").encode('utf-8')) # send message to the server; encode() converts str to bytes
        resposta = client.recv(1024) # receive up to 1024 bytes from the server
        print(resposta) # print the reply
        client.sendto("\nMESSAGE ENDED...\n".encode('utf-8'), (ip_dominio, serverPort))
        client.close() # close the connection
        print("MESSAGE ENDED")
except Exception as erro:
    print(erro) # on connection failure, show the error
    client.close()
#!/usr/bin/python
import sys
import re
def checkScript():
    """Outputs lines which contain an unexpected number of commas.

    Monitors whether the preprocessed file (path in sys.argv[1]) is still
    a well-built csv: every proper row has exactly 10 commas, so any other
    non-zero comma count is printed for inspection.

    Changes from review: idiomatic line iteration instead of a manual
    readline loop, removal of the unused quote counter and of the large
    block of commented-out experiments.
    """
    with open(sys.argv[1]) as fread:
        for line in fread:
            count_comma = line.count(',')
            # 0 commas = blank/continuation line, 10 = well-formed row
            if count_comma != 0 and count_comma != 10:
                print(count_comma, line)
def main():
    """Entry point: requires exactly one argument, the csv path
    (including the name of the csv)."""
    if len(sys.argv) == 2:
        checkScript()
    else:
        print("One argument is necessary : the path to the csv file")
        return -1
# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
"""sexadvices URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.urls import include, path
from rest_framework import routers
# Routers provide an easy way of automatically determining the URL conf.
from . import views
app_name = "api"
router = routers.DefaultRouter(trailing_slash=False)
# router.register(r'users', views.UserViewSet.as_view({'get': 'list'}))
# router.register(r'users/<pk>/', views.UserViewSet.as_view({'get': 'retrieve'}))
router.register(r'suggestions', views.SuggestionViewSet)
router.register(r'items', views.ItemViewSet)
router.register(r'categories', views.DeviationViewSet)
urlpatterns = [
# path('users', views.UserList.as_view()),
path('users/current', views.CurrentUserView.as_view()),
# path('users/<pk>', views.UserDetails.as_view()),
url(r'^', include(router.urls)),
]
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 Spotify AB.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function
import logging
import tensorflow as tf
from .dataset import Datasets
# Snapshot of all parsed TF flags as a plain dict (keys like "job-dir").
FLAGS = tf.flags.FLAGS.flag_values_dict()
class Trainer(object):
    """Entry point to train/evaluate estimators."""
    @staticmethod
    def __split_features_label_fn(parsed_features):
        # Pop the "target" column; everything that remains is a feature.
        target = parsed_features.pop("target")
        return parsed_features, target
    @staticmethod
    def __get_default_training_data_dir():
        from os.path import join as pjoin
        return pjoin(FLAGS["training-set"], FLAGS["train-subdir"])
    @staticmethod
    def __get_default_eval_data_dir():
        from os.path import join as pjoin
        return pjoin(FLAGS["training-set"], FLAGS["eval-subdir"])
    @staticmethod
    def __get_default_run_config():
        return tf.contrib.learn.RunConfig(model_dir=FLAGS["job-dir"])
    @staticmethod
    def __get_default_experiment_fn(estimator,
                                    training_data_dir,
                                    eval_data_dir,
                                    feature_mapping_fn,
                                    split_features_label_fn):
        # Returns a (run_config, params) -> Experiment factory used by run().
        def in_fn():
            # NOTE(review): the "evaluation-input" label here and the
            # "training-input" label in eval_fn below look swapped --
            # confirm against Datasets.mk_iter's contract.
            train_input_it, _ = Datasets.mk_iter(training_data_dir,
                                                 "evaluation-input",
                                                 feature_mapping_fn)
            return split_features_label_fn(train_input_it.get_next())
        def eval_fn():
            eval_input_it, _ = Datasets.mk_iter(eval_data_dir,
                                                "training-input",
                                                feature_mapping_fn)
            return split_features_label_fn(eval_input_it.get_next())
        def do_make_experiment(run_config, params):
            return tf.contrib.learn.Experiment(
                estimator=estimator,
                train_input_fn=in_fn,
                eval_input_fn=eval_fn)
        return do_make_experiment
    @staticmethod
    def get_default_run_config(job_dir=FLAGS["job-dir"]):
        """Returns a default `RunConfig` for `Estimator`."""
        # this weird try/except is a static variable pattern in python
        # https://stackoverflow.com/questions/279561/what-is-the-python-equivalent-of-static-variables-inside-a-function/16214510#16214510
        try:
            return Trainer.get_default_run_config.default_config
        except AttributeError:
            assert job_dir is not None, "Please pass a non None job_dir"
            Trainer.get_default_run_config.default_config = tf.contrib.learn.RunConfig(
                model_dir=job_dir)
            return Trainer.get_default_run_config.default_config
    @staticmethod
    def run(estimator,
            training_data_dir=None,
            eval_data_dir=None,
            feature_mapping_fn=None,
            split_features_label_fn=None,
            run_config=None,
            experiment_fn=None):
        """Make and run an experiment based on given estimator.
        Args:
          estimator: Your estimator to train on. See official TensorFlow documentation on how to
                     define your own estimator.
          training_data_dir: Directory containing training data.
                             Default value is based on `Flags`.
          eval_data_dir: Directory containing training data. Default value is based on `Flags`.
          feature_mapping_fn: A function which maps feature spec line to `FixedLenFeature` or
                              `VarLenFeature` values. Default maps all features to
                              tf.FixedLenFeature((), tf.int64, default_value=0).
          split_features_label_fn: Function used split features into examples and labels.
          run_config: `RunConfig` for the `Estimator`. Default value is based on `Flags`.
          experiment_fn: Function which returns an `Experiment`. Default value is based on
                         `Flags` and is implementation specific.
        """
        # Fill every unspecified argument from the flag-driven defaults.
        training_data_dir = training_data_dir or Trainer.__get_default_training_data_dir()
        eval_data_dir = eval_data_dir or Trainer.__get_default_eval_data_dir()
        run_config = run_config or Trainer.__get_default_run_config()
        experiment_fn = experiment_fn or Trainer.__get_default_experiment_fn(estimator,
                                                                             training_data_dir,
                                                                             eval_data_dir,
                                                                             feature_mapping_fn,
                                                                             split_features_label_fn
                                                                             )
        logging.info("Training data directory: `%s`", training_data_dir)
        logging.info("Evaluation data directory: `%s`", eval_data_dir)
        tf.contrib.learn.learn_runner.run(experiment_fn=experiment_fn,
                                          run_config=run_config)
|
"""
http://www.geeksforgeeks.org/level-maximum-number-nodes/
Find the level in a binary tree which has maximum number of nodes. The root is at level 0.
Examples:
Input :
2
/ \
1 3
/ \ \
4 6 8
/
5
Output : 2
2
/ \
1 3
/ \ \
4 6 8 [Level with maximum nodes = 3]
/
5
"""
from binary_tree import *
def max_level_node(root):
    """Return the level (root = 0) containing the maximum number of nodes.

    Breadth-first traversal; counts the queue size once per level.
    """
    queue = [root]
    counts = {}
    level = 0
    while queue:
        counts[level] = len(queue)
        # dequeue exactly the nodes belonging to the current level
        for _ in range(len(queue)):
            node = queue.pop()
            if node.left:
                queue.insert(0, node.left)
            if node.right:
                queue.insert(0, node.right)
        level += 1
    ranked = sorted(counts.items(), key=lambda item: item[1], reverse=True)
    return ranked[0][0]
if __name__ == '__main__':
    # Build the sample tree from the module docstring using the project's
    # BinaryTree helper, then print the busiest level.
    tree = BinaryTree(2)
    tree.insert_left(1)
    tree.insert_right(3)
    tree.left.insert_left(4)
    tree.left.insert_right(6)
    tree.right.insert_right(8)
    tree.left.right.insert_left(6)
    # Python 2 print statement -- this module targets Python 2.
    print max_level_node(tree)
|
a=float(input("valor de a: "))
b=float(input("valor de b: "))
c=float(input("valor de c: "))
d=float(input("valor de d: "))
e=float(input("valor de e: "))
f=float(input("valor de f: "))
p1=(a/d)
p2=(b/e)
if (p1 != p2):
x=((c*e)-(b*f))/((a*e)-(b*d))
y=((a*f)-(c*d))/((a*e)-(b*d))
print(x)
print(y)
else:
print("Nao tem solucao")
|
class DatabaseConfig(object):
    # MySQL connection settings.
    # SECURITY(review): credentials are hard-coded in source -- load them
    # from the environment or a secrets store and rotate this password.
    dbhost = 'localhost'
    dbuser = 'root'
    dbpassword = 'Skipper2605'
    dbname = 'civil_crime_database'
class Config(object):
    # Base Flask runtime settings.
    PORT = 5000
    DEBUG = True
    threaded = True
class DevelopmentConfig(object):
    # NOTE(review): does not inherit from Config, so PORT/threaded are not
    # available on this class -- confirm that is intentional.
    ENV='development'
    DEVELOPMENT = True
    DEBUG = True
# Test your code incrementally -- don't test everything at the end, or
# errors become harder to locate.  Use the error messages to fix the code.
consumo = float(input("Digite o consumo: "))
tipo = input("tipo de consumo: ").upper()
# Pre-computed totals for each tariff band (R=residential, C=commercial,
# I=industrial; the suffixed variants are the higher-consumption rates):
r = consumo*(0.44)
r1 = consumo*(0.65)
c = consumo*(0.55)
c1 = consumo*(0.60)
i = consumo*(0.55)
i1 = consumo*(0.60)
print("Entradas:",consumo,"KWh e tipo", tipo)
# Pick the rate based on consumer type and consumption threshold.
if (consumo <= 500 and tipo == "R"):
    print("Valor total: R$",round(r, 2))
elif(consumo > 500 and tipo == "R"):
    print("Valor total: R$",round(r1, 2))
elif(consumo <= 1000 and tipo == "I"):
    print("Valor total: R$",round(i, 2))
elif(consumo > 1000 and tipo == "I"):
    print("Valor total: R$",round(i1, 2))
elif(consumo <= 5000 and tipo == "C"):
    print("Valor total: R$",round(c, 2))
elif(consumo > 5000 and tipo == "C"):
    print("Valor total: R$",round(c1, 2))
else:
    print("Dados invalidos")
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-03-22 13:57
from __future__ import unicode_literals
from django.conf import settings
import django.core.files.storage
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (1.10.3); avoid hand-editing
    # except to resolve migration conflicts.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('calligraphy', '0082_auto_20170321_2259'),
    ]
    operations = [
        # Character_orig: a character crop (bounding box x1,y1-x2,y2) cut
        # from a calligraphy page image, linked to its author/work/page.
        migrations.CreateModel(
            name='Character_orig',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author_name', models.CharField(blank=True, max_length=64)),
                ('parent_work_name', models.CharField(blank=True, max_length=64)),
                ('mark', models.CharField(blank=True, max_length=64)),
                ('x1', models.IntegerField(blank=True)),
                ('y1', models.IntegerField(blank=True)),
                ('x2', models.IntegerField(blank=True)),
                ('y2', models.IntegerField(blank=True)),
                ('image', models.ImageField(blank=True, storage=django.core.files.storage.FileSystemStorage(), upload_to='')),
                ('image_width', models.IntegerField(default=0)),
                ('image_height', models.IntegerField(default=0)),
                ('parent_author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='calligraphy.Author')),
                ('parent_page', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='calligraphy.Page')),
                ('parent_work', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='calligraphy.Work')),
                ('supplied_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
import torch
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import torch.nn.init
class CNN(torch.nn.Module):
    """Three conv blocks followed by a two-layer FC head (MNIST, 10 classes)."""

    def __init__(self):
        super(CNN, self).__init__()
        # dropout keep probability for the first FC block
        self.keep_prob = 0.5
        # block 1: 1x28x28 -> 32x14x14
        self.layer1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2))
        # block 2: 32x14x14 -> 64x7x7
        self.layer2 = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2))
        # block 3: 64x7x7 -> 128x4x4 (padded pool)
        self.layer3 = torch.nn.Sequential(
            torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=1))
        # FC 4*4*128 -> 625 with Xavier init, ReLU and dropout
        self.fc1 = torch.nn.Linear(4 * 4 * 128, 625, bias=True)
        torch.nn.init.xavier_uniform_(self.fc1.weight)
        self.layer4 = torch.nn.Sequential(
            self.fc1,
            torch.nn.ReLU(),
            torch.nn.Dropout(p=1 - self.keep_prob))
        # final classifier: 625 -> 10 logits
        self.fc2 = torch.nn.Linear(625, 10, bias=True)
        torch.nn.init.xavier_uniform_(self.fc2.weight)

    def forward(self, x):
        """Run the conv stack, flatten, then the FC head; returns logits."""
        features = x
        for block in (self.layer1, self.layer2, self.layer3):
            features = block(features)
        flat = features.view(features.size(0), -1)
        return self.fc2(self.layer4(flat))
# Prefer GPU when available; seed both RNGs for reproducibility.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
torch.manual_seed(777)
if device == 'cuda':
    torch.cuda.manual_seed_all(777)
# Hyper-parameters.
learning_rate = 0.001
training_epochs = 15
batch_size = 100
mnist_train = dsets.MNIST(
    root='mnist',
    train=True,
    transform=transforms.ToTensor(),
    download=True)
mnist_test = dsets.MNIST(
    root='mnist',
    train=False,
    transform=transforms.ToTensor(),
    download=True)
# drop_last avoids a smaller final batch skewing the averaged cost.
data_loader = torch.utils.data.DataLoader(dataset=mnist_train,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          drop_last=True)
model = CNN().to(device)
criterion = torch.nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
total_batch = len(data_loader)
print('์ด ๋ฐฐ์น์ ์ : {}'.format(total_batch))
for epoch in range(training_epochs):
    avg_cost = 0
    for X, Y in data_loader:
        X = X.to(device)
        Y = Y.to(device)
        optimizer.zero_grad()
        hypothesis = model(X)
        cost = criterion(hypothesis, Y)
        cost.backward()
        optimizer.step()
        # running mean of the batch losses over this epoch
        avg_cost += cost / total_batch
    print('[Epoch: {:>4}] cost = {:>.9}'.format(epoch + 1, avg_cost))
# Evaluate on the full test set without tracking gradients.
# NOTE(review): training inputs pass through ToTensor() (scaled to [0, 1]),
# but X_test below is raw 0-255 floats -- consider dividing by 255.
with torch.no_grad():
    X_test = mnist_test.data.view(len(mnist_test), 1, 28, 28).float().to(device)
    Y_test = mnist_test.targets.to(device)
    prediction = model(X_test)
    correct_prediction = torch.argmax(prediction, 1) == Y_test
    accuracy = correct_prediction.float().mean()
    print('Accuracy: ', accuracy.item())
import os
from io import BytesIO

import psycopg2
import requests
from PIL import Image

from .. import utils
#from .api import API
class data:
    """Class decorator that installs a generic, truncating ``__repr__``."""

    @staticmethod
    def repr(obj):
        """Render *obj* as ``Name(prop = value, ...)``.

        A value whose repr is long (>= 20 chars including the name) or
        whose ``repr()`` raises is shown as ``prop: <TypeName>`` instead.

        Fixes from review: the original used ``assert`` for control flow
        (silently broken under ``python -O``) and a bare ``except``.
        """
        items = []
        for prop, value in obj.__dict__.items():
            try:
                rendered = "%s = %r" % (prop, value)
            except Exception:
                rendered = None
            if rendered is None or len(rendered) >= 20:
                rendered = "%s: <%s>" % (prop, value.__class__.__name__)
            items.append(rendered)
        return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))

    def __init__(self, cls):
        """Patch *cls* with the generic repr and remember it."""
        cls.__repr__ = data.repr
        self.cls = cls

    def __call__(self, *args, **kwargs):
        """Instantiate the wrapped class, so the decorated name stays callable."""
        return self.cls(*args, **kwargs)
#@data
class Dataset:
    """A remo dataset: images plus annotation sets and sharing metadata."""

    def __init__(self, sdk, **kwargs):
        """Bind the sdk client and copy the server-side fields from kwargs."""
        self.sdk = sdk
        self.id = kwargs.get('id')
        self.name = kwargs.get('name')
        self.annotation_sets = kwargs.get('annotation_sets')
        self.created_at = kwargs.get('created_at')
        self.license = kwargs.get('license')
        self.is_public = kwargs.get('is_public')
        self.users_shared = kwargs.get('users_shared')
        self.top3_classes = kwargs.get('top3_classes')
        self.total_classes = kwargs.get('total_classes')
        self.total_annotation_objects = kwargs.get('total_annotation_objects')

    def __repr__(self):
        return "Dataset {} - '{}'".format(self.id, self.name)

    def __str__(self):
        return 'Dataset (id={}, name={})'.format(self.id, self.name)

    def upload(self, files=[], urls=[], annotation_task=None, folder_id=None):
        """Upload files/urls into this existing dataset via the sdk."""
        return self.sdk.upload_dataset(self.id, files, urls, annotation_task, folder_id)

    def fetch(self):
        """Refresh this object's fields from the server."""
        dataset = self.sdk.get_dataset(self.id)
        self.__dict__.update(dataset.__dict__)

    def browse(self):
        """Open the dataset page in the browser UI."""
        utils.browse(self.sdk.ui.dataset_url(self.id))

    def annotate(self):
        """Open the annotation UI for the first annotation set, if any."""
        # TODO: select by annotation task
        print(self.annotation_sets)
        if len(self.annotation_sets) > 0:
            utils.browse(self.sdk.ui.annotate_url(self.annotation_sets[0]))
        else:
            print("No annotation sets in dataset " + self.name)

    def images(self, folder_id=None, **kwargs):
        """List this dataset's images, optionally restricted to a folder.

        Bug fix: the original always passed ``folder_id=None`` to the sdk,
        silently discarding the caller's argument.
        """
        return self.sdk.list_dataset_images(self.id, folder_id=folder_id, **kwargs)

    def search(self, **kwargs):
        """Not implemented yet."""
        pass

    def ann_statistics(self):
        """Return annotation-set statistics for this dataset as a dict.

        Fixes from review: the original line
        ``psycopg2.connect(database=.., user=.., password=.. host=.., ...)``
        was a syntax error (placeholder ``..`` and a missing comma) that made
        the whole module unimportable; dataset_id was %-interpolated into the
        SQL (injection-prone); and ``self.sdk.con`` was closed while the
        freshly opened connection leaked.  Connection settings now come from
        the environment until the data is exposed through a proper endpoint.
        """
        con = psycopg2.connect(database=os.environ.get('REMO_DB_NAME'),
                               user=os.environ.get('REMO_DB_USER'),
                               password=os.environ.get('REMO_DB_PASSWORD'),
                               host=os.environ.get('REMO_DB_HOST'),
                               port=os.environ.get('REMO_DB_PORT'))
        try:
            cur = con.cursor()
            # parameterized query: psycopg2 escapes the value safely
            cur.execute("SELECT t.* FROM public.annotation_set_statistics t"
                        " WHERE dataset_id = %s", (self.id,))
            rows = cur.fetchall()
        finally:
            con.close()
        statistics = dict()
        # NOTE(review): like the original, this keeps only the LAST row when
        # a dataset has several annotation sets -- confirm that is intended.
        for row in rows:
            statistics["Annotation SET ID "] = row[1]
            statistics["Classes"] = row[2]
            statistics["Tags"] = row[4]
            statistics["Top3 Classes"] = row[5]
            statistics["Total Classes"] = row[6]
            statistics["Total Annotated Images"] = row[7]
            statistics["Total Annotation Objects"] = row[8]
        return statistics

    def get_images(self, cls=None, tag=None):
        """Return a BytesIO with the dataset's first thumbnail image.

        TODO: honour the *cls* and *tag* filters and return all images.
        """
        dataset_details = self.sdk.all_info_datasets()
        dataset_info = None
        for res in dataset_details['results']:
            if res['id'] == self.id:
                dataset_info = res
        url_ = dataset_info.get('image_thumbnails')[0]['image']
        bytes_ = requests.get(url_).content
        return BytesIO(bytes_)

    def show_images(self, cls=None, tag=None):
        """Open the first thumbnail as a PIL image."""
        # TODO: redirect to ui with endpoints
        img = self.get_images(cls, tag)
        return Image.open(img)

    def show_objects(self, cls, tag):
        """Not implemented yet."""
        pass
|
'''
Problem (BOJ 1476, "Date Calculation"):
The country Jun-gyu lives in represents a year with three numbers E, S, M
(for Earth, Sun and Moon), each cycling over its own range:
1 <= E <= 15, 1 <= S <= 28, 1 <= M <= 19.
Our year 1 is written 1 1 1. Every year all three numbers increase by 1;
when a number would exceed its range it wraps back to 1. For example,
year 15 is 15 15 15, but year 16 is 1 16 16 (E wrapped past 15).

Input:  one line containing E, S and M, always within the stated ranges.
Output: the earliest year (in our counting, starting at 1) written E S M.
        Since 1 1 1 is year 1, the answer is always positive.
'''
#122244kb 116ms
def solve(E, S, M):
    """Return the earliest year (>= 1) represented as E S M.

    Each counter cycles as ((year - 1) % period) + 1 with periods
    15, 28 and 19; brute-force over at most lcm(15, 28, 19) = 7980 years.
    """
    year = 1
    while True:
        if ((year - 1) % 15 + 1 == E
                and (year - 1) % 28 + 1 == S
                and (year - 1) % 19 + 1 == M):
            return year
        year += 1


if __name__ == "__main__":
    # Guarded so importing this module no longer reads stdin as a side effect.
    E, S, M = map(int, input().split())
    print(solve(E, S, M))
import numpy as np
import pandas as pd
import seaborn as sns
import lightgbm as lgb
# Identify Categorical featurs
def categorical_featurs():
    """Return the names of the categorical feature columns."""
    return [
        "Location_ID",
        "Auditorium_Type",
        "Language",
        "Business_Day",
        "Is_Holiday",
        "Genre",
        "Rating",
        "Awards",
    ]
# Identify Numerical featurs
def numerical_featurs():
    """Return the names of the numerical feature columns."""
    return ['Weeks_Since_Release', 'Runtime', 'Business_Week_Of_Year', 'Presales']
# Define the model name
def model():
    """Return a fresh, untrained LightGBM regressor with default parameters."""
    return lgb.LGBMRegressor()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.