text stringlengths 38 1.54M |
|---|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 3 13:56:19 2019
@author: wwech
"""
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 2 23:36:29 2019
@author: wwech
"""
import sys
import requests
import re
import shutil
from bs4 import BeautifulSoup
# Entry point for the crawl: the IMFDB wiki main page.
url = "http://www.imfdb.org/wiki/Main_Page"
def get_source(url):
    """GET `url` and return the parsed BeautifulSoup tree.

    Aborts the whole script on any non-200 response, matching the
    original fail-fast behaviour.
    """
    response = requests.get(url)
    if response.status_code != 200:
        sys.exit("[~] Invalid Response Received.")
    return BeautifulSoup(response.text, "html.parser")
# Fetch and parse the main page once; everything below scrapes from this tree.
html = get_source(url)
html  # NOTE(review): bare expression — notebook-cell residue, no effect as a script
def html_tag_filter(html, tag, attrs=None):
    """Return every `tag` element in `html`, optionally filtered by `attrs`.

    Aborts the script when nothing matches (same fail-fast contract as
    get_source).
    """
    matches = html.findAll(tag, attrs)
    if not matches:
        sys.exit("[~] No tags detected on the page.")
    return matches
# Collect absolute URLs from the "second banner" table on the main page.
attrs = {"id":"mp-secondbanner"}
table_tag = html_tag_filter(html, "table", attrs)
links = []
for a in table_tag[0].find_all('a', href=True):
    links.append("http://www.imfdb.org"+a['href'])
links.pop(0)  # drop the first link — presumably a header/self link; TODO confirm
links  # bare expression, notebook residue
# Follow each banner link and gather the per-title page links from the
# "all pages" index tables.
inner_links = []
for l in links:
    inner_html = get_source(l)
    inner_table_tag = html_tag_filter(inner_html,"table",{'class':'mw-allpages-table-chunk'})
    for a in inner_table_tag[0].find_all('a', href=True):
        inner_links.append("http://www.imfdb.org"+a['href'])
inner_links[:10]  # bare expression, notebook residue
from tqdm import tqdm
import os
import threading
# Simple shared throttle: the download loop busy-waits on THREAD_COUNTER,
# which worker threads are expected to increment/decrement.
THREAD_COUNTER = 0
THREAD_MAX = 5
# NOTE(review): raises FileExistsError if 'images' already exists (no exist_ok).
os.mkdir( os.path.join( os.getcwd(), 'images' ) )
def requesthandle(link, name):
    """Download the image at `link` and stream it into images/<name>.

    Runs on a worker thread. Maintains the global THREAD_COUNTER so the
    main loop's THREAD_MAX throttle actually works (the original never
    touched the counter, making both busy-wait loops no-ops).
    """
    global THREAD_COUNTER
    THREAD_COUNTER += 1
    try:
        r = requests.get(link, stream=True)
        if r.status_code == 200:
            r.raw.decode_content = True
            # The script creates an 'images' directory above; the original
            # wrote into a non-existent "new/" directory and raised IOError.
            # `with` also guarantees the file handle is closed.
            with open(os.path.join("images", name), "wb") as f:
                shutil.copyfileobj(r.raw, f)
            print("[*] Downloaded Image: %s" % name)
    finally:
        THREAD_COUNTER -= 1
# Scrape thumbnail image URLs from the first five title pages and download
# them on daemon worker threads.
img_src = []
img_link = []
for l in tqdm(inner_links[:5]):
    inner_html = get_source(l)
    imgs = inner_html.findAll( "img" , attrs={'class': 'thumbimage'})
    for img in imgs:
        src = img.get( "src" )
        if src:
            # Capture (path, filename) for jpeg/jpg sources only.
            src = re.match( r"((?:https?:\/\/.*)?\/(.*\.(?:jpeg|jpg)))", src )
            if src:
                img_src.append(src)
                (link, name) = src.groups()
                if link:
                    link = "http://www.imfdb.org"+link
                    img_link.append(link)
                    _t = threading.Thread( target=requesthandle, args=(link, name.split("/")[-1]) )
                    _t.daemon = True
                    _t.start()
                    # Busy-wait throttle (burns CPU); relies on the workers
                    # updating THREAD_COUNTER.
                    while THREAD_COUNTER >= THREAD_MAX:
                        pass
# Wait for all in-flight downloads to finish.
while THREAD_COUNTER > 0:
    pass
# Notebook residue below: inspection expressions with no script effect.
(l,n) = img_src[0].groups()
n.split('/')[-1]
img_link[0]
# Dead code preserved as a string literal: the earlier synchronous download path.
"""
inner_r = requests.get( src, stream=True )
if inner_r.status_code == 200:
    inner_r.raw.decode_content = True
    f = open( name.split("/")[-1], "wb" )
    shutil.copyfileobj(inner_r.raw, f)
    f.close()
"""
from Bio import SeqIO
from profiler import time_usage
from sam_utils import extract_unmapped_reads_to_fasta_file
from vntr_finder import VNTRFinder
class GenomeAnalyzer:
    """Genotypes a set of target VNTRs from reads or alignment files.

    Holds one VNTRFinder per targeted reference VNTR and exposes entry
    points for PacBio/Illumina alignment files and raw read files.
    Results are printed to stdout (one VNTR id followed by its genotype).
    """

    def __init__(self, reference_vntrs, target_vntr_ids, working_directory='./'):
        self.reference_vntrs = reference_vntrs
        self.target_vntr_ids = target_vntr_ids
        self.working_dir = working_directory
        # One finder per targeted reference VNTR, keyed by VNTR id.
        self.vntr_finder = {ref_vntr.id: VNTRFinder(ref_vntr)
                            for ref_vntr in self.reference_vntrs
                            if ref_vntr.id in target_vntr_ids}

    @staticmethod
    def print_genotype(copy_numbers):
        """Print sorted copy numbers joined by '/', or 'None'."""
        if copy_numbers is None:
            print('None')
        else:
            print('/'.join(str(cn) for cn in sorted(copy_numbers)))

    @time_usage
    def get_vntr_filtered_reads_map(self, read_file, illumina=True):
        """Map each target VNTR id to the reads passing its keyword filter."""
        vntr_reads = {vid: [] for vid in self.target_vntr_ids}
        vntr_read_ids = {}
        for vid in self.target_vntr_ids:
            vntr_read_ids[vid] = self.vntr_finder[vid].filter_reads_with_keyword_matching(
                self.working_dir, read_file, illumina)
        # Only parse the (potentially large) FASTA if anything matched at all.
        if any(len(read_ids) > 0 for read_ids in vntr_read_ids.values()):
            for read in SeqIO.parse(read_file, 'fasta'):
                for vid, read_ids in vntr_read_ids.items():
                    if read.id in read_ids:
                        vntr_reads[vid].append(read)
        return vntr_reads

    def find_repeat_counts_from_pacbio_alignment_file(self, alignment_file):
        """Genotype each target VNTR from a PacBio alignment file."""
        unmapped_reads_file = extract_unmapped_reads_to_fasta_file(alignment_file, self.working_dir)
        vntr_reads = self.get_vntr_filtered_reads_map(unmapped_reads_file, False)
        for vid in self.target_vntr_ids:
            genotype = self.vntr_finder[vid].find_repeat_count_from_pacbio_alignment_file(
                alignment_file, vntr_reads[vid])
            print(vid)
            self.print_genotype(genotype)

    def find_repeat_counts_from_pacbio_reads(self, read_file, naive=False):
        """Genotype each target VNTR directly from PacBio reads."""
        vntr_reads = self.get_vntr_filtered_reads_map(read_file, False)
        for vid in self.target_vntr_ids:
            genotype = self.vntr_finder[vid].find_repeat_count_from_pacbio_reads(vntr_reads[vid], naive)
            print(vid)
            self.print_genotype(genotype)

    def find_frameshift_from_alignment_file(self, alignment_file):
        """Report frameshift results for each target VNTR."""
        for vid in self.target_vntr_ids:
            result = self.vntr_finder[vid].find_frameshift_from_alignment_file(alignment_file, [])
            print(vid)
            print(result)

    def find_repeat_counts_from_alignment_file(self, alignment_file):
        """Genotype each target VNTR from a short-read alignment file."""
        unmapped_reads_file = extract_unmapped_reads_to_fasta_file(alignment_file, self.working_dir)
        vntr_reads = self.get_vntr_filtered_reads_map(unmapped_reads_file)
        for vid in self.target_vntr_ids:
            genotype = self.vntr_finder[vid].find_repeat_count_from_alignment_file(
                alignment_file, vntr_reads[vid])
            print(vid)
            self.print_genotype(genotype)

    def find_repeat_counts_from_short_reads(self, read_file):
        """Genotype each target VNTR straight from a short-read file."""
        for vid in self.target_vntr_ids:
            genotype = self.vntr_finder[vid].find_repeat_count_from_short_reads(read_file)
            print(vid)
            self.print_genotype(genotype)
|
#!/usr/bin/env python
"""
The number, 197, is called a circular prime because all rotations of the
digits: 197, 971, and 719, are themselves prime.
There are thirteen such primes below 100:
2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, and 97.
How many circular primes are there below one million?
"""
from utils import reverse, num_digits, is_prime
# Search limit. NOTE(review): shadows the builtin `max`.
max = 10**6
def next_prime():
    """Generator yielding primes in increasing order, without bound."""
    candidate = 0
    while True:
        candidate += 1
        if is_prime(candidate):
            yield candidate
def rotate(n):
    """Rotate the decimal digits of n right: move the last digit to the front.

    e.g. rotate(197) == 719. Uses divmod with floor division: the original
    `n / 10` was Python-2-only (float division on Python 3 breaks the
    arithmetic); `//` behaves identically on Python 2 ints.
    """
    rest, last = divmod(n, 10)
    return rest + last * 10 ** num_digits(rest)
# Collect all primes below the limit.
primes = []
for n in next_prime():
    if n > max: break
    primes.append(n)
num_circular = 0
for p in primes:
    # NOTE(review): redefined on every loop iteration; hoisting it above the
    # loop would be cheaper with identical results.
    def test_circular(n):
        # A prime is circular when every digit rotation is also prime.
        for i in range(0, num_digits(n)):
            if not is_prime(n):
                return False
            n = rotate(n)
        return True
    if test_circular(p):
        num_circular += 1
# Python 2 print statement — this script predates Python 3.
print "There are", num_circular, "primes below", max
|
#!/usr/bin/python3
"""
Get a link to wiki page (as parameter, or interactively)
and returns a markup optimized for wordpress:
1. Replaced wiki latex generated images to native wordpress $latex $ tags.
2. Removes [.edit] links.
3. Points links to right domain (from which was get page).
4. Removes red links.
5. Replaces geshi highlighting to [sourcecode lang=""] tags.
Depends on bs4 unit.
"""
__author__ = "Bunyk T."
import re
import sys
import httplib2
import bs4
# Shared HTTP client, reused for every request in this script.
http = httplib2.Http()
def get(url):
    """Fetch `url` and return the response body decoded as UTF-8."""
    _response, body = http.request(url)
    return body.decode('utf-8')
def get_source(domain, name):
    """Return [(lang, code), ...] for each <source> block in page `name`'s raw wikitext."""
    raw_url = domain + '/w/index.php?title=' + name + '&action=raw'
    return re.findall('<source lang="(.*?)">(.*?)</source>', get(raw_url), re.DOTALL)
def wiki_filter(ln, domain):
    """Normalize one line of rendered wiki HTML for wordpress.

    Converts MediaWiki latex <img> tags into $latex ...$ (keeping the alt
    text as the formula), strips [edit] section links and HTML comments,
    and rewrites relative /wiki/ links against `domain`.
    """
    # latex images -> $latex ...$
    ln = re.sub(r'<img (?:(?:class="tex"|alt="(.*?)"|src=".*?") ?){3}/>', r'$latex \1$', ln)
    # drop [edit] section links
    ln = re.sub(r'<span class="editsection">.*?</span>', '', ln)
    # drop HTML comments
    ln = re.sub(r'<!--.*?-->', '', ln)
    # absolutize wiki links against the source domain (domain ends with '/')
    return re.sub(r'<a href="/wiki/(.*?)"', r'<a href="' + domain + r'wiki/\1"', ln)
# Markers delimiting the article body in MediaWiki's rendered HTML, plus a
# placeholder inserted where geshi-highlighted blocks used to be.
START_MARK = "<!-- bodycontent -->"
FINISH_MARK = "<!-- /bodycontent -->"
GESHI_MARK = "GESHI-HIGHLIGHT"
if len(sys.argv) < 2:
    url = input("Page url: ")
else:
    url = sys.argv[1]
# Accept both /wiki/Name and /w/index.php?title=Name URL shapes.
try:
    domain = re.findall("(http://.*?/)wiki/", url)[0]
    page_name = re.findall("http://.*?/wiki/(.*)$", url)[0]
except IndexError:
    domain = re.findall("(http://.*?/)w/index.php", url)[0]
    page_name = re.findall("http://.*?/w/index.php.*title=(.*?)&.*", url)[0]
doc = get(url)
has_source = False
# NOTE(review): no explicit parser argument — bs4 picks the best available one.
soup = bs4.BeautifulSoup(doc)
# Swap every geshi-highlighted block for a placeholder; the real code is
# re-fetched from the raw wikitext below.
geshis = soup.findAll("div", {"class": "mw-geshi"})
for i in geshis:
    i.replaceWith(GESHI_MARK)
    has_source = True
# Unwrap red (nonexistent-page) links, keeping only their inner content.
red_links = soup.findAll("a", {"class":"new"})
for i in red_links:
    i.replaceWith(bs4.BeautifulSoup(i.renderContents()))
if has_source:
    sources_list = get_source(domain, page_name)
current_source = 0
doc = str(soup).splitlines()
inbody = False
for line in doc:
    if re.search(FINISH_MARK, line):
        break
    # Placeholders only occur when has_source is True, so sources_list exists.
    if re.search(GESHI_MARK, line):
        print(''.join([
            "[sourcecode language='",
            sources_list[current_source][0],
            "']",
            sources_list[current_source][1],
            "[/sourcecode]",
        ]))
        current_source += 1
        continue
    if inbody:
        print(wiki_filter(line, domain))
        continue
    if re.search(START_MARK, line):
        inbody = True
|
from flask import Flask, request
from SerialController import SerialController
import ast
# Flask application
app = Flask(__name__)
#
# ser_device = SerialController()
# Global state: current target angles for all 20 servos
# (slots 0-17 are body joints, 18-19 are the head/"cap" servos).
now_angle = [None] * 20
ser_controller = SerialController()
now_angle[:18] = [90, 90, 90, 89, 90, 90, 99, 99, 99, 99, 99, 99, 109, 108, 108, 109, 109, 108]
now_angle[18:] = [45, 90]
# Per-servo calibration offsets applied before sending angles to the hardware.
angle_bias = [-5, 6, -10, -4, -5, 5, 11, 6, 16, 0, 11, 0, -4, -18, -13, 0, -4, 0, 0, 0]
def list_add(a, b):
    """Element-wise a[i] + b[i], clamped to the servo-safe range [0, 180].

    Iterates over the length of `a`; `b` must be at least as long.
    """
    return [max(0, min(a[i] + b[i], 180)) for i in range(len(a))]
@app.route("/", methods=["GET"])
def body_update():
    """Accept new target angles via query params and apply the calibration bias.

    Query params (optional, Python-literal lists parsed with ast.literal_eval):
      body_angle -- 18 body-joint angles (slots 0-17)
      cap_angle  -- 2 head servo angles (slots 18-19)
    Returns 'finish' on success, or an error string on failure.
    """
    try:
        if request.method == "GET":
            body_angle_list_str = request.args.get('body_angle')
            cap_angle_list_str = request.args.get('cap_angle')
            global now_angle
            if body_angle_list_str is not None:
                body_angle_list = ast.literal_eval(body_angle_list_str)
                now_angle[:18] = body_angle_list
            if cap_angle_list_str is not None:
                cap_angle_list = ast.literal_eval(cap_angle_list_str)
                now_angle[18:] = cap_angle_list
            # Apply the bias to a fresh list instead of reassigning the global:
            # the original rebound now_angle to the biased values, so the bias
            # accumulated on every request that carried no new angles.
            out_angle = list_add(now_angle, angle_bias)
            print(out_angle)
            # ser_controller.send_msg(out_angle)
            return 'finish'
    except Exception as e:
        print(e)
        # Return a real response: Flask raises if a view returns None.
        return 'error: {}'.format(e)
@app.route("/get_temp", methods=["GET"])
def get_temp():
    """Return the SoC temperature in degrees Celsius as a string.

    Reads the sysfs thermal zone (value is in millidegrees) and divides
    by 1000.
    """
    try:
        # `with` guarantees the file handle is closed even on a read error.
        with open("/sys/class/thermal/thermal_zone0/temp") as f:
            temp = float(f.read()) / 1000
        # A Flask view cannot return a float — stringify the value
        # (the original returned `temp` directly, which raises TypeError).
        return str(temp)
    except Exception as e:
        print(e)
        # Return a real response: Flask raises if a view returns None.
        return 'error: {}'.format(e)
if __name__ == '__main__':
    # Serial communication object is created at module import time above;
    # here we only start the HTTP server.
    app.run(host="0.0.0.0", port=9600, debug=True)
|
from selenium.webdriver.support.wait import WebDriverWait
from properties.PropertiesLoader import PropertiesLoader
class BasePage:
    """Common base for page objects: wires up driver, waits and host config."""

    def __init__(self, app):
        self.app = app
        self.driver = app.driver
        self._properties = PropertiesLoader()
        props = self._properties
        self._port = props.users_port
        self._base_url = props.host
        self.wait = WebDriverWait(self.driver, props.wait_delay)

    def open(self):
        """Navigate the browser to the configured base URL."""
        self.driver.get(self._base_url)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Rename users.User.created to date_joined and order users newest-first."""

    dependencies = [
        ('users', '0004_auto_20150618_0741'),
    ]

    operations = [
        # Default ordering by the renamed field, most recent first.
        migrations.AlterModelOptions(
            name='user',
            options={'ordering': ['-date_joined']},
        ),
        migrations.RenameField(
            model_name='user',
            old_name='created',
            new_name='date_joined',
        ),
    ]
|
class bcolors:
    """ANSI escape sequences for colored/styled terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    CYAN = '\033[96m'
    BROWN = '\033[33m'
def binary(n):
    """Return n's binary representation as a string ('' for 0).

    Recursive divide-by-2. Uses floor division: the original lambda used
    `n / 2`, which is Python-2-only — on Python 3 it float-divides and the
    recursion never reaches 0. `//` is behavior-identical on Python 2 ints.
    (Also a `def` instead of a lambda assignment, per PEP 8.)
    """
    return '' if n == 0 else binary(n // 2) + str(n % 2)
class bunchMarker:
    """Drives a 16-bit shift register over GPIO to set the bunch-marker value."""
    def __init__(self, BMSRCLK = None, BMRCLK = None, SR = None, gpio = None):
        # Pin numbers: shift-register clock, register (latch) clock, serial data.
        self._BMSRCLK = BMSRCLK
        self._BMRCLK = BMRCLK
        self._SR = SR
        self._GPIO = gpio
        self._init()
    def _init(self):
        # Configure all three pins as outputs.
        self._GPIO.setup(self._BMSRCLK, self._GPIO.OUT)
        self._GPIO.setup(self._BMRCLK, self._GPIO.OUT)
        self._GPIO.setup(self._SR, self._GPIO.OUT)
    @property
    def BMSRCLK(self):
        return self._BMSRCLK
    @property
    def BMRCLK(self):
        return self._BMRCLK
    @property
    def SR(self):
        return self._SR
    def reset(self):
        # Drive all three pins low.
        self._GPIO.output(self._BMSRCLK, False)
        self._GPIO.output(self._BMRCLK, False)
        self._GPIO.output(self._SR, False)
    def serialShift(self, data):  # Loads bit on falling edge and shifts bit on rising edge
        self._GPIO.setup(self._BMSRCLK, self._GPIO.OUT)
        self._GPIO.setup(self._BMRCLK, self._GPIO.OUT)
        self._GPIO.setup(self._SR, self._GPIO.OUT)
        # Clock each character of `data` ('0'/'1') out on the SR pin.
        for data_bit in data:
            self._GPIO.output(self._SR, bool(int(data_bit)))
            self._GPIO.output(self._BMSRCLK, False) # falling edge of srclk
            self._GPIO.output(self._BMRCLK, True) # rising edge of register clock, inverse of srclk
            self._GPIO.output(self._BMSRCLK, True) # rising edge of srclk
            self._GPIO.output(self._BMRCLK, False) # falling edge of register clock
        # Final register-clock pulse latches the shifted word.
        self._GPIO.output(self._BMRCLK, True)
        self._GPIO.output(self._BMRCLK, False)
    def bunchMarker(self, value):
        # Complement against 16-bit full scale, then shift out the bit string.
        bm = 65535 - value
        bmSD = binary(bm)
        self.serialShift(bmSD)
        # Python 2 print statement — this module predates Python 3.
        print bcolors.OKGREEN + "Bench Marker shifting is complete" + bcolors.ENDC
|
import os
import gc
import math
import numpy as np
import pandas
from common import *
import sf_calc
# Prefer the notebook-friendly progress bar when running under IPython/Jupyter.
try:
    get_ipython
    from tqdm import tqdm_notebook as tqdm
except (NameError, ImportError):
    # NameError: not inside IPython; ImportError: tqdm_notebook unavailable.
    # The original bare `except:` also swallowed SystemExit/KeyboardInterrupt.
    from tqdm import tqdm
class WeightManager:
    """Manages per-(pt, eta) training weights for tau-id classes.

    Weights live in a pandas DataFrame (one row per tau candidate) and are
    iteratively rescaled so each per-class working point keeps its target
    efficiency (see UpdateWeights).
    """

    @staticmethod
    def CreateBins():
        # Variable-width pt binning: finer at low pt, coarser at high pt,
        # closed at 1000.
        pt_bins = [ ]
        pt_bins.extend(list(np.arange(20, 50, 5)))
        pt_bins.extend(list(np.arange(50, 100, 10)))
        pt_bins.extend(list(np.arange(100, 200, 20)))
        pt_bins.extend(list(np.arange(200, 500, 50)))
        pt_bins.extend(list(np.arange(500, 1000, 100)))
        pt_bins.append(1000)
        eta_bins = [0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.3]
        # Flattened [pt_min, pt_max, eta_min, eta_max] for every 2D bin.
        pteta_bins = []
        for pt_bin in range(len(pt_bins) - 1):
            for eta_bin in range(len(eta_bins) - 1):
                pteta_bins.append([ pt_bins[pt_bin], pt_bins[pt_bin + 1], eta_bins[eta_bin], eta_bins[eta_bin + 1] ])
        return np.array(pt_bins), np.array(eta_bins), np.array(pteta_bins)

    @staticmethod
    def CreateWeightDataFrame(full_file_name, Y, pt_bins, eta_bins):
        # Read pt/eta for all taus and compute uniform-in-(pt, eta) starting
        # weights plus the bin index of every candidate.
        weight_df = ReadBrancesToDataFrame(full_file_name, 'taus', ['pt', 'eta'])
        weight_result = sf_calc.ApplyUniformWeights(pt_bins, eta_bins, weight_df.pt.values, weight_df.eta.values, Y)
        weight_df['weight'] = pandas.Series(weight_result[0], index=weight_df.index)
        bin_ids = weight_result[1]
        weight_df["pt_bin_ids"] = pandas.Series(bin_ids[:, 0], index=weight_df.index, dtype=int)
        weight_df["eta_bin_ids"] = pandas.Series(bin_ids[:, 1], index=weight_df.index, dtype=int)
        # One gen-truth column per class listed in match_suffixes.
        for n in range(len(match_suffixes)):
            br_suff = match_suffixes[n]
            weight_df['gen_'+br_suff] = pandas.Series(Y[:, n], index=weight_df.index)
        return weight_df

    def __init__(self, weight_file_name, calc_weights=False, full_file_name = None, Y = None, first_block = True):
        self.pt_bins, self.eta_bins, self.pteta_bins = WeightManager.CreateBins()
        if calc_weights:
            # Fresh computation requires the input tuple file and truth labels.
            if (full_file_name is None) or (Y is None):
                raise RuntimeError("Missing information which is needed to calculate the weights.")
            self.weight_df = WeightManager.CreateWeightDataFrame(full_file_name, Y, self.pt_bins, self.eta_bins)
            self.SaveWeights(weight_file_name)
        else:
            self.weight_df = pandas.read_hdf(weight_file_name, 'weights')
        if first_block:
            # Initialise per-class discriminator scores and per-class weights.
            for cl in ['e', 'mu', 'jet']:
                self.weight_df["tau_vs_" + cl] = pandas.Series(np.zeros(self.weight_df.shape[0]),
                                                               index=self.weight_df.index)
                self.weight_df["weight_" + cl] = pandas.Series(np.copy(self.weight_df.weight.values),
                                                               index=self.weight_df.index)
        # Normalisation reference used when rescaling (total weight of gen taus).
        self.sum_tau_weights = self.weight_df[self.weight_df.gen_tau == 1].weight.sum()
        gc.collect()

    def GetWeights(self, start, stop):
        # (stop-start, 3) array of the current per-class training weights.
        return self.weight_df[["weight_e", "weight_mu", "weight_jet"]].values[start:stop, :]

    def SetHistFileName(self, hist_file_name, overwrite=True):
        # NOTE(review): `overwrite` is accepted but ignored — an existing file
        # is always removed.
        self.hist_file_name = hist_file_name
        if hist_file_name is not None and os.path.isfile(hist_file_name):
            os.remove(hist_file_name)

    def SaveWeights(self, weight_file_name):
        # Persist the full weight table (fixed format, light compression).
        self.weight_df.to_hdf(weight_file_name, 'weights', mode='w', format='fixed', complevel=1)

    def UpdateWeights(self, model, epoch, X, test_start, n_test, sf_inputs, class_target_eff, batch_size=100000):
        """Recompute per-class thresholds and rescale weights after an epoch.

        NOTE(review): reads self.hist_file_name, which only exists after
        SetHistFileName() has been called — AttributeError otherwise.
        """
        # Score the test slice with the current model.
        pred = model.predict([X[test_start:test_start+n_test], self.GetWeights(test_start, test_start+n_test),
                              sf_inputs[test_start:test_start+n_test]],
                             batch_size = batch_size, verbose=0)
        print("\tpredictions has been calculated.")
        n_bins = self.pteta_bins.shape[0]
        n_updates = n_bins * len(class_target_eff)
        # Book-keeping record: one row per (class, pt bin, eta bin).
        df_update = pandas.DataFrame(data ={
            'epoch': np.ones(n_updates, dtype=int) * epoch,
            'cl_idx': np.ones(n_updates, dtype=int) * (-1),
            'target_eff': np.zeros(n_updates),
            'threashold': np.zeros(n_updates),
            'pt_bin_id': np.zeros(n_updates, dtype=int),
            'eta_bin_id': np.zeros(n_updates, dtype=int),
            'pt_min': np.zeros(n_updates),
            'pt_max': np.zeros(n_updates),
            'eta_min': np.zeros(n_updates),
            'eta_max': np.zeros(n_updates),
            'is_updated': np.ones(n_updates, dtype=int),
            'sf': np.ones(n_updates),
            'eff': np.zeros(n_updates),
            'eff_err': np.zeros(n_updates),
            'n_taus': np.zeros(n_updates, dtype=int),
            'n_passed': np.ones(n_updates),
        })
        upd_idx = 0
        # Masks selecting the test slice and its gen-matched taus.
        test_sel = (self.weight_df.index >= test_start) & (self.weight_df.index < test_start + n_test)
        tau_sel = test_sel & (self.weight_df.gen_tau == 1)
        thr = np.zeros(3)
        all_target_eff = np.zeros(3)
        for cl, target_eff in class_target_eff:
            br_loc = self.weight_df.columns.get_loc('tau_vs_'+cl)
            cl_idx = match_suffixes.index(cl)
            tau_vs_cl = TauLosses.tau_vs_other(pred[:, tau], pred[:, cl_idx])
            self.weight_df.iloc[test_start:test_start+n_test, br_loc] = tau_vs_cl
            df_tau = self.weight_df[tau_sel]
            cl_idx = min(cl_idx, 2)
            # Score threshold that gives the target efficiency on gen taus.
            thr[cl_idx] = np.percentile(df_tau["tau_vs_" + cl], (1 - target_eff) * 100)
            #thr[cl_idx] = quantile_ex(df_tau["tau_vs_" + cl].values, 1 - target_eff, df_tau.weight.values)
            all_target_eff[cl_idx] = target_eff
        # Per-bin scale factors bringing each bin back to the target efficiency.
        sf_results = sf_calc.CalculateScaleFactors(self.pt_bins, self.eta_bins,
            self.weight_df.tau_vs_e.values, self.weight_df.tau_vs_mu.values, self.weight_df.tau_vs_jet.values,
            self.weight_df.gen_tau.values, self.weight_df.weight.values, thr, all_target_eff, test_start,
            n_test, self.weight_df.pt_bin_ids.values, self.weight_df.eta_bin_ids.values)
        weights_changed = np.count_nonzero(sf_results[:, :, :, 0]) > 0
        if weights_changed:
            new_weights = sf_calc.ApplyScaleFactors(self.weight_df.pt_bin_ids.values,
                self.weight_df.eta_bin_ids.values, sf_results, self.weight_df.gen_tau.values,
                self.GetWeights(0, self.weight_df.shape[0]), self.weight_df.weight.values,
                self.sum_tau_weights, 10)
            # Only overwrite the classes whose bins actually changed.
            for cl, target_eff in class_target_eff:
                cl_idx = min(match_suffixes.index(cl), 2)
                if np.count_nonzero(sf_results[cl_idx, :, :, 0]) > 0:
                    w_br_loc = self.weight_df.columns.get_loc('weight_' + cl)
                    self.weight_df.iloc[:, w_br_loc] = new_weights[:, cl_idx]
        # Fill the per-bin book-keeping rows and report a per-class summary.
        for cl, target_eff in class_target_eff:
            cl_idx = min(match_suffixes.index(cl), 2)
            for pt_bin_id in range(len(self.pt_bins) - 1):
                for eta_bin_id in range(len(self.eta_bins) - 1):
                    df_update.loc[upd_idx, 'cl_idx'] = cl_idx
                    df_update.loc[upd_idx, 'target_eff'] = target_eff
                    df_update.loc[upd_idx, 'threashold'] = thr[cl_idx]
                    df_update.loc[upd_idx, 'pt_bin_id'] = pt_bin_id
                    df_update.loc[upd_idx, 'eta_bin_id'] = eta_bin_id
                    df_update.loc[upd_idx, 'pt_min'] = self.pt_bins[pt_bin_id]
                    df_update.loc[upd_idx, 'pt_max'] = self.pt_bins[pt_bin_id+1]
                    df_update.loc[upd_idx, 'eta_min'] = self.eta_bins[eta_bin_id]
                    df_update.loc[upd_idx, 'eta_max'] = self.eta_bins[eta_bin_id+1]
                    df_update.loc[upd_idx, 'is_updated'] = sf_results[cl_idx, pt_bin_id, eta_bin_id, 0]
                    df_update.loc[upd_idx, 'sf'] = sf_results[cl_idx, pt_bin_id, eta_bin_id, 1]
                    df_update.loc[upd_idx, 'n_taus'] = sf_results[cl_idx, pt_bin_id, eta_bin_id, 4]
                    df_update.loc[upd_idx, 'n_passed'] = sf_results[cl_idx, pt_bin_id, eta_bin_id, 7]
                    df_update.loc[upd_idx, 'eff'] = sf_results[cl_idx, pt_bin_id, eta_bin_id, 2]
                    df_update.loc[upd_idx, 'eff_err'] = sf_results[cl_idx, pt_bin_id, eta_bin_id, 3]
                    upd_idx += 1
            cl_update = df_update[df_update.cl_idx == cl_idx]
            cl_updated = cl_update[cl_update.is_updated > 0]
            if cl_updated.shape[0] > 0:
                average_sf = np.average(cl_updated.sf, weights=cl_updated.n_taus)
            else:
                average_sf = 0
            print('tau_vs_{}: bins changed = {}, average sf = {}'.format(cl, cl_updated.shape[0], average_sf))
        # Append this epoch's record to the history file, if one was configured.
        if self.hist_file_name is not None:
            df_update.to_hdf(self.hist_file_name, "weight_updates", append=True, complevel=1, complib='zlib')
        gc.collect()
|
import time
from HtmlDownloader import HtmlDownloader
from HtmlParser import HtmlParser
from DataOutput import DataOutput
class SpiderMan:
    """Scheduler tying together the downloader, parser and output stages."""

    def __init__(self):
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.output = DataOutput()

    def crawl(self, root_url):
        """Crawl the listing at root_url and store rating data for each movie.

        Per-movie failures are logged and skipped: the original re-raised
        inside the except block, which aborted the whole crawl on the first
        bad URL and made its 'Crawl failed' log line unreachable.
        """
        if root_url is None:
            return
        content = self.downloader.download(root_url)
        urls = self.parser.parser_url(root_url, content)
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            try:
                r_url = url[0]
                # Timestamp plus fixed suffix forms the API's cache-busting token.
                t = time.strftime('%Y%m%d%H%M%S' ,time.localtime()) + '338518'
                argument0 = url[1]
                rank_url = 'http://service.library.mtime.com/Movie.api?' + \
                    'Ajax_CallBack=true' + \
                    '&Ajax_CallBackType=Mtime.Library.Services' + \
                    '&Ajax_CallBackMethod=GetMovieOverviewRating' + \
                    '&Ajax_CrossDomain=1' + \
                    '&Ajax_RequestUrl=%s' + \
                    '&t=%s' + \
                    '&Ajax_CallBackArgument0=%s'
                rank_url = rank_url % (r_url, t, argument0)
                # print(rank_url)
                jsons = self.downloader.download(rank_url)
                data = self.parser.parser_json(rank_url, jsons)
                if data:
                    self.output.store_data(data)
            except Exception as e:
                # Log and continue with the next URL instead of aborting.
                print('Crawl failed')
                print(e)
        self.output.output_end()
        print('Crawl finish!')
if __name__ == '__main__':
    # Crawl the Beijing theater listing as the entry point.
    spider = SpiderMan()
    spider.crawl('http://theater.mtime.com/China_Beijing/')
|
import json
from functools import reduce
from django.conf import settings
from django.db import IntegrityError
from django.db import models
from django.db.models import Exists
from django.db.models import F
from django.db.models import IntegerField as DjangoIntegerField
from django.db.models import OuterRef
from django.db.models import Q
from django.db.models import Subquery
from django.db.models.functions import Cast
from django.db.models.functions import Coalesce
from django.http import Http404
from django.utils.timezone import now
from django_cte import CTEQuerySet
from django_filters.rest_framework import CharFilter
from django_filters.rest_framework import UUIDFilter
from le_utils.constants import content_kinds
from le_utils.constants import exercises
from le_utils.constants import roles
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.serializers import BooleanField
from rest_framework.serializers import ChoiceField
from rest_framework.serializers import DictField
from rest_framework.serializers import IntegerField
from rest_framework.serializers import ValidationError
from rest_framework.viewsets import ViewSet
from contentcuration.db.models.expressions import IsNull
from contentcuration.db.models.query import RIGHT_JOIN
from contentcuration.db.models.query import With
from contentcuration.db.models.query import WithValues
from contentcuration.models import AssessmentItem
from contentcuration.models import Channel
from contentcuration.models import ContentNode
from contentcuration.models import ContentTag
from contentcuration.models import File
from contentcuration.models import generate_storage_url
from contentcuration.models import PrerequisiteContentRelationship
from contentcuration.models import UUIDField
from contentcuration.tasks import create_async_task
from contentcuration.tasks import get_or_create_async_task
from contentcuration.utils.nodes import calculate_resource_size
from contentcuration.viewsets.base import BulkListSerializer
from contentcuration.viewsets.base import BulkModelSerializer
from contentcuration.viewsets.base import BulkUpdateMixin
from contentcuration.viewsets.base import RequiredFilterSet
from contentcuration.viewsets.base import ValuesViewset
from contentcuration.viewsets.common import ChangeEventMixin
from contentcuration.viewsets.common import DotPathValueMixin
from contentcuration.viewsets.common import JSONFieldDictSerializer
from contentcuration.viewsets.common import NotNullMapArrayAgg
from contentcuration.viewsets.common import SQCount
from contentcuration.viewsets.common import UserFilteredPrimaryKeyRelatedField
from contentcuration.viewsets.common import UUIDInFilter
from contentcuration.viewsets.sync.constants import CONTENTNODE
from contentcuration.viewsets.sync.constants import CREATED
from contentcuration.viewsets.sync.constants import DELETED
from contentcuration.viewsets.sync.constants import TASK_ID
from contentcuration.viewsets.sync.utils import generate_delete_event
from contentcuration.viewsets.sync.utils import generate_update_event
from contentcuration.viewsets.sync.utils import log_sync_exception
# Subquery matching the channel whose main tree contains the outer ContentNode row.
channel_query = Channel.objects.filter(main_tree__tree_id=OuterRef("tree_id"))
# Positions accepted by node move operations.
_valid_positions = {"first-child", "last-child", "left", "right"}
class ContentNodeFilter(RequiredFilterSet):
    """Filter set for ContentNode list endpoints."""
    id__in = UUIDInFilter(field_name="id")
    root_id = UUIDFilter(method="filter_root_id")
    ancestors_of = UUIDFilter(method="filter_ancestors_of")
    parent__in = UUIDInFilter(field_name="parent")
    _node_id_channel_id___in = CharFilter(method="filter__node_id_channel_id")

    class Meta:
        model = ContentNode
        fields = (
            "parent",
            "parent__in",
            "id__in",
            "kind",
            "root_id",
            "ancestors_of",
            "_node_id_channel_id___in",
        )

    def filter_root_id(self, queryset, name, value):
        # Direct children of the given channel's main tree root.
        return queryset.filter(
            parent=Channel.objects.filter(pk=value).values_list(
                "main_tree__id", flat=True
            )
        )

    def filter_ancestors_of(self, queryset, name, value):
        """
        See MPTTModel.get_ancestors()
        """
        try:
            # Includes the target node in the query
            target_node = ContentNode.objects.get(pk=value)
            if target_node.is_root_node():
                return queryset.filter(pk=value)
            # MPTT interval containment: ancestors enclose the target's lft/rght.
            return queryset.filter(
                tree_id=target_node.tree_id,
                lft__lte=target_node.lft,
                rght__gte=target_node.rght,
            )
        except ContentNode.DoesNotExist:
            return queryset.none()

    def filter__node_id_channel_id(self, queryset, name, value):
        # `value` is a flat comma-separated list of alternating
        # node_id, channel_id pairs.
        # NOTE(review): a trailing unpaired element is silently ignored.
        query = Q()
        values = value.split(",")
        num_pairs = len(values) // 2
        for i in range(0, num_pairs):
            query |= Q(node_id=values[i * 2], channel_id=values[i * 2 + 1])
        return queryset.filter(query)
# Column spec for the VALUES CTE built in set_tags: (tag name, node id) rows.
tags_values_cte_fields = {
    'tag': models.CharField(),
    'node_id': UUIDField()
}
def set_tags(tags_by_id):
    """Synchronise node<->tag relations in bulk.

    tags_by_id maps node_id -> {tag_name: bool}; a truthy value adds the
    relation, a falsy one removes it. Missing ContentTag rows are created
    on demand (and cached for the duration of the call).
    """
    tag_tuples = []
    tags_relations_to_delete = []
    # put all tags into a tuple (tag_name, node_id) to send into SQL
    for target_node_id, tag_names in tags_by_id.items():
        for tag_name, value in tag_names.items():
            tag_tuples.append((tag_name, target_node_id))
    # create CTE that holds the tag_tuples data
    values_cte = WithValues(tags_values_cte_fields, tag_tuples, name='values_cte')
    # create another CTE which will RIGHT join against the tag table, so we get all of our
    # tag_tuple data back, plus the tag_id if it exists. Ideally we wouldn't normally use a RIGHT
    # join, we would simply swap the tables and do a LEFT, but with the VALUES CTE
    # that isn't possible
    tags_qs = (
        values_cte.join(ContentTag, tag_name=values_cte.col.tag, _join_type=RIGHT_JOIN)
        .annotate(
            tag=values_cte.col.tag,
            node_id=values_cte.col.node_id,
            tag_id=F('id'),
        )
        .values('tag', 'node_id', 'tag_id')
    )
    tags_cte = With(tags_qs, name='tags_cte')
    # the final query, we RIGHT join against the tag relation table so we get the tag_tuple back
    # again, plus the tag_id from the previous CTE, plus annotate a boolean of whether
    # the relation exists
    qs = (
        tags_cte.join(
            CTEQuerySet(model=ContentNode.tags.through),
            contenttag_id=tags_cte.col.tag_id,
            contentnode_id=tags_cte.col.node_id,
            _join_type=RIGHT_JOIN
        )
        .with_cte(values_cte)
        .with_cte(tags_cte)
        .annotate(
            tag_name=tags_cte.col.tag,
            node_id=tags_cte.col.node_id,
            tag_id=tags_cte.col.tag_id,
            has_relation=IsNull('contentnode_id', negate=True)
        )
        .values('tag_name', 'node_id', 'tag_id', 'has_relation')
    )
    created_tags = {}
    for result in qs:
        tag_name = result["tag_name"]
        node_id = result["node_id"]
        tag_id = result["tag_id"]
        has_relation = result["has_relation"]
        tags = tags_by_id[node_id]
        value = tags[tag_name]
        # tag wasn't found in the DB, but we're adding it to the node, so create it
        if not tag_id and value:
            # keep a cache of created tags during the session
            if tag_name in created_tags:
                tag_id = created_tags[tag_name]
            else:
                tag, _ = ContentTag.objects.get_or_create(tag_name=tag_name, channel_id=None)
                tag_id = tag.pk
                created_tags.update({tag_name: tag_id})
        # if we're adding the tag but the relation didn't exist, create it now, otherwise
        # track the tag as one relation we should delete
        if value and not has_relation:
            ContentNode.tags.through.objects.get_or_create(
                contentnode_id=node_id, contenttag_id=tag_id
            )
        elif not value and has_relation:
            tags_relations_to_delete.append(
                Q(contentnode_id=node_id, contenttag_id=tag_id)
            )
    # delete tags
    if tags_relations_to_delete:
        ContentNode.tags.through.objects.filter(
            reduce(lambda x, y: x | y, tags_relations_to_delete)
        ).delete()
class ContentNodeListSerializer(BulkListSerializer):
    """Bulk serializer that pulls 'tags' out of each row and applies them via set_tags."""

    def gather_tags(self, validated_data):
        """Pop 'tags' from every row; return {node_id: tags} for non-empty ones."""
        tags_by_id = {}
        for row in validated_data:
            tags = row.pop("tags", None)
            if tags:
                tags_by_id[row["id"]] = tags
        return tags_by_id

    def update(self, queryset, all_validated_data):
        """Stamp 'modified', bulk-update, then apply any gathered tag changes."""
        tags_by_id = self.gather_tags(all_validated_data)
        timestamp = now()
        for row in all_validated_data:
            row["modified"] = timestamp
        updated_objects = super(ContentNodeListSerializer, self).update(
            queryset, all_validated_data
        )
        if tags_by_id:
            set_tags(tags_by_id)
        return updated_objects
class ExtraFieldsOptionsSerializer(JSONFieldDictSerializer):
    # Optional assessment modality; "QUIZ" is the only defined choice.
    modality = ChoiceField(choices=(("QUIZ", "Quiz"),), allow_null=True, required=False)
class ExtraFieldsSerializer(JSONFieldDictSerializer):
    """Serializer for the exercise mastery-model settings kept in extra_fields."""
    mastery_model = ChoiceField(
        choices=exercises.MASTERY_MODELS, allow_null=True, required=False
    )
    randomize = BooleanField()
    # m-of-n mastery criterion parameters.
    m = IntegerField(allow_null=True, required=False)
    n = IntegerField(allow_null=True, required=False)
    options = ExtraFieldsOptionsSerializer(required=False)
class TagField(DotPathValueMixin, DictField):
    # Dict-valued tags field; value handling comes from DotPathValueMixin
    # (presumably dotted-path partial updates — see common.py to confirm).
    pass
class ContentNodeSerializer(BulkModelSerializer):
    """
    This is a write only serializer - we leverage it to do create and update
    operations, but read operations are handled by the Viewset.
    """
    parent = UserFilteredPrimaryKeyRelatedField(
        queryset=ContentNode.objects.all(), required=False
    )
    extra_fields = ExtraFieldsSerializer(required=False)
    tags = TagField(required=False)

    class Meta:
        model = ContentNode
        fields = (
            "id",
            "title",
            "description",
            "kind",
            "language",
            "license",
            "license_description",
            "copyright_holder",
            "author",
            "role_visibility",
            "aggregator",
            "provider",
            "extra_fields",
            "thumbnail_encoding",
            "parent",
            "complete",
            "changed",
            "tags",
        )
        list_serializer_class = ContentNodeListSerializer
        nested_writes = True

    def create(self, validated_data):
        # Creating a new node, by default put it in the orphanage on initial creation.
        if "parent" not in validated_data:
            validated_data["parent_id"] = settings.ORPHANAGE_ROOT_ID
        # Tags are handled separately via set_tags, not by the model serializer.
        tags = None
        if "tags" in validated_data:
            tags = validated_data.pop("tags")
        instance = super(ContentNodeSerializer, self).create(validated_data)
        if tags:
            set_tags({instance.id: tags})
        return instance

    def update(self, instance, validated_data):
        # Moves must go through the dedicated move operation, not a plain update.
        if "parent" in validated_data:
            raise ValidationError(
                {"parent": "This field should only be changed by a move operation"}
            )
        extra_fields = validated_data.pop("extra_fields", None)
        if extra_fields is not None:
            # Merge into the existing extra_fields value rather than overwriting.
            validated_data["extra_fields"] = self.fields["extra_fields"].update(
                instance.extra_fields, extra_fields
            )
        if "tags" in validated_data:
            tags = validated_data.pop("tags")
            set_tags({instance.id: tags})
        return super(ContentNodeSerializer, self).update(instance, validated_data)
def retrieve_thumbail_src(item):
    """Get either the encoding or the url to use as the <img> src attribute.

    Prefers the inline base64 from thumbnail_encoding (a JSON string);
    otherwise builds a storage URL from checksum + extension; else None.
    """
    raw_encoding = item.get("thumbnail_encoding")
    if raw_encoding:
        try:
            parsed = json.loads(raw_encoding)
        except ValueError:
            # Malformed JSON: fall back to the checksum-based URL below.
            parsed = None
        if parsed:
            return parsed.get("base64")
    checksum = item["thumbnail_checksum"]
    extension = item["thumbnail_extension"]
    if checksum is not None and extension is not None:
        return generate_storage_url("{}.{}".format(checksum, extension))
    return None
def get_title(item):
    """Display title for a node: channel roots show the original channel name."""
    if item["parent_id"]:
        return item["title"]
    # If it's the root, use the channel name (should be original channel name)
    return item["original_channel_name"]
class PrerequisitesUpdateHandler(ViewSet):
    """
    Dummy viewset for handling create and delete changes for prerequisites
    """

    def _get_values_from_change(self, change):
        # A change "key" is a (target_node_id, prerequisite_id) pair.
        return {
            "target_node_id": change["key"][0],
            "prerequisite_id": change["key"][1],
        }

    def _execute_changes(self, change_type, data):
        # Apply all changes of one type as a single bulk ORM operation.
        if data:
            if change_type == CREATED:
                PrerequisiteContentRelationship.objects.bulk_create(
                    [PrerequisiteContentRelationship(**d) for d in data]
                )
            elif change_type == DELETED:
                # OR together one Q object per relationship to delete them all
                # in a single query.
                PrerequisiteContentRelationship.objects.filter(
                    reduce(lambda x, y: x | y, map(lambda x: Q(**x), data))
                ).delete()

    def _check_permissions(self, changes):
        # Filter the passed in content nodes, on both sides of the relationship
        allowed_contentnodes = set(
            ContentNode.filter_edit_queryset(
                ContentNode.objects.all(), self.request.user
            )
            .filter(
                id__in=list(map(lambda x: x["key"][0], changes))
                + list(map(lambda x: x["key"][1], changes))
            )
            .values_list("id", flat=True)
        )
        valid_changes = []
        errors = []
        for change in changes:
            if (
                change["key"][0] in allowed_contentnodes
                and change["key"][1] in allowed_contentnodes
            ):
                valid_changes.append(change)
            else:
                # Report inaccessible nodes as "Not found" rather than
                # distinguishing missing from forbidden.
                change.update({"errors": ValidationError("Not found").detail})
                errors.append(change)
        return valid_changes, errors

    def _check_valid(self, changes):
        # Don't allow prerequisites to be created across different trees
        # or on themselves
        valid_changes = []
        errors = []
        # Map node id -> tree_id for every node referenced by any change.
        tree_id_lookup = {
            c["id"]: c["tree_id"]
            for c in ContentNode.objects.filter(
                id__in=list(map(lambda x: x["key"][0], changes))
                + list(map(lambda x: x["key"][1], changes))
            ).values("id", "tree_id")
        }
        # Do a lookup on existing prerequisite relationships in the opposite direction to the ones we are trying to set
        # Create a lookup string of prerequisite_id:target_node_id which we will compare against target_node_id:prerequisite_id
        existing_relationships_lookup = {
            "{}:{}".format(p["prerequisite_id"], p["target_node_id"])
            for p in PrerequisiteContentRelationship.objects.filter(
                # First part of the key is the target_node_id and prerequisite_id the second, so we reverse them here
                reduce(
                    lambda x, y: x | y,
                    map(
                        lambda x: Q(
                            target_node_id=x["key"][1], prerequisite_id=x["key"][0]
                        ),
                        changes,
                    ),
                )
            ).values("target_node_id", "prerequisite_id")
        }
        for change in changes:
            # Reject self-referential, cross-tree, and reciprocal relationships;
            # everything else is considered valid.
            if change["key"][0] == change["key"][1]:
                change.update(
                    {
                        "errors": ValidationError(
                            "Prerequisite relationship cannot be self referential"
                        ).detail
                    }
                )
                errors.append(change)
            elif tree_id_lookup[change["key"][0]] != tree_id_lookup[change["key"][1]]:
                change.update(
                    {
                        "errors": ValidationError(
                            "Prerequisite relationship cannot cross trees"
                        ).detail
                    }
                )
                errors.append(change)
            elif (
                "{}:{}".format(change["key"][0], change["key"][1])
                in existing_relationships_lookup
            ):
                change.update(
                    {
                        "errors": ValidationError(
                            "Prerequisite relationship cannot be reciprocal"
                        ).detail
                    }
                )
                errors.append(change)
            else:
                valid_changes.append(change)
        return valid_changes, errors

    def _handle_relationship_changes(self, changes):
        # All changes in one batch must share a single change type.
        change_types = set(map(lambda x: x["type"], changes))
        if len(change_types) > 1:
            raise TypeError("Mixed change types passed to change handler")
        change_type = tuple(change_types)[0]
        permissioned_changes, permission_errors = self._check_permissions(changes)
        if change_type == CREATED and permissioned_changes:
            # Only do validation on create operations and if there are any changes left to validate
            valid_changes, validation_errors = self._check_valid(permissioned_changes)
            errors = permission_errors + validation_errors
        else:
            # For delete operations, just check permissions, but let invalid
            # relationships be deleted
            valid_changes = permissioned_changes
            errors = permission_errors
        data = list(map(self._get_values_from_change, valid_changes))
        # In Django 2.2 add ignore_conflicts to make this fool proof
        try:
            self._execute_changes(change_type, data)
        except IntegrityError as e:
            # A DB constraint violation marks every attempted change as failed.
            for change in valid_changes:
                change.update({"errors": str(e)})
                errors.append(change)
        return errors or None, None

    def create_from_changes(self, changes):
        # Bulk-create prerequisite relationships from sync changes.
        return self._handle_relationship_changes(changes)

    def delete_from_changes(self, changes):
        # Bulk-delete prerequisite relationships from sync changes.
        return self._handle_relationship_changes(changes)
# Apply mixin first to override ValuesViewset
class ContentNodeViewSet(BulkUpdateMixin, ChangeEventMixin, ValuesViewset):
    """API over ContentNode rows returning plain value dicts.

    Combines bulk-update and change-event behaviour from the mixins with
    the values-based serialization of ValuesViewset; mixin order matters
    (see comment above the class).
    """

    queryset = ContentNode.objects.all()
    serializer_class = ContentNodeSerializer
    permission_classes = [IsAuthenticated]
    filterset_class = ContentNodeFilter
    # Database fields and queryset annotations emitted for each node.
    values = (
        "id",
        "content_id",
        "title",
        "description",
        "author",
        "assessment_item_count",
        "provider",
        "aggregator",
        "content_tags",
        "role_visibility",
        "kind__kind",
        "language_id",
        "license_id",
        "license_description",
        "copyright_holder",
        "extra_fields",
        "node_id",
        "root_id",
        "channel_id",
        "original_source_node_id",
        "original_channel_id",
        "original_channel_name",
        "original_node_id",
        "original_parent_id",
        "total_count",
        "resource_count",
        "error_count",
        "has_updated_descendants",
        "has_new_descendants",
        "coach_count",
        "thumbnail_checksum",
        "thumbnail_extension",
        "thumbnail_encoding",
        "published",
        "modified",
        "has_children",
        "parent_id",
        "complete",
        "changed",
        "lft",
    )
    # Maps public payload keys to either a DB field name or a per-row callable.
    field_map = {
        "language": "language_id",
        "license": "license_id",
        "tags": "content_tags",
        "kind": "kind__kind",
        "thumbnail_src": retrieve_thumbail_src,
        "title": get_title,
        "parent": "parent_id",
    }

    def _annotate_channel_id(self, queryset):
        # channel_query is a module-level queryset (defined outside this view).
        return queryset.annotate(
            channel_id=Subquery(channel_query.values_list("id", flat=True)[:1])
        )

    def get_queryset(self):
        # Base queryset plus the channel_id annotation used by `values`.
        queryset = super(ContentNodeViewSet, self).get_queryset()
        return self._annotate_channel_id(queryset)

    def get_edit_queryset(self):
        # Edit-permission-filtered queryset, also annotated with channel_id.
        queryset = super(ContentNodeViewSet, self).get_edit_queryset()
        return self._annotate_channel_id(queryset)

    @action(detail=True, methods=["get"])
    def requisites(self, request, pk=None):
        """Return every prerequisite pair in the node's channel tree."""
        if not pk:
            raise Http404
        # Here we are fetching the entire prerequisite relationship tree
        # for the channel. It is possible that this could get very large,
        # and cause performance issues, and it may not need to be loaded
        # on every fetch.
        # However, in order to detect potential cyclic prerequisite chains,
        # we load the entire channel's prerequisite tree at once.
        # Do a filter just on the tree_id of the target node, as relationships
        # should not be cross channel, and are not meaningful if they are.
        prereq_table_entries = PrerequisiteContentRelationship.objects.filter(
            target_node__tree_id=Cast(
                ContentNode.objects.filter(pk=pk).values_list("tree_id", flat=True)[:1],
                output_field=DjangoIntegerField(),
            )
        ).values("target_node_id", "prerequisite_id")
        return Response(
            list(
                map(
                    lambda x: {
                        "target_node": x["target_node_id"],
                        "prerequisite": x["prerequisite_id"],
                    },
                    prereq_table_entries,
                )
            ),
        )

    @action(detail=True, methods=["get"])
    def size(self, request, pk=None):
        """Return the (possibly cached) resource size of a channel root."""
        if not pk:
            raise Http404
        task_info = None
        node = self.get_object()
        # currently we restrict triggering calculations through the API to the channel root node
        if not node.is_root_node():
            raise Http404
        # we don't force the calculation, so if the channel is large, it returns the cached size
        size, stale = calculate_resource_size(node=node, force=False)
        if stale:
            # When stale, that means the value is not up-to-date with modified files in the DB,
            # and the channel is significantly large, so we'll queue an async task for calculation.
            # We don't really need more than one queued async calculation task, so we use
            # get_or_create_async_task to ensure a task is queued, as well as return info about it
            task_args = dict(node_id=node.pk, channel_id=node.channel_id)
            task_info = get_or_create_async_task(
                "calculate-resource-size", self.request.user, **task_args
            )
        changes = []
        if task_info is not None:
            changes.append(self.create_task_event(task_info))
        return Response({
            "size": size,
            "stale": stale,
            "changes": changes
        })

    def annotate_queryset(self, queryset):
        """Attach the derived counts/flags required by `values`."""
        # MPTT arithmetic: number of descendants = (rght - lft - 1) / 2.
        queryset = queryset.annotate(total_count=(F("rght") - F("lft") - 1) / 2)
        # Non-topic descendants (actual resources) of each node.
        descendant_resources = (
            ContentNode.objects.filter(
                tree_id=OuterRef("tree_id"),
                lft__gt=OuterRef("lft"),
                rght__lt=OuterRef("rght"),
            )
            .exclude(kind_id=content_kinds.TOPIC)
            .values("id", "role_visibility", "changed")
            .order_by()
        )
        # All descendants, topics included.
        all_descendants = (
            ContentNode.objects.filter(
                tree_id=OuterRef("tree_id"),
                lft__gt=OuterRef("lft"),
                rght__lt=OuterRef("rght"),
            )
            .values("id", "complete", "published")
            .order_by()
        )
        # Get count of descendant nodes with errors
        descendant_errors = all_descendants.filter(complete=False)
        changed_descendants = descendant_resources.filter(changed=True)
        thumbnails = File.objects.filter(
            contentnode=OuterRef("id"), preset__thumbnail=True
        )
        # Prefer the originating channel's name; fall back to the name of the
        # channel owning this tree.
        original_channel_name = Coalesce(
            Subquery(
                Channel.objects.filter(pk=OuterRef("original_channel_id")).values(
                    "name"
                )[:1]
            ),
            Subquery(
                Channel.objects.filter(main_tree__tree_id=OuterRef("tree_id")).values(
                    "name"
                )[:1]
            ),
        )
        # The true original node: one whose node_id equals its own
        # original_source_node_id.
        original_node = ContentNode.objects.filter(
            node_id=OuterRef("original_source_node_id")
        ).filter(node_id=F("original_source_node_id"))
        root_id = ContentNode.objects.filter(
            tree_id=OuterRef("tree_id"), parent__isnull=True
        ).values_list("id", flat=True)[:1]
        assessment_items = (
            AssessmentItem.objects.filter(contentnode_id=OuterRef("id"), deleted=False)
            .values_list("assessment_id", flat=True)
            .distinct()
        )
        queryset = queryset.annotate(
            resource_count=SQCount(descendant_resources, field="id"),
            coach_count=SQCount(
                descendant_resources.filter(role_visibility=roles.COACH), field="id",
            ),
            assessment_item_count=SQCount(assessment_items, field="assessment_id"),
            error_count=SQCount(descendant_errors, field="id"),
            has_updated_descendants=Exists(
                changed_descendants.filter(published=True).values("id")
            ),
            has_new_descendants=Exists(
                changed_descendants.filter(published=False).values("id")
            ),
            thumbnail_checksum=Subquery(thumbnails.values("checksum")[:1]),
            thumbnail_extension=Subquery(
                thumbnails.values("file_format__extension")[:1]
            ),
            original_channel_name=original_channel_name,
            original_parent_id=Subquery(original_node.values("parent_id")[:1]),
            has_children=Exists(
                ContentNode.objects.filter(parent=OuterRef("id")).values("pk")
            ),
            root_id=Subquery(root_id),
        )
        queryset = queryset.annotate(content_tags=NotNullMapArrayAgg("tags__tag_name"))
        return queryset

    def validate_targeting_args(self, target, position):
        """Resolve and validate a move/copy target node and position.

        Returns the target ContentNode instance and a normalized position;
        raises ValidationError for missing/unknown targets or bad positions.
        """
        position = position or "last-child"
        if target is None:
            raise ValidationError("A target must be specified")
        try:
            target = self.get_edit_queryset().get(pk=target)
        except ContentNode.DoesNotExist:
            raise ValidationError("Target: {} does not exist".format(target))
        except ValueError:
            raise ValidationError("Invalid target specified: {}".format(target))
        if position not in _valid_positions:
            raise ValidationError(
                "Invalid position specified, must be one of {}".format(
                    ", ".join(_valid_positions)
                )
            )
        return target, position

    def move_from_changes(self, changes):
        """Apply a batch of move changes; returns (errors, change events)."""
        errors = []
        changes_to_return = []
        for move in changes:
            # Move change will have key, must also have target property
            # optionally can include the desired position.
            move_error, move_change = self.move(
                move["key"], target=move.get("target"), position=move.get("position")
            )
            if move_error:
                move.update({"errors": [move_error]})
                errors.append(move)
            if move_change:
                changes_to_return.append(move_change)
        return errors, changes_to_return

    def move(self, pk, target=None, position=None):
        """Queue an async move of node `pk` under `target` at `position`.

        Returns (error_message, change) — on success both are None.
        """
        try:
            contentnode = self.get_edit_queryset().get(pk=pk)
        except ContentNode.DoesNotExist:
            error = ValidationError("Specified node does not exist")
            return str(error), None
        try:
            target, position = self.validate_targeting_args(target, position)
            channel_id = target.channel_id
            task_args = {
                "user_id": self.request.user.id,
                "channel_id": channel_id,
                "node_id": contentnode.id,
                "target_id": target.id,
                "position": position,
            }
            task, task_info = create_async_task(
                "move-nodes", self.request.user, **task_args
            )
            # NOTE(review): task/task_info are discarded and no change event is
            # returned here, unlike `copy` below — confirm this is intended.
            return (
                None,
                None,
            )
        except ValidationError as e:
            return str(e), None

    def copy_from_changes(self, changes):
        """Apply a batch of copy changes; returns (errors, change events)."""
        errors = []
        changes_to_return = []
        for copy in changes:
            # Copy change will have key, must also have other attributes, defined in `copy`
            # Just pass as keyword arguments here to let copy do the validation
            copy_errors, copy_changes = self.copy(copy["key"], **copy)
            if copy_errors:
                copy.update({"errors": copy_errors})
                errors.append(copy)
            if copy_changes:
                changes_to_return.extend(copy_changes)
        return errors, changes_to_return

    def copy(
        self,
        pk,
        from_key=None,
        target=None,
        position=None,
        mods=None,
        excluded_descendants=None,
        **kwargs
    ):
        """Queue an async copy of node `from_key` to new id `pk` under `target`.

        Returns (error_message, change events); on success the change event
        carries the async task id for the new node.
        """
        try:
            target, position = self.validate_targeting_args(target, position)
        except ValidationError as e:
            return str(e), None
        try:
            source = self.get_queryset().get(pk=from_key)
        except ContentNode.DoesNotExist:
            error = ValidationError("Copy source node does not exist")
            # Tell the client to delete its optimistic local copy.
            return str(error), [generate_delete_event(pk, CONTENTNODE)]
        # Affected channel for the copy is the target's channel
        channel_id = target.channel_id
        if ContentNode.objects.filter(pk=pk).exists():
            error = ValidationError("Copy pk already exists")
            return str(error), None
        task_args = {
            "user_id": self.request.user.id,
            "channel_id": channel_id,
            "source_id": source.id,
            "target_id": target.id,
            "pk": pk,
            "mods": mods,
            "excluded_descendants": excluded_descendants,
            "position": position,
        }
        task, task_info = create_async_task(
            "duplicate-nodes", self.request.user, **task_args
        )
        return (
            None,
            [generate_update_event(pk, CONTENTNODE, {TASK_ID: task_info.task_id})],
        )

    def delete_from_changes(self, changes):
        """Queue async deletions for a batch of delete changes."""
        errors = []
        changes_to_return = []
        queryset = self.get_edit_queryset().order_by()
        for change in changes:
            try:
                instance = queryset.get(**dict(self.values_from_key(change["key"])))
                task_args = {
                    "user_id": self.request.user.id,
                    "channel_id": instance.channel_id,
                    "node_id": instance.id,
                }
                task, task_info = create_async_task(
                    "delete-node", self.request.user, **task_args
                )
            except ContentNode.DoesNotExist:
                # If the object already doesn't exist, as far as the user is concerned
                # job done!
                pass
            except Exception as e:
                log_sync_exception(e)
                change["errors"] = [str(e)]
                errors.append(change)
        return errors, changes_to_return
|
import ROOT
import os
import root_numpy as rnp
import numpy as np
import pandas as pd
import tables
import uproot
import time
import multiprocessing
from samplesAOD2017 import *
from math import ceil
from ROOT import gROOT, TFile, TTree, TObject, TH1, TH1F, AddressOf, TLorentzVector
from dnn_functions import *
gROOT.ProcessLine('.L Objects.h' )
from ROOT import JetType, CaloJetType, MEtType, CandidateType, DT4DSegmentType, CSCSegmentType, PFCandidateType#, TrackType
from collections import defaultdict
from samplesAOD2017 import *
def convert_dataset_condor(folder,graphnet_folder,file_name,nj,npf,event_list,cols):
    """Convert per-event HDF5 dataframes into per-jet dataframes.

    Reads <folder>/<file_name>_{train,test,val}.h5, slices out the columns
    of each of the `nj` jets (plus their `npf` PF candidates), renames the
    columns to a jet-generic naming scheme, stacks all jets vertically, and
    writes <graphnet_folder>/<file_name>_{train,test,val}.h5.

    Parameters
    ----------
    folder : input directory of the per-event h5 files
    graphnet_folder : output directory for the per-jet h5 files
    file_name : common basename of the h5 files
    nj : number of jets per event
    npf : number of PF candidates per jet
    event_list : event-level columns carried over to every jet row
    cols : candidate per-jet column names to keep
    """
    print(" Transform per-event into per-jet dataframes...")
    print("\n")
    startTime = time.time()
    ##folder+file_name WILL be folder+s and no loop ! TODO!
    ##Prepare train/test/val sample
    df_pre_train = defaultdict()
    df_pre_test = defaultdict()
    df_pre_val = defaultdict()
    # Load the full per-event dataframes (start=0, stop=-1 selects everything).
    store_pre_train = pd.HDFStore(folder+file_name+"_train.h5")
    df_pre_train = store_pre_train.select("df",start=0,stop=-1)#
    store_pre_test = pd.HDFStore(folder+file_name+"_test.h5")
    df_pre_test = store_pre_test.select("df",start=0,stop=-1)#
    store_pre_val = pd.HDFStore(folder+file_name+"_val.h5")
    df_pre_val = store_pre_val.select("df",start=0,stop=-1)#
    df_temp_train = defaultdict()
    df_temp_test = defaultdict()
    df_temp_val = defaultdict()
    df_conc_train = defaultdict()
    df_conc_test = defaultdict()
    df_conc_val = defaultdict()
    df_train = defaultdict()
    df_test = defaultdict()
    df_val = defaultdict()
    #print(cols)
    for j in range(nj):
        # Collect the columns belonging to jet `j`.
        temp_list = []
        #print("Jet n. ",j)
        for l in cols:
            if ("Jet_"+str(j)) in l:
                #print(l)
                temp_list.append(l.replace('.','_'))
        # Always keep the label/kinematics columns needed downstream,
        # even when they were not requested in `cols`.
        if "Jet_"+str(j)+"_isGenMatched" not in temp_list:
            temp_list.append("Jet_"+str(j)+"_isGenMatched")
        if "Jet_"+str(j)+"_pt" not in temp_list:
            temp_list.append("Jet_"+str(j)+"_pt")
        if "Jet_"+str(j)+"_eta" not in temp_list:
            temp_list.append("Jet_"+str(j)+"_eta")
        if "Jet_"+str(j)+"_timeRecHits" not in temp_list:
            temp_list.append("Jet_"+str(j)+"_timeRecHits")
        if "Jet_"+str(j)+"_isGenMatchedCaloCorrLLPAccept" not in temp_list:
            temp_list.append("Jet_"+str(j)+"_isGenMatchedCaloCorrLLPAccept")
        #print(temp_list)
        # Slice this jet's columns (plus event columns) and remember which
        # jet each row came from.
        df_temp_train = df_pre_train[temp_list+event_list]
        df_temp_train["Jet_index"] = np.ones(df_temp_train.shape[0])*j
        ##Temp test
        df_temp_test = df_pre_test[temp_list+event_list]
        df_temp_test["Jet_index"] = np.ones(df_temp_test.shape[0])*j
        ##Temp val
        df_temp_val = df_pre_val[temp_list+event_list]
        df_temp_val["Jet_index"] = np.ones(df_temp_val.shape[0])*j
        #print("\n")
        #print("Before renaming")
        #print(df_temp_val)
        #Rename columns
        # Strip the jet index from column names so all jets share one schema:
        # "Jet_<j>_PFCandidate_<p>_X" -> "X_<p>", "Jet_<j>_X" -> "Jet_X".
        for i, v in enumerate(temp_list):
            if("PFCandidate" in v):
                for p in range(npf):
                    feat = v.replace("Jet_"+str(j)+"_PFCandidate_"+str(p)+"_","")
                    df_temp_train.rename(columns={"Jet_"+str(j)+"_PFCandidate_"+str(p)+"_"+feat: feat+"_"+str(p)},inplace=True)
                    df_temp_test.rename(columns={"Jet_"+str(j)+"_PFCandidate_"+str(p)+"_"+feat: feat+"_"+str(p)},inplace=True)
                    df_temp_val.rename(columns={"Jet_"+str(j)+"_PFCandidate_"+str(p)+"_"+feat: feat+"_"+str(p)},inplace=True)
            else:
                feat = v.replace("Jet_"+str(j)+"_","")
                df_temp_train.rename(columns={str(v): "Jet_"+feat},inplace=True)
                df_temp_test.rename( columns={str(v): "Jet_"+feat},inplace=True)
                df_temp_val.rename( columns={str(v): "Jet_"+feat},inplace=True)
        #print("\n")
        #print("After renaming")
        #print(df_temp_val["Jet_isGenMatched"])
        #Concatenate jets
        if j==0:
            df_conc_train = df_temp_train
            df_conc_test = df_temp_test
            df_conc_val = df_temp_val
        else:
            df_conc_train = pd.concat([df_conc_train,df_temp_train])
            df_conc_test = pd.concat([df_conc_test,df_temp_test])
            df_conc_val = pd.concat([df_conc_val,df_temp_val])
    #Remove empty jets from train and val
    df_train = df_conc_train[ df_conc_train["Jet_pt"]>0 ]
    df_test = df_conc_test
    df_val = df_conc_val[ df_conc_val["Jet_pt"]>0 ]
    #print(df_train[["Jet_isGenMatched","Jet_pt"]])
    print("\n")
    print(" * * * * * * * * * * * * * * * * * * * * * * *")
    print(" Time needed to convert: %.2f seconds" % (time.time() - startTime))
    print(" * * * * * * * * * * * * * * * * * * * * * * *")
    print("\n")
    ##write h5
    #print(graphnet_folder+'/'+file_name)
    df_train.to_hdf(graphnet_folder+'/'+file_name+'_train.h5', 'df', format='fixed')
    print(" "+graphnet_folder+"/"+file_name+"_train.h5 stored")
    df_test.to_hdf(graphnet_folder+'/'+file_name+'_test.h5', 'df', format='fixed')
    print(" "+graphnet_folder+"/"+file_name+"_test.h5 stored")
    df_val.to_hdf(graphnet_folder+'/'+file_name+'_val.h5', 'df', format='fixed')
    print(" "+graphnet_folder+"/"+file_name+"_val.h5 stored")
    #print(df_train)
    #print(" DONEEEEE")
    print(" ------------------- ")
'''
def convert_dataset_condor_per_jet(folder,graphnet_folder,file_name):
print(" Transform per-event into per-jet dataframes...")
print("\n")
startTime = time.time()
##folder+file_name WILL be folder+s and no loop ! TODO!
##Prepare train/test/val sample
df_pre_train = defaultdict()
df_pre_test = defaultdict()
df_pre_val = defaultdict()
store_pre_train = pd.HDFStore(folder+file_name+"_train.h5")
df_pre_train = store_pre_train.select("df",start=0,stop=-1)#
store_pre_test = pd.HDFStore(folder+file_name+"_test.h5")
df_pre_test = store_pre_test.select("df",start=0,stop=-1)#
store_pre_val = pd.HDFStore(folder+file_name+"_val.h5")
df_pre_val = store_pre_val.select("df",start=0,stop=-1)#
df_temp_train = defaultdict()
df_temp_test = defaultdict()
df_temp_val = defaultdict()
df_conc_train = defaultdict()
df_conc_test = defaultdict()
df_conc_val = defaultdict()
df_train = defaultdict()
df_test = defaultdict()
df_val = defaultdict()
#Transform per-event into per-jet
for j in range(nj):
temp_list = []
for l in var_list:#all variables
#for l in cols:#only the one we want to train?
if ("Jet_"+str(j)) in l:
#print(l)
temp_list.append(l.replace('.','_'))
#print("Here doing per jet")
#print(temp_list)
##Temp train
df_temp_train = df_pre_train[temp_list+event_list]
df_temp_train["Jet_index"] = np.ones(df_temp_train.shape[0])*j
##Temp test
df_temp_test = df_pre_test[temp_list+event_list]
df_temp_test["Jet_index"] = np.ones(df_temp_test.shape[0])*j
##Temp val
df_temp_val = df_pre_val[temp_list+event_list]
df_temp_val["Jet_index"] = np.ones(df_temp_val.shape[0])*j
#print("\n")
#print("Before renaming")
#print(df_temp_val_s)
#Rename columns
for i, v in enumerate(train_features):
for p in range(npf):
df_temp_train.rename(columns={"Jet_"+str(j)+"_PFCandidate_"+str(p)+"_"+train_features[i]: train_features[i]+"_"+str(p)},inplace=True)
df_temp_test.rename(columns={"Jet_"+str(j)+"_PFCandidate_"+str(p)+"_"+train_features[i]: train_features[i]+"_"+str(p)},inplace=True)
df_temp_val.rename(columns={"Jet_"+str(j)+"_PFCandidate_"+str(p)+"_"+train_features[i]: train_features[i]+"_"+str(p)},inplace=True)
for v in jvar:
#print("Jet_"+str(j)+"_"+str(v))
df_temp_train.rename(columns={"Jet_"+str(j)+"_"+str(v): "Jet_"+str(v)},inplace=True)
df_temp_test.rename( columns={"Jet_"+str(j)+"_"+str(v): "Jet_"+str(v)},inplace=True)
df_temp_val.rename( columns={"Jet_"+str(j)+"_"+str(v): "Jet_"+str(v)},inplace=True)
#print(df_temp_val_s[s])
#exit()
#Concatenate jets
if j==0:
df_conc_train = df_temp_train
df_conc_test = df_temp_test
df_conc_val = df_temp_val
else:
df_conc_train = pd.concat([df_conc_train,df_temp_train])
df_conc_test = pd.concat([df_conc_test,df_temp_test])
df_conc_val = pd.concat([df_conc_val,df_temp_val])
##df_train_s[s] = df_conc_train_s[s][ df_conc_train_s[s]["Jet_isGenMatched"]==1 ]
##df_test_s[s] = df_conc_test_s[s][ df_conc_test_s[s]["Jet_isGenMatched"]==1 ]
##no selections at the moment
df_train = df_conc_train
df_test = df_conc_test
df_val = df_conc_val
print(df_train)
print("\n")
print(" * * * * * * * * * * * * * * * * * * * * * * *")
print(" Time needed to convert: %.2f seconds" % (time.time() - startTime))
print(" * * * * * * * * * * * * * * * * * * * * * * *")
print("\n")
##write h5
df_train.to_hdf(graphnet_folder+'/'+file_name+'_train.h5', 'df', format='table' if (len(var_list)<=2000) else 'fixed')
print(" "+graphnet_folder+"/"+file_name+"_train.h5 stored")
df_test.to_hdf(graphnet_folder+'/'+file_name+'_test.h5', 'df', format='table' if (len(var_list)<=2000) else 'fixed')
print(" "+graphnet_folder+"/"+file_name+"_test.h5 stored")
df_val.to_hdf(graphnet_folder+'/'+file_name+'_val.h5', 'df', format='table' if (len(var_list)<=2000) else 'fixed')
print(" "+graphnet_folder+"/"+file_name+"_val.h5 stored")
print(" ------------------- ")
'''
|
#linear Regression algorithm to predict Bike Users vs Temparature
import pandas as pd
import matplotlib.pyplot as plot
from sklearn.model_selection import train_test_split
class BikePredictor:
    """Linear regression of shared-bike counts vs. temperature.

    NOTE(review): the entire workflow below executes at class-definition
    (import) time inside a class-level try block, rather than in methods —
    confirm this is intended.
    """
    try:
        #Loading the data from csv file
        userdata = pd.read_csv("bike_sharing.csv")
        print(userdata.head(10))
        #Renamed the column names
        userdata.rename(columns={"temp": "Temperature", "cnt": "Shared Bikes"}, inplace=True)
        print(userdata)
        columns = userdata.columns
        print(columns)
        x_data = userdata.loc[:, ['Temperature']].values
        y_data = userdata.loc[:, ['Shared Bikes']].values
        #Splitting the data set into training data and testing data
        # NOTE(review): test_size=8500 is an absolute row count, not a
        # fraction, and random_state=True behaves like random_state=1 —
        # confirm both are intended.
        x_train_data, x_test, y_train_data, y_test = train_test_split(x_data, y_data, test_size=8500, random_state=True)
        #Using Linear regression module to predict the outcome
        from sklearn.linear_model import LinearRegression
        linear_module = LinearRegression()
        linear_module.fit(x_train_data, y_train_data)
        predictions = linear_module.predict(x_test)
        print(predictions)
        #graph for training data
        plot.scatter(x_train_data, y_train_data, color = 'red')
        plot.plot(x_train_data, linear_module.predict(x_train_data), color='black')
        plot.xlabel("Temperature")
        plot.ylabel("Shared Bikes")
        plot.title("Shared Bikes vs Temperature")
        plot.show()
        #graph for testing data
        plot.scatter(x_test, y_test, color='yellow')
        plot.plot(x_train_data, linear_module.predict(x_train_data))
        plot.xlabel("Temperature")
        plot.ylabel("Shared Bikes")
        plot.title("Shared Bikes vs Temperature")
        plot.show()
    except FileNotFoundError:
        print("File not found")
|
from django.shortcuts import render
from django.http.response import HttpResponse, Http404
from django.template.loader import get_template
from django.template import Context
from django.shortcuts import render_to_response, redirect
from article.models import Article, Comments
from django.core.exceptions import ObjectDoesNotExist
from article.forms import CommentForm
from django.core.context_processors import csrf
from django.contrib import auth
# Create your views here.
def basic_one(request):
    """Simplest possible view: a hard-coded HTML response naming itself."""
    view = "basic_one"
    return HttpResponse("<html><body>This is %s view</body></html>" % view)
def template_two(request):
    """Render myview.html via explicit template loading and a Context."""
    view = "template_two"
    template = get_template('myview.html')
    return HttpResponse(template.render(Context({'name': view})))
def template_three_simple(request):
    """Same rendering as template_two, using the render_to_response shortcut."""
    return render_to_response('myview.html', {'name': "template_three"})
def articles(request):
    """List every article together with the current user's name."""
    context = {
        'articles': Article.objects.all(),
        'username': auth.get_user(request).username,
    }
    return render_to_response('articles.html', context)
def article(request, article_id=1):
    """Show one article with its comments and an empty comment form."""
    args = {}
    # CSRF token is required because article.html posts the comment form.
    args.update(csrf(request))
    args['article'] = Article.objects.get(id=article_id)
    args['comments'] = Comments.objects.filter(comments_article_id=article_id)
    args['form'] = CommentForm
    args['username'] = auth.get_user(request).username
    return render_to_response('article.html', args)
def addlike(request, article_id):
    """Increment an article's like counter, at most once per browser.

    A cookie named after the article id marks browsers that already liked
    it; repeat requests are redirected home without counting.
    Raises Http404 when the article does not exist.

    Fix: the already-liked branch previously called redirect('/') and
    discarded the result, relying on fall-through to a trailing return —
    it now returns the redirect explicitly.
    """
    try:
        if article_id in request.COOKIES:
            # Already liked from this browser; don't count again.
            return redirect('/')
        article = Article.objects.get(id=article_id)
        article.article_likes += 1
        article.save()
        response = redirect('/')
        # Remember the like so repeat clicks are ignored.
        response.set_cookie(article_id, "test")
        return response
    except ObjectDoesNotExist:
        raise Http404
def addcomment(request, article_id):
    """Attach a submitted comment to an article, rate-limited via session.

    The "pause" session key (expiring after 60 seconds) blocks rapid
    repeat submissions.

    Fix: non-POST, paused, and invalid-form requests previously returned
    None (an invalid Django view response); the view now always redirects
    back to the article page.
    """
    if request.POST and ("pause" not in request.session):
        form = CommentForm(request.POST)
        if form.is_valid():
            comment = form.save(commit=False)
            comment.comments_article = Article.objects.get(id=article_id)
            form.save()
            # Throttle: block further comments from this session for 60s.
            request.session.set_expiry(60)
            request.session['pause'] = True
    return redirect('/articles/get/%s/' % article_id)
|
class Solution:
    def solveNQueens(self, n: int) -> List[List[str]]:
        """Return every distinct n-queens placement.

        Each solution is a list of n row strings, with 'Q' marking the
        queen and '.' the empty squares. Columns and both diagonals are
        tracked in sets for O(1) conflict checks.
        """
        solutions = []
        cols, diags, anti_diags = set(), set(), set()

        def place(row, board):
            if len(board) == n:
                solutions.append(board)
                return
            for col in range(n):
                if col in cols or (col - row) in diags or (col + row) in anti_diags:
                    continue
                cols.add(col)
                diags.add(col - row)
                anti_diags.add(col + row)
                place(row + 1, board + ["." * col + "Q" + "." * (n - col - 1)])
                cols.discard(col)
                diags.discard(col - row)
                anti_diags.discard(col + row)

        place(0, [])
        return solutions
|
from lxml import etree, objectify
def save_to_file(objectify_tree: objectify.ObjectifiedElement, path,
                 machina_beautify: bool = True):
    ''' Saves ObjectifiedElement tree to file at path, will format and
    beautify file in the style very similar to original Ex Machina
    dynamicscene.xml files by default. Can skip beautifier and save raw
    lxml formated file.
    '''
    declaration = '<?xml version="1.0" encoding="windows-1251" standalone="yes" ?>'
    xml_string = etree.tostring(objectify_tree,
                                pretty_print=True,
                                doctype=declaration)
    payload = machina_xml_beautify(xml_string) if machina_beautify else xml_string
    with open(path, "wb") as writer:
        writer.write(payload)
def machina_xml_beautify(xml_string: bytes) -> bytes:
    ''' Format and beautify xml string in the style very similar to
    original Ex Machina dynamicscene.xml files.

    Takes the byte output of etree.tostring (space-indented), re-indents
    with tabs, puts each tag attribute on its own line, and inserts a
    blank line before the first tag of every tree level.
    '''
    beautified_string = b""
    previous_line_indent = -1
    # As first line of xml file is XML Declaration, we want to exclude it
    # from Beautifier to get rid of checks for every line down the line
    xml_string_first_line = xml_string[:xml_string.find(b"\n<")]
    for i, line in enumerate(xml_string[xml_string.find(b"\n<")
                                        + 1:].splitlines()):
        line_stripped = line.lstrip()
        # calculating indent level of parent line to indent attributes
        # lxml use spaces for indents, game use tabs, so indents maps 2:1
        line_indent = (len(line) - len(line_stripped)) // 2
        # break tag attributes onto their own lines
        line = _split_tag_on_attributes(line_stripped, line_indent)
        # manually tabulating lines according to saved indent level
        line = line_indent * b"\t" + line + b"\n"
        # in EM xmls every first and only first tag of its tree level is
        # separated by a new line
        if line_indent == previous_line_indent:
            line = b"\n" + line
        # we need to know indentation of previous tag to decide if tag is
        # first for its tree level, as described above
        previous_line_indent = line_indent
        beautified_string += line
    return xml_string_first_line + b"\n" + beautified_string
def _split_tag_on_attributes(xml_line: str, line_indent: int):
white_space_index = xml_line.find(b" ")
quotmark_index = xml_line.find(b'"')
# true when no tag attribute contained in string
if white_space_index == -1 or quotmark_index == -1:
return xml_line
elif white_space_index < quotmark_index:
# next tag attribute found, now indent found attribute and
# recursively start work on a next line part
return (xml_line[:white_space_index] + b"\n" + b"\t" * (line_indent + 1)
+ _split_tag_on_attributes(xml_line[white_space_index + 1:],
line_indent))
else:
# searching where attribute values ends and new attribute starts
second_quotmark_index = xml_line.find(b'"', quotmark_index + 1) + 1
return (xml_line[:second_quotmark_index]
+ _split_tag_on_attributes(xml_line[second_quotmark_index:],
line_indent))
|
import numpy as np
from scipy import stats
from kdeebm import mixture_model
from kdeebm import mcmc
from matplotlib import pyplot as plt
from kde_ebm_paper.data_gen import get_gamma
from kde_ebm_paper.waic_noxtoby import calculate_waic
from scipy.stats.distributions import chi2
def log_likelihood_ratio(ll1, ll2):
    """Difference of two log-likelihoods (ll1 minus ll2)."""
    return ll1 - ll2
def likelihood_ratio(ll1, ll2):
    """Likelihood-ratio statistic: twice the (non-negative) difference
    between the larger and smaller of the two log-likelihoods."""
    return 2 * abs(ll1 - ll2)
def likelihood_ratio_test(log_lh_1, log_lh_2, dof=1):
    """P-value of a likelihood-ratio test between two nested models.

    dof is the difference in degrees of freedom between the models.
    """
    statistic = likelihood_ratio(log_lh_1, log_lh_2)
    return chi2.sf(statistic, dof)
# from waic_noxtoby import waic
# log_like = -100
# waic,lpd,p_waic,elpd_waic,p_loo,elpd_loo = waic(log_like)
def plot_imshow_results(res, shape_range, sep_range, vmax=None, vmin=None, lab=''):
    """Heat-map of ``res`` over the (separation x shape) grid with a labelled colorbar."""
    fig, ax = plt.subplots()
    im = ax.imshow(res, cmap='bwr', vmin=vmin, vmax=vmax)
    # Tick labels are the parameter values themselves.
    ax.set_xticks(np.arange(0, len(shape_range)))
    ax.set_xticklabels(shape_range)
    ax.set_yticks(np.arange(0, len(sep_range)))
    ax.set_yticklabels([str(x) for x in sep_range])
    ax.set_xlabel('shape parameter')
    # NOTE(review): xy/xytext use parameter *values* while imshow axes are
    # index-based -- confirm the annotation arrow lands where intended.
    ax.annotate("increasing Gaussianity",
                xy=(shape_range[-2], sep_range[5]),
                xytext=(shape_range[3], sep_range[5]),
                arrowprops=dict(arrowstyle='-|>'),
                va='center')
    ax.set_ylabel('separation')
    fig.colorbar(im,label=lab)
    return fig, ax
np.random.seed(42)
# def main():
# Experiment grid: gamma-distribution separation and shape parameter values.
sep_range = [0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4]
shape_range = [0.1, 0.2, 0.5, 1, 2, 4, 6, 8, 10]
# Prior probability of each disease stage in the sampled cohort.
h_stage_prob = [0.55, 0.25, 0.15, 0.05]
n_samples = 200
n_repeats = 25
missing_vals_percent = 0.1
# Result arrays indexed (separation, shape, repeat[, biomarker]).
kde_ebm_results = np.empty((len(sep_range), len(shape_range), n_repeats))
gmm_ebm_results = np.empty((len(sep_range), len(shape_range), n_repeats))
kde_stage_results = np.empty((len(sep_range), len(shape_range), n_repeats))
gmm_stage_results = np.empty((len(sep_range), len(shape_range), n_repeats))
kde_like_results = np.empty((len(sep_range), len(shape_range),
                             n_repeats, len(h_stage_prob)-1))
gmm_like_results = np.empty((len(sep_range), len(shape_range),
                             n_repeats, len(h_stage_prob)-1))
kde_bic_results = np.empty((len(sep_range), len(shape_range), n_repeats, len(h_stage_prob)-1))
gmm_bic_results = np.empty((len(sep_range), len(shape_range), n_repeats, len(h_stage_prob)-1))
gmm_failed_fits = 0
kde_failed_fits = 0
# Sweep the (separation, shape) grid; each cell is repeated n_repeats times.
for i in range(len(sep_range)):
    for j in range(len(shape_range)):
        for rep_n in range(n_repeats):
            separation = sep_range[i]
            shape = shape_range[j]
            # Synthetic cohort with 3 gamma-distributed biomarkers.
            X, y, stages = get_gamma(n_samples, h_stage_prob,
                                     separation, shape, n_biomarkers=3)
            if missing_vals_percent > 0:
                # NOTE(review): high=X.size-1 is *exclusive*, so the last
                # element can never be dropped -- confirm that is intended.
                drop_vals = np.random.randint(low=0, high=X.size-1,
                                              size=int(missing_vals_percent*X.size))
                x_idx, y_idx = np.unravel_index(drop_vals, X.shape)
                X[x_idx, y_idx] = np.nan
            y = y.astype(int)
            # Fit per-biomarker mixture models with both approaches.
            gmm_mixtures = mixture_model.fit_all_gmm_models(X, y)
            kde_mixtures = mixture_model.fit_all_kde_models(X, y)
            for k in range(len(gmm_mixtures)):
                masked_X = X[:, k]
                masked_X = masked_X[~np.isnan(masked_X)].reshape(-1, 1)
                kde_like = kde_mixtures[k].likelihood(masked_X)
                gmm_like = gmm_mixtures[k].likelihood(gmm_mixtures[k].theta, masked_X)
                kde_bic = kde_mixtures[k].BIC(X=masked_X)
                gmm_bic = gmm_mixtures[k].BIC(X=masked_X)
                kde_like_results[i, j, rep_n, k] = kde_like
                gmm_like_results[i, j, rep_n, k] = gmm_like
                kde_bic_results[i, j, rep_n, k] = kde_bic
                gmm_bic_results[i, j, rep_n, k] = gmm_bic
            # Shuffle biomarker order so sequence recovery is non-trivial.
            seq = np.arange(X.shape[1])
            np.random.shuffle(seq)
            g_truth = seq.argsort()
            X = X[:, seq]
            gmm_res = mcmc.enumerate_all(X, gmm_mixtures)
            kde_res = mcmc.enumerate_all(X, kde_mixtures)
            if gmm_res is None and kde_res is None:
                gmm_failed_fits += 1
                kde_failed_fits += 1
                continue
            elif gmm_res is None:
                gmm_failed_fits += 1
                continue
            elif kde_res is None:
                kde_failed_fits += 1
                continue
            # Kendall tau of each recovered ordering against ground truth.
            gmm_kt = stats.kendalltau(gmm_res.ordering, g_truth)[0]
            kde_kt = stats.kendalltau(kde_res.ordering, g_truth)[0]
            kde_ebm_results[i, j, rep_n] = kde_kt
            gmm_ebm_results[i, j, rep_n] = gmm_kt
            kde_prob_mat = mixture_model.get_prob_mat(X, kde_mixtures)
            kde_stages, stages_like = kde_res.stage_data(kde_prob_mat)
            gmm_prob_mat = mixture_model.get_prob_mat(X, gmm_mixtures)
            # BUG FIX: this line previously called kde_res.stage_data(...) --
            # GMM staging must use the GMM result's ordering, not the KDE one.
            gmm_stages, stages_like = gmm_res.stage_data(gmm_prob_mat)
            kde_stage_corr = stats.spearmanr(stages, kde_stages)[0]
            gmm_stage_corr = stats.spearmanr(stages, gmm_stages)[0]
            kde_stage_results[i, j, rep_n] = kde_stage_corr
            gmm_stage_results[i, j, rep_n] = gmm_stage_corr
# Mann-Whitney U comparisons of GMM vs KDE across all grid cells.
p_val = stats.mannwhitneyu(gmm_stage_results.flatten(),
                           kde_stage_results.flatten()).pvalue
print('GMM corr=%f, KDE corr=%f (MWU: p=%E)' % (gmm_stage_results.mean(),
                                                kde_stage_results.mean(),
                                                p_val))
p_val = stats.mannwhitneyu(gmm_like_results.flatten(),
                           kde_like_results.flatten()).pvalue
print('GMM like=%f, KDE like=%f (MWU: p=%E)' % (gmm_like_results.mean(),
                                                kde_like_results.mean(),
                                                p_val))
#* BIC
p_val = stats.mannwhitneyu(gmm_bic_results.flatten(),
                           kde_bic_results.flatten()).pvalue
print('GMM BIC=%f, KDE BIC=%f (MWU: p=%E)' % (gmm_bic_results.mean(),
                                              kde_bic_results.mean(),
                                              p_val))
#* WAIC
waic_gmm,lpd_gmm,p_waic_gmm,elpd_waic_gmm = calculate_waic(-1*gmm_like_results.flatten())
waic_kde,lpd_kde,p_waic_kde,elpd_waic_kde = calculate_waic(-1*kde_like_results.flatten())
print('GMM like=%f, KDE like=%f | GMM WAIC = %E (eff. # params p_waic=%E); KDE WAIC = %E (eff. # params p_waic=%E)' % (gmm_like_results.mean(),
                                                                                                                      kde_like_results.mean(),
                                                                                                                      waic_gmm,p_waic_gmm,waic_kde,p_waic_kde))
p_val = stats.mannwhitneyu(gmm_ebm_results.flatten(),
                           kde_ebm_results.flatten()).pvalue
print('GMM tau=%f, KDE tau=%f (p=%E)' % (gmm_ebm_results.mean(),
                                         kde_ebm_results.mean(),
                                         p_val))
print('GMM failed %i, KDE failed %i' % (gmm_failed_fits, kde_failed_fits))
# Per-grid-cell differences (KDE minus GMM), averaged over repeats.
ebm_res_diff = (kde_ebm_results.mean(axis=-1) -
                gmm_ebm_results.mean(axis=-1))
stage_res_diff = (kde_stage_results.mean(axis=-1) -
                  gmm_stage_results.mean(axis=-1))
like_diff = kde_like_results - gmm_like_results # negative log-like
like_diff *= -1 # log-like
like_diff = like_diff.mean(axis=(-1, -2)) # avg over events (-1) and repeats (-2)
#* PLOTS
fig, ax = plot_imshow_results(ebm_res_diff, shape_range, sep_range,
                              vmin=-2, vmax=2, lab='$\Delta$(Kendall tau: KDE MM vs GMM)')
ax.set_title("Sequence correlation with ground truth")
fig.show()
fig.savefig('ebm_res_miss-%f-n=%i.png' % (missing_vals_percent,
                                          n_samples))
min_max = np.abs(stage_res_diff).max()
fig, ax = plot_imshow_results(stage_res_diff, shape_range,
                              sep_range, vmin=-min_max, vmax=min_max,
                              lab='$\Delta$(Spearman rho: KDE MM vs GMM)')
ax.set_title("Staging correlation with ground truth")
fig.show()
fig.savefig('stage_res_miss-%f-n=%i.png' % (missing_vals_percent,
                                            n_samples))
min_max = np.abs(like_diff).max()
fig, ax = plot_imshow_results(like_diff, shape_range,
                              sep_range, vmin=-min_max, vmax=min_max,
                              lab="$\Delta$(Likelihood)")
ax.set_title("Likelihood difference: KDE MM vs GMM")
fig.show()
fig.savefig('like_res_miss-%f-n=%i.png' % (missing_vals_percent,
                                           n_samples))
#* Likelihood Ratio test
# like_ratio = kde_like_results - gmm_like_results # negative log-like
# like_diff *= -1 # log-like
# like_diff = like_diff.mean(axis=(-1, -2)) # avg over n=25 repeats and m=3 events
#* Log-likelihoods
L1 = -1*gmm_like_results #.flatten()
L2 = -1*kde_like_results #.flatten()
L1_L2_ratio = log_likelihood_ratio(L1,L2)
# Element-wise chi-squared (df=1) p-values, and the repeat-averaged version.
p = chi2.sf(L1_L2_ratio, df=1)
p_av = chi2.sf(L1_L2_ratio.mean(axis=-2), df=1)
# p = np.empty(shape=L1_L2_ratio.shape)
# for k in range(len(p)):
# #* p-value is negative if GMM is more likely than KDEMM
# p[k] = (-1)**(L1[k]>=L2[k])*likelihood_ratio_test(L1[k],L2[k])
# fig, ax = plot_imshow_results_subplots(like_diff,
# p.mean(axis=(-2,-1)),
# shape_range,
# sep_range,
# vmin=p_av.min(), vmax=p_av.max())
# ax[1].set_title("Likelihood Ratio test, H_0:GMM, H_1:KDEMM")
# fig.show()
# def plot_imshow_results_subplots(res1, res2, shape_range, sep_range, vmax=None, vmin=None):
# fig, ax = plt.subplots(1,2,sharey=True)
# im1 = ax[0].imshow(res1, cmap='bwr', vmin=vmin, vmax=vmax)
# ax[0].set_xticks(np.arange(0, len(shape_range)))
# ax[0].set_xticklabels(shape_range)
# ax[0].set_yticks(np.arange(0, len(sep_range)))
# ax[0].set_yticklabels([str(x) for x in sep_range])
# ax[0].set_xlabel('shape parameter')
# ax[0].set_ylabel('separation')
# fig.colorbar(im1)
# im2 = ax[1].imshow(res1, cmap='bwr') #, vmin=vmin, vmax=vmax)
# ax[1].set_xticks(np.arange(0, len(shape_range)))
# ax[1].set_xticklabels(shape_range)
# ax[1].set_yticks(np.arange(0, len(sep_range)))
# ax[1].set_yticklabels([str(x) for x in sep_range])
# ax[1].set_xlabel('shape parameter')
# #ax[1].set_ylabel('separation')
# return fig, ax
############### https://stackoverflow.com/questions/7404116/defining-the-midpoint-of-a-colormap-in-matplotlib
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import AxesGrid
def shiftedColorMap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
    '''
    Function to offset the "center" of a colormap. Useful for
    data with a negative min and positive max and you want the
    middle of the colormap's dynamic range to be at zero.
    Input
    -----
    cmap : The matplotlib colormap to be altered
    start : Offset from lowest point in the colormap's range.
        Defaults to 0.0 (no lower offset). Should be between
        0.0 and `midpoint`.
    midpoint : The new center of the colormap. Defaults to
        0.5 (no shift). Should be between 0.0 and 1.0. In
        general, this should be 1 - vmax / (vmax + abs(vmin))
        For example if your data range from -15.0 to +5.0 and
        you want the center of the colormap at 0.0, `midpoint`
        should be set to 1 - 5/(5 + 15)) or 0.75
    stop : Offset from highest point in the colormap's range.
        Defaults to 1.0 (no upper offset). Should be between
        `midpoint` and 1.0.
    '''
    cdict = {
        'red': [],
        'green': [],
        'blue': [],
        'alpha': []
    }
    # regular index to compute the colors
    reg_index = np.linspace(start, stop, 257)
    # shifted index to match the data
    shift_index = np.hstack([
        np.linspace(0.0, midpoint, 128, endpoint=False),
        np.linspace(midpoint, 1.0, 129, endpoint=True)
    ])
    # Sample the source cmap at the regular positions but register each
    # sample at its shifted position, warping the map around `midpoint`.
    for ri, si in zip(reg_index, shift_index):
        r, g, b, a = cmap(ri)
        cdict['red'].append((si, r, r))
        cdict['green'].append((si, g, g))
        cdict['blue'].append((si, b, b))
        cdict['alpha'].append((si, a, a))
    newcmap = matplotlib.colors.LinearSegmentedColormap(name, cdict)
    # NOTE(review): plt.register_cmap is deprecated (removed in Matplotlib
    # 3.9); matplotlib.colormaps.register is the modern API -- confirm the
    # pinned Matplotlib version before changing this.
    plt.register_cmap(cmap=newcmap)
    return newcmap
# Reversed blue-white-red map, re-centred near p = 0.05 for p-value displays.
orig_cmap = matplotlib.cm.bwr_r
shifted_cmap = shiftedColorMap(orig_cmap, midpoint=0.05, name='shifted')
shrunk_cmap = shiftedColorMap(orig_cmap, start=0, midpoint=0.05, stop=1, name='shrunk')
##################
def plot_imshow_results2(res, shape_range, sep_range, vmax=None, vmin=None, lab=''):
    """Heat-map of ``res`` using the p-value-centred ``shifted_cmap``.

    NOTE(review): near-duplicate of plot_imshow_results -- consider passing
    the colormap as a parameter instead of keeping two copies.
    """
    fig, ax = plt.subplots()
    im = ax.imshow(res, cmap=shifted_cmap, vmin=vmin, vmax=vmax)
    ax.set_xticks(np.arange(0, len(shape_range)))
    ax.set_xticklabels(shape_range)
    ax.set_yticks(np.arange(0, len(sep_range)))
    ax.set_yticklabels([str(x) for x in sep_range])
    ax.set_xlabel('shape parameter')
    ax.set_ylabel('separation')
    # NOTE(review): xy/xytext use parameter values on index-based imshow axes
    # -- confirm the annotation arrow lands where intended.
    ax.annotate("increasing Gaussianity",
                xy=(shape_range[-2], sep_range[5]),
                xytext=(shape_range[3], sep_range[5]),
                arrowprops=dict(arrowstyle='-|>'),
                va='center')
    fig.colorbar(im,label=lab)
    return fig, ax
# Visualise the mean likelihood-ratio-test p-values over the grid.
fig, ax = plot_imshow_results2(p.mean(axis=(-2,-1)),
                               shape_range,
                               sep_range,
                               vmin=p_av.min(), vmax=p_av.max(), lab='p value')
ax.set_title("Likelihood Ratio test, H$_0$:GMM, H$_1$:KDEMM")
fig.show()
fig.savefig('like_ratio_test-%f-n=%i.png' % (missing_vals_percent, n_samples))
print('GMM mean(LH)=%f, KDE mean(LH)=%f (mean p=%E)' % (np.mean(L1),
                                                        np.mean(L2),
                                                        np.mean(p)))
# if __name__ == '__main__':
# np.random.seed(42)
# main()
|
# Count lines of liczby.txt containing more '0' characters than '1's.
with open("liczby.txt", 'r') as plik:
    wiersze = plik.readlines()

ile = sum(1 for wiersz in wiersze if wiersz.count("0") > wiersz.count("1"))
print(ile)
|
from django import template
register = template.Library()
@register.filter(is_safe=True)
def macro(text, character):
    """Expand #PLACEHOLDER# tokens in ``text`` with the character's data.

    Substitutes the character's names and the gender-appropriate pronoun
    set.  Registered once as the template filter 'macro' via the decorator
    (the original redundant ``register.filter('macro', macro)`` call and
    the unused ``from lok import models`` import were removed).
    """
    from lok.models import Character  # local import, kept from the original

    replacements = {
        '#INFORMAL_NAME#': character.name,
        '#TITLE_NAME#': character.title_name(),
    }
    if character.gender == Character.GENDER_MALE:
        replacements.update({
            '#MAN_WOMAN_CAPITAL#': 'Man',
            '#MAN_WOMAN#': 'man',
            '#HIS_HER#': 'his',
            '#HE_SHE#': 'he',
            '#HE_SHE_CAPITAL#': 'He',
        })
    else:
        replacements.update({
            '#MAN_WOMAN_CAPITAL#': 'Woman',
            '#MAN_WOMAN#': 'woman',
            '#HIS_HER#': 'her',
            '#HE_SHE#': 'she',
            '#HE_SHE_CAPITAL#': 'She',
        })
    # Same order as the original chained str.replace calls; the tokens do
    # not overlap, so the order is not semantically significant.
    for token, value in replacements.items():
        text = text.replace(token, value)
    return text
|
def ReturnTheString(L, idx1, idx2 = -1):
    """Concatenate the digits of L, skipping positions idx1 and idx2.

    Returns the integer 0 when no digits remain (mirrors the original).
    """
    kept = [str(valor) for pos, valor in enumerate(L) if pos not in (idx1, idx2)]
    if not kept:
        return 0
    return "".join(kept)
def fun(L):
    """Largest multiple of 3 writable with the digits of L (as a string), or 0.

    Sorts L in place in descending order (same side effect as the original).
    """
    L.sort(reverse=True)

    def drop(i1, i2=-1):
        # Digits of L without positions i1/i2; integer 0 when nothing remains.
        kept = "".join(str(L[i]) for i in range(len(L)) if i != i1 and i != i2)
        return kept if kept else 0

    remainder = sum(L) % 3
    if remainder == 0:
        # Every digit can be kept.
        return "".join(str(digit) for digit in L)
    if len(L) == 1:
        return 0
    # Either remove one digit with the same remainder (optimal, since digits
    # are sorted descending), or two digits whose remainders are `other`.
    other = 3 - remainder  # 1 -> 2, 2 -> 1
    pair = []
    for pos in range(len(L)):
        mod = L[pos] % 3
        if mod == remainder:
            return drop(pos)
        if mod == other:
            pair.append(pos)
            if len(pair) == 2:
                return drop(pair[0], pair[1])
# Read one line of space-separated digits and print the best multiple of 3.
L = list(map(int, input().split()))
#print(L) #for debugging.
print(fun(L))
import time
import threading
total = 4
def create_items():
    """Producer: every 2 seconds add one item to the shared total, 10 times."""
    global total
    restantes = 10
    while restantes:
        time.sleep(2)
        print('added items')
        total += 1
        restantes -= 1
    print('creation is done')
def create_items_2():
    """Faster producer: every second add one item to the shared total, 7 times."""
    global total
    restantes = 7
    while restantes:
        time.sleep(1)
        print('added items')
        total += 1
        restantes -= 1
    print('creation is done')
def limit_items():
    """Consumer daemon: forever shed 3 items whenever the total exceeds 5."""
    global total
    while True:
        if total <= 5:
            # Nothing to shed yet; poll again in a second.
            time.sleep(1)
            print('waiting')
            continue
        print('overload')
        total -= 3
        print('subtracted 3')
creator1 = threading.Thread(target=create_items)
creator2 = threading.Thread(target=create_items_2)
# Daemon: limit_items() loops forever, so it must die with the main thread.
limitor = threading.Thread(target=limit_items, daemon=True)
print(limitor.daemon)  # Thread.isDaemon() is deprecated; use the property
creator1.start()
creator2.start()
limitor.start()
creator1.join()
creator2.join()
# BUG FIX: the original called limitor.join() here; limit_items() never
# returns, so that join blocked forever and the final print was never
# reached.  The daemon is terminated automatically when the program exits.
print('Ultimate total = {}'.format(total))
|
from target import LocalTarget
import os
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Task(object):
    """Abstract base class: a unit of work with dependencies and an output."""
    @abc.abstractmethod
    def run(self):
        """Run task."""
    @property
    def deps(self):
        """List of tasks the task depends on."""
        return []
    @abc.abstractmethod
    def output(self):
        """Tasks write result here."""
    def inputs(self):
        # Outputs of all dependency tasks, in declaration order.
        return [d.output() for d in self.deps]
class HelloTask(Task):
    """Leaf task: greets on run and produces no output target."""

    def run(self):
        # Side effect only -- nothing is written to disk.
        print('Hello!')

    def output(self):
        # No target: downstream tasks must not try to read from this task.
        return None
class GreetTask(Task):
    """Print a greeting and write "name<TAB>age" to data/<name>.tsv."""
    # Hard-coded demo ages keyed by name.
    _ages = {
        'Alice': 25,
        'Bob': 22,
        'Charlie': 30}
    def __init__(self, name):
        # `name` must be a key of _ages; run() raises KeyError otherwise.
        self._name = name
    def run(self):
        print("I'm %s, %s years old." % (self._name, self._ages[self._name]))
        # NOTE(review): the handle returned by open('tw') is never closed --
        # confirm LocalTarget flushes/closes on destruction.
        self.output().open('tw').write("\t".join([self._name, str(self._ages[self._name])]))
    def output(self):
        return LocalTarget(os.path.join('data', '%s.tsv' % self._name))
    @property
    def deps(self):
        """Say hello before start talking about yourself."""
        return [HelloTask()]
class HeightTask(Task):
    """Estimate heights from the tsv files written by its GreetTask deps."""
    def __init__(self, names):
        # Each name spawns one GreetTask dependency (see deps below).
        self._names = names
    def run(self):
        outs = []
        for inp in self.inputs():
            d = inp.open(mode='tr').read().split('\t')
            name = d[0]
            age = int(d[1])
            # Toy linear model: height estimated from age.
            height = age * 2 + 120
            print("%s's height is estimated as %s" % (name, height))
            outs.append("\t".join([name, str(age), str(height)]))
        # NOTE(review): the handle from open('tw') is never closed -- confirm
        # LocalTarget flushes on destruction.
        self.output().open('tw').write("\n".join(outs))
    def output(self):
        return LocalTarget(os.path.join('data', 'height_data.tsv'))
    @property
    def deps(self):
        return [GreetTask(n) for n in self._names]
if __name__ == '__main__':
    # Demo: runs one task directly; its deps (HelloTask) are NOT run first.
    GreetTask('Alice').run()
|
# Lists can hold ints, mixed types, or string literals directly.
ints_list = [1, 2, 3]
["hit", "my", "name is", "clint", "This is a longer string.. still just a string"]
["hi", 2, False, 3.5, None]
print(ints_list)

# Variables can be re-assigned; a list snapshots the values it was built with.
person = "josh"
years = 28
married = True
snapshot = [person, years, married, "I like this"]  # string literals work too
years = 39
print(snapshot)  # rebinding `years` does not change the list printed here

# Indexing: list positions start at 0.
kids = ["Olivia", "Alle", "Mark"]
print(kids[2])  # prints Mark
only_son = kids[2]
print(only_son)

###########
# print(id(1))  # gives you the memory address
# a = 1
# print(id("asdfasdfasdfasdfasdfasdfasdf"))
|
"""
Последовательность состоит из натуральных чисел и завершается числом 0. Определите количество элементов этой последовательности, которые равны ее наибольшему элементу.
Формат ввода
Вводится последовательность целых чисел, оканчивающаяся числом 0 (само число 0 в последовательность не входит, а служит как признак ее окончания).
Формат вывода
Выведите ответ на задачу.
Тест 1
Входные данные:
1
7
9
0
Вывод программы:
1
Тест 2
Входные данные:
1
3
3
1
0
Вывод программы:
2
Тест 3
Входные данные:
1
2
3
4
5
0
Вывод программы:
1
"""
# Read integers until the 0 terminator, then count occurrences of the maximum.
valores = []
atual = int(input())
valores.append(atual)
while atual != 0:
    atual = int(input())
    valores.append(atual)
# The trailing 0 also lands in `valores`, which is harmless: the maximum of
# a sequence of natural numbers is always positive.
maior = max(valores)
print(sum(1 for v in valores if v == maior))
"""
print(len([i for i in res if i == max(res)]))
""" |
"""
JS URLs settings
================
This file defines settings that can be overriden in the Django project's settings module.
"""
from django.conf import settings
# The "JS_URLS" setting allows to define which URLs should be serialized and made available in the
# Javascript helper. It should be noted that this setting should contain only URL names or
# namespaces. URL paths associated with configured URL names will be serialized. If namespaces are
# used in the context of this setting, all the underlying URL paths will be serialized.
URLS = getattr(settings, 'JS_URLS', [])  # e.g. ['home', 'blog:detail']
# The "JS_URLS_FUNCTION_NAME" setting allows to specify the name of the object that is made
# available through the window global object.
FUNCTION_NAME = getattr(settings, 'JS_URLS_FUNCTION_NAME', 'reverse')  # window.<name>(...)
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rc
# Font setup so Cyrillic axis labels render correctly.
font = {'family': 'Droid Sans',
        'weight': 'normal',
        'size': 14}
rc('font', **font)

# Measured pressure (hPa) and temperature (K).
p = [964.5,966.7,970.3,972.8,975.7,978.9,982.7,986.4]
T = [313.02,314.04,315.17,316,316.95,318.05,319.11,320]

# p/T for each measurement pair.
y = [pressao / temperatura for pressao, temperatura in zip(p, T)]

plt.scatter(p, y, color='k', s=25, marker="o")
plt.xlabel(u'Давление p, гПа')
plt.ylabel(u'p/T, гПа/К')
plt.title(u'График зависимости p/T(p)')
plt.grid(color='k', linestyle='-')
plt.show()
|
import sys
import math

# Redirect stdin to the test-input file (path must stay unchanged).
sys.stdin = open("원안의 마을_input.txt", "r")

N = int(input())
data = [list(map(int, input())) for _ in range(N)]

# Collect village cells (value 1) and locate the centre cell (value 2).
ans = []
answer = 0
for i in range(N):
    for j in range(N):
        if data[i][j] == 2:
            x, y = i, j  # NOTE(review): assumes exactly one '2' in the grid
        elif data[i][j] == 1:
            ans.append([i, j])

# Radius = farthest Euclidean distance from the centre to any village cell.
for k in ans:
    [i, j] = k
    d = ((i-x)**2 + (j-y)**2)**0.5
    if answer < d:
        answer = d

# BUG FIX: the original rounded up with int(answer) + (answer % int(answer) > 0),
# which raises ZeroDivisionError when answer == 0; math.ceil is equivalent
# for positive distances and safe at zero.
print(math.ceil(answer))
from flask import Blueprint, request, jsonify
from app.models import status
from pymongo import MongoClient
import time
client = MongoClient('localhost', 27017) #connects to local instance of MongoDB server
db = client.test  # default "test" database
web_requests = Blueprint('web_requests', __name__)  # routes registered below
@web_requests.route('/postoffset/<offset_id>', methods=['POST'])
def postoffset(offset_id=''):
    """Record one donation event against an activated offset.

    Increments the offset's donation counter and appends a timestamped
    event.  Returns the 200 payload on success, or a 404-style payload
    when the offset is missing, malformed, or not activated.
    """
    db = client.test
    offset = db.offsets.find_one({'offset_id': offset_id})
    try:
        if offset['activated'] == "True":
            db.offsets.update_one({'offset_id': offset['offset_id']},
                                  {'$inc': {'total_donations': 1}})
            db.offsets.update_one({'offset_id': offset['offset_id']},
                                  {'$push': {'offset_events': {'donation_amount': offset['donation_amount'],
                                                               'npo': offset['npo'],
                                                               'data_time_stamp': int(time.time())}}})
            return jsonify({'status': status.STANDARD_200,
                            'offset_id': offset_id})
    # BUG FIX: `except TypeError, KeyError:` is Python-2 syntax (a
    # SyntaxError on Python 3, and even on Python 2 it bound the exception
    # to the name KeyError instead of catching it).  TypeError fires when
    # find_one() returns None; KeyError when a field is missing.
    except (TypeError, KeyError):
        pass
    # BUG FIX: dict.update() returns None (and mutated the shared
    # STANDARD_404 template in place); build a per-request copy instead.
    not_found = {**status.STANDARD_404,
                 'request_key': 'offset_id',
                 'request_value': offset_id}
    return jsonify({'status': not_found})
@web_requests.route('/createoffset/<user_id>=<offset_id>', methods=['POST'])
def createoffset(user_id = '', offset_id = ''):
    # TODO: unimplemented stub -- returns None, which Flask rejects at runtime.
    pass
|
import numpy as np
import cv2
from scipy import ndimage
import math
import csv
BRANCO = 255
vizinhos = [[0,0],[0,-1],[-1,-1],[-1,0],[-1,1],[0,1],[1,1],[1,0],[1,-1]]
#verifica se o ponto pertence a uma fronteira
def bool_nas_Fronteiras(ponto, listaDeFronteiras):
    """True if `ponto` belongs to any already-traced boundary."""
    # boolPontoNaBorda() is a plain membership test, inlined here.
    return any(ponto in fronteira for fronteira in listaDeFronteiras)
#retorna a posição de um pixel branco
def encontrar_prox_branco(ponto, img):
    """Row-major scan from `ponto` for the first white pixel (>= BRANCO).

    Returns its (row, col); when the scan falls off the image, returns the
    last position reached, matching the original fall-through behaviour.
    """
    linha, coluna = ponto
    total_linhas, total_colunas = img.shape
    while linha < total_linhas:
        if coluna >= total_colunas:
            # End of row: wrap to the start of the next one.
            linha, coluna = linha + 1, 0
            continue
        if img[linha, coluna] >= BRANCO:
            return (linha, coluna)
        coluna += 1
    return (linha - 1, coluna)
#retorna o proximo ponto diferente de branco e que não esta na lista de fronteiras
def find_next_point(img, last_pixel, listaDeFronteiras):
    """Row-major scan after `last_pixel` for the next untraced boundary start.

    A candidate is a non-white pixel whose left neighbour is white and that
    is not already on a known boundary; returns 0 when the scan reaches the
    end of the image.
    """
    i, j = last_pixel
    row, col = img.shape
    i+=1
    while(i<row):
        while (j<col):
            # NOTE(review): when j == 0, img[i, j-1] wraps to the LAST column
            # of the row -- presumably relies on a white margin; confirm.
            if img[i,j] < BRANCO and img[i, j-1] >= BRANCO:
                if bool_nas_Fronteiras((i,j), listaDeFronteiras) == False: # new boundary: return this starting point
                    return (i,j)
                else: # already traced: skip ahead to the next white pixel
                    ponto_branco = encontrar_prox_branco((i,j), img)
                    i=ponto_branco[0]
                    j=ponto_branco[1]
                    continue
            j+=1
        j=0
        i+=1
    return 0
def boolPontoNaBorda(ponto, fronteira):
    """Membership test: is `ponto` one of the boundary's recorded points?"""
    for registrado in fronteira:
        if registrado == ponto:
            return True
    return False
#encontra o primeiro pixel diferente de branco
def find_no_white(img):
    """Row-major position of the first non-white pixel (None if all white)."""
    total_linhas, total_colunas = img.shape
    candidatos = ((r, c)
                  for r in range(total_linhas)
                  for c in range(total_colunas)
                  if img[r, c] < BRANCO)
    return next(candidatos, None)
#retorna a posição do array vizinhos
def obterVizinhoID(x, y):
    """Index of offset (x, y) in the `vizinhos` table (None if absent)."""
    for indice, deslocamento in enumerate(vizinhos):
        if deslocamento[0] == x and deslocamento[1] == y:
            return indice
#a partir de um pixel inicial percorre a borda da folha
def seguidorDeFronteira(img, first_pixel, i):
    """Moore-style boundary following starting at `first_pixel`.

    Walks the contour clockwise using the `vizinhos` offset table until the
    starting pixel is reached again.  Returns (True, boundary) when the
    boundary has a plausible leaf size, otherwise (False, (0, 0)).  The
    parameter `i` is unused.
    """
    row, col = img.shape
    fronteira=[]
    fronteira.append(first_pixel) # the starting pixel is already part of the boundary
    x = 0
    y = 1 # readability: index aliases for the coordinate pairs
    b_0 = [first_pixel[x], first_pixel[y]] #b_0[0] = x , b_0[1] = y
    c_0 = [0, -1]
    anterior_b0 = [0, 0]
    cont = 1
    contador_de_vizinhos = 0
    find_init_border = True
    contador=0
    while(find_init_border):
        indexVizinho=obterVizinhoID(c_0[x], c_0[y])
        while(True):
            if(indexVizinho == 8): # wrap the clockwise neighbour index
                indexVizinho = 0
            proxB_0 = [b_0[x]+vizinhos[indexVizinho+1][x], b_0[y]+vizinhos[indexVizinho+1][y]]
            proxVizinho = [b_0[x]+vizinhos[indexVizinho+1][x], b_0[y]+vizinhos[indexVizinho+1][y]] # advance to the next neighbour
            if (img[proxVizinho[x]][proxVizinho[y]]<BRANCO) and cont==0: # next neighbour belongs to the shape (non-white)
                b_0 = [proxB_0[x], proxB_0[y]]
                check = (b_0[x],b_0[y])
                if (first_pixel == check): # back at the starting pixel: boundary is closed
                    find_init_border = False
                    break
                else:
                    fronteira.append((b_0[x],b_0[y])) # record this boundary pixel
                    c_0 = [anterior_b0[x]-b_0[x], anterior_b0[y]-b_0[y]]
                    contador_de_vizinhos = 0
                    contador+=1
                if proxB_0[x] > row: # bail out if the walk leaves the image
                    return False, (0,0)
                break
            contador_de_vizinhos +=1
            if contador_de_vizinhos == 9: # all 9 neighbours tried: stuck in a loop
                return False, (0,0)
            cont = 0
            anterior_b0 = [proxB_0[x], proxB_0[y]]
            indexVizinho += 1 # try the next neighbour clockwise
    tamanho = len(fronteira)
    if tamanho>50 and tamanho<25000: # size filter tuned for image 13
        return True, fronteira
    return False, (0,0)
def grayscale(img):
    """Flatten a BGR image into a list of truncated mean-intensity values."""
    rows, cols, _ = np.shape(img)
    tons = []
    for r in range(rows):
        for c in range(cols):
            canal_b = int(img[r][c][0])
            canal_g = int(img[r][c][1])
            canal_r = int(img[r][c][2])
            # Truncated average of the three channels, as in the original.
            tons.append(int((canal_b + canal_g + canal_r) / 3))
    return tons
def remove_ruidos(imagem):
    """Binarize a BGR image and median-filter it to suppress noise.

    Returns a single-channel image: white background (255) with dark
    foreground regions preserved.
    """
    # DEAD CODE REMOVED: the original computed imagem.astype('float')[:, :, 0]
    # here, but that value was immediately overwritten below.
    gray_img = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)  # to grayscale
    _,img = cv2.threshold(gray_img, 225,255, cv2.THRESH_BINARY)  # binarize
    img = cv2.medianBlur(img, 5)  # remove salt-and-pepper noise
    return img
#inicio do algoritmo seguidor de fronteiras
def init(img):
    """Run the boundary follower over the whole image.

    Repeatedly finds the next unvisited non-white pixel and traces the
    boundary starting there; returns the list of valid boundaries found.
    """
    listaDeFronteiras=[]
    first_no_white_pixel = find_no_white(img)
    next_pixel = first_no_white_pixel
    # NOTE(review): if the image is entirely white, find_no_white() returns
    # None and this loop would spin forever -- confirm inputs always
    # contain at least one leaf.
    i=0
    while(next_pixel!=0):
        try:
            is_fronteira, fronteira = seguidorDeFronteira(img, next_pixel, i)
            if is_fronteira: # keep only boundaries of plausible size
                listaDeFronteiras.append(fronteira)
            last_pixel = next_pixel
            next_pixel = find_next_point(img, last_pixel, listaDeFronteiras)
        except Exception as e:
            print(e)
        i+=1
    # BUG FIX: the original removed items from the list while iterating over
    # it, which silently skips the element following each removal.  Rebuild
    # the list instead.  (This size filter exists because of image 13.)
    listaDeFronteiras = [front for front in listaDeFronteiras
                         if len(front) <= 19000]
    print("NUMERO DE FOLHAS ENCONTRADAS:")
    print(len(listaDeFronteiras))
    return listaDeFronteiras
#retorna a altura, largura, menor linha e menor coluna
def encontra_dimensoes(fronteira):
    """Bounding box of a boundary: (height, width, min_row, min_col).

    The +3 pads a white margin around the crop.  NOTE: the minimum row is
    taken from the FIRST point (the trace's starting pixel), mirroring the
    original code, not from the true minimum.
    """
    linhas = [ponto[0] for ponto in fronteira]
    colunas = [ponto[1] for ponto in fronteira]
    linha_menor = linhas[0]  # starting pixel's row, as in the original
    coluna_menor = min(colunas)
    altura = (max(linhas) - linha_menor) + 3
    largura = (max(colunas) - coluna_menor) + 3
    return altura, largura, linha_menor, coluna_menor
#cria a imagem da fronteira e retorna uma imagem com a mascara
def criar_imagem_borda(img_borda, fronteira, menor_x, menor_y, index, name_img):
    """Draw the translated boundary on `img_borda`, save '<name>-<index>-P.png',
    and return the hole-filled binary mask of the leaf."""
    row, col, bpp = img_borda.shape
    img_borda_binaria = np.zeros((row, col))
    for pixel in fronteira: # translate each boundary pixel into crop coordinates
        nova_coordenada_x = (int(pixel[0])-menor_x)+1
        nova_coordenada_y = (int(pixel[1])-menor_y)+1
        img_borda[nova_coordenada_x][nova_coordenada_y][0] = 0
        img_borda[nova_coordenada_x][nova_coordenada_y][1] = 0
        img_borda[nova_coordenada_x][nova_coordenada_y][2] = 0
        # binary copy of the outline
        img_borda_binaria[nova_coordenada_x][nova_coordenada_y] = 1
    nome_imagem = name_img + '-' + str(index) +"-P"+ ".png"
    cv2.imwrite(nome_imagem, img_borda)
    # fill the closed outline to obtain a solid leaf mask
    img_borda_binaria = ndimage.binary_fill_holes(img_borda_binaria).astype(int)
    return img_borda_binaria
def criar_imagem_unica_folha_colorida(img, imagem_original, imagem_branca, fronteira, img_borda_binaria, index, menor_x, menor_y, name_img):
    """Copy original colours of all mask pixels onto the white canvas and
    save it as '<name>-<index>.png'; returns the canvas.

    The `img` and `fronteira` parameters are unused here.
    """
    row, col, bpp = np.shape(imagem_branca)
    for i in range(row):
        for j in range(col):
            if img_borda_binaria[i][j] == 1:
                # translate crop coordinates back into the original image
                nova_coordenada_x = i+menor_x+1
                nova_coordenada_y = j+menor_y+1
                imagem_branca[i][j][0] = imagem_original[nova_coordenada_x][nova_coordenada_y][0]
                imagem_branca[i][j][1] = imagem_original[nova_coordenada_x][nova_coordenada_y][1]
                imagem_branca[i][j][2] = imagem_original[nova_coordenada_x][nova_coordenada_y][2]
    nome_imagem = name_img + '-' + str(index) + ".png"
    cv2.imwrite(nome_imagem, imagem_branca)
    return imagem_branca
def recorta_imagem(lista_fronteiras, imagem_sem_ruido, imagem_original, name_img):
    """For each boundary, save its outline image and the cropped colour leaf."""
    for index, fronteira in enumerate(lista_fronteiras, start=1):
        row, col, menor_x, menor_y = encontra_dimensoes(fronteira)
        # outline drawn on a white canvas
        imagem_branca = np.ones((row, col, 3)) * 255
        img_borda_binaria = criar_imagem_borda(imagem_branca, fronteira, menor_x, menor_y, index, name_img)
        # colour crop masked by the filled outline
        criar_imagem_unica_folha_colorida(imagem_sem_ruido, imagem_original, imagem_branca, fronteira, img_borda_binaria, index, menor_x, menor_y, name_img)
def valor_medio(histograma, lista_probabilidade):
    """Mean intensity: sum of value * probability over the histogram keys.

    Probabilities are paired with keys by position (same iteration order).
    """
    media = 0
    for posicao, valor in enumerate(histograma):
        media += valor * lista_probabilidade[posicao]
    return media
def pixeis_coloridos(histograma):
    """Total count of non-white pixels (sum of all histogram frequencies)."""
    return sum(histograma[cor] for cor in histograma)
def probabilidade_de_cada_cor(histograma):
    """Per-intensity probability, in the histogram's iteration order."""
    total = sum(histograma.values())  # pixeis_coloridos() inlined
    return [histograma[cor] / total for cor in histograma]
def obter_histograma(imagem):
    """Frequency map of grayscale intensities, ignoring pure white pixels."""
    histograma = {}
    # grayscale() yields one truncated-mean intensity per pixel.
    for cor in grayscale(imagem):
        if cor != BRANCO:
            histograma[cor] = histograma.get(cor, 0) + 1
    return histograma
#função que retorna a media, variancia, uniformidade, entropia de cada folha
def analise_textura(name_img, img_number):
    """First-order texture statistics of leaf image '<name>-<number>.png'.

    Returns (mean, variance, uniformity, entropy), computed from the
    grayscale histogram of non-white pixels.
    """
    name_img = name_img + '-' + str(img_number) + ".png"
    imagem = cv2.imread(name_img)
    histograma = obter_histograma(imagem)
    probabilidade = probabilidade_de_cada_cor(histograma)
    media = valor_medio(histograma, probabilidade)
    j = 0
    variancia = uniformidade = entropia = 0
    # j pairs each histogram key with its probability (same iteration order)
    for i in histograma:
        variancia += (((i-media)**2) * probabilidade[j])
        uniformidade += (probabilidade[j] ** 2)
        entropia += (probabilidade[j] * np.log2(probabilidade[j])) * -1
        j += 1
    return media, variancia, uniformidade, entropia
#realiza o tratamento do nome da imagem 1 -> Teste01 retorna o nome e a imagem do disco
def pegar_nome(img_number):
    """Map an index to 'Folhas/TesteNN'; return (loaded image, basename).

    The image is read from '<basename>.png'; the returned name has no
    extension.  imread() yields None when the file does not exist.
    """
    base = 'Folhas/Teste'
    if img_number < 10:
        base += '0'  # zero-pad single-digit indices
    base += str(img_number)
    imagem = cv2.imread(base + '.png')
    return imagem, base
#cria a planilha .csv e adiciona os cabeçalhos
def criar_planilha():
    """Create SAIDAS.csv, write the header row, and return its csv writer.

    The file handle is intentionally kept open for the program's lifetime
    (the writer is used until the process exits).
    """
    # BUG FIX: csv files must be opened with newline='' -- without it the
    # csv module emits a blank line after every row on Windows.
    arquivo = open("SAIDAS.csv", "w", newline="")
    planilha = csv.writer(arquivo)
    planilha.writerow(["ID imagem", "ID folha", "Media", "Variancia", "Uniformidade", "Entropia", "Perimetro"])
    return planilha
#escreve na planilha os dados obtidos de cada folha
def incrementar_planilha(planilha, id_img, id_folha, media, variancia, uniformidade, entropia, perimetro):
    """Append one leaf's metrics as a row, stripping the 'Folhas/' prefix."""
    linha = [id_img.removeprefix('Folhas/'), id_folha, media,
             variancia, uniformidade, entropia, perimetro]
    planilha.writerow(linha)
#função principal do código qual chama as funções de execução
def main():
    """Process Folhas/TesteNN.png images until one is missing; for every
    detected leaf, save its outline/crop images and append texture metrics
    to SAIDAS.csv."""
    print("Bem Vindo ao Trabalho de PID")
    img_number = 1
    planilha = criar_planilha()
    while(True):
        try:
            imagem, name_img = pegar_nome(img_number)
            img_sem_ruido = remove_ruidos(imagem)
            lista_fronteiras = []
            print("Encontrando todas as bordas de: ", name_img)
            print(" Este processo pode demorar um pouco")
            lista_fronteiras = init(img_sem_ruido) # find all leaf boundaries
            recorta_imagem(lista_fronteiras, img_sem_ruido, imagem, name_img) # crop each leaf and save the outline and colour images
            print("Analisando a textura das folhas encontradas")
            index_sub_folha = 1
            # NOTE(review): the bare excepts drive loop termination (missing
            # file / out-of-range leaf index raise), but they also hide any
            # real processing error -- consider narrowing them.
            while True:
                try:
                    media, variancia, uniformidade, entropia = analise_textura(name_img, index_sub_folha)
                    perimetro = len(lista_fronteiras[index_sub_folha-1])
                    incrementar_planilha(planilha, name_img, index_sub_folha, media, variancia, uniformidade, entropia, perimetro)
                    index_sub_folha += 1
                except:
                    print("Fim da análise de textura para todas as folhas encontradas no arquivo: ", name_img)
                    break
            img_number += 1
        except:
            print("Acabou :) EBAA!")
            break
if __name__ == "__main__":
    main()  # script entry point
|
# -*- coding: utf-8 -*-
from django.views.generic.base import View
from django.utils.decorators import classonlymethod
from django.utils.functional import update_wrapper
from django.contrib.contenttypes.models import ContentType
from auf.django.permissions import require_permission
from ecm.core.models import ECMCatalog
class ECMView(View):
    """Resolve an ECM catalog traversal from the URL, enforce per-segment
    access permissions, then dispatch to the view class named by the
    target node's '<action>_view' attribute."""
    # Name of the URL kwarg holding the slash-separated slug path.
    context_key = 'slugs'
    action = None
    @classonlymethod
    def as_view(cls, **initkwargs):
        """
        Main entry point for a request-response process.
        """
        # sanitize keyword arguments
        for key in initkwargs:
            if key in cls.http_method_names:
                raise TypeError(u"You tried to pass in the %s method name as a "
                                u"keyword argument to %s(). Don't do that."
                                % (key, cls.__name__))
            if not hasattr(cls, key):
                raise TypeError(u"%s() received an invalid keyword %r" % (
                    cls.__name__, key))
        def get_slugs(**kwargs):
            """
            Return slugs context from URL.
            """
            return kwargs.get(cls.context_key).split('/')
        def get_traversal(**kwargs):
            """
            Get traversal (catalog entry) from URL.
            """
            slugs = get_slugs(**kwargs)
            # One query for every segment, then restore the URL order.
            unordered_brains = ECMCatalog.objects.filter(slug__in=slugs)
            d = {}
            for b in unordered_brains:
                d[b.slug] = b
            ordered_brains = []
            for s in slugs:
                ordered_brains.append(d[s])
            return ordered_brains
        def view(request, *args, **kwargs):
            """
            Lookup the view class and return an instance of.
            """
            # Setup the context
            traversal = get_traversal(**kwargs)
            # check for access granted in traversal
            for seg in traversal:
                obj = seg.get_object()
                seg_ct = obj.__class__.__name__
                perm = 'access %s' % seg_ct
                # raises when the user lacks access to any path segment
                require_permission(request.user, perm, obj=obj)
            node = traversal[-1].get_object()
            # Check if content type is not determined by view
            ct = kwargs.get('content_type', None)
            if ct is None:
                content_type = node.content_type
            else:
                content_type = ContentType.objects.get(model=ct.lower())
            model_class = content_type.model_class()
            # Do check in model for considered content to
            # find to good view URL( action) / content type / content id
            action = initkwargs.get('action', None)
            model_view_attr = "%s_view" % action
            view_name = getattr(model_class, model_view_attr, None)
            # '<action>_view' may be a dotted path or a callable returning one
            if hasattr(view_name, '__call__'):
                view_name = view_name(request, *args, **kwargs)
            if action is None or view_name is None:
                raise Exception("ECM View must have an 'action' "
                                "parameter which map on 'action_view' property "
                                "of content.")
            # Import the target view class from its dotted path.
            path = view_name.split('.')
            klass = path.pop()
            module = __import__(".".join(path), fromlist=".")
            view = getattr(module, klass)
            # Good stuff to make available in views
            kwargs.update({
                'traversal': traversal,
                'node': node,
                'model': model_class,
                'action': action,
            })
            self = view(**initkwargs)
            self.model = model_class
            return self.dispatch(request, *args, **kwargs)
        # take name and docstring from class
        update_wrapper(view, cls, updated=())
        # and possible attributes set by decorators
        # like csrf_exempt from dispatch
        update_wrapper(view, cls.dispatch, assigned=())
        return view
|
import FWCore.ParameterSet.Config as cms
# Base ECAL particle-flow cluster producer configuration.
from RecoParticleFlow.PFClusterProducer.particleFlowClusterECAL_cfi import particleFlowClusterECAL

# Enable MVA-based cluster energy corrections and cap the pT used for the
# MVA evaluation at 90 (units per the producer's convention — TODO confirm).
particleFlowClusterECAL.energyCorrector.applyMVACorrections = True
particleFlowClusterECAL.energyCorrector.maxPtForMVAEvaluation = 90.

from Configuration.Eras.Modifier_run2_ECAL_2016_cff import run2_ECAL_2016
from Configuration.Eras.Modifier_run2_ECAL_2017_cff import run2_ECAL_2017

# For the Run-2 2016/2017 ECAL eras, switch on the SRF-aware correction and
# raise the MVA evaluation ceiling to 300.
(run2_ECAL_2016 | run2_ECAL_2017).toModify(particleFlowClusterECAL,
    energyCorrector = dict(srfAwareCorrection = True, maxPtForMVAEvaluation = 300.))
|
from django.shortcuts import render
# These views act as page components.
# Main listing page.
def index(request):
    """Render the main listings overview page."""
    template_name = 'listings/listings.html'
    return render(request, template_name)
def listing(request):
    """Render the single-listing detail page."""
    template_name = 'listings/listing.html'
    return render(request, template_name)
def search(request):
    """Render the listings search page."""
    # Fix: template path was misspelled 'lisitings/search.html'; the other
    # views in this module load templates from the 'listings/' directory, so
    # the typo made Django raise TemplateDoesNotExist for this page.
    return render(request, 'listings/search.html')
|
#! /usr/bin/env python3
import sys
from datetime import date
from Metodosclientes import ListaClientes
from Metodostrabajos import ListaTrabajos
from repositorioClientes import RepositorioClientes
from repositorioTrabajos import RepositorioTrabajos
from trabajo import Trabajo
class Menu:
    "Console menu: shows the available options and dispatches the user's choice"

    def __init__(self):
        # Repositories (persistence layer) and list helpers (operations layer).
        self.RC = RepositorioClientes()
        self.RT = RepositorioTrabajos()
        self.ListaC = ListaClientes()
        self.ListaT = ListaTrabajos()
        # Maps the menu key typed by the user to its handler method.
        self.opciones = {
            "1": self.NuevoCliente,
            "2": self.MostrarClientes,
            "3": self.BuscarCliente,
            "4": self.ModificarDatosC,
            "5": self.BorrarCliente,
            "6": self.CargarNuevoT,
            "7": self.MostrarTrabajos,
            "8": self.FinalizarTrabajo,
            "9": self.RetirarTrabajo,
            "10": self.ModificarDatosT,
            "11": self.HistorialTrabajosPorC,
            "12": self.BorrarTrabajo,
            "0": self.salir
        }

    def MostrarMenu(self):
        # Print the main menu screen (user-facing text kept verbatim).
        print(""" ===============
 S I S T E M A
 ===============
MENU CLIENTES: MENU TRABAJOS:
1. Ingresar un nuevo cliente 6. Cargar nuevo trabajo
2. Mostrar todos los clientes 7. Mostrar todos los trabajos
3. Buscar un cliente 8. Finalizar un trabajo
4. Modificar los datos de un cliente 9. Retirar un trabajo
5. Borrar un cliente 10. Modificar los datos de un trabajo
11. Historial de trabajos de un cliente
12. Borrar un trabajo
0. Salir del sistema
""")

    def Ejecutar(self):
        "Show the menu and dispatch the chosen option in an endless loop"
        while True:
            self.MostrarMenu()
            opcion = input("INGRESA UNA OPCION: ")
            accion = self.opciones.get(opcion)
            if accion:
                accion()
            else:
                # NOTE(review): the "{0}" placeholder is never filled in —
                # this line is probably missing .format(opcion).
                print("{0} no es una opcion valida")

    def NuevoCliente(self):
        "Register a new client, either corporate (C) or individual (P)"
        tipo = "N"
        while tipo not in ("C", "c", "P", "p"):
            tipo = input("""
Escogio la opcion para ingresar un nuevo cliente, por favor elija el tipo de cliente e ingreselo
C: Corporativo
P: Particular
Ingrese el tipo de cliente: """)
        if tipo in ("C", "c"):
            print("\nA continuacion se pediran los datos correspondientes al nuevo cliente\n")
            NombreEmpresa = input("Ingrese el nombre de la empresa: ")
            NombreContacto = input("Ingrese el nombre del contacto: ")
            TelCont = input("Ingrese el telefono del contacto: ")
        else:
            print("\nA continuacion se pediran los datos correspondientes al nuevo cliente\n")
            Nombre = input("Ingrese el nombre: ")
            Apellido = input("Ingrese el apellido: ")
            Tel = input("Ingrese el telefono: ")
            Mail = input("Ingrese el mail: ")
        if tipo in ("C", "c"):
            # NOTE(review): BUG — Tel and Mail are never collected in the
            # corporate branch above, so this call raises NameError. The
            # corporate prompts should probably also ask for Tel and Mail.
            C = self.ListaC.NuevoClienteCorp(NombreEmpresa, NombreContacto, TelCont, Tel, Mail)
        else:
            C = self.ListaC.NuevoClientePart(Nombre, Apellido, Tel, Mail)
        if C is None:
            print("===========================================")
            print("Ocurrio un error al cargar al nuevo cliente")
            print("===========================================")
        else:
            print("\n===========================================")
            print("El clientes fue cargado con exito")
            print("===========================================\n")
            print(C)
            print("===========================================")
        input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")

    def MostrarClientes(self):
        "Show every client, grouped as corporate and individual"
        c = self.RC.get_all()
        if c:
            l = self.RC.get_all_corporativos()
            print(""" =====================""")
            print(""" CLIENTES CORPORATIVOS""")
            print(""" =====================""")
            if l:
                for i in l:
                    print("========================================")
                    print(i)
                    print("========================================")
            t = self.RC.get_all_particulares()
            print(""" =====================""")
            print(""" CLIENTES PARTICULARES""")
            print(""" =====================""")
            if t:
                for i in t:
                    print("========================================")
                    print(i)
                    print("========================================")
            input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
        else:
            print("\nActualmente no se encuentra ningun cliente cargado en el sistema")
            input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")

    def BuscarCliente(self):
        "Ask for an ID, look the client up and display it"
        c = self.RC.get_all()
        if c:
            print("\nEscogio la opcion para buscar un cliente\n")
            # Keep prompting until a valid integer is entered.
            while True:
                try:
                    id_cliente = int(input("Ingrese el ID del cliente que desea buscar: "))
                except ValueError:
                    print('Debe ingresar un numero')
                    continue
                break
            C = self.ListaC.BuscarPorID(id_cliente)
            if C == None:
                print("\n=================================================================")
                print("El ID ingresado no pertenece a ningun cliente cargado actualmente")
                print("=================================================================\n")
            else:
                print("\n===============================================\n")
                print(C)
                print("=================================================")
            input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
        else:
            print("\nActualmente no se encuentra ningun cliente particular guardado en el sistema")
            input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")

    def ModificarDatosC(self):
        "Modify a client's data, either a corporate or an individual client"
        tipo = "N"
        while tipo not in ("C", "c", "P", "p"):
            tipo = input("""
Escogio la opcion para modificar un cliente, por favor elija el tipo de cliente e ingreselo
C: Corporativo
P: Particular
Ingrese el tipo de cliente que desea modificar: """)
        if tipo in ("C","c"):
            # Corporate-client branch.
            l = self.RC.get_all_corporativos()
            if l:
                print("\n"" =====================""")
                print(""" CLIENTES CORPORATIVOS""")
                print(""" =====================""")
                for I in l:
                    print("========================================\n")
                    print(I)
                    print("========================================\n")
                while True:
                    try:
                        id_cliente = int(input("Ingrese el ID del cliente: "))
                    except ValueError:
                        print('Debe ingresar un numero')
                        continue
                    break
                Cliente = self.ListaC.BuscarPorID(id_cliente)
                if Cliente:
                    print("========================================\n")
                    print(Cliente)
                    print("=================================================================================")
                    print("Modifique el campo que desee, de no querer modificar algun campo dejelo vacio")
                    print("=================================================================================\n")
                    NombreEmpresa = input("Ingrese el nombre de la empresa: ")
                    NombreContacto = input("Ingrese el nombre del contacto: ")
                    TelCont = input("Ingrese el telefono del contacto: ")
                    Tel = input("Ingrese el telefono: ")
                    Mail = input("Ingrese el mail: ")
                    C = self.ListaC.ModificarDatosCC(NombreEmpresa, NombreContacto, TelCont, Tel, Mail, id_cliente)
                    if C == None:
                        print("\n========================================\n")
                        print("Ocurrio un error al modificar los datos del cliente")
                        print("========================================\n")
                        input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
                    else:
                        print("\n===============================================================\n")
                        print("""Los datos del cliente se modificaron con exito
A continuacion se podran ver los datos del cliente actualizados""")
                        print("\n===============================================================\n")
                        print("\n========================================")
                        # NOTE(review): prints the object fetched BEFORE the
                        # update — confirm it reflects the new values.
                        print(Cliente)
                        print("========================================")
                        input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
                else:
                    print("\nEl ID ingresado no pertenece a ningun cliente corporativo guardado en el sistema")
                    input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
            else:
                print("\nActualmente no se encuentra ningun cliente corporativo guardado en el sistema")
                input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
        else:
            # Individual-client branch (mirrors the corporate branch).
            l = self.RC.get_all_particulares()
            if l:
                print("\n"" =====================""")
                print(""" CLIENTES PARTICULARES""")
                print(""" =====================""")
                for I in l:
                    print("========================================\n")
                    print(I)
                    print("========================================\n")
                while True:
                    try:
                        id_cliente = int(input("Ingrese el ID del cliente: "))
                    except ValueError:
                        print('Debe ingresar un numero')
                        continue
                    break
                Cliente = self.ListaC.BuscarPorID(id_cliente)
                if Cliente:
                    print("\n========================================\n")
                    print(Cliente)
                    print("========================================\n")
                    print("==============================================================================")
                    print("Modifique el campo que desee, de no querer modificar algun campo dejelo vacio")
                    print("==============================================================================\n")
                    Nombre = input("Ingrese el nombre: ")
                    Apellido = input("Ingrese el apellido: ")
                    Tel = input("Ingrese el telefono: ")
                    Mail = input("Ingrese el mail: ")
                    C = self.ListaC.ModificarDatosCP(Nombre, Apellido, Tel, Mail, id_cliente)
                    if C == None:
                        print("\n================================================\n")
                        print("Ocurrio un error al modificar los datos del cliente")
                        print("==================================================\n")
                        input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
                    else:
                        print("\n===============================================================\n")
                        print("""Los datos del cliente fueron modificaros con exito
A continuacion se podran ver los datos del cliente actualizados""")
                        print("\n===============================================================\n")
                        print("========================================\n")
                        print(Cliente)
                        print("========================================")
                        input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
                else:
                    print("\nEl ID ingresado no pertenece a ningun cliente particular guardado en el sistema")
                    input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
            else:
                print("\nActualmente no se encuentra ningun cliente particular guardado en el sistema")
                input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")

    def BorrarCliente(self):
        "Ask for an ID and delete that client (pending jobs are deleted too)"
        l = self.RC.get_all_corporativos()
        if l:
            print(""" =====================""")
            print(""" CLIENTES CORPORATIVOS""")
            print(""" =====================""")
            for i in l:
                print("========================================")
                print("ID cliente: ",i.id_cliente,"- Nombre: ",i.nombre_empresa)
                print("========================================")
        t = self.RC.get_all_particulares()
        if t:
            print(""" =====================""")
            print(""" CLIENTES PARTICULARES""")
            print(""" =====================""")
            for i in t:
                print("========================================")
                print("ID cliente: ", i.id_cliente, "- Nombre: ", i.nombre)
                print("========================================\n")
        if l or t:
            while True:
                try:
                    id_cliente = int(input("Ingrese el ID del cliente a borrar: "))
                except ValueError:
                    print('Debe ingresar un numero')
                    continue
                break
            D = self.ListaC.BuscarPorID(id_cliente)
            if D:
                print("\n========================================\n")
                print(D)
                print("========================================\n")
                # Confirmation loop ("J" is just a sentinel outside S/N).
                U = "J"
                while U not in ("S","s","N","n"):
                    U = input("""¿Estas seguro que desea eliminar al cliente?
S: Si borrar al cliente
N: No borrar al cliente
Ingrese una opcion: """)
                if U in ("S","s"):
                    B = self.ListaC.EliminarCliente(id_cliente)
                    if B == None:
                        print("========================================")
                        print("Ocurrio un error al querer borrar al cliente")
                        print("========================================")
                        input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
                    else:
                        print("\n========================================")
                        print("El cliente fue borrado con exito")
                        print("========================================")
                        input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
                else:
                    print("\nHa decidido no borrar al cliente")
                    input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
            else:
                print("\nEl ID ingresado no pertenece a ningun cliente guardado en el sistema")
                input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
        else:
            print("\nActualmente no se encuentra ningun cliente guardado en el sistema")
            input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")

    def CargarNuevoT(self):
        "Ask for a client ID and register a new job for that client"
        l = self.RC.get_all_corporativos()
        if l:
            print(""" =====================""")
            print(""" CLIENTES CORPORATIVOS""")
            print(""" =====================""")
            for i in l:
                print("========================================")
                print("ID cliente: ", i.id_cliente, "- Nombre: ", i.nombre_empresa)
                print("========================================")
        t = self.RC.get_all_particulares()
        if t:
            print(""" =====================""")
            print(""" CLIENTES PARTICULARES""")
            print(""" =====================""")
            for i in t:
                print("========================================")
                print("ID cliente: ", i.id_cliente, "- Nombre: ", i.nombre)
                print("========================================\n")
        if l or t:
            while True:
                try:
                    id_cliente = int(input("Ingrese el ID del cliente: "))
                except ValueError:
                    print('Debe ingresar un numero')
                    continue
                break
            C = self.ListaC.BuscarPorID(id_cliente)
            if C == None:
                print("\nEl ID ingresado no pertenece a ningun cliente guardado en el sistema")
                input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
            else:
                fecha_ingreso = date.today()
                print("\nLa fecha de ingreso es: ", fecha_ingreso)
                print("\nA continuacion ingrese la fecha de entrega propuesta")
                while True:
                    try:
                        dia = int(input("Ingrese el dia (1 a 31): "))
                    except ValueError:
                        print('Debe ingresar un numero del 1 al 31')
                        continue
                    break
                while True:
                    try:
                        mes = int(input("Ingrese el mes (1 a 12): "))
                    except ValueError:
                        print('Debe ingresar un numero del 1 al 12')
                        continue
                    break
                while True:
                    try:
                        anio = int(input("Ingrese el año: "))
                    except ValueError:
                        print('Debe ingresar un numero')
                        continue
                    break
                # NOTE(review): the ranges are not validated, so date() can
                # still raise ValueError here (e.g. day 31 in a 30-day month).
                fecha_entrega_propuesta = date(anio, mes, dia)
                descripcion = input("\nIngrese la descripcion del nuevo trabajo: ")
                T = self.ListaT.NuevoTrabajo(C, fecha_ingreso, fecha_entrega_propuesta, descripcion)
                if T == None:
                    print("\n========================================\n")
                    print("Ocurrio un error al cargar el nuevo trabajo")
                    print("========================================")
                    input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
                else:
                    print("\n========================================")
                    print("El nuevo trabajo se cargo con exito\n")
                    print(T)
                    print("========================================\n")
                    input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
        else:
            print("\nActualmente no se encuentra ningun cliente guardado en el sistema por lo que no se puede cargar un nuevo trabajo")
            input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")

    def MostrarTrabajos(self):
        "Show a list with every job"
        Lista = self.RT.get_all()
        if Lista:
            for Cliente in Lista:
                print("\n===========================================\n")
                print(Cliente)
                print("===========================================")
        else:
            print("\nActualmente no se encuentra ningun trabajo cargado en el sistema")
        input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")

    def FinalizarTrabajo(self):
        "Ask for a job ID and set its real delivery date (mark it finished)"
        t = self.RT.get_all()
        if t:
            for i in t:
                print("========================================")
                print(i.cliente)
                print("ID trabajo: ",i.id_trabajo,"- Fecha entrega real: ",i.fecha_entrega_real)
                print("========================================")
            print("\n========================================")
            while True:
                try:
                    id_trabajo = int(input("Ingrese el ID del trabajo: "))
                except ValueError:
                    print('Debe ingresar un numero')
                    continue
                break
            C = self.ListaT.BuscarPorID(id_trabajo)
            if C == None:
                print("\nEl ID ingresado no pertenece a ningun trabajo guardado en el sistema")
                input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
            else:
                # A set fecha_entrega_real means the job is already finished.
                if C.fecha_entrega_real:
                    print("El estado del trabajo ya se encuentra como finalizado")
                    input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
                else:
                    print("\n========================================\n")
                    print(C)
                    print("========================================")
                    tipo = "w"
                    while tipo not in ("S","s","N","n"):
                        print("============================================================================")
                        tipo = input("""¿Estas seguro que desea dar por finalizado el trabajo?
S: Finalizar trabajo
N: No finalizar
Ingresa una opcion: """)
                        print("============================================================================")
                    if tipo in ("S","s"):
                        T = self.ListaT.TrabajoFinalizado(id_trabajo)
                        if T == None:
                            print("\n==============================================")
                            print("Error al modificar la entrega real del trabajo")
                            print("==============================================")
                            input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
                        else:
                            print("\n====================================================")
                            print("\nLa entrega real del trabajo fue modificada con exito")
                            print("\n====================================================")
                            print(C)
                            print("====================================================")
                            input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
                    else:
                        print("\n=================================================================")
                        print("No se realizo ninguna modificacion en la finalizacion del trabajo")
                        print("=================================================================\n")
                        input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
        else:
            print("\nActualmente no se encuentra ningun trabajo cargado en el sistema")
            input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")

    def RetirarTrabajo(self):
        "Ask for a job ID and mark that job as picked up"
        t = self.RT.get_all()
        if t:
            for i in t:
                print("\n========================================")
                print(i.cliente)
                print("ID trabajo: ",i.id_trabajo,"- Retirado: ",i.retirado)
                print("=========================================\n")
            while True:
                try:
                    id_trabajo = int(input("Ingrese el ID del trabajo: "))
                except ValueError:
                    print('Debe ingresar un numero')
                    continue
                break
            C = self.ListaT.BuscarPorID(id_trabajo)
            if C == None:
                print("\nEl ID ingresado no pertenece a ningun trabajo guardado en el sistema")
                input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
            else:
                if C.retirado == True:
                    print("\nEl estado del trabajo ya se encuntra como retirado")
                    input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
                else:
                    print("\n========================================")
                    print(C)
                    print("========================================")
                    tipo = "w"
                    while tipo not in ("S","s","N","n"):
                        print("\n======================================================")
                        # NOTE(review): prompt wording says "finalizado" but this
                        # flow marks the job as picked up — likely a copy-paste
                        # slip in the user-facing text.
                        tipo = input("""¿Estas seguro que desea dar por finalizado el trabajo?
S: Retirar el trabajo
N: No retirar el trabajo
Ingresa una opcion: """)
                        print("======================================================")
                    if tipo in ("s","S"):
                        T = self.ListaT.Trabajo_retirado(id_trabajo)
                        if T == None:
                            print("========================================")
                            print("Error al retirar el trabajo")
                            print("========================================")
                            input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
                        else:
                            print("\n========================================")
                            print("El trabajo fue retirado con exito")
                            print("========================================\n")
                            print(C)
                            print("\n========================================")
                            input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
                    else:
                        print("\n===========================================================")
                        print("No se realizo ninguna modificacion en el retiro del trabajo")
                        print("===========================================================")
                        input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
        else:
            print("\nActualmente no se encuentra ningun trabajo cargado en el sistema")
            input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")

    def ModificarDatosT(self):
        "Ask for a job ID and modify that job's data"
        t = self.RT.get_all()
        if t:
            for i in t:
                print("\n========================================")
                print("ID trabajo: ",i.id_trabajo)
                print("Fecha de ingreso: ",i.fecha_ingreso)
                print("Fecha entrega propuesta: ",i.fecha_entrega_propuesta)
                print("Descripcion: ",i.descripcion)
                print("========================================\n")
            while True:
                try:
                    id_trabajo = int(input("Ingrese el ID del trabajo: "))
                except ValueError:
                    print('Debe ingresar un numero')
                    continue
                break
            C = self.ListaT.BuscarPorID(id_trabajo)
            if C == None:
                print("\nEl ID ingresado no pertenece a ningun trabajo guardado en el sistema")
                input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
            else:
                print("\n=========================================")
                print(C.cliente)
                print("Trabajo:")
                print("Fecha de ingreso: ",C.fecha_ingreso)
                print("Fecha entrega propuesta: ",C.fecha_entrega_propuesta)
                print("Descripcion: ",C.descripcion)
                print("\n=============================================================================")
                print("Modifique el campo que desee, de no querer modificar algun campo dejelo vacio")
                print("=============================================================================")
                tipo = "n"
                # NOTE(review): the doubled while loops mean the inner prompt
                # re-asks after every edit until "C" is chosen — confirm this
                # is the intended flow.
                while tipo not in ("I", "i", "P", "p", "D", "d", "C", "c"):
                    while tipo not in ("C", "c"):
                        tipo = input("""\n¿Estas seguro que desea hacer alguna modificacion?
I: Fecha de ingreso
P: Fecha entrega propuesta
D: Descripcion
C: No deseo realizar una modificacion
Ingrese una opcion: """)
                        if tipo in ("I","i"):
                            print("==========================")
                            print("Modificar fecha de ingreso\n")
                            while True:
                                try:
                                    dia = int(input("Ingrese el dia (1 a 31): "))
                                except ValueError:
                                    print('Debe ingresar un numero del 1 al 31')
                                    continue
                                break
                            while True:
                                try:
                                    mes = int(input("Ingrese el mes (1 a 12): "))
                                except ValueError:
                                    print('Debe ingresar un numero del 1 al 12')
                                    continue
                                break
                            while True:
                                try:
                                    anio = int(input("Ingrese el año: "))
                                except ValueError:
                                    print('Debe ingresar un numero')
                                    continue
                                break
                            FechaIngreso = date(anio, mes, dia)
                            # NOTE(review): passes C.fecha_entrega_real here
                            # while the "P" branch passes the PROPOSED date in
                            # the same position — verify the parameter order of
                            # ListaT.ModificarDatosT.
                            T = self.ListaT.ModificarDatosT(FechaIngreso, C.fecha_entrega_real, C.descripcion, id_trabajo)
                            if T == None:
                                print("=========================================")
                                print("Error al modificar el trabajo")
                                print("=========================================")
                                input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
                            else:
                                print("\n=========================================")
                                print("Los datos que decidio modificar se modificaron con exito")
                                print("=========================================\n")
                                print(C)
                                print("=========================================")
                        if tipo in ("P","p"):
                            print("=====================================")
                            print("Modificar fecha de entregra propuesta\n")
                            while True:
                                try:
                                    dia = int(input("Ingrese el dia (1 a 31): "))
                                except ValueError:
                                    print('Debe ingresar un numero del 1 al 31')
                                    continue
                                break
                            while True:
                                try:
                                    mes = int(input("Ingrese el mes (1 a 12): "))
                                except ValueError:
                                    print('Debe ingresar un numero del 1 al 12')
                                    continue
                                break
                            while True:
                                try:
                                    anio = int(input("Ingrese el año: "))
                                except ValueError:
                                    print('Debe ingresar un numero')
                                    continue
                                break
                            FechaEntregaPropuesta = date(anio, mes, dia)
                            T = self.ListaT.ModificarDatosT(C.fecha_ingreso, FechaEntregaPropuesta, C.descripcion, id_trabajo)
                            if T == None:
                                print("=========================================")
                                print("Error al modificar el trabajo")
                                print("=========================================")
                                input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
                            else:
                                print("\n=========================================")
                                print("Los datos que decidio modificar se modificaron con exito")
                                print("=========================================\n")
                                print(C)
                                print("=========================================")
                        if tipo in ("D","d"):
                            print("====================================")
                            print("Modificar la descripcion del trabajo\n")
                            Descripcion = input("Ingrese la descripcion del trabajo: ")
                            # NOTE(review): same fecha_entrega_real-vs-propuesta
                            # concern as the "I" branch above.
                            T = self.ListaT.ModificarDatosT(C.fecha_ingreso, C.fecha_entrega_real, Descripcion, id_trabajo)
                            if T == None:
                                print("=========================================")
                                print("Error al modificar el trabajo")
                                print("=========================================")
                                input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
                            else:
                                print("\n=========================================")
                                print("Los datos que decidio modificar se modificaron con exito")
                                print("=========================================\n")
                                print(C)
                                print("=========================================")
                        if tipo in ("C","c"):
                            # NOTE(review): recursing into Ejecutar() instead of
                            # returning grows the call stack on every pass
                            # through this menu.
                            self.Ejecutar()
        else:
            print("\nActualmente no se encuentra ningun trabajo cargado en el sistema")
            input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")

    def BorrarTrabajo(self):
        "Ask for a job ID and delete that job"
        t = self.RT.get_all()
        if t:
            for i in t:
                print("\n========================================")
                print("ID trabajo: ",i.id_trabajo)
                print("Fecha de ingreso: ",i.fecha_ingreso)
                print("Fecha entrega propuesta: ",i.fecha_entrega_propuesta)
                print("Fecha de entrega real: ",i.fecha_entrega_real)
                print("Descripcion: ",i.descripcion)
                print("Retirado: ",i.retirado)
                print("=========================================\n")
            while True:
                try:
                    id_trabajo = int(input("Ingrese el ID del trabajo: "))
                except ValueError:
                    print('Debe ingresar un numero')
                    continue
                break
            C = self.ListaT.BuscarPorID(id_trabajo)
            if C == None:
                print("\nEl ID ingresado no pertenece a ningun trabajo guardado en el sistema")
                input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
            else:
                print("\n=========================================")
                print(C)
                print("=========================================\n")
                tipo = "w"
                while tipo not in ("S","s","N","n"):
                    tipo = input("""¿Estas seguro que desea eliminar el trabajo?
S: Eliminar trabajo
N: No eliminar trabajo
Ingresa una opcion: """)
                if tipo in ("S","s"):
                    T = self.ListaT.EliminarTrabajo(id_trabajo)
                    if T == None:
                        print("=========================================")
                        print("Ocurrio un error al eliminar el trabajo")
                        print("=========================================")
                        input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
                    else:
                        print("\n=========================================")
                        print("El trabajo fue eliminado con exito")
                        print("=========================================")
                        input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
                else:
                    print("\n==================================")
                    print("Ha decidido no eliminar el trabajo")
                    print("==================================")
                    input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
        else:
            print("\nActualmente no se encuentra ningun trabajo cargado en el sistema")
            input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")

    def HistorialTrabajosPorC(self):
        """Ask for a client ID and list every job ordered by that client"""
        l = self.RC.get_all_corporativos()
        if l:
            print(""" =====================""")
            print(""" CLIENTES CORPORATIVOS""")
            print(""" =====================""")
            for i in l:
                print("========================================")
                print("ID cliente: ",i.id_cliente,"- Nombre: ",i.nombre_empresa)
                print("========================================")
        t = self.RC.get_all_particulares()
        if t:
            print(""" =====================""")
            print(""" CLIENTES PARTICULARES""")
            print(""" =====================""")
            for i in t:
                print("========================================")
                print("ID cliente: ", i.id_cliente, "- Nombre: ", i.nombre)
                print("========================================\n")
        if l or t:
            while True:
                try:
                    # NOTE(review): `id` shadows the builtin of the same name
                    # for the rest of this method.
                    id = int(input("\nIngrese el ID del cliente: "))
                except ValueError:
                    print('Debe ingresar un numero')
                    continue
                break
            C = self.ListaC.BuscarPorID(id)
            if C == None:
                print("\nEl ID ingresado no pertenece a ningun cliente guardado en el sistema")
                input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
            else:
                print("\n========================================\n")
                print(C)
                print("========================================")
                t = self.ListaT.TrabajoL
                if t:
                    for I in t:
                        # Show only the jobs belonging to the chosen client.
                        if I.cliente.id_cliente == id:
                            print("========================================\n")
                            print("ID trabajo: ",I.id_trabajo)
                            print("Fecha de ingreso: ",I.fecha_ingreso)
                            print("Fecha entrega propuesta: ",I.fecha_entrega_propuesta)
                            print("Fecha entrega real: ",I.fecha_entrega_real)
                            print("Descripcion: ",I.descripcion)
                            print("Retirado: ",I.retirado)
                            print("========================================")
                    input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
                else:
                    print("\nActualmente el cliente no cuenta con ningun trabajo cargado en el sistema")
                    input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")
        else:
            print("\nActualmente no se encuentra ningun cliente cargado en el sistema")
            input("\nPRESIONE CUALQUIER TECLA PARA VOLVER AL MENU PRINCIPAL DEL SISTEMA")

    def salir(self):
        """Print a goodbye message and terminate the program."""
        print("Muchas gracias por haber utilizado el sistema")
        sys.exit(0)
if __name__ == "__main__":
    # Entry point: build the menu controller and start the interactive loop.
    menu = Menu()
    menu.Ejecutar()
class TaskType:
    """String constants naming the supported task types."""
    JOINT = "joint"
    MENTION_LOCALIZATION = "mention_localization"
    COREFERENCE_RESOLUTION = "coreference_resolution"
    ENTITY_CLASSIFICATION = "entity_classification"
    # NOTE(review): value is abbreviated ("rel_…") unlike the other constants —
    # kept verbatim since downstream consumers may depend on this spelling.
    RELATION_CLASSIFICATION = "rel_classification"
|
from __future__ import annotations
from ..utils.yamlconfig_parser import parse_config
from typing import List, Dict, Tuple, TYPE_CHECKING
if TYPE_CHECKING:
from ..aep import AEP
class AEPObject:
    """Abstract base class for an AEP artifact living under some endpoint.

    Stores the artifact's unique id on AEP and the definition dictionary
    that describes the artifact's details.
    """
    # Endpoint name; concrete subclasses override this.
    name = 'abstract'

    def id_find_func(self, definition):
        """Extract the unique artifact id from its *definition* payload.

        Was previously a class-level ``lambda`` (PEP 8 discourages assigning
        lambdas to names); a plain method is equivalent and still overridable
        by subclasses whose payload stores the id elsewhere.
        """
        return definition['id']

    def __init__(self, definition: Dict, _aep: AEP, id=None):
        """ Abstract object that describes an AEP artifact under a certain endpoint
        as a Python class. Contains the unique id of the artifact on AEP and a
        definition dictionary that describes the artifact details in AEP.

        :param id: The unique id of this artifact on AEP.
        :type id: str
        :param definition: A dictionary that would recreate this artifact in a post request.
            Normally also the result of making a get request for the id.
        :type definition: Dict
        :param _aep: The top class through which all requests are made.
        :type _aep: AEP
        """
        # NOTE: `id` shadows the builtin but is part of the public signature,
        # so it is kept for backward compatibility. A falsy id (None, '', 0)
        # falls back to deriving the id from the definition payload.
        if id:
            self.id = str(id)
        else:
            self.id = str(self.id_find_func(definition))
        self.definition = definition
        self._aep = _aep

    @classmethod
    def create_from_config(cls: AEPObject, collection: AEPCollection, config: Dict, _aep: AEP) -> AEPObject:
        """ Creates an AEP artifact using a post request. Returns a class representing that
        AEP artifact.

        :param cls: The class representing the AEP artifact/endpoint.
        :type cls: AEPObject
        :param collection: The collection of endpoints this endpoint is part of.
        :type collection: AEPCollection
        :param config: The body for the post request to create the artifact.
        :type config: Dict
        :param _aep: The top class through which requests are made.
        :type _aep: AEP
        :return: An instance of the created class, which corresponds to some artifact on AEP.
        :rtype: AEPObject
        """
        # The request path is '<collection>.<endpoint>'.
        result = _aep.post(path='.'.join((collection.name, cls.name)), body=config, params={})
        return cls(result, _aep)
class AEPCollection:
    """A named collection of AEP endpoints sharing one request client."""

    def __init__(self, _aep: AEP, name: str):
        """Store the request client and the collection name.

        :param _aep: Top class through which requests are made.
        :type _aep: AEP
        :param name: Name of this collection.
        :type name: str
        """
        self._aep = _aep
        self.name = name

    def _create_aepobject(self, cls: AEPObject, config_path: str, arg_replacements: Dict) -> AEPObject:
        """Create an AEP artifact from a yaml config and wrap it in ``cls``.

        The post body comes from parsing the yaml at ``config_path`` with the
        variable substitutions given in ``arg_replacements``.

        :return: An instance of ``cls`` for the newly created artifact.
        :rtype: AEPObject
        """
        body = parse_config(config_path, arg_replacements=arg_replacements)
        return cls.create_from_config(self, body, self._aep)

    def _get_aepobject(self, cls: AEPObject, id: str) -> AEPObject:
        """Fetch one existing artifact by id and wrap it in ``cls``.

        :param id: The unique id of an existing AEP artifact.
        :type id: str
        :return: An instance of ``cls`` for the fetched artifact.
        :rtype: AEPObject
        """
        suffix = '/' + str(id)
        response = self._aep.get(path='.'.join((self.name, cls.name)), url_suffix=suffix)
        return cls(response, self._aep)

    @staticmethod
    def default_definition_extract_func(response):
        """Pull the list of artifact definitions out of a list response."""
        return response['items']

    def _get_aepobject_list(self, cls: AEPObject, definition_extract_func=None, get_params=None) -> List[AEPObject]:
        """List the endpoint's artifacts and wrap each definition in ``cls``."""
        if definition_extract_func is None:
            definition_extract_func = self.default_definition_extract_func
        if get_params is None:
            get_params = {}
        response = self._aep.get(path='.'.join((self.name, cls.name)), params=get_params)
        return [cls(definition, self._aep)
                for definition in definition_extract_func(response)]
import torch
import math
from torchvision import transforms as T
from data.dataset import CelebA
import numpy as np
import matplotlib.pyplot as plt
import torchvision
import os
import time
def imshow(img):
    """Display a CHW image tensor with matplotlib (axes hidden)."""
    as_numpy = img.numpy()
    plt.axis("off")
    # matplotlib expects HWC ordering, so move channels last.
    plt.imshow(np.transpose(as_numpy, (1, 2, 0)))
    plt.show()
def denorm(img: torch.Tensor):
    """Map images from the generator's [-1, 1] range back to [0, 1]."""
    return torch.clamp(img / 2 + 0.5, min=0, max=1)
def visualize(imgs):
    """Show a batch of normalized images as a roughly square grid."""
    denormed = denorm(imgs)
    grid = torchvision.utils.make_grid(denormed, nrow=int(math.sqrt(len(denormed))))
    imshow(grid)
from trained_models.model2.PGGAN import Generator
def load_model(level):
    """Build the PGGAN generator, load trained weights for ``level``,
    and return it on the GPU in eval mode."""
    generator = Generator()
    state = torch.load(f'trained_models/model2/state_dict/G_{2 ** level}.pkl')
    # Print the load report (missing/unexpected keys) for visibility.
    print(generator.load_state_dict(state))
    return generator.cuda().eval()
def test(G, level, batch_size=16):
    """Sample random latents, generate fakes at ``level``, and display them."""
    assert level in range(2, G.R + 1)
    latents = torch.randn(size=(batch_size, G.latent_dim)).cuda()
    fakes = G(latents, level=level).cpu().detach()
    visualize(fakes)
    # Free GPU memory held by the generation pass.
    torch.cuda.empty_cache()
def save_samples(G, level, batch_size):
    """Generate ``batch_size`` fakes at ``level`` and save them as
    timestamp-named JPEGs under samples/<level>/."""
    latents = torch.randn(size=(batch_size, G.latent_dim)).cuda()
    if not os.path.isdir(f'samples/{level}'):
        os.mkdir(f'samples/{level}')
    fakes = denorm(G(latents, level=level).cpu().detach())
    to_pil = T.ToPILImage()
    for fake in fakes:
        pil_img = to_pil(fake.squeeze(0))
        pil_img.save(f'samples/{level}/{time.time()}.jpg')
def interpolate_samples(G, level, batch_size):
    """Linearly interpolate between two random latents and display the
    resulting ``batch_size`` generated images."""
    start = torch.randn(G.latent_dim).cuda()
    end = torch.randn(G.latent_dim).cuda()
    delta = (end - start) / (batch_size - 1)
    grid_latents = torch.stack([k * delta + start for k in range(batch_size)])
    generated = G(grid_latents, level=level).cpu().detach()
    visualize(generated)
dataset = CelebA(transform=T.Compose([T.ToTensor(), T.Normalize([0.5], [0.5])]))
|
from django.db import models
from datetime import datetime
from django import forms
from django.forms import ModelForm
# Create your models here.
class addd(models.Model):
    """Simple model holding a title and a creation timestamp."""
    # Short title shown in listings and used as the display string.
    title=models.CharField(max_length=100)
    # Defaults to "now" at save time; may be left blank in forms.
    date=models.DateTimeField(default=datetime.now,blank=True)
    def __str__(self):
        # Human-readable representation (used e.g. by the Django admin).
        return self.title
|
# Ruany Maia
# 27 Apr 2020
# Problem set 6 - DNA
# This program identifies a person based on their DNA
import sys
import csv
# Validate CLI usage: expect a CSV database and a DNA sequence file.
if (len(sys.argv) != 3):
    print("Incorrect input. Please enter: dna.py valid_database.csv valid_sequence.txt")
else:
    database_path = sys.argv[1]
    sequence_path = sys.argv[2]
    # Read the full DNA sequence. Fix: the original leaked both file handles;
    # `with` closes them deterministically.
    with open(sequence_path, "r") as sequence_file:
        sequence = sequence_file.read()
    with open(database_path) as database_file:
        database_dict = csv.DictReader(database_file)
        # Every column after "name" is an STR (short tandem repeat) pattern.
        headers = database_dict.fieldnames[1:]
        # For each STR, count the longest run of consecutive repeats found
        # anywhere in the sequence: keep appending one more copy until the
        # repeated pattern no longer occurs.
        str_count = dict()
        for header in headers:
            count = 0
            to_find = header
            while sequence.find(to_find) != -1:
                to_find = to_find + header
                count += 1
            str_count[header] = count
        # Print each person whose STR counts all match; otherwise "No match".
        printed = False
        for person_str_count in database_dict:
            is_found_person = all(
                str_count[header] == int(person_str_count[header])
                for header in headers)
            if is_found_person:
                print(person_str_count["name"])
                printed = True
        if not printed:
            print("No match")
|
from rest_framework import serializers
from .models import Sale
class SaleSerializer(serializers.ModelSerializer):
    """Serializer for Sale exposing all fields except audit timestamps."""
    class Meta:
        model = Sale
        # Timestamps are managed server-side, so keep them out of the payload.
        exclude = ('created_at', 'updated_at',)
|
from gensim import corpora
from gensim import models
from gensim.utils import simple_preprocess
import numpy as np
# Toy corpus: three short documents (two about photography, one about cars).
documents = ["I love photograpy. Photography is art. Taking photos is my life.",
"I am interested in something else, I like cars",
"I am in love with photography. Photography is my life and I enjoy taking photos"]
# Create the Dictionary and Corpus: tokenize each document and map it to a
# bag-of-words list of (token id, count) pairs.
mydict = corpora.Dictionary([simple_preprocess(line) for line in documents])
corpus = [mydict.doc2bow(simple_preprocess(line)) for line in documents]
# Show the raw word counts in the corpus.
for doc in corpus:
    print([[mydict[id], freq] for id, freq in doc])
# Create the TF-IDF model.
# smartirs='ntc': natural term frequency, standard idf, cosine normalization.
tfidf = models.TfidfModel(corpus, smartirs='ntc')
print("\n")
# Show the TF-IDF weights (rounded to 2 decimals).
for doc in tfidf[corpus]:
    print([[mydict[id], np.around(freq, decimals=2)] for id, freq in doc])
|
import sys
# Croatian alphabet: multi-character sequences that count as a single letter.
cro_alpha = ['c=', 'c-', 'dz=', 'd-', 'lj', 'nj', 's=', 'z=']
word = sys.stdin.readline().strip()
# Collapse each multi-character Croatian letter into one placeholder char so
# that every remaining character represents exactly one letter.
for letter in cro_alpha:
    if letter in word:
        word = word.replace(letter, '0')
print(len(word))
import numpy as np
import math
def where(i, range_set):
    """Map sample index ``i`` to the label of the class block it falls in.

    ``range_set`` lists the per-class sample counts in order; the label is
    derived from the cumulative boundary divided by the (ceiled) mean class
    size, so it is reliable when all classes have equal size.
    Returns None if the index lies beyond all blocks.
    """
    pos = i + 1
    avg_size = math.ceil(sum(range_set) / len(range_set))
    start = 0
    for size in range_set:
        if start <= pos and pos <= (size + start):
            return math.ceil((size + start) / avg_size) - 1
        start += size
def formula(x, mu, d, C):
    """Evaluate the d-dimensional multivariate Gaussian density N(mu, C) at x.

    Uses the pseudo-inverse of C, so a singular covariance does not raise.
    """
    norm_const = math.sqrt((2 * np.pi) ** d * np.linalg.det(C))
    diff_row = (x - mu).reshape(1, -1)
    weighted_diff = np.matmul(np.linalg.pinv(C), (x - mu).reshape(-1, 1))
    quad_form = np.matmul(diff_row, weighted_diff).item()
    numerator = np.exp(-1 * (1 / 2) * quad_form)
    return numerator / norm_const
def covariance(x, mu, n):
    """Sample covariance matrix of the rows of ``x`` about mean ``mu``
    (unbiased: divides by n - 1)."""
    centered = x - mu
    return (1 / (n - 1)) * np.matmul(centered.T, centered)
def multivariate_gaussian(dataset, class_range, x=None):
    """Per-class Gaussian density driver.

    Fits one Gaussian (mean + covariance) per class block of ``dataset``
    (blocks sized by ``class_range``). If ``x`` is given (truthy), returns
    the density of ``x`` under every class model; otherwise prints and
    returns the density of each sample under its own class model.
    """
    dim = dataset.shape[1]
    n_samples = dataset.shape[0]
    means, covs = [], []
    offset = 0
    # One pass: slice each class block, record its mean and covariance.
    for size in class_range:
        block = dataset[offset:offset + size, :]
        class_mean = np.mean(block, axis=0)
        means.append(class_mean)
        covs.append(covariance(block, class_mean, size))
        offset += size
    if x:
        query = np.array(x)
        return [formula(query, means[c], dim, covs[c])
                for c in range(len(class_range))]
    densities = []
    for i in range(n_samples):
        label = where(i, class_range)
        value = formula(dataset[i, :], means[label], dim, covs[label])
        print(value)
        densities.append(value)
    return densities
if __name__ == "__main__": #inside program itself
    # Smoke test on Iris: three classes of 50 samples each.
    from sklearn import datasets
    iris = datasets.load_iris().data
    px = multivariate_gaussian(iris,[50,50,50])
|
#!/usr/bin/python3
# Demo of tuple (immutable) vs list (mutable) operations.
t = tuple(range(25))
print(t, type(t))
print(10 in t, 50 in t, 50 not in t, len(t))
for i in t: print(i)
l = list(range(20))
print(10 in l, 20 in l, 20 not in l, len(l))
# Fix: this loop printed the whole list on every iteration (print(l));
# the parallel tuple loop above shows the intent was the element.
for i in l: print(i)
l[10] = 25                 # lists support item assignment; tuples do not
print(l)
print(t.count(5), t.index(5))
l.append(100)              # add one element at the end
print(len(l), l)
l.extend(range(20))        # append every element of an iterable
print(len(l), l)
l.insert(0, 25)            # insert by index (O(n) at the front)
print(len(l), l)
l.insert(12, 100)
print(len(l), l)
l.remove(12)               # removes the first element EQUAL to 12 (a value)
print(len(l), l)
del l[12]                  # deletes BY INDEX
print(len(l), l)
print(l.pop())             # pop from the end
print(len(l), l)
print(l.pop(0))            # pop from the front (O(n))
print(len(l), l)
|
"""
213. House Robber II
After robbing those houses on that street, the thief has found himself a new place for his thievery so that he will not get too much attention.
This time, all houses at this place are arranged in a circle. That means the first house is the neighbor of the last one.
Meanwhile, the security system for these houses remains the same as for those in the previous street.
Given a list of non-negative integers representing the amount of money of each house,
determine the maximum amount of money you can rob tonight without alerting the police.
"""
# Solution house_robber for list[1:] and list[:-1]
class Solution(object):
    """Circular house robber: the first and last houses are neighbors, so
    they can never both be robbed."""

    def rob(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        # With fewer than two houses there is no circular constraint.
        if len(nums) < 2:
            return sum(nums)
        # Either exclude the last house or exclude the first one, then solve
        # the straight-street problem on each and take the better plan.
        without_last = nums[:-1]
        without_first = nums[1:]
        return max(self.rob_house(without_last), self.rob_house(without_first))

    def rob_house(self, nums):
        """Maximum loot on a straight (non-circular) street."""
        take, skip = 0, 0
        for amount in nums:
            # take: rob this house (previous one must have been skipped);
            # skip: best total when this house is left alone.
            take, skip = skip + amount, max(take, skip)
        return max(take, skip)
# Raise: 10% for salaries above 1250, 15% otherwise (truncated to int by %d).
salario = int(input('Qual o seu salario: '))
if salario > 1250:
    print("Seu novo salario: %d " % (salario*1.1))
else:
    print("Seu novo salario: %d " % (salario*1.15))
|
from django.contrib import admin
from .models import Queue, Task
# Expose the task-queue models in the Django admin site.
admin.site.register(Queue)
admin.site.register(Task)
|
import pandas as pd
import random
import math
import os
import numpy as np
import scipy
from scipy.interpolate import splrep, splev
from itertools import combinations
from sklearn.neighbors.kde import KernelDensity
from sklearn import metrics
from sklearn.metrics import confusion_matrix, roc_auc_score
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_selection import GenericUnivariateSelect, chi2, f_classif, VarianceThreshold
from sklearn.feature_selection import SelectFpr
from sklearn.feature_selection import mutual_info_classif
from sklearn.model_selection import RepeatedKFold
from sklearn.neural_network import MLPClassifier
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.multicomp import MultiComparison
from operator import itemgetter
import csv
from sklearn.model_selection import RepeatedKFold
from scipy.ndimage.filters import gaussian_filter1d
from sklearn.svm import SVC
from sklearn import metrics
from sklearn.svm import SVR
from scipy import stats
from sklearn.metrics import confusion_matrix, roc_auc_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import GenericUnivariateSelect, chi2, f_classif
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import SelectPercentile
from sklearn.feature_selection import SelectFpr
from sklearn.feature_selection import SelectFdr
from sklearn.feature_selection import RFE
from sklearn.feature_selection import RFECV
from sklearn.feature_selection import chi2
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Ridge, Lasso
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.externals import joblib
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.svm import SVC
from sklearn.svm import LinearSVC, SVR
from sklearn.model_selection import RepeatedKFold
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import GenericUnivariateSelect, chi2, f_classif, VarianceThreshold
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import SelectPercentile
from sklearn.feature_selection import SelectFpr
from sklearn.feature_selection import SelectFdr
from sklearn.feature_selection import RFE
from sklearn.feature_selection import RFECV
from sklearn.feature_selection import mutual_info_classif
import matplotlib.pyplot as plt
from sklearn import metrics
import time
import csv
import pickle
# import pyswarms as ps
from sklearn.neural_network import MLPClassifier
import statistics
import itertools
from sklearn.cluster import KMeans
# from skfeature.function.information_theoretical_based import CMIM, JMI, MIM, MRMR, RJMI, RelaxMRMR
import warnings
import time
from collections import Counter
def main():
    """Aggregate per-variant model summaries and top-feature tables across the
    four imputation/SMOTE experiment variants, then overwrite the combined
    summary_results.csv and top_features.csv in the working directory."""
    summary_data = pd.read_csv('summary_results.csv')
    feature_summary_data = pd.read_csv('top_features.csv')
    summary_cols = summary_data.columns.values
    feature_cols = feature_summary_data.columns.values
    summary_dataframe = pd.DataFrame(columns=summary_cols)
    feature_summary_dataframe = pd.DataFrame(columns=feature_cols)
    # (imputation, smote, results directory) for each experiment variant,
    # in the same order as the original sequential calls.
    variants = [
        ('none', 'no', 'non_impute_feature_imp_results'),
        ('none', 'yes', 'non_impute_feature_imp_smote_results'),
        ('wknn', 'no', 'weighted_knn_feature_imp'),
        ('wknn', 'yes', 'weighted_knn_smote_feature_imp'),
    ]
    for imputation, smote, directory in variants:
        model_sum, feat_sel = get_model_summary_data(
            imputation, smote, directory, summary_cols, feature_cols)
        summary_dataframe = summary_dataframe.append(model_sum, ignore_index=True)
        feature_summary_dataframe = feature_summary_dataframe.append(
            feat_sel, ignore_index=True)
    summary_dataframe.to_csv('summary_results.csv', index=False)
    feature_summary_dataframe.to_csv('top_features.csv', index=False)
def get_model_summary_data(imputation, smote, model_directory, summary_data_columns, feature_summary_data_columns):
    """Summarize one experiment variant (an imputation x SMOTE combination).

    Reads the per-(feature-selector, classifier) result CSVs under
    ``model_directory``, records each selector's top-15 features, picks the
    best-performing selector per classifier (ranked by mean AUC, then AUC
    stddev, then accuracy), writes ``model_summary_results.csv``, and runs a
    one-way ANOVA plus a Tukey HSD post-hoc test comparing the classifiers'
    best AUC distributions.

    :param imputation: Imputation strategy label recorded in the output rows.
    :param smote: 'yes'/'no' SMOTE flag recorded in the output rows.
    :param model_directory: Sub-directory of cwd containing ``results``.
    :param summary_data_columns: Column layout for the model-summary frame.
    :param feature_summary_data_columns: Column layout for the feature frame.
    :return: (model_summary_dataframe, feat_sel_dataframe)
    """
    path = os.getcwd()
    # NOTE(review): hard-coded '\\' separators make this Windows-only.
    input_directory = path + '\\' + model_directory + '\\results\\'
    input_featureimp_directory = path + '\\' + model_directory + '\\results\\feature_importance\\'
    model_summary_dataframe = pd.DataFrame(columns=summary_data_columns)
    feature_selection_method_list = ['SelectFpr', 'SelectFdr', 'mutual_info_classif', 'feat_importance', 'L1_based',
                                     'selectkbest_f_classif', 'GenericUnivariateSelect', 'SelectPercentile',
                                     'VarianceThreshold']
    # feature_selection_method_list = ['SelectFpr']
    classifier_method_list = ['MLP', 'SVM', 'RDMF']
    # classifier_method_list = ['SVM']
    feat_sel_dataframe = pd.DataFrame(columns=feature_summary_data_columns)
    i = 1  # fixed row label; each temp_df below holds a single row at index 1
    # Record each selector's top-15 features in the feature-summary frame.
    for feature_selection_method in feature_selection_method_list:
        feature_data = readfeatimpDataFromFile(feature_selection_method, input_featureimp_directory)
        top_15_feature = feature_data.head(15)
        top_15_feature_list = top_15_feature["feature"].tolist()
        temp_df = pd.DataFrame(columns=feature_summary_data_columns)
        temp_df.loc[i, 'imputation'] = imputation
        temp_df.loc[i, 'smote'] = smote
        temp_df.loc[i, 'feature selection method'] = feature_selection_method
        temp_df.loc[i, 'features identified'] = top_15_feature_list
        feat_sel_dataframe = feat_sel_dataframe.append(temp_df, ignore_index=True)
    classifier_auc_dict = {}
    for classifier_method in classifier_method_list:
        auc_accuracy_dataframe = pd.DataFrame(columns=summary_data_columns)
        # Track the best AUC sample per classifier (highest mean, ties broken
        # by lower stddev) for the cross-classifier significance tests below.
        class_mean_auc = 0
        class_auc_std = 1
        class_auc_list = []
        for feature_selection_method in feature_selection_method_list:
            # print(feature_selection_method)
            data = readDataFromFile(feature_selection_method, classifier_method, input_directory)
            data_measure, no_of_feature_list, auc_by_no_of_feat_list, accuracy_by_no_of_feat_list, auc_stdev_by_no_of_feat_list, accuracy_stdev_by_no_of_feat_list = addmeasurestoresult(
                data)
            top_auc_list, top_accuracy_list, top_feature_combination = top_auc_accuracy_list(data_measure)
            # Drop NaN entries (str(nan) == 'nan') before computing statistics.
            top_auc_list = [auc for auc in top_auc_list if str(auc) != 'nan']
            top_accuracy_list = [accuracy for accuracy in top_accuracy_list if str(accuracy) != 'nan']
            aucstdev = np.std(top_auc_list)
            # 95% confidence-interval half-width over the 50 CV repeats.
            auc_CI_value = (aucstdev * 1.96)/math.sqrt(50)
            # NOTE(review): computed from top_auc_list, so the "stdev Accuracy"
            # and "CI Accuracy" columns below actually describe AUC spread;
            # top_accuracy_list was probably intended — confirm before fixing,
            # as existing reports depend on these columns.
            accuracystdev = np.std(top_auc_list)
            accuracy_CI_value = (accuracystdev * 1.96) / math.sqrt(50)
            auc_mean = round(np.mean(top_auc_list), 2)
            if auc_mean > class_mean_auc:
                class_mean_auc = auc_mean
                class_auc_std = aucstdev
                class_auc_list = top_auc_list
            elif auc_mean == class_mean_auc:
                if aucstdev < class_auc_std:
                    class_mean_auc = auc_mean
                    class_auc_std = aucstdev
                    class_auc_list = top_auc_list
            # NOTE(review): "imputaion" is misspelled; any downstream consumer
            # of this column must use the same spelling.
            auc_accuracy_dataframe.loc[feature_selection_method, "imputaion"] = imputation
            auc_accuracy_dataframe.loc[feature_selection_method, "smote"] = smote
            auc_accuracy_dataframe.loc[feature_selection_method, "classifier"] = classifier_method
            auc_accuracy_dataframe.loc[feature_selection_method, "feature selection method"] = feature_selection_method
            auc_accuracy_dataframe.loc[feature_selection_method, "mean AUC"] = round(np.mean(top_auc_list), 2)
            auc_accuracy_dataframe.loc[feature_selection_method, "stdev AUC"] = round(aucstdev,4)
            auc_accuracy_dataframe.loc[feature_selection_method, "CI AUC"] = str(round(np.mean(top_auc_list), 2)) +' +/_ '+ str(round(auc_CI_value, 3))
            auc_accuracy_dataframe.loc[feature_selection_method, "mean Accuracy"] = round(np.mean(top_accuracy_list), 2)
            auc_accuracy_dataframe.loc[feature_selection_method, "stdev Accuracy"] = round(accuracystdev, 4)
            auc_accuracy_dataframe.loc[feature_selection_method, "CI Accuracy"] = str(round(np.mean(top_accuracy_list), 2)) + ' +/- ' + str(round(accuracy_CI_value, 3))
            auc_accuracy_dataframe.loc[feature_selection_method, "features selected"] = top_feature_combination
        # print(auc_accuracy_dataframe)
        # auc_accuracy_dataframe.to_csv(classifier_method + 'temp.csv')
        # Rank the selectors for this classifier and keep only the best row.
        sorted_auc_accuracy_dataframe = auc_accuracy_dataframe.sort_values(by=['mean AUC', 'stdev AUC', 'mean Accuracy',
                                                                               'stdev Accuracy'], ascending=[False, True, False, True],
                                                                           axis=0)
        # print(sorted_auc_accuracy_dataframe)
        # sorted_auc_accuracy_dataframe.to_csv(classifier_method + 'sort_temp.csv')
        # print(sorted_auc_accuracy_dataframe.head(1))
        # top_auc_accuracy_dataframe = sorted_auc_accuracy_dataframe
        top_auc_accuracy_dataframe = sorted_auc_accuracy_dataframe.head(1)
        model_summary_dataframe = model_summary_dataframe.append(top_auc_accuracy_dataframe, ignore_index=True)
        # print(summary_dataframe)
        classifier_auc_dict[classifier_method] = class_auc_list
    # print(model_summary_dataframe)
    model_summary_dataframe.to_csv(input_directory + 'model_summary_results.csv', index=False)
    # One-way ANOVA across the three classifiers' best AUC samples.
    F, p = stats.f_oneway(classifier_auc_dict['MLP'], classifier_auc_dict['SVM'], classifier_auc_dict['RDMF'])
    print(F)
    print(p)
    # Flatten into (value, group-label) pairs for the Tukey HSD post-hoc test.
    tucky_auc_list = []
    tucky_class_list = []
    for auc in classifier_auc_dict['MLP']:
        tucky_class_list.append('MLP')
        tucky_auc_list.append(auc)
    for auc in classifier_auc_dict['SVM']:
        tucky_class_list.append('SVM')
        tucky_auc_list.append(auc)
    for auc in classifier_auc_dict['RDMF']:
        tucky_class_list.append('RDMF')
        tucky_auc_list.append(auc)
    print(tucky_auc_list)
    print(tucky_class_list)
    mc = MultiComparison(tucky_auc_list, tucky_class_list)
    mc_results = mc.tukeyhsd()
    print(mc_results)
    return model_summary_dataframe, feat_sel_dataframe
def top_auc_accuracy_list(data_measure):
    """Pick the best feature-count row (rows 5..15 only, i.e. 6+ features)
    and return its 50 per-repeat AUC and accuracy values.

    Ranking: mean AUC desc, AUC stddev asc, mean accuracy desc,
    accuracy stddev asc.

    :return: (auc_values, accuracy_values, feature_combination)
    """
    candidates = data_measure.iloc[5:16, :]
    ranked = candidates.sort_values(
        by=['auc_mean', 'auc_stddev', 'accuracy_mean', 'accuracy_stddev'],
        ascending=[False, True, False, True],
        axis=0)
    best = ranked.head(1)
    auc_values = []
    accuracy_values = []
    feature_combination = ''
    for _, best_row in best.iterrows():
        feature_combination = best_row.loc["feature combination"]
        for run in range(1, 51):
            auc_values.append(best_row.loc["AUC-" + str(run)])
            accuracy_values.append(best_row.loc["ACCURACY-" + str(run)])
    return auc_values, accuracy_values, feature_combination
def addmeasurestoresult(data):
    """Augment each row of ``data`` (one row per feature-count) with summary
    statistics over its 50 repeated-CV measurements.

    For every row, the "AUC-1".."AUC-50" and "ACCURACY-1".."ACCURACY-50"
    columns are collected (NaNs dropped) and their stddev/mean/min/max/median
    are written back into new columns of the same frame (mutated in place).

    :param data: DataFrame of per-feature-count results.
    :return: (data, no_of_feature_list, auc_by_no_of_feat_list,
              accuracy_by_no_of_feat_list, auc_stdev_by_no_of_feat_list,
              accuracy_stdev_by_no_of_feat_list) — per-row means/stddevs in
              row order.
    """
    index_list = []
    no_of_feature_list = []
    auc_by_no_of_feat_list = []
    accuracy_by_no_of_feat_list = []
    auc_stdev_by_no_of_feat_list = []
    accuracy_stdev_by_no_of_feat_list = []
    no_of_feature = 1
    no_of_feat_auc_list = {}
    tucky_index_list = []
    tucky_auc_list = []
    for index, data_row in data.iterrows():
        index_list.append(index)
        auc_list = []
        accuracy_list = []
        # Gather the 50 repeated-CV measurements for this row.
        for i in range(1, 51):
            # print(feature_combination_result.loc["AUC-"+str(i)])
            auc_list.append(data_row.loc["AUC-" + str(i)])
            tucky_index_list.append(index)
            tucky_auc_list.append(data_row.loc["AUC-" + str(i)])
            accuracy_list.append(data_row.loc["ACCURACY-" + str(i)])
        # Drop NaN entries (str(nan) == 'nan').
        auc_list = [auc for auc in auc_list if str(auc) != 'nan']
        accuracy_list = [accuracy for accuracy in accuracy_list if str(accuracy) != 'nan']
        no_of_feat_auc_list[index] = auc_list
        # Write the summary statistics back onto the row.
        data.loc[index, "auc_stddev"] = round(np.std(auc_list),2)
        data.loc[index, "auc_mean"] = round(np.mean(auc_list),2)
        data.loc[index, "auc_min"] = np.min(auc_list)
        data.loc[index, "auc_max"] = np.max(auc_list)
        data.loc[index, "auc_median"] = np.median(auc_list)
        data.loc[index, "accuracy_stddev"] = round(np.std(accuracy_list),2)
        data.loc[index, "accuracy_mean"] = round(np.mean(accuracy_list),2)
        data.loc[index, "accuracy_min"] = np.min(accuracy_list)
        data.loc[index, "accuracy_max"] = np.max(accuracy_list)
        data.loc[index, "accuracy_median"] = np.median(accuracy_list)
        no_of_feature_list.append(no_of_feature)
        no_of_feature += 1
        accuracy_by_no_of_feat_list.append(np.mean(accuracy_list))
        auc_by_no_of_feat_list.append(np.mean(auc_list))
        auc_stdev_by_no_of_feat_list.append(np.std(auc_list))
        accuracy_stdev_by_no_of_feat_list.append(np.std(accuracy_list))
    # Disabled ANOVA/Tukey comparison across feature counts, kept for reference.
    # print(list(no_of_feat_auc_list.values()))
    # F, p = stats.f_oneway(no_of_feat_auc_list[0], no_of_feat_auc_list[1], no_of_feat_auc_list[2])
    # if p <= 0.05:
    #     mc = MultiComparison(tucky_auc_list, tucky_index_list)
    #     mc_results = mc.tukeyhsd()
    #     # print(mc_results)
    #     tucky_df = pd.DataFrame(data=mc_results._results_table.data[1:], columns=mc_results._results_table.data[0])
    #
    #     # print(tucky_df)
    #     tucky_true_df = tucky_df.loc[tucky_df['reject'] == True]
    #     if tucky_true_df.empty == False:
    #         print(tucky_df.loc[tucky_df['reject'] == True])
    #     print(F)
    #     print(p)
    # print(data)
    return data, no_of_feature_list, auc_by_no_of_feat_list, accuracy_by_no_of_feat_list, auc_stdev_by_no_of_feat_list, accuracy_stdev_by_no_of_feat_list
def readDataFromFile(feature_selection_method, classifier_method, input_directory):
    """Load the per-(selector, classifier) results CSV from ``input_directory``."""
    csv_path = input_directory + feature_selection_method + "-" + classifier_method + ".csv"
    return pd.read_csv(csv_path)
def readfeatimpDataFromFile(feature_selection_method, input_directory):
    """Load a selector's feature-importance CSV from ``input_directory``."""
    csv_path = input_directory + feature_selection_method + "-importance.csv"
    return pd.read_csv(csv_path)
main() |
import sys
# Redirect stdin to a local file so the same code works with test input.
sys.stdin = open("input.txt", "rt")
input = sys.stdin.readline
x = int(input())
# d[i] = minimum number of operations (subtract 1, or divide by 2/3/5 when
# divisible) needed to reduce i to 1.
d = [0] * 30001
for i in range(2, x + 1):
    d[i] = d[i - 1] + 1  # subtracting 1 is always possible
    for divisor in (2, 3, 5):
        if i % divisor == 0:
            d[i] = min(d[i], d[i // divisor] + 1)
print(d[x])
# Generated by Django 2.2.7 on 2020-03-13 09:50
from django.db import migrations, models
import stdimage.models
class Migration(migrations.Migration):
    """Initial schema for the product app: carousel banners and product details.

    Auto-generated by Django; edit with care.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        # Carousel image shown on the product page.
        migrations.CreateModel(
            name='PBanner',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', stdimage.models.StdImageField(default='', upload_to='Product/banner/', verbose_name='图片路径')),
            ],
            options={
                'verbose_name': '商品轮播图',
                'verbose_name_plural': '商品轮播图',
            },
        ),
        # One purchasable variant (color/spec/weight/price) of a product.
        migrations.CreateModel(
            name='PInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('color', models.CharField(blank=True, max_length=255, null=True, verbose_name='颜色')),
                ('norms', models.CharField(blank=True, max_length=255, null=True, verbose_name='规格')),
                ('weight', models.IntegerField(blank=True, null=True, verbose_name='重量')),
                ('price', models.IntegerField(verbose_name='价格')),
                ('image', stdimage.models.StdImageField(blank=True, default='', upload_to='Product/detail/', verbose_name='图片路径')),
                ('quantity', models.IntegerField(blank=True, default=10, verbose_name='虚拟库存')),
            ],
            options={
                'verbose_name': '商品详情',
                'verbose_name_plural': '商品详情',
            },
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 09:26:58 2019
@author: Administrator
"""
# 1. Convert an image to a Base64 data URI.
import base64
with open('C:\\Users\\Administrator\\Desktop\\pic\\mask\\Mark\\An.png','rb') as f:
    base64_data = base64.b64encode(f.read())
s = base64_data.decode()
# Fix: the source file is a PNG, so the data-URI MIME type must be image/png
# (it previously claimed image/jpeg, which breaks strict consumers).
print('data:image/png;base64,%s'%s)
|
import sqlite3
class Product:
def __init__(self):
self.connection = sqlite3.connect('db.sqlite3')
pass
def create_table(self):
cursor = self.connection.cursor()
try:
cursor.execute('create table products (id integer, name text, price text);')
except sqlite3.OperationalError:
print("Таблица уже существует!")
self.connection.commit()
def insert(self, id, name, price):
cursor = self.connection.cursor()
cursor.execute("insert into products values (?, ?, ?)", (id, name, price))
self.connection.commit()
print("Row inserted")
def update(self, id, name=None, price=None):
cursor = self.connection.cursor()
cursor.execute(f"update products set name = 'Milk' where id = {id}")
self.connection.commit()
print("Row updated")
def delete(self, id):
cursor = self.connection.cursor()
cursor.execute(f"delete from products where id = {id}")
self.connection.commit()
print("Row deleted")
# Demo: create the table, add two rows, then delete the first one.
product = Product()
product.create_table()
product.insert(1, 'Banana', 100)
product.insert(2, 'Milk', 50)
product.delete(1)
|
from radish.stepregistry import step
from radish import given, when, then
# from twentyone import *
# Radish BDD step implementations. Most steps only log that they ran; a few
# deliberately skip or fail to exercise the runner's reporting.
# background steps
@step('a global administrator named "Greg"')
def a_global_administrator_named_greg(step):
    print('STEP: Given a global administrator named "Greg"')
@step('a blog named "Greg\'s anti-tax rants"')
def a_blog_named_gregs_antitax_rants(step):
    print('STEP: Given a blog named "Greg\'s anti-tax rants"')
@step('a customer named "Wilson"')
def a_customer_named_wilson(step):
    print('STEP: Given a customer named "Wilson"')
@step("scenario 2 description")
def scenario_2_description(step):
    print("This step is not implemented yet")
@step("something else we can check happens too")
def something_else_we_can_check_happens_too(step):
    print("something else we can check happens too")
    # assert(1==0)
@step("scenario outline")
def scenario_outline(step):
    print("scenario outline")
@step("description")
def description(step):
    print("description")
# table-driven step: the feature file supplies rows via step.table
@given('the following people exist')
def the_following_people_exist(step):
    step.context.users = step.table
@given('some precondition 1')
def some_precondition_1(step):
    print(u'STEP: Given some precondition 1')
@when('some action by the actor')
def some_action_by_the_actor(step):
    print('STEP: When some action by the actor')
@when('some other action')
def some_other_action(step):
    print('STEP: When some other action')
@then('some testable outcome is achieved')
def some_testable_outcome_is_achieved(step):
    print('STEP: Then some testable outcome is achieved')
    # assert(1==0)
# @then('something else we can check happens too')
# def something_else_we_can_check_happens_too(step):
#     print('STEP: Then something else we can check happens too')
# # assert(1==0)
# @step("something else we can check happens too")
# def something_else_we_can_check_happens_too(step):
#     print("And something else we can check happens too")
# scenario 2
@given('some precondition')
def some_precondition(step):
    # print('STEP: Given some precondition')
    step.skip()
# step with doc-string (multiline text) payload
@given('some other precondition with doc string')
def some_other_precondition_with_doc_string(step):
    step.skip()
    # step.context.quote = step.text
    # print('STEP: Given some other precondition with doc string')
@when('yet another action')
def yet_another_action(step):
    print('STEP: When yet another action')
# # *
# @given('something else we can check happens too')
# def step_impl(context):
#     raise NotImplementedError(u'STEP: Given something else we can check happens too')
@step('I don\'t see something else')
def i_dont_see_something_else(step):
    print('STEP: Given I don\'t see something else')
    step.skip()
# scenario 3
# @given('the cow weighs {weight:g} kg')
# def the_cow_weighs_weight_kg(step, weight):
#     print('STEP: Given the cow weighs kg: ')
@step(r'Given the cow weighs {weight:g} kg')
def the_cow_weighs_weight_kg(step, weight):
    print('STEP: Given the cow weighs kg: ')
@when('we calculate the feeding requirements')
def we_calculate_the_feeding_requirements(step):
    print('STEP: When we calculate the feeding requirements')
@then('the energy should be {energy:g} MJ')
def the_energy_should_be_energy_mj(step, energy):
    # print('STEP: Then the energy should be MJ: ')
    # Intentional failure to demonstrate a failing step in reports.
    assert(1==0)
# @given(u'a blog named "Greg\'s anti-tax rants"')
# def step_impl(context):
#     raise NotImplementedError(u'STEP: Given a blog named "Greg\'s anti-tax rants"')
# @given(u'the cow weighs 500 kg')
# def step_impl(context):
#     raise NotImplementedError(u'STEP: Given the cow weighs 500 kg')
#
#
# @then(u'the energy should be 29500 MJ')
# def step_impl(context):
#     raise NotImplementedError(u'STEP: Then the energy should be 29500 MJ')
|
# pylint: disable=missing-docstring
import tensorflow as tf
def preprocess_batch(images_batch, preproc_func=None):
    """
    Build a preprocessing graph for a whole batch from a per-image function.

    :param images_batch: A tensor holding a batch of images.
    :param preproc_func: (optional function) Maps a single-image tensor to
        its preprocessed form; when omitted the batch is returned untouched.
    """
    if preproc_func is None:
        return images_batch
    with tf.variable_scope('preprocess'):
        batch_size = int(images_batch.shape[0])
        processed = []
        for single_img in tf.split(images_batch, batch_size):
            # Drop the leading batch dim, preprocess, then restore it.
            flat_img = tf.reshape(single_img, single_img.shape[1:])
            processed.append(tf.expand_dims(preproc_func(flat_img), axis=0))
        return tf.concat(processed, axis=0)
|
from fastecdsa.curve import secp256k1
from fastecdsa.keys import export_key, gen_keypair
from fastecdsa import curve, ecdsa, keys, point
from hashlib import sha256
def sign(m):
    """Generate a fresh secp256k1 key pair and ECDSA-sign message ``m``.

    :return: (public_key, [r, s]) where r and s are the signature integers.
    """
    secret = keys.gen_private_key(secp256k1)
    pub = keys.get_public_key(secret, secp256k1)
    r, s = ecdsa.sign(m, secret, secp256k1, sha256, False)
    # Sanity-check the produced types before handing them back.
    assert isinstance( pub, point.Point )
    assert isinstance( r, int )
    assert isinstance( s, int )
    return( pub, [r, s] )
# m = "a message to sign via ECDSA"
# print(sign(m))
|
# !/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# 转码 'clip1' 'IP|xml_path|path|项目id|场id|xml_id|command_id|clip1'
# 回插 'clip2' 'IP|video_path|img_path|frame|width|height|id|command_id|clip2'
# 打包 'clip3' 'IP|FUY/001|xml_path|command_id|clip3'
import os
import time
import shutil
import platform
import socket
from createThumbnail import CreateThumbnail
from httpUrl import CallBack
from upload import UploadFile
import config
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def myServer():
    """Accept TCP connections on config.port and serve each via handle().

    Windows-only helper (the command handlers shell out to explorer.exe);
    exits immediately on other platforms or when the local IP cannot be
    resolved.
    """
    # NOTE(review): the message (translated: "This is the Windows platform,
    # please use the corresponding script!") fires when the platform is NOT
    # Windows -- probably meant "this script is for Windows"; confirm intent.
    if platform.system() != 'Windows':
        print '这是Windows平台,请使用相应脚本!'
        exit()
    try:
        HOST = socket.gethostbyname(socket.gethostname())
    except:
        # Bare except: any resolution failure aborts with a message
        # (translated: "Cannot get local IP, contact IT").
        print '无法获取本机IP,请联系IT'
        exit()
    PORT = config.port
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts of the server on the same port.
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((HOST, PORT))
    print 'Local IP:', HOST,":",PORT
    print "waiting for connection ......"
    s.listen(5)
    while 1:
        # Blocking accept loop: one client handled at a time, sequentially.
        conn, addr = s.accept()
        print "connected form ....", addr
        # Three ways to add concurrency: multiprocessing, multithreading, coroutines.
        # Multiprocessing error on Mac 10.13: objc[72931]: +[__NSPlaceholderDate initialize] may have been in progress in another thread when fork() was called.
        # QtCore cannot be combined with multiprocessing; the program exits unexpectedly.
        # Windows sockets cannot be used with multiprocessing because they cannot be pickled (serialized).
        # Threads/coroutines could serve sockets concurrently, but the Qt UI may only run in the main thread; only QThread would allow concurrency.
        # So concurrent socket handling is not possible here on Mac or Windows.
        handle(conn)
def handle(conn):
    """Serve one client connection until it closes.

    Each received message is a pipe-separated record whose LAST field names
    the command ("open_dai", "Folder", "YunFolder", "Dailies1", "lgt_dai",
    "download", "Dailies2", "Reference", "ShotTask"/"AssetTask"); the other
    fields are command-specific arguments (see the format comments at the
    top of the file). Error messages printed below (translated: "parameter
    error") fire when a record has the wrong number of fields.
    """
    while True:
        data = conn.recv(1024)
        if not data:
            # Peer closed the connection.
            break
        print('recv data:', data)
        data_split = data.strip().split("|")
        sep = os.sep
        # Server share roots come from the config module.
        server_all = config.All
        server_post = config.Post
        server_ref = config.Reference
        server_dai = config.Dailies
        server_outcompany = config.OutCompany
        if data_split[-1] == "open_dai":
            # Open a dailies folder in Windows Explorer.
            try:
                file_path, Uptask = data_split
            except:
                print '参数错误:',data_split
            else:
                file_path = file_path.replace("/", "\\")
                if os.path.exists(server_all+file_path):
                    os.popen('explorer.exe %s' % (server_all + file_path)).close()
                elif os.path.exists(server_dai+file_path):
                    os.popen('explorer.exe %s' % (server_dai + file_path)).close()
        elif data_split[-1] == "Folder":
            # Open a folder, trying each known server root in turn.
            try:
                file_path, Uptask = data_split
            except:
                print '参数错误:', data_split
            else:
                file_path = file_path.replace("/", "\\")
                if os.path.exists(server_all+file_path):
                    os.popen('explorer.exe %s' % (server_all + file_path)).close()
                elif os.path.exists(server_dai+file_path):
                    os.popen('explorer.exe %s' % (server_dai + file_path)).close()
                elif os.path.exists(server_post+file_path):
                    os.popen('explorer.exe %s' % (server_post + file_path)).close()
                elif os.path.exists(server_ref+file_path):
                    os.popen('explorer.exe %s' % (server_ref + file_path)).close()
        elif data_split[-1] == "YunFolder":
            # Open an out-of-company delivery folder keyed by project + date.
            try:
                file_path, create_time, Uptask = data_split
            except:
                print '参数错误:', data_split
            else:
                file_path = file_path.replace("/", "\\")
                projectName = file_path.split('_')[1]
                # SECURITY NOTE(review): eval() on a value received from the
                # socket -- int(create_time) would be safer here.
                create_time = time.strftime("%Y%m%d",time.localtime(eval(create_time)))
                path = server_outcompany %(projectName,create_time)+ file_path
                print path
                try:
                    if os.path.exists(path):
                        os.popen('explorer.exe %s' % path).close()
                    else:
                        print 'the directory not exit'
                except Exception as e:
                    print e
        elif data_split[-1] == "Dailies1": # /FUY/001/001/stuff/cmp|file_name|command_id|Dailies1
            try:
                file_path, file_name, command_id, UpTask = data_split
            except:
                print '参数错误:', data_split
            else:
                # Plain dailies upload; rate/frame/task fields left empty.
                UploadFile().upload_dailies(server_all, file_path, file_name, command_id, '', '', '')
                conn.send('dailies')
        elif data_split[-1] == "lgt_dai":
            # Lighting dailies upload (adds frame-rate and frame arguments).
            try:
                file_path, file_name, command_id, rate, frame, UpTask = data_split
            except:
                print '参数错误:', data_split
            else:
                UploadFile().upload_dailies(server_all, file_path, file_name, command_id, rate, frame, UpTask)
                conn.send('lgt_dai')
        elif data_split[-1] == "download": # huanyu_Fuy_1|download
            # Let the user pick a download target and translate the Windows
            # drive letter into the matching server mount name.
            print 'Do not choose local disk'
            downloadPath = UploadFile().select_dir('')
            if downloadPath == '':
                downloadPath = 'nothing selected'
            elif downloadPath.startswith('L'):
                pathList = downloadPath.split('\\')
                pathList[0] = 'Library'
                downloadPath ='/' + '/'.join(pathList)
            elif downloadPath.startswith('X'):
                pathList = downloadPath.split('\\')
                pathList[0] = 'Tron'
                downloadPath = '/' + '/'.join(pathList)
            elif downloadPath.startswith('J'):
                pathList = downloadPath.split('\\')
                pathList[0] = 'Post'
                downloadPath = '/' + '/'.join(pathList)
            elif downloadPath.startswith('G'):
                pathList = downloadPath.split('\\')
                pathList[0] = 'Illuminafx'
                downloadPath = '/' + '/'.join(pathList)
            elif downloadPath.startswith('W'):
                pathList = downloadPath.split('\\')
                pathList[0] = 'Public'
                downloadPath = '/' + '/'.join(pathList)
            elif downloadPath.startswith('Y'):
                pathList = downloadPath.split('\\')
                pathList[0] = 'Dailies'
                downloadPath = '/' + '/'.join(pathList)
            elif downloadPath.startswith('T'):
                pathList = downloadPath.split('\\')
                pathList[0] = ''
                downloadPath = '/' + '/'.join(pathList)
            print 'downloadPath:', downloadPath
            conn.send(downloadPath)
        elif data_split[-1] == "Dailies2":
            try:
                file_path, file_name, command_id, UpTask = data_split
            except:
                print '参数错误:', data_split
            else:
                fileNow = file_name + ".mov"
                # Rebuild file_path: /FUY/stuff/dmt
                file_path = file_path + '/mov'
                outputPath = "D:/TronDailies/%s" % file_name
                server_name = server_all
                fileD = server_name + "/" + file_path + "/" + file_name
                fileAll = fileD + "/" + fileNow
                # Clear any stale converter output before re-running it.
                if os.path.isdir(outputPath):
                    shutil.rmtree(outputPath)
                # Run the converter on the network share; .read() blocks
                # until it finishes.
                os.popen("python //192.168.100.99/Public/tronPipelineScript/IlluminaConverter_v002/IlluminaConverter_v002.py %s" % fileNow).read()
                print outputPath
                if os.path.isdir(outputPath):
                    file_old = outputPath + '/' + fileNow
                    shutil.copy(file_old, fileAll)
                if os.path.exists(fileAll):
                    CreateThumbnail().run(fileAll)
                    CallBack().dai_callback(command_id, file_path + "/" + file_name, fileNow, fileAll)
            finally:
                # Always acknowledge, even when conversion failed.
                conn.send('dailies2')
        elif data_split[-1] =="Reference":
            try:
                file_path, file_name, sql_data, UpTask = data_split
            except:
                print '参数错误:', data_split
            else:
                UploadFile().upload_reference(server_ref, file_path, file_name, sql_data)
                conn.send('ref')
        elif data_split[-1] == 'ShotTask' or data_split[-1] == 'AssetTask': # submit/publish dialog
            # "HAC" "01" "001" "rig" "liangcy" "fileName" "ShotTask"
            if data_split[-1] == 'ShotTask':
                try:
                    projectName,seqName,shotName,type_,userName,fileName,UpTask = data_split
                except:
                    print '参数错误:', data_split
                else:
                    file_path = projectName + sep + seqName + sep + shotName + sep + 'Stuff' + sep + type_ + sep + 'publish' + sep + fileName
                    # fileList = UploadFile().select_files()
                    # for file in fileList:
                    #     shutil.copy(file, file_path)
            else:
                try:
                    projectName, type_, userName, fileName, UpTask = data_split
                except:
                    print '参数错误:', data_split
                else:
                    file_path = projectName + sep + 'Stuff' + sep + type_ + sep + 'publish' + sep + fileName
            # NOTE(review): if the unpacking above failed, type_/file_path are
            # unbound here and this raises NameError.
            # Lighting/comp publishes live on the Post server, everything else on All.
            if type_ == "lgt" or type_ == "cmp":
                os.popen('explorer.exe %s' % (server_post + sep + file_path)).close()
                print (server_post + file_path)
            else:
                os.popen('explorer.exe %s' % (server_all + sep + file_path)).close()
                print (server_all + file_path)
    conn.close()
if __name__ == '__main__':
    # Script entry point: start the blocking socket server.
    myServer()
|
# Cricket stats for three players; each entered score is assumed to have
# come off 60 balls.
p1 = int(input("Enter score by player 1: "))
p2 = int(input("Enter score by player 2: "))
p3 = int(input("Enter score by player 3: "))
# Strike rate = runs per 100 balls.
print("Strike rate of player 1: ", p1 * 100 / 60)
print("Strike rate of player 2: ", p2 * 100 / 60)
print("Strike rate of player 3: ", p3 * 100 / 60)
# Doubling the balls doubles the projected score at the same rate.
print("Score of player 1 if 60 more balls are given : ", p1 * 2)
print("Score of player 2 if 60 more balls are given : ", p2 * 2)
print("Score of player 3 if 60 more balls are given: ", p3 * 2)
# A six is worth 6 runs, so at most score // 6 sixes.
print("Maximum number of sixes player 1 could hit: ", p1 // 6)
print("Maximum number of sixes player 2 could hit: ", p2 // 6)
print("Maximum number of sixes player 3 could hit: ", p3 // 6)
|
# -*- coding: utf-8 -*-
from unittest.mock import patch
from tests.fixtures.benchmark import ComparativeBenchmarkMother
from bench.app.benchmark.domain.config import NotificationsConfig
from bench.app.benchmark.domain.events import ComparativeBenchmarkFinished
from bench.app.benchmark.infrastructure.repositories import ComparativeBenchmarkInMemoryRepository
from bench.app.benchmark.use_cases.listeners.sms_alert_listener import ComparativeBenchmarkFinishedSmsAlertListener
from bench.app.notifications.use_cases.commands import SendSmsCommand
notifications_config = NotificationsConfig(notification_sms_phone_number='some number')
@patch('bench.app.benchmark.domain.specification.SubjectLoadedTwiceAsSlowThanAtLeastOfCompetitorsSpecification')
def test_it_should_send_sms_if_subject_url_loaded_twice_as_slow_as_at_least_one_of_competitors(specification_mock):
    """An SMS command is produced when the slowness specification is satisfied."""
    # given: the specification reports the subject as slow
    specification_mock.is_satisfied_by.return_value = True
    stored_benchmark = ComparativeBenchmarkMother.create_any()
    repo = ComparativeBenchmarkInMemoryRepository()
    repo.add(stored_benchmark)
    # when: the finished event is handled
    listener = ComparativeBenchmarkFinishedSmsAlertListener(notifications_config, specification_mock, repo)
    command = listener.execute(ComparativeBenchmarkFinished(stored_benchmark.benchmark_id))
    # then: an SMS to the configured number is requested
    assert command == SendSmsCommand(notifications_config.notification_sms_phone_number, 'Your site is very slow')
@patch('bench.app.benchmark.domain.specification.SubjectLoadedTwiceAsSlowThanAtLeastOfCompetitorsSpecification')
def test_it_should_not_send_sms_if_subject_url_loaded_faster_than_competitors(specification_mock):
    """No SMS command is produced when the slowness specification fails."""
    # given: the specification reports the subject as fast enough
    specification_mock.is_satisfied_by.return_value = False
    stored_benchmark = ComparativeBenchmarkMother.create_any()
    repo = ComparativeBenchmarkInMemoryRepository()
    repo.add(stored_benchmark)
    # when: the finished event is handled
    listener = ComparativeBenchmarkFinishedSmsAlertListener(notifications_config, specification_mock, repo)
    # then: nothing is returned
    assert listener.execute(ComparativeBenchmarkFinished(stored_benchmark.benchmark_id)) is None
|
# Create your views here.
import datetime
import random
import hashlib
from django.template.loader import get_template
from django.template import Context, RequestContext
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.models import User
from django.contrib import auth
from django.shortcuts import render_to_response, get_object_or_404
from django.core.context_processors import csrf
from latex.models import *
from latex.forms import *
def home(request):
    """Render the site base template as the landing page."""
    template = get_template('template.html')
    return HttpResponse(template.render(Context({})))
#Project view, which is to be shown after user login.
def projects(request):
    """List the logged-in user's projects.

    Returns the plain text 'False' for anonymous users, matching the
    convention already used by the login helpers in this module.
    """
    if not request.user.is_authenticated():
        # BUG FIX: previously this case fell through and returned None,
        # which makes Django raise "view didn't return an HttpResponse".
        return HttpResponse('False')
    u = User.objects.get(username=request.user.username)
    project_list = Project.objects.filter(author=u)
    return render_to_response("projects.html", {'project_list' : project_list})
#create a project
def create_project(request):
    """Create a new Project for the logged-in user from POSTed form data.

    GET renders the empty form; POST validates and saves, answering with a
    short plain-text status message.
    """
    if request.POST:
        new_data = request.POST.copy()
        form = ProjectForm(new_data)
        # Reject any blank field up front.
        for i in new_data.values():
            if i == "":
                return HttpResponse("Do not leave as blank")
        if not form.is_valid():
            # BUG FIX: the invalid form used to be saved anyway (only a
            # Python-2 debug `print "error"` happened here); reject it now.
            return HttpResponse("Invalid project data")
        user = User.objects.get(username=request.user.username)
        short_name = form.data['short_name']
        long_name = form.data['long_name']
        description = form.data['description']
        new_project = Project(author=user, short_name=short_name, long_name=long_name, description=description)
        new_project.save()
        return HttpResponse("Created new project successfully")
    else:
        form = ProjectForm()
        return render_to_response('new_project.html', {'form':form,}, context_instance=RequestContext(request))
#project_view: shows the files in a project.
def project_view(request, project_id):
    """Show a project's files, newest first."""
    project = Project.objects.get(id=project_id)
    # Reuse the already-fetched project instead of querying it a second time.
    file_list = project.file_set.all().order_by("-created")
    return render_to_response('project-view.html', {'project':project, 'file_list': file_list}, context_instance=RequestContext(request))
#create_file: create a new file, and add it to database
def create_file(request, project_id):
    """Create a new File inside the given project from POSTed form data.

    GET renders the empty form; POST validates fields are non-blank and
    saves the File row.
    """
    if request.POST:
        new_data = request.POST.copy()
        form = FileCreateForm(new_data)
        for i in new_data.values():
            if i == "":
                return HttpResponse('Do not leave as blank')
        # BUG FIX: removed a Python-2-only debug statement `print project_id`.
        project = Project.objects.get(id=project_id) #project to which file is associated.
        file_name = form.data['file_name']
        file_type = form.data['file_type']
        content = form.data['content']
        created = datetime.datetime.today()
        # NOTE(review): `content` is read but never stored on the File row --
        # confirm whether File has a content field that should receive it.
        new_file = File(project=project,file_name=file_name, file_type=file_type, created=created)
        new_file.save()
        return HttpResponse("Created file!")
    else:
        form = FileCreateForm()
        return render_to_response('file-edit.html', {'form':form}, context_instance=RequestContext(request))
#User Registration
def register_user(request):
    """Register a new user account from POSTed form data.

    GET renders the registration form; POST answers with short plain-text
    status messages for each failure mode (blank field, taken username,
    invalid email, short or mismatched passwords).
    """
    if request.POST:
        new_data = request.POST.copy()
        form = RegistrationForm(new_data)
        valid_user = True
        # Reject any blank field up front.
        for i in new_data.values():
            if i == "":
                return HttpResponse("Do not leave as blank")
        try:
            # EAFP: an existing user means the name is taken; DoesNotExist
            # means it is free (valid_user=False == "no clash found").
            User.objects.get(username=str(form.data['user']))
            return HttpResponse("Username already taken.")
        except User.DoesNotExist:
            valid_user = False
        if form.is_valid() == False:
            return HttpResponse("Invalid Email ID")
        if valid_user==False and form.data['password1']==form.data['password2']:
            if len(form.data['password1']) < 6:
                return HttpResponse("Passwords should be atleast <br /> 6 characters in length")
            new_user = form.save()
            # NOTE(review): random.random() is not a cryptographic source;
            # consider a secrets-based token for the activation key.
            salt = hashlib.new('sha', str(random.random())).hexdigest()[:5]
            activation_key = hashlib.new('sha', salt+new_user.username).hexdigest()
            # Activation key is valid for 2 days.
            key_expires = datetime.datetime.today()+datetime.timedelta(2)
            # NOTE(review): the profile is created already active even though
            # an activation key is generated -- confirm the intended flow.
            new_profile = UserProfile(user=new_user, activation_key=activation_key, key_expires=key_expires, is_active=True)
            new_profile.save()
            return HttpResponse('User added successfully')
        else:
            return HttpResponse('Re-enter passwords again.')
    else:
        form = RegistrationForm()
        return render_to_response('register.html', {'form':form,}, context_instance=RequestContext(request))
#User login
def user_login(request):
    """Log a user in (or out) via POST; GET renders the login form.

    A POST containing a 'logout' key logs the current session out.
    Otherwise 'username'/'password' are authenticated; the response body is
    the plain text 'True' on success and 'False' on failure.
    """
    if request.POST:
        new_data = request.POST.copy()
        # BUG FIX: dict.has_key() is Python-2-only (removed in Python 3);
        # the `in` operator is equivalent and works in both versions.
        if 'logout' in new_data:
            auth.logout(request)
            return HttpResponse('True')
        user = str(new_data['username'])
        password = str(new_data['password'])
        user_session = auth.authenticate(username=user, password=password)
        if user_session:
            auth.login(request, user_session)
            return HttpResponse('True')
        else:
            return HttpResponse('False')
    else:
        form = UserLogin()
        return render_to_response('user_login.html', {'form':form,}, context_instance=RequestContext(request))
#To check user logged in or not
def is_logged_in(request):
    """Return the username as plain text when authenticated, else 'False'."""
    if not request.user.is_authenticated():
        return HttpResponse('False')
    return HttpResponse(str(request.user.username))
|
import logging
import findspark
import pyspark.sql.functions as F
from pyspark.conf import SparkConf
from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
# This is not recognized by IntelliJ!, but still works.
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import SparkSession
import mleap.pyspark
from mleap.pyspark.spark_support import SimpleSparkSerializer
from src.pydad import __version__
from src.pydad.conf import ConfigParams
# Imports MLeap serialization functionality for PySpark
"""
Ref: https://towardsdatascience.com/machine-learning-with-pyspark-and-mllib-solving-a-binary-classification-problem-96396065d2aa
"""
def main():
    """Train and serialize a logistic-regression pipeline on the DAD dataset.

    Reads the CSV configured in ConfigParams, derives a binary TLOS label,
    fits a Spark ML pipeline on the data and serializes it as an MLeap
    bundle to a hard-coded local path.
    """
    _logger = logging.getLogger(__name__)
    findspark.init(ConfigParams.__SPARK_HOME__)
    # Configuration
    conf = SparkConf(). \
        setAppName('BellSpark')
    # Spark Session replaces SparkContext; MLeap jars are pulled via packages.
    spark = SparkSession.builder. \
        appName("BellSparkTest1"). \
        config('spark.jars.packages',
               'ml.combust.mleap:mleap-spark-base_2.11:0.9.3,ml.combust.mleap:mleap-spark_2.11:0.9.3'). \
        config(conf=conf). \
        getOrCreate()
    # Read csv
    df = spark.read.csv(ConfigParams.__DAD_PATH__, header=True, inferSchema=True)
    # Select TLOS and summary variables. As an example, we take only 10
    # df = df.select(df.columns[154:])
    df = df.select(df.columns[154:164])
    # String type converted to float type.
    # This is not required as all are Integer
    # df = df.select(*(col(c).cast("float").alias(c) for c in df.columns))
    # Change all NA to 0
    df = df.na.fill(0)
    # Recode TLOS_CAT to binary: 1 when the stay exceeds 5, else 0.
    df = df \
        .withColumn('TLOS_CAT_NEW', F.when(df.TLOS_CAT <= 5, 0).otherwise(1)) \
        .drop(df.TLOS_CAT)
    df.printSchema()
    # df = df.select(df.columns[6:])
    # df.printSchema()
    feature_assembler = VectorAssembler(inputCols=df.select(df.columns[6:]).schema.names, outputCol="features")
    df = feature_assembler.transform(df)
    # Train and Test
    train, test = df.randomSplit([0.7, 0.3], seed=2018)
    print("Training Dataset Count: " + str(train.count()))
    print("Test Dataset Count: " + str(test.count()))
    lr = LogisticRegression(featuresCol='features', labelCol='TLOS_CAT_NEW', maxIter=10)
    stages = []
    stages += [lr]
    pipeline = Pipeline(stages=stages)
    # NOTE(review): the pipeline is fit on the FULL df, so the train/test
    # split above is never used for fitting or evaluation -- confirm intent.
    pipelineModel = pipeline.fit(df)
    predictions = pipelineModel.transform(df)
    # Predict
    predictions.select('TLOS_CAT_NEW', 'prediction').show(100)
    # Serialize (requires the SimpleSparkSerializer import at file top).
    pipelineModel.serializeToBundle("jar:file:/home/beapen/scratch/pyspark.example.zip", pipelineModel.transform(df))
    _logger.info("Script ends here")
    print(__version__)
if __name__ == '__main__': # if we're running file directly and not importing it
main() # run the main function
|
def main():
    """Read three integer side lengths and print the triangle classification."""
    try:
        a = int(input('Digite o primeiro lado do triângulo:\n'))
        b = int(input('Digite o segundo lado do triângulo:\n'))
        c = int(input('Digite o terceiro lado do triângulo:\n'))
    # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # only int() conversion (ValueError) and a closed stdin (EOFError) are
    # expected failures here.
    except (ValueError, EOFError):
        return print('\nTivemos algum problema. \nVerifique se digitou algum número errado. :c\n')
    print('\nSeu triângulo é:', evaluate(a, b, c), '\n')
def evaluate(a, b, c):
    """Classify a triangle by its side lengths.

    Returns 'Equilátero.' (all sides equal), 'Escaleno.' (all different) or
    'Isósceles.' (exactly two equal). The triangle inequality is NOT
    checked; impossible side sets still get a classification.
    """
    if a == b == c:
        return 'Equilátero.'
    # BUG FIX: the old scalene test (a != b and a != c) ignored b == c, so
    # e.g. (3, 4, 4) was wrongly reported as 'Escaleno.'.
    if a != b and a != c and b != c:
        return 'Escaleno.'
    return 'Isósceles.'
main(); |
import time
from getgauge.python import step
from getgauge.python import DataStoreFactory, Messages
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from hamcrest import *
from step_impl.pages.signup_page import signup_as
from step_impl.api.MobileApp import MobileAppClient
from Browser import BrowserFactory
@step('Create customer account for <first_name> <last_name> with <email>')
def create_new_customer(firstname, lastname, email):
    """Register a storefront customer and cache the driver under the email."""
    browser = BrowserFactory.getWebdriver()
    DataStoreFactory.spec_data_store().put(email + '_driver', browser)
    signup_as(browser, firstname, lastname, email, password="passw0rd")
    # Wait for the result banner, then accept either a fresh registration
    # or an already-registered notice.
    banner_text = WebDriverWait(browser, 10).until(
        lambda d: d.find_element(By.CLASS_NAME, 'messages').text)
    assert_that(banner_text, any_of(
        contains_string("Thank you for registering"),
        contains_string("There is already an account with this email address")))
@step('Login from app as user <email>')
def get_token_for_user(email):
    """Create a mobile-app client for ``email``, cache it, and fetch a token."""
    client = MobileAppClient(email=email)
    DataStoreFactory.spec_data_store().put(email + '_app_client', client)
    client.request_token(password='passw0rd')
@step('Create a cart from app as user <email>')
def create_cart(email):
    """Open a new cart through the cached mobile-app client."""
    DataStoreFactory.spec_data_store().get(email + '_app_client').create_cart()
@step('Add item with sku <sku> to cart from app as user <email>')
def add_cart_items(sku, email):
    """Add the item identified by ``sku`` via the cached mobile-app client."""
    DataStoreFactory.spec_data_store().get(email + '_app_client').add_item_to_cart(sku)
@step('Process and submit order for cart from app as user <email> with firstname=<firstname> lastname=<lastname> street=<street> city=<city> region=<region> postcode=<postcode> country=<country> telephone=<telephone>')
def process_cart(email, firstname, lastname, street, city, region, postcode, country, telephone):
    """Set shipping and payment details, then submit the user's cart."""
    client = DataStoreFactory.spec_data_store().get(email + '_app_client')
    # Both calls take the identical argument list, in the same order.
    details = (email, firstname, lastname, street, city, region, postcode, country, telephone)
    client.set_shipping_info(*details)
    client.submit_cart_payment_info(*details)
@step('User <email> closes browser')
def close(email):
    """Quit the browser that was opened for ``email``."""
    DataStoreFactory.spec_data_store().get(email + '_driver').quit()
import os
import cv2
import time
def extract_frame(filename):
filepath = "/static/images/" + filename + '/' + str(time.time())
os.mkdir(filepath)
vidcap = cv2.VideoCapture(filepath)
success, image = vidcap.read()
count = 0
# print(success)
frame_count = vidcap.get(cv2.CAP_PROP_FRAME_COUNT)
# print(frame_count,frame_count//6)
res = []
for i in range(5):
res.append(frame_count // 6 * i)
print(res)
count_img = 0
while success:
success, image = vidcap.read()
if count in res:
filepath.append()
cv2.imwrite(filepath + '/' + str(count_img) + '.jpg', image) # save frame as JPEG file
count_img += 1
count += 1
return filepath |
#-*- coding: utf-8 -*-
# Decorators fall into 4 cases, depending on whether the decorator and the
# decorated function each take parameters.
import functools
print('P1-两者都不带参数')
def decorator_one(f1):
    @functools.wraps(f1)
    def wrapper_two(*args, **kw):
        # Note: f1 is returned WITHOUT being called, so calling the
        # decorated f1() prints the banner and hands back the original
        # function object instead of running it.
        print('start')
        print('call ' + f1.__name__)
        print('end')
        return f1
    return wrapper_two
@decorator_one
def f1():
    pass
f1()
print('*****')
f1()
print('*****')
print('P2-装饰函数不带参数,被装饰函数带参数')
def decorator_two(f2):
    # Define the wrapper that stands in for the decorated function; using
    # *args/**kw makes it work for any signature.
    @functools.wraps(f2)
    def wrapper_two(*args, **kw):
        print('start')
        print('call ' + f2.__name__)
        f2(*args, **kw)
        print('end')
    # return f2 ******* the decorated function is already CALLED inside the
    # wrapper, so it does not need to be returned again *******
    return wrapper_two
@decorator_two
def f2(s):
    print(s)
f2('test')
print('*****')
f2('test')
print('*****')
print('P3-装饰函数带参数,被装饰函数不带参数')
# An extra outer layer is added to carry the decorator's own arguments.
def my_decorator_three(*args,**kw):
    # This is where the actual decorator starts; it receives the function f3.
    def decorator_three(f3):
        # @functools.wraps(f3) is optional here, since f3 is returned
        # unchanged its __name__ cannot be wrong anyway.
        # These prints run ONCE, at decoration time -- not on each call.
        print('start')
        print('f3的装饰器函数自带参数为:',*args)
        print('call ' + f3.__name__)
        print('end')
        return f3
    return decorator_three
@my_decorator_three('my_decorator_three')
def f3():
    pass
f3()
print('*****')
f3()
print('*****')
print('P4-装饰器函数和被装饰函数都带参数')
# The outermost layer carries the decorator's own arguments.
def my_decorator_four(*args_decorator,**kw_decorator):
    # This layer receives the function to decorate, f4.
    def decorator_four(f4):
        @functools.wraps(f4)
        # The wrapper layer forwards the decorated function's own arguments.
        def wrapper(*args,**kw):
            print('start')
            print('f4的装饰器函数自带参数为:',*args_decorator)
            print(f4.__name__ + ' is working')
            # f4 is invoked (with its arguments) right here, so it does not
            # need to be returned afterwards.
            f4(*args, **kw)
            print('end')
        return wrapper
    return decorator_four
@my_decorator_four('my_decorator_four')
def f4(s):
    print(s)
f4('test')
print('*****')
f4('test')
print('*****')
print('P5-习题,设计一个装饰函数带可变参数,被装饰函数不带参数的装饰器')
def my_decorator_five(*args, **kw):
    def decorator_five(f5):
        print(*args)
        print('begin call')
        # The decorated function is used (by name) right here at decoration
        # time, and returned unchanged.
        print(f5.__name__ + ' is working')
        print('end call')
        return f5
    return decorator_five
@my_decorator_five()
def f5():
    print('test')
f5  # bare reference: a deliberate no-op -- the function is NOT called here
print('*****')
f5()
print('*****')
print('=================')
@my_decorator_five('execute')
def f6():
    print('test')
f6  # bare reference again, deliberately not a call
print('*****')
f6()
print('*****')
print('=================')
# functools.wraps preserved the original names in P1/P2/P4; P3/P5 return the
# functions untouched, so the names are original in any case.
print(f1.__name__)
print(f2.__name__)
print(f3.__name__)
print(f4.__name__)
print(f5.__name__)
print(f6.__name__)
|
import os
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
import ctypes
import glob
import cv2
class DataLoader:
    """Loads and preprocesses JPEG calibration images in fixed-size batches."""
    def __init__(self, data_path, batch_size):
        # Batch cursor (see NOTE in next_batch).
        self.index = 0
        self.batch_size = batch_size
        self.img_list = glob.glob(os.path.join(data_path, "*.jpg"))
        # Network input size; images are letterboxed to width x height.
        self.width = 640
        self.height = 640
        # Reusable NCHW float32 staging buffer for exactly one batch.
        self.calibration_data = np.zeros((self.batch_size,3,self.width,self.height), dtype=np.float32)
    def reset(self):
        """Rewind the batch cursor to the start of the image list."""
        self.index = 0
    def next_batch(self, index):
        """Fill the staging buffer with one batch and return it contiguously.

        NOTE(review): the `index` parameter is ignored and self.index is
        never advanced, so every call re-reads the same first batch of
        images -- confirm whether self.index should increment (or the
        `index` argument be used).
        """
        print(self.calibration_data.shape)
        for i in range(self.batch_size):
            assert os.path.exists(self.img_list[i + self.index * self.batch_size]), 'not found!!'
            img = cv2.imread(self.img_list[i + self.index * self.batch_size])
            img = self.preprocess_v1(img)
            self.calibration_data[i] = img
        # example only
        return np.ascontiguousarray(self.calibration_data, dtype=np.float32)
    def preprocess_v1(self, image_raw):
        """BGR->RGB, letterbox to (width, height), scale to [0,1], HWC->CHW."""
        h, w, c = image_raw.shape
        image = cv2.cvtColor(image_raw, cv2.COLOR_BGR2RGB)
        # Calculate width and height and paddings
        r_w = self.width / w
        r_h = self.height / h
        if r_h > r_w:
            # Wide image: full width, pad top/bottom.
            tw = self.width
            th = int(r_w * h)
            tx1 = tx2 = 0
            ty1 = int((self.height - th) / 2)
            ty2 = self.height - th - ty1
        else:
            # Tall image: full height, pad left/right.
            tw = int(r_h * w)
            th = self.height
            tx1 = int((self.width - tw) / 2)
            tx2 = self.width - tw - tx1
            ty1 = ty2 = 0
        # Resize the image with long side while maintaining ratio
        image = cv2.resize(image, (tw, th))
        # Pad the short side with (128,128,128)
        image = cv2.copyMakeBorder(
            image, ty1, ty2, tx1, tx2, cv2.BORDER_CONSTANT, (128, 128, 128)
        )
        image = image.astype(np.float32)
        # Normalize to [0,1]
        image /= 255.0
        # HWC to CHW format:
        image = np.transpose(image, [2, 0, 1])
        # CHW to NCHW format
        #image = np.expand_dims(image, axis=0)
        # Convert the image to row-major order, also known as "C order":
        #image = np.ascontiguousarray(image)
        return image
class Calibrator(trt.IInt8EntropyCalibrator2):
    """TensorRT INT8 entropy calibrator fed by DataLoader batches."""
    def __init__(self, data_path, batch_size, cache_file=""):
        trt.IInt8EntropyCalibrator2.__init__(self)
        self.data = DataLoader(data_path, batch_size)
        # Device buffer sized for exactly one staged batch.
        self.device_input = cuda.mem_alloc(self.data.calibration_data.nbytes)
        self.cache_file = cache_file
        self.batch_size = batch_size
        self.current_index = 0
    def get_batch_size(self):
        """Batch size TensorRT should use during calibration."""
        return self.batch_size
    def get_batch(self, names):
        """Stage the next batch on the GPU; return None when data is exhausted."""
        if self.current_index + self.batch_size > len(self.data.img_list):
            return None
        current_batch = int(self.current_index / self.batch_size)
        if current_batch % 10 == 0:
            print("Calibrating batch {:}, containing {:} images".format(current_batch, self.batch_size))
        batch = self.data.next_batch(self.current_index)
        # NOTE(review): advancing by 1 while the exhaustion check above adds
        # batch_size looks inconsistent (consecutive batches would overlap);
        # confirm whether this should be `+= self.batch_size`.  See also the
        # ignored-index NOTE on DataLoader.next_batch.
        self.current_index += 1
        cuda.memcpy_htod(self.device_input, batch)
        # TensorRT expects a list of device pointers, one per input binding.
        return [int(self.device_input)]
    def read_calibration_cache(self):
        """Return a cached calibration table if present (implicit None otherwise)."""
        if os.path.exists(self.cache_file):
            with open(self.cache_file, "rb") as f:
                return f.read()
    def write_calibration_cache(self, cache):
        """Persist the calibration table produced by TensorRT."""
        with open(self.cache_file, "wb") as f:
            f.write(cache)
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/search')
def search():
    """Echo the ``q`` query-string argument back to the client."""
    return '用户提交的查询参数是: {}'.format(request.args.get('q'))
if __name__ == '__main__':
    app.run(debug=True, host='127.0.0.1', port=8081)
    # NOTE(review): app.run() blocks, so this print only executes after the
    # development server shuts down -- confirm it is intentional.
    print("www")
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 31 14:13:58 2017
@author: 竹间为简
@published in: 简书
"""
import pandas as pd
from statsmodels.tsa.stattools import adfuller
import statsmodels.tsa.stattools as st
import numpy as np
import pyflux as pf
import pickle
import os
#######################################
'''help func'''
def test_stationarity(ts):
    """Augmented Dickey-Fuller test on ``ts``; returns only the p-value."""
    # autolag='AIC' lets adfuller choose the lag length automatically.
    return adfuller(ts, autolag='AIC')[1]
def best_diff(ts, maxdiff = 3):
    """Pick the differencing order in [1, maxdiff) with the smallest ADF p-value.

    Returns None when there is no candidate order (maxdiff <= 1).
    """
    pvalues = {}
    for order in range(1, maxdiff):
        # Difference a fresh copy each round; diffing leaves leading NaNs,
        # so they are dropped before testing stationarity.
        candidate = ts.copy().diff(order).dropna()
        pvalues[order] = test_stationarity(candidate)
    # BUG FIX: the old guard `if d != None` was always true (a dict literal
    # is never None), so an empty candidate set crashed min() with
    # ValueError; test for emptiness instead.
    if pvalues:
        return min(pvalues, key=pvalues.get)
def produce_diffed_timeseries(ts, diffn):
    """Difference ``ts`` with lag ``diffn`` and drop the leading NaNs.

    With ``diffn == 0`` the series is returned as-is (NaNs are still
    dropped, in place, on the original object).
    """
    if diffn == 0:
        out = ts
    else:
        out = ts.diff(diffn)
    # Differencing turns the first `diffn` rows into NaN; remove them.
    out.dropna(inplace=True)
    return out
def choose_order(ts, maxar, maxma):
    """Grid-search ARMA orders up to (maxar, maxma); return the BIC-optimal (p, q)."""
    ic_table = st.arma_order_select_ic(ts, maxar, maxma, ic=['aic', 'bic', 'hqic'])
    return ic_table.bic_min_order
def predict_recover(ts, train, diffn):
    """Undo differencing and the log transform on a predicted series.

    Mutates ``ts`` in place for the first-value re-anchoring, then returns
    the exponentiated (original-scale) series.
    """
    if diffn != 0:
        # Re-anchor the first differenced value on the training tail, then
        # integrate the differences back with a cumulative sum.
        ts.iloc[0] = ts.iloc[0] + train[-diffn]
        ts = ts.cumsum()
    ts = np.exp(ts)
    print('还原完成')
    return ts
'''arima model'''
class ARIMA_V1:
    """Log-transform + difference + ARIMA wrapper around pyflux.

    Workflow: build() to record hyper-parameters, train() with a DataFrame,
    then save_model()/load_model()/predict().
    """
    def __init__(self,name='should be the name of saved model'):
        # name: file-name stem used when pickling the fitted model.
        self.name = name
        self.model = ''
        self.diffn = 0
    def build(self,maxar=4,maxma=4,maxdiff=6,test_size=6,save_path=''):
        """Record search bounds (AR/MA orders, max differencing), split size and save path."""
        self.maxar = maxar
        self.maxma = maxma
        self.maxdiff = maxdiff
        self.test_size = test_size
        self.save_path=save_path
    def train(self,df):
        '''Input must be a DataFrame,
        indexed by time,
        with the series to forecast in the first column.
        Returns the fitted + predicted series on the original scale.
        '''
        data = df.dropna()
        diffn = 0
        # Work in log space; predict_recover() exponentiates at the end.
        data.loc[:,'log'] = np.log(data[data.columns[0]])
        # test_size = int(len(data) * 0.33)
        train_size = len(data)-int(self.test_size)
        ts, test = data['log'][:train_size], data['log'][train_size:]
        # p-value below 0.01 => already stationary, no differencing needed.
        if test_stationarity(ts) < 0.01:
            print(len(ts),'平稳,不需要差分')
        else:
            diffn = best_diff(ts, maxdiff = self.maxdiff)
            ts = produce_diffed_timeseries(ts, diffn)
            print('差分阶数为'+str(diffn)+',已完成差分')
        print('开始进行ARMA拟合')
        order = choose_order(ts, self.maxar, self.maxma)
        print('模型的阶数为:'+str(order))
        _ar = order[0]
        _ma = order[1]
        # print(ts)
        print(type(ts))
        model = pf.ARIMA(data=ts.values, ar=_ar, ma=_ma,family=pf.Normal())
        model.fit("MLE")
        test_predict = model.predict(int(self.test_size))
        # Pull the in-sample fitted values out of the pyflux internals.
        mu,Y=model._model(model.latent_variables.get_z_values())
        fitted_values = model.link(mu)
        # Pad the head with the mean so the fitted series lines up with the
        # full data index.
        temp = np.ones((len(data)-self.test_size-len(fitted_values)))*np.mean(fitted_values)
        fitted_values = np.concatenate((temp,fitted_values))
        print(len(fitted_values),len(data))
        if self.test_size > 0:
            fitted_values = np.concatenate((fitted_values,np.array(test_predict).flatten()))
        temp = pd.Series(data=fitted_values,index=data.index)
        # Recover both series back to the original (pre-log, pre-diff) scale.
        temp = predict_recover(temp,ts,diffn)
        test_predict1 = predict_recover(test_predict, ts, diffn)
        RMSE = np.sqrt(((np.array(test_predict1)-np.array(test))**2).sum()/test.size)
        print(len(test_predict),"测试集的RMSE为:"+str(RMSE))
        self.model = model
        self.diffn = diffn
        return temp
    def save_model(self):
        """Pickle the fitted model to save_path/name-<diffn>.pkl."""
        if os.path.exists(self.save_path):
            f = open(''.join([self.save_path,self.name,'-',str(self.diffn),'.pkl']),'wb')
            pickle.dump(self.model,f)
            f.close()
        else:
            print('path does not exist')
    def load_model(self):
        """Unpickle a previously saved model into self.model."""
        # NOTE(review): self.diffn is an int here, so ''.join() raises
        # TypeError -- save_model() wraps it in str(); confirm and align.
        if os.path.exists(self.save_path):
            f = open(''.join([self.save_path,self.name,'-',self.diffn,'.pkl']),'rb')
            self.model = pickle.load(f)
            f.close()
        else:
            print('path does not exist')
    def predict(self,load=False,step=1):
        """Forecast ``step`` periods ahead, optionally loading a saved model first."""
        if load:
            self.load_model()
            print(self.model)
        else:
            print(self.model)
        return self.model.predict(step)
|
# Personalised New Year greetings for Joseph, Glenn and Sally.
amigos = ["Joseph", "Glenn", "Sally"]
# Build every greeting first, then print them in order.
saludos = [f"¡Feliz año nuevo, {amigo}!" for amigo in amigos]
for saludo in saludos:
    print(saludo)
print("¡Terminado!")
#
# @lc app=leetcode id=35 lang=python
#
# [35] Search Insert Position
#
# @lc code=start
class Solution(object):
    def searchInsert(self, nums, target):
        """
        Return the index of target in sorted nums, or the index at which it
        would be inserted to keep nums sorted.
        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        lo, hi = 0, len(nums) - 1
        # Closed-interval binary search; when the value is absent, `lo`
        # finishes exactly at the insertion point.
        while lo <= hi:
            mid = (lo + hi) // 2
            if nums[mid] == target:
                return mid
            if nums[mid] > target:
                hi = mid - 1
            else:
                lo = mid + 1
        return lo
# @lc code=end
|
import vtk
from core.progress import ProgressBar
from gui.vtkqgl import VTKQGLWidget
class VtkWidget(VTKQGLWidget):
def __init__(self, parent_qt_frame, progress_bar):
super().__init__(parent_qt_frame)
# This one indicates the progress of computationally heavy tasks
self.__progress_bar = progress_bar
# The observers of this guy
self.__observers = list()
# The render window
#self.renderer.SetBackground(0.4, 0.41, 0.42)
self.renderer.SetBackground(1.0, 1.0, 1.0)
self.enable_depthpeeling()
self.__interactor_style = vtk.vtkInteractorStyleTrackballCamera()
self.__interactor_style.AddObserver("KeyReleaseEvent", self.__on_key_released)
self.__interactor_style.AddObserver("LeftButtonPressEvent", self.__on_left_button_pressed)
self.__interactor_style.AddObserver("LeftButtonReleaseEvent", self.__on_left_button_released)
self.__interactor_style.AddObserver("RightButtonPressEvent", self.__on_right_button_pressed)
self.__interactor_style.AddObserver("RightButtonReleaseEvent", self.__on_right_button_released)
self.__interactor_style.AddObserver("MouseMoveEvent", self.__on_mouse_moved)
self.interactor.SetInteractorStyle(self.__interactor_style)
# This guy is very important: it handles all the model selection in the 3D view
self.__prop3d_picker = vtk.vtkPropPicker()
self.interactor.SetPicker(self.__prop3d_picker)
self.__perform_prop3d_picking = True
# We want to see xyz axes in the lower left corner of the window
lower_left_axes_actor = vtk.vtkAxesActor()
lower_left_axes_actor.SetXAxisLabelText("X")
lower_left_axes_actor.SetYAxisLabelText("Y")
lower_left_axes_actor.SetZAxisLabelText("Z")
lower_left_axes_actor.SetTotalLength(1.5, 1.5, 1.5)
self.__lower_left_axes_widget = vtk.vtkOrientationMarkerWidget()
self.__lower_left_axes_widget.SetOrientationMarker(lower_left_axes_actor)
self.__lower_left_axes_widget.KeyPressActivationOff()
self.__lower_left_axes_widget.SetInteractor(self.render_window_interactor)
self.__lower_left_axes_widget.SetViewport(0.0, 0.0, 0.2, 0.2)
self.__lower_left_axes_widget.SetEnabled(1)
self.__lower_left_axes_widget.InteractiveOff()
def add_observer(self, observer):
self.__observers.append(observer)
def __on_key_released(self, interactor, data):
if data == "KeyReleaseEvent":
key = self.interactor.GetKeySym()
for observer in self.__observers:
try:
observer.on_key_released(self, key)
except AttributeError:
pass
def __on_left_button_pressed(self, interactor, data):
for observer in self.__observers:
try:
observer.on_left_button_pressed(self)
except AttributeError:
pass
# Forward the event
self.__interactor_style.OnLeftButtonDown()
def __on_left_button_released(self, interactor, data):
# First, report the left button release event
for observer in self.__observers:
try:
observer.on_left_button_released(self)
except AttributeError:
pass
# Forward the event
self.__interactor_style.OnLeftButtonUp()
def __on_right_button_pressed(self, interactor, data):
for observer in self.__observers:
try:
observer.on_right_button_pressed(self)
except AttributeError:
pass
# Forward the event
self.__interactor_style.OnRightButtonDown()
def __on_right_button_released(self, interactor, data):
# First, report the left button release event
for observer in self.__observers:
try:
observer.on_right_button_released(self)
except AttributeError:
pass
# Forward the event
self.__interactor_style.OnRightButtonUp()
def __on_mouse_moved(self, interactor, data):
for observer in self.__observers:
try:
observer.on_mouse_moved(self)
except AttributeError:
pass
# Forward the event
self.__interactor_style.OnMouseMove()
def pick(self):
# Get the first renderer assuming that the event took place there
renderer = self.interactor.GetRenderWindow().GetRenderers().GetFirstRenderer()
# Where did the user click with the mouse
xy_pick_pos = self.interactor.GetEventPosition()
# Perform the picking
self.__prop3d_picker.Pick(xy_pick_pos[0], xy_pick_pos[1], 0, renderer)
# Call the user with the picked prop
return self.__prop3d_picker.GetProp3D()
def is_ctrl_key_pressed(self):
return self.interactor.GetControlKey() != 0
def is_shift_key_pressed(self):
return self.interactor.GetShiftKey() != 0
def add_models(self, models):
if not models:
return
# Tell the user we are busy
self.__progress_bar.init(1, len(models), "Adding models to 3D renderer: ")
counter = 0
# Add data to the renderer and to the internal dictionary
for model in models:
counter += 1
# We need a data item with a prop3d
try:
prop3d = model.visual_representation.prop3d
except AttributeError:
pass
else:
self.renderer.AddActor(prop3d)
# Update the progress bar
self.__progress_bar.set_progress(counter)
# Update the 3d view
self.reset_clipping_range()
# We are done
self.__progress_bar.done()
def delete_models(self, models):
for model in models:
try: # we can handle only data items that are pickable, i.e., that have a visual representation with a prop3d
prop3d = model.visual_representation.prop3d
except AttributeError:
pass
else:
self.renderer.RemoveActor(prop3d)
# Update the 3d view
self.reset_clipping_range()
    def render(self):
        """Render the scene via the render-window interactor."""
        self.render_window_interactor.Render()
    def reset_clipping_range(self):
        """Reset the camera clipping range to fit the scene, then re-render."""
        self.renderer.ResetCameraClippingRange()
        self.render_window_interactor.Render()
    def reset_view(self):
        """Modifies the camera such that all (visible) data items are in the
        viewing frustum, resets the clipping range and re-renders."""
        self.renderer.ResetCamera()
        self.renderer.ResetCameraClippingRange()
        self.render_window_interactor.Render()
def get_camera_position(self):
return self.renderer.GetActiveCamera().GetPosition()
def set_camera_position(self, position):
return self.renderer.GetActiveCamera().SetPosition(position)
def get_camera_look_at(self):
return self.renderer.GetActiveCamera().GetFocalPoint()
def set_camera_look_at(self, look_at):
return self.renderer.GetActiveCamera().SetFocalPoint(look_at)
def get_camera_view_up(self):
return self.renderer.GetActiveCamera().GetViewUp()
def set_camera_view_up(self, view_up):
return self.renderer.GetActiveCamera().SetViewUp(view_up)
|
"""
Tests for members of user groups. With these tests in place, we can
safely use principalsWith(..) elsewhere.
"""
from common import assertPrincipalsWithRole
from common import principalsWith
# Legacy permanent-credential holders, grouped by team.  Set literals
# instead of set([...]) — the intermediate list was pointless (ruff C405).
taskcluster_permacreds = {
    'client-id-alias:permacred-dustin',
    'client-id-alias:permacred-garndt',
    'client-id-alias:permacred-jhford',
    'client-id-alias:permacred-jonasfj',
    'client-id-alias:permacred-pmoore',
    'client-id-alias:permacred-selena',
    'client-id-alias:permacred-wcosta',
}
releng_permacreds = {
    'client-id-alias:permacred-bhearsum',
    'client-id-alias:permacred-jlund',
    'client-id-alias:permacred-mrrrgn',
    'client-id-alias:permacred-mshal',
    'client-id-alias:permacred-rail',
}
relops_permacreds = {
    'client-id-alias:permacred-dustin',
    'client-id-alias:permacred-rthijssen',
}
def test_releng():
    """mozilla-group:releng = releng permacreds + relops + taskcluster."""
    assertPrincipalsWithRole('mozilla-group:releng', [
        # all of the relengers
        releng_permacreds,
        # plus team_relops, because they're OK too
        principalsWith('mozilla-group:team_relops'),
        # taskcluster folks have *, hence matching this group
        principalsWith('mozilla-group:team_taskcluster'),
    ], omitTrusted=True)
def test_relops():
    """mozilla-group:team_relops = relops permacreds + taskcluster."""
    assertPrincipalsWithRole('mozilla-group:team_relops', [
        relops_permacreds,
        # taskcluster folks have *, hence matching this group
        principalsWith('mozilla-group:team_taskcluster'),
    ], omitTrusted=True)
def test_taskcluster():
    """mozilla-group:team_taskcluster = exactly the taskcluster permacreds."""
    assertPrincipalsWithRole('mozilla-group:team_taskcluster', [
        taskcluster_permacreds,
    ], omitTrusted=True)
def test_moco():
    """mozilla-group:team_moco = temp credentials + all legacy permacreds + taskcluster."""
    assertPrincipalsWithRole('mozilla-group:team_moco', [
        'client-id-alias:temporary-credentials',  # Bug 1233553
        # everyone with a legacy permacred is considered an honorary moco
        # employee
        principalsWith('legacy-permacred'),
        # taskcluster folks have *, hence matching this group
        principalsWith('mozilla-group:team_taskcluster'),
    ], omitTrusted=True)
def test_scm_level_1():
    """scm_level_1 = a fixed list of manually-granted clients + taskcluster."""
    assertPrincipalsWithRole('mozilla-group:scm_level_1', [
        # a whole bunch of people "manually" granted this role
        'client-id-alias:brson',
        'client-id-alias:drs',
        'client-id-alias:gerard-majax',
        'client-id-alias:kgrandon',
        'client-id-alias:mihneadb',
        'client-id-alias:npark',
        'client-id-alias:nullaus',
        'client-id-alias:permacred-rthijssen',
        'client-id-alias:russn',
        'client-id-alias:rwood',
        'client-id-alias:shako',
        'client-id-alias:sousmangoosta',
        # taskcluster folks have *, hence matching this group
        principalsWith('mozilla-group:team_taskcluster'),
    ], omitTrusted=True)
def test_scm_level_2():
    """scm_level_2 = only taskcluster (via their '*' scope)."""
    assertPrincipalsWithRole('mozilla-group:scm_level_2', [
        # taskcluster folks have *, hence matching this group
        principalsWith('mozilla-group:team_taskcluster'),
    ], omitTrusted=True)
def test_scm_level_3():
    """scm_level_3 = a fixed list of manually-granted clients + taskcluster."""
    assertPrincipalsWithRole('mozilla-group:scm_level_3', [
        # a whole bunch of people "manually" granted this role
        'client-id-alias:permacred-armenzg',
        'client-id-alias:permacred-armenzg-testing',
        'client-id-alias:permacred-bhearsum',
        'client-id-alias:permacred-jlund',
        'client-id-alias:permacred-mrrrgn',
        'client-id-alias:permacred-mshal',
        'client-id-alias:permacred-nhirata',
        'client-id-alias:permacred-rail',
        'client-id-alias:permacred-ted',
        'client-id-alias:temporary-credentials',
        'client-id:gandalf',
        # taskcluster folks have *, hence matching this group
        principalsWith('mozilla-group:team_taskcluster'),
    ], omitTrusted=True)
|
#! /usr/bin/env python
from setuptools import find_packages, setup
def read(filename):
    """Return the full UTF-8 text content of *filename*."""
    with open(filename, encoding="utf-8") as fp:  # "r" is the default mode
        return fp.read()
# Long description assembled from the project's top-level rst documents.
long_description = '\n\n'.join(
    [read('README.rst'), read('CREDITS.rst'), read('CHANGES.rst')]
)

setup(
    name="amazeinator",
    version="0.2.1.dev0",
    description="An amazing Python package",  # fixed "Pyhton" typo
    long_description=long_description,
    author="Mark Piper",
    author_email="mpiper@colorado.edu",
    url="https://github.com/mdpiper",
    keywords=["amazing"],
    # read() closes the file; the old bare open() leaked the handle.
    install_requires=read("requirements.txt").splitlines(),
    packages=find_packages(),
    include_package_data=True,
)
|
'''
Suppose a sorted array is rotated at some pivot unknown to you beforehand.
(i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
Find the minimum element.
Example
Given [4,4,5,6,7,0,1,2] return 0.
'''
class Solution:
    # @param num: a rotated sorted array
    # @return: the minimum number in the array
    def findMin(self, num):
        """Return the minimum element of a rotated sorted array (duplicates allowed).

        Binary search comparing num[mid] against num[end]; runs in O(log n)
        on distinct values, degrading to O(n) when duplicates force the
        one-step shrink.
        """
        if not num:
            import sys
            # Sentinel for empty input (was Py2-only -sys.maxint).
            return -sys.maxsize
        start, end = 0, len(num) - 1
        while start + 1 < end:
            mid = (start + end) // 2  # // keeps this correct on Python 3
            if num[mid] > num[end]:
                # Rotation point (the minimum) is strictly right of mid.
                start = mid + 1
            elif num[mid] < num[end]:
                # Minimum is at mid or to its left; mid itself may be the
                # answer, so keep it.  (The old `end = mid - 1` lost the
                # minimum on inputs like [3, 1, 2].)
                end = mid
            else:
                # num[mid] == num[end]: side undecidable; safely drop end.
                # Essential to avoid an infinite loop with duplicates.
                end -= 1
        return min(num[start], num[end])
'''
算法武器:二分法(基于有重复元素的RSA)
算法思想:
使用二分法模板,定义start,end指针
while循环条件为start < end
while循环体中计算mid,根据mid指向的元素的值和end指向元素的值得关系,有条件地更新start和end指针
当num[mid] > num[end]时,通过画图,我们知道,我们的解在右边,所以我们更新start = mid + 1
当num[mid] < num[end]时,通过画图,我们知道,我们的解在左边且mid本身可能就是最小值,所以我们更新end = mid(若用end = mid - 1,会在[3,1,2]这类输入上丢失答案)
当num[mid] == num[end]时,通过画图,我们知道,我们需要进一步缩小上届,因为题目中有重复元素,重复元素会导致死循环,这一步相当关键
本题的解为start和end所指元素中比较小的那个
本题的难点在于想到使用num[mid]和num[end]的关系来最终求解最小值的位置
思路上有点像三分法:
如果mid更接近最小值,那么我们就舍弃end右边部分的区间
如果end更接近最小值,我们就舍弃mid左边的区间(包括mid)
''' |
"""
Phone Numbers - SOLUTION
Parse this phone number so that a computer can process it. (Hint: It can't include any non-numeric characters.)
"""
cell = '1.192.168.0143'
cell = cell.split('.')
cell = int(''.join(cell))
print(cell) # 11921680143 |
# For sample only, not for production use
import datetime
import hashlib
import os
import shutil
# Generation knobs: each *_CHANCE is a probability in [0, 1] that
# good_hash_chance() compares a hash-derived fraction against.
MAX_ARTICLE_PER_DAY = 3
YEAR_LIST = [2000,2002,2003]
MONTH_CHANCE = 0.5
DAY_CHANCE = 0.2
ARTICLE_CHANCE = 0.5
FILENAME_CHANCE = 0.5
# One past the largest SHA-256 value, so hash/SHA_INT_END lies in [0, 1).
SHA_INT_END = int('f'*64,16)+1
# print(SHA_INT_END)
def main():
    """Generate a deterministic tree of sample blog articles under blogs/.

    All randomness is hash-derived from the date strings, so reruns
    reproduce the identical tree.
    """
    shutil.rmtree('blogs', ignore_errors=True)
    for YEAR in YEAR_LIST:
        start_date = datetime.date(YEAR,1,1)
        end_date = datetime.date(YEAR+1,1,1)
        for date in date_range(start_date, end_date):
            # Deterministically skip whole months/days to keep output sparse.
            if not good_hash_chance(date.strftime('%Y%m'), MONTH_CHANCE): continue
            if not good_hash_chance(date.strftime('%Y%m%d'), DAY_CHANCE): continue
            # FIX: use the named constant instead of a hard-coded 3.
            for article_idx in range(MAX_ARTICLE_PER_DAY):
                if not good_hash_chance(date.strftime('%Y%m%d')+str(article_idx), ARTICLE_CHANCE): continue
                yyyy = date.strftime('%Y')
                yyyymm = date.strftime('%Y-%m')
                yyyymmdd = date.strftime('%Y-%m-%d')
                yyyymmddaa = date.strftime('%Y-%m-%d') + '-' + str(article_idx).zfill(2)
                title = 'T' + hash(f'title-{yyyymmddaa}')[:12]
                content = 'content ' + hash(f'content-{yyyymmddaa}')
                # Last hex digit -> 4 bits -> one tag per set bit.
                bits = bin(int(hash(f'tag-{yyyymmddaa}')[-1:],16))[2:].zfill(4)
                tag_list = [f'tag-{i}' for i, bit in enumerate(bits) if bit != '0']
                file_path = os.path.join('blogs',yyyy,yyyymm,f'{yyyymmddaa}-{title}.html.jinja')
                # exist_ok avoids the racy isdir-then-makedirs check.
                os.makedirs(os.path.dirname(file_path), exist_ok=True)
                with open(file_path, mode='wt') as fout:
                    fout.write('{% extends "article_block.jinja" %}\n')
                    fout.write('\n')
                    fout.write(f'{{% set date = "{yyyymmdd}" %}}\n')
                    if good_hash_chance('filename'+date.strftime('%Y%m%d')+str(article_idx), FILENAME_CHANCE):
                        fout.write(f'{{% set filename = "fn{title}" %}}\n')
                    fout.write(f'{{% set order = {article_idx} %}}\n')
                    fout.write(f'{{% set title = "{title}" %}}\n')
                    fout.write(f'{{% set tag_list = {tag_list} %}}\n')
                    fout.write('\n')
                    fout.write('{% set content %}\n')
                    fout.write(f'<p>{content}</p>\n')
                    fout.write('{% endset %}\n')
def date_range(start_date, end_date):
    """Yield each date from start_date (inclusive) to end_date (exclusive)."""
    one_day = datetime.timedelta(days=1)
    current = start_date
    while current < end_date:
        yield current
        current += one_day
def hash(s):
    """Return the hex SHA-256 digest of *s*.

    NOTE: intentionally keeps the module's existing name, which shadows
    the builtin hash() within this module.
    """
    return hashlib.sha256(s.encode('utf8')).hexdigest()
def good_hash_chance(s,c):
    """Return True with deterministic pseudo-probability *c* derived from *s*.

    The SHA-256 of *s* is mapped to a fraction in [0, 1) and compared
    against the threshold *c*.
    """
    fraction = int(hash(s), 16) / SHA_INT_END
    return fraction < c
main()
|
from bloghandler import BlogHandler
from models.user import User
from models.post import Post
from models.comment import Comment
from models.like import Like
from helper import *
from google.appengine.ext import db
class LikePost(BlogHandler):
    """Handle like requests for a blog post (POST only; GET redirects back)."""

    def get_post_by_id(self, post_id):
        """Return the Post entity for *post_id*, or None if it does not exist."""
        key = db.Key.from_path('Post', int(post_id), parent=blog_key())
        post = db.get(key)
        return post

    def get(self, post_id):
        # Liking is POST-only; a plain GET just bounces to the post page.
        self.redirect("/blog/%s" % (str(post_id)))
        return

    def post(self, post_id):
        """Record a like unless the user is the author or already liked it.

        GQL values are passed as bound parameters (:1, :2) instead of being
        interpolated into the query string — logged_name is user-controlled.
        """
        if not self.user:
            msg = 'Login to Like Post'
            self.render('login-form.html', error=msg)
            return
        post = self.get_post_by_id(post_id)
        if not post:
            self.error(404)
            return
        logged_name = self.get_user_name()
        numoflikes = Like.gql("WHERE post_id = :1", str(post_id)).count()
        if logged_name == post.author:
            comments = Comment.gql("WHERE postid = :1", str(post_id))
            msg = "Oops, don't like your own post"
            self.render("permalink.html", post=post, comments=comments,
                        error=msg, numoflikes=numoflikes)
        else:
            like = Like.gql("WHERE post_id = :1 AND author = :2",
                            str(post_id), logged_name).count()
            if like == 0:
                new_like = Like(post_id=str(post_id), author=str(logged_name))
                new_like.put()
                numoflikes = Like.gql("WHERE post_id = :1",
                                      str(post_id)).count()
                msg = "You liked the post"
            else:
                msg = "You couldn't like a post twice"
            comments = Comment.gql("WHERE postid = :1", str(post_id))
            self.render("permalink.html", post=post,
                        comments=comments, error=msg, numoflikes=numoflikes)
        return
|
# Simple program that returns a list of the 10 most common words in Shakespeare's Romeo and Juliet.
# Modernized to Python 3: the old code used the Python-2-only `print`
# statement and two-argument str.translate().
import string
from collections import Counter

# One C-level pass that deletes every punctuation character.
_PUNCT_TABLE = str.maketrans('', '', string.punctuation)

counts = Counter()
with open('romeo.txt') as shakes:  # with-block closes the file even on error
    for line in shakes:
        counts.update(line.translate(_PUNCT_TABLE).lower().split())

# Sorting (count, word) tuples in reverse reproduces the original order,
# including its tie-break on the word itself; output stays "count word".
ranked = sorted(((val, word) for word, val in counts.items()), reverse=True)
for val, word in ranked[:10]:
    print(val, word)
|
from flask import Flask, render_template, request,redirect, url_for,flash
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, UserMixin,login_user,current_user,login_required,logout_user
from werkzeug.security import generate_password_hash, check_password_hash
from flask_migrate import Migrate
import os
from dotenv import load_dotenv
load_dotenv()

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL')
# SECURITY: read the session-signing key from the environment; the old
# hard-coded "abc" remains only as a fallback so existing setups keep working.
app.secret_key = os.environ.get('SECRET_KEY', 'abc')
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0  # disable static-file caching
db = SQLAlchemy(app)
migrate = Migrate(app, db)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = "signin"  # where @login_required redirects to
class User(UserMixin,db.Model):
    """A registered account; doubles as the flask-login principal (UserMixin)."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key = True)
    firstname = db.Column(db.String(255), nullable=False)
    lastname = db.Column(db.String(255), nullable=False)
    email = db.Column(db.String(225), nullable= False, unique= True)
    # Holds only the salted hash written by set_password(), never plain text.
    password = db.Column(db.String(225), nullable= False)
    img_url = db.Column(db.Text)
    post = db.relationship('Post',backref="user",lazy=True)
    # Many-to-many through the "likes" association table; Post gains the
    # reverse accessor Post.my_likes.
    like_post = db.relationship('Post', secondary="likes", backref="my_likes", lazy=True)
    def set_password(self, password):
        """Store a salted hash of *password*."""
        self.password = generate_password_hash(password)
    def check_password(self,password):
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.password,password)
db.create_all()  # create tables for the models defined so far
class Post(db.Model):
    """A blog post belonging to a user."""
    id = db.Column(db.Integer, primary_key = True)
    author=db.Column(db.String(255), nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    body = db.Column(db.Text, nullable = False)
    created = db.Column(db.DateTime, server_default=db.func.now())
    # BUG FIX: a trailing comma here used to turn `updated` into a 1-tuple
    # (db.Column(...),), so the column was never mapped.
    updated = db.Column(db.DateTime, server_default=db.func.now(), server_onupdate=db.func.now())
    img_url = db.Column(db.Text)
    view_count = db.Column(db.Integer, default = 0)

# Association table backing the many-to-many likes relationship
# (User.like_post <-> Post.my_likes).
likes = db.Table('likes',
    db.Column('user_id', db.Integer, db.ForeignKey('users.id'), primary_key=True),
    db.Column('post_id', db.Integer, db.ForeignKey('post.id'), primary_key=True))
db.create_all()
class Comment(db.Model):
    """A comment on a post."""
    id = db.Column(db.Integer, primary_key = True)
    body = db.Column(db.String, nullable = False)
    author_comment= db.Column(db.String, nullable= False)
    # NOTE(review): user_id/post_id are plain integers, not ForeignKey
    # columns, so the database enforces no referential integrity here.
    user_id = db.Column(db.Integer, nullable = False)
    post_id = db.Column(db.Integer, nullable=False)
    created = db.Column(db.DateTime, server_default=db.func.now())
    updated = db.Column(db.DateTime, server_default=db.func.now(), server_onupdate=db.func.now())
    img_url = db.Column(db.Text)
db.create_all()
@login_manager.user_loader
def load_user(user_id):
    """flask-login user loader.

    The session stores the id as a string, so cast to int before the
    primary-key lookup.
    """
    return User.query.get(int(user_id))
@app.route('/signup', methods=['GET','POST'])
def signup():
    """Render the sign-up form; on POST, create the account if the email is free."""
    if request.method == "POST":
        # Reject duplicate email addresses up front.
        existing = User.query.filter_by(email=request.form['email']).first()
        if existing:
            flash('Email address already exists.')
            return redirect(url_for("signup"))
        account = User(email=request.form['email'],
                       firstname=request.form['firstname'],
                       lastname=request.form['lastname'],
                       img_url=request.form['imgUrl'])
        account.set_password(request.form['password'])  # store hash, not plain text
        db.session.add(account)
        db.session.commit()
        flash("You have signed up successfully", 'success')
        return redirect(url_for("signup"))
    return render_template("view/signup.html")
@app.route('/', methods=['GET','POST'])
def signin():
    """Log a user in; authenticated visitors go straight to the blog."""
    if current_user.is_authenticated:
        return redirect(url_for('main_page'))
    if request.method == "POST":
        user = User.query.filter_by(email=request.form['email']).first()
        # Single failure path for both "unknown email" and "bad password":
        # the old code duplicated the flash/redirect pair in two branches.
        if user and user.check_password(request.form['password']):
            login_user(user)
            flash('welcome {0}'.format(user.email), 'success')
            return redirect(url_for("main_page"))
        flash('Incorrect Email or Password','warning')
        return redirect(url_for("signin"))
    return render_template("view/signin.html")
@app.route('/blog', methods=['GET','POST'])
@login_required
def main_page():
    """List every post; on POST, create a new post for the current user."""
    # (The explicit is_authenticated check was removed: @login_required
    # already redirects anonymous users to the sign-in view, so that
    # branch was unreachable.)
    if request.method == "POST":
        new_post = Post(body = request.form['body'],author=current_user.email.split("@")[0],
                        img_url = current_user.img_url)
        current_user.post.append(new_post)
        db.session.add(new_post)
        db.session.commit()
        return redirect(url_for("main_page"))
    posts = Post.query.all()
    for post in posts:
        # NOTE(review): despite the plural name, only the FIRST comment is
        # attached — confirm whether the template expects one or many.
        post.comments = Comment.query.filter_by(post_id= post.id).first()
    return render_template('view/blog.html', posts = posts)
#delete an entry
@app.route('/blog/<id>', methods = ['GET','POST'])
@login_required
def delete_blog(id):
    """Delete a post.  Only the post's owner may delete it."""
    if request.method == "POST":
        post = Post.query.filter_by(id = id).first()
        if not post:
            return "there is no such post"
        # SECURITY FIX: previously any visitor could delete any post —
        # require login (decorator) and ownership.
        if post.user_id != current_user.id:
            return "Not allowed"
        db.session.delete(post)
        db.session.commit()
        return redirect(url_for("main_page"))
    return "Not allowed"
@app.route('/post/<id>', methods=['GET','POST'])
@login_required
def view_post(id):
    """Show one post, bump its view counter, and accept a new comment on POST."""
    # get_or_404: unknown ids now yield a 404 instead of an AttributeError
    # (500) on the next line.
    post = Post.query.get_or_404(id)
    post.view_count +=1
    db.session.commit()
    if request.method == "POST":
        new_comment = Comment(body=request.form['body'],user_id=current_user.id,post_id=id, author_comment=current_user.email.split("@")[0], img_url=current_user.img_url)
        db.session.add(new_comment)
        db.session.commit()
        return redirect(url_for("view_post", id=id))
    comments = Comment.query.filter_by(post_id=id).all()
    for comment in comments:
        # NOTE(review): this overwrites the stored author name with the
        # commenter's full email on every page view (and issues one query
        # per comment) — confirm that is intended.
        comment.author_comment = User.query.filter_by(id = comment.user_id).first().email
    return render_template("view/post.html", post=post, comments = comments)
@app.route('/like/post/<int:id>', methods=['POST'])
@login_required
def like_post(id):
    """Toggle the current user's like on post *id*."""
    post = Post.query.get_or_404(id)
    # BUG FIX: the old check `if not post.my_likes` tested whether the post
    # had *any* likes at all, so a second user could never like an
    # already-liked post, and remove() could raise ValueError for users who
    # never liked it.  Test THIS user's likes instead.
    if post in current_user.like_post:
        current_user.like_post.remove(post)
    else:
        current_user.like_post.append(post)
    db.session.commit()
    return redirect(url_for("main_page"))
@app.route('/logout')
@login_required
def logout():
    """End the current session and return to the sign-in page."""
    logout_user()
    return redirect(url_for("signin"))
if __name__ == "__main__":
app.run(debug = True) |
# -*- coding: utf-8 -*-
# @Time : 2018/5/7 14:46
# @Site :
# @File : urls.py
# @Software: PyCharm
import requests
import json
import time
from django.http import HttpResponse
import datetime
from django.views import View
from dbinfo.encrypt_decode import encrypt_and_decode
from dbinfo.views import getpermsessage
class ZabbixApi(object):
    """
    Minimal JSON-RPC client for the Zabbix API.
    """
    # Request timeout in seconds.
    # NOTE(review): the original comment said "5 seconds" but the value is 50.
    TIMEOUT = 50
    class FailedError(Exception):
        """
        Error describing a failed Zabbix API call.
        """
        ERROR_MESSAGE_TEMPLATE = '"{message}({code}): {data}"'
        def __init__(self,name,reason = None):
            """
            Constructor.
            :param name: name of the failed API method
            :param reason: error payload from the JSON-RPC response
            """
            message = "Failed to {0}.".format(name)
            if reason is not None:
                message = ''.join([message,self.ERROR_MESSAGE_TEMPLATE.format(**reason)])
            super(ZabbixApi.FailedError,self).__init__(message)
    class AuthenticationFailedError(FailedError):
        """
        Error describing a failed Zabbix authentication.
        """
        def __init__(self,reason = None):
            """
            Constructor.
            :param reason: error payload from the JSON-RPC response
            """
            super(ZabbixApi.AuthenticationFailedError,self).__init__('authenticate',reason)
    def __init__(self,encode = 'utf-8',zabbixurl=None, zabbixuser=None, zabbixpassword=None):
        """
        Constructor.  Connection settings default to the stored permanent
        session returned by getpermsessage(); explicit keyword arguments
        override them.
        :param encode: unused; kept for backward compatibility
        :param zabbixurl: optional API endpoint URL override
        :param zabbixuser: optional user name override
        :param zabbixpassword: optional plain-text password override
        """
        getpermsessages = getpermsessage()
        if getpermsessages:
            self.uri = getpermsessages.get('zabbixurl', '')
            self.zabbixuser = getpermsessages.get('zabbixuser', '')
            self.zabbixpassword = encrypt_and_decode().decrypted_text(getpermsessages.get('zabbixpassword', ''))# decrypt the stored password
        if zabbixurl:
            self.uri = zabbixurl
        if zabbixuser:
            self.zabbixuser = zabbixuser
        if zabbixpassword:
            self.zabbixpassword = zabbixpassword
    def call(self,method,params,AUTH=None):
        """
        Send one JSON-RPC request to the Zabbix API.
        :param method: Zabbix API method name
        :param params: Zabbix API method parameters
        :param AUTH: auth token from a prior user.login call, if any
        :return: the decoded response dict on success.
            NOTE(review): on failure this RETURNS an exception instance
            instead of raising it; callers depend on their own try/except
            to catch the resulting TypeErrors downstream.
        """
        if AUTH:
            body = json.dumps({
                'jsonrpc': '2.0',
                'method': method,
                'params': params,
                'auth': AUTH,
                'id': 2
            })
        else:
            # Unauthenticated request (e.g. user.login itself).
            body = json.dumps({
                'jsonrpc': '2.0',
                'method': method,
                'params': params,
                'id': 2
            })
        headers = {'Content-Type': 'application/json-rpc'}
        try:
            request = requests.post(self.uri,data=body,headers=headers,timeout=self.TIMEOUT)
            response_json = request.json()
            if 'result' in response_json:
                return response_json
            elif 'error' in response_json:
                return ZabbixApi.FailedError(name=method,reason=response_json['error'])
            else:
                return ZabbixApi.AuthenticationFailedError()
        except requests.exceptions.ConnectTimeout:
            return ZabbixApi.AuthenticationFailedError({'code': -1, 'message': 'Connect Timeout.', 'data': 'URI is incorrect.'})
    def authenticate(self):
        """
        Log in and cache the session token on self.session_id.
        :return: the auth token string
        :raises ZabbixApi.AuthenticationFailedError: when login fails
        """
        response = self.call('user.login', {'user': self.zabbixuser, 'password': self.zabbixpassword})
        if 'result' in response:
            self.session_id = response['result']
            return response['result']
        elif 'error' in response:
            raise ZabbixApi.AuthenticationFailedError(response['error'])
        else:
            raise ZabbixApi.AuthenticationFailedError()
def get_hosts_template(template_name):
    '''
    Resolve a template name to its templateid, then return the names of
    the active, available hosts linked to that template.
    :param template_name: Zabbix template name
    :return: list of host names; empty list on any failure
    '''
    # Bound before the try so the except handler can always return it
    # (the old code defined it inside the try).
    hosts = []
    try:
        zapi = ZabbixApi()
        token = zapi.authenticate()
        template_get = zapi.call('template.get', {
            "output": 'output',
            "filter": {
                "host": template_name}}, token)
        template_get_result = template_get['result']
        if template_get_result:
            templateid = template_get_result[0]['templateid']
            # Active (status 0) and available (1) hosts on that template.
            host_get = zapi.call('host.get', {'output':['host'],'templateids':templateid,'filter': {"status": "0",'available': '1'}}, token)
            host_get_result = host_get['result']
            if host_get_result:
                for host in host_get_result:
                    hosts.append(host.get('host'))
        # The three original return branches all returned `hosts`.
        return hosts
    except Exception:
        return hosts
def get_hostid(ip):
    '''
    Look up the Zabbix hostid for the host registered as *ip*.
    :param ip: host name/IP as registered in Zabbix
    :return: the hostid of the first match, or False on any failure
    '''
    try:
        api = ZabbixApi()
        auth_token = api.authenticate()
        reply = api.call('host.get', {"filter": {"host": ip}}, auth_token)
        return reply['result'][0]['hostid']
    except Exception:
        return False
def get_item(hostid,item_name):
    '''
    Find healthy, enabled items on *hostid* whose name matches *item_name*.
    :param hostid: Zabbix host id
    :param item_name: item name to search for
    :return: the raw item.get response dict, or False on failure
    '''
    method = 'item.get'
    # BUG FIX: the key was misspelled "optput", so the output restriction
    # was silently ignored by the API.
    params = {"output": ['name', 'itemid', 'value_type'],
              "hostids": hostid,
              "search": {'name': item_name},
              "filter": {'status': "0", "error": "", "state": 0}}
    try:
        # BUG FIX: `zapi = ZabbixApi` (no parentheses) bound the class, not
        # an instance, so authenticate() failed and this always returned False.
        zapi = ZabbixApi()
        token = zapi.authenticate()
        result = zapi.call(method=method, params=params, AUTH=token)
        return result
    except Exception as e:
        print(e)
        return False
def get_item_trend(host,item_name,time_from=None,time_till=None):
    '''
    Fetch trend (aggregated) data for the items matching *item_name* on *host*.
    :param host: host name as registered in Zabbix
    :param item_name: item name to search for
    :param time_from: optional 'YYYY-mm-dd HH:MM:SS' start (default: 24h ago)
    :param time_till: optional 'YYYY-mm-dd HH:MM:SS' end (default: now)
    :return: flat list of trend-point dicts; empty list on any error
    '''
    try:
        zapi = ZabbixApi()
        token = zapi.authenticate()
        history = []
        get_hostid= zapi.call('host.get', {"filter": {"host": host}}, token)
        get_hostid_result = get_hostid.get('result','')
        if get_hostid_result:
            hostid = get_hostid_result[0].get("hostid", '')
            item_get = zapi.call('item.get', {"output": ['name', 'itemid', 'value_type'],
                                              'hostids': hostid,
                                              'search': {'name': item_name},
                                              'filter': {"status": "0", "error": "", "state": '0'}}, token)
            item_get_results = item_get.get('result','')
            # BUG FIX: the time-window normalization used to live INSIDE the
            # per-item loop, so the second item re-parsed an already-converted
            # float timestamp and crashed.  Hoisted out, matching the sibling
            # get_item_trends().
            if time_from:
                time_from = datetime.datetime.strptime(time_from, '%Y-%m-%d %H:%M:%S')  # string -> datetime
                time_from = time.mktime(time_from.timetuple())  # datetime -> unix timestamp
            else:
                time_from = time.time() - 24 * 60 * 60  # default: one day ago
            if time_till:
                time_till = datetime.datetime.strptime(time_till, '%Y-%m-%d %H:%M:%S')  # string -> datetime
                time_till = time.mktime(time_till.timetuple())  # datetime -> unix timestamp
            else:
                time_till = time.time()  # default: now
            for item_get_result in item_get_results:
                itemid = item_get_result['itemid']
                itemid_name = item_get_result['name']
                itemid_value_type = item_get_result['value_type']
                trend_get = zapi.call('trend.get', {"output": 'extend', 'itemids': itemid,
                                                    'time_from': time_from,
                                                    'time_till': time_till}, token)
                history_get_results = trend_get.get('result','')
                if history_get_results:
                    for history_get_result in history_get_results:
                        clock = history_get_result['clock']
                        value_avg = history_get_result['value_avg']
                        value_max = history_get_result['value_max']
                        value_min = history_get_result['value_min']
                        num = history_get_result['num']
                        # Format the epoch clock as a human-readable string.
                        clocktime = (datetime.datetime.fromtimestamp(int(clock))).strftime('%Y-%m-%d %H:%M:%S')
                        history.append({"num": num, "value_min": value_min, "value_max": value_max, "value": value_avg, "clocktime": clocktime, 'name':itemid_name, "value_type": itemid_value_type})
        return history
    except Exception as e:
        print(e)
        return []
class get_item_history_api(View):
    """GET endpoint returning one item's data as JSON.

    Spans of a day or more use pre-aggregated trend data; shorter spans
    use raw history.  Any error yields an empty JSON list.
    """
    def get(self, requests):
        # def get_item_history_api(requests):
        try:
            ip = requests.GET.get('ip')
            item_name = requests.GET.get('item_name')
            time_from = requests.GET.get('time_from')
            time_till = requests.GET.get('time_till')
            # Validate timestamp format; ValueError falls into the except.
            datetime.datetime.strptime(time_till, "%Y-%m-%d %H:%M:%S")
            datetime.datetime.strptime(time_from, "%Y-%m-%d %H:%M:%S")
            if (datetime.datetime.strptime(time_till, "%Y-%m-%d %H:%M:%S") - datetime.datetime.strptime(time_from, "%Y-%m-%d %H:%M:%S")).days >= 1:
                a = get_item_trend(ip,item_name,time_from,time_till)
            else:
                a = get_item_history(ip,item_name,time_from,time_till)
            response = HttpResponse()
            response['Content-Type'] = "application/json"
            response.write(json.dumps(a, default=str).encode("UTF-8"))
            return response
        except Exception as e:
            # Best-effort endpoint: any failure returns an empty list.
            response = HttpResponse()
            response['Content-Type'] = "application/json"
            response.write(json.dumps([], default=str).encode("UTF-8"))
            return response
class get_group_item_api(View):
    """GET endpoint: host names linked to ?template_name=, as JSON."""
    def get(self, requests):
        # def get_group_item_api(requests):
        try:
            template_name = requests.GET.get('template_name')
            a = get_hosts_template(template_name)
            response = HttpResponse()
            response['Content-Type'] = "application/json"
            response.write(json.dumps(a, default=str).encode("UTF-8"))
            return response
        except Exception as e:
            # Best-effort endpoint: any failure returns an empty list.
            response = HttpResponse()
            response['Content-Type'] = "application/json"
            response.write(json.dumps([], default=str).encode("UTF-8"))
            return response
def get_item_history(ip,item_name,time_from=None,time_till=None):
    '''
    Fetch raw history data for the items matching *item_name* on host *ip*.
    :param ip: host name as registered in Zabbix
    :param item_name: item name to search for
    :param time_from: optional 'YYYY-mm-dd HH:MM:SS' start (default: 1h ago)
    :param time_till: optional 'YYYY-mm-dd HH:MM:SS' end (default: now)
    :return: flat list of history-point dicts; empty list on any error
    '''
    try:
        zapi = ZabbixApi()
        token = zapi.authenticate()
        history = []
        get_hostid= zapi.call('host.get', {"filter": {"host": ip}}, token)
        get_hostid_result = get_hostid.get('result','')
        if get_hostid_result:
            hostid = get_hostid_result[0].get("hostid", '')
            item_get = zapi.call('item.get', {"output": ['name', 'itemid', 'value_type'],
                                              'hostids': hostid,
                                              'search': {'name': item_name},
                                              'filter': {"status": "0", "error": "", "state": '0'}}, token)
            item_get_results = item_get.get('result','')
            # BUG FIX: the time-window normalization used to live INSIDE the
            # per-item loop, so the second item re-parsed an already-converted
            # float timestamp and crashed.  Hoisted out, matching the sibling
            # get_item_historys().
            if time_from:
                time_from = datetime.datetime.strptime(time_from, '%Y-%m-%d %H:%M:%S')  # string -> datetime
                time_from = time.mktime(time_from.timetuple())  # datetime -> unix timestamp
            else:
                time_from = time.time() - 1 * 60 * 60  # default: one hour ago
            if time_till:
                time_till = datetime.datetime.strptime(time_till, '%Y-%m-%d %H:%M:%S')  # string -> datetime
                time_till = time.mktime(time_till.timetuple())  # datetime -> unix timestamp
            else:
                time_till = time.time()  # default: now
            for item_get_result in item_get_results:
                itemid = item_get_result['itemid']
                itemid_name = item_get_result['name']
                itemid_value_type = item_get_result['value_type']
                history_get = zapi.call('history.get', {"output": 'extend', 'itemids': itemid,
                                                        'history': itemid_value_type,
                                                        'time_from': time_from,
                                                        'time_till': time_till}, token)
                history_get_results = history_get.get('result','')
                if history_get_results:
                    for history_get_result in history_get_results:
                        clock = history_get_result['clock']
                        value = history_get_result['value']
                        ns = history_get_result['ns']
                        # Format the epoch clock as a human-readable string.
                        clocktime = (datetime.datetime.fromtimestamp(int(clock))).strftime('%Y-%m-%d %H:%M:%S')
                        history.append({"value": value, "clocktime": clocktime, 'name':itemid_name, "value_type": itemid_value_type})
        return history
    except Exception as e:
        print(e)
        return []
# Fetch trend data for multiple monitored items.
def get_item_trends(host,item_name,time_from=None,time_till=None):
    '''
    Fetch trend (aggregated) data for EVERY item matching *item_name* on
    *host*, one sub-list per item.
    :param host: host name as registered in Zabbix
    :param item_name: item name to search for
    :param time_from: optional 'YYYY-mm-dd HH:MM:SS' start (default: 24h ago)
    :param time_till: optional 'YYYY-mm-dd HH:MM:SS' end (default: now)
    :return: list of per-item lists of trend-point dicts; whatever was
        collected so far on error
    '''
    try:
        history = []
        zapi = ZabbixApi()
        token = zapi.authenticate()
        get_hostid= zapi.call('host.get', {"filter": {"host": host}}, token)
        get_hostid_result = get_hostid.get('result','')
        if get_hostid_result:
            hostid = get_hostid_result[0].get("hostid", '')
            item_get = zapi.call('item.get', {"output": ['name', 'itemid', 'value_type'],
                                              'hostids': hostid,
                                              'search': {'name': item_name},
                                              'filter': {"status": "0", "error": "", "state": '0'}}, token)
            item_get_results = item_get.get('result','')
            # Normalize the time window once, before the per-item loop.
            if time_from:
                time_from = datetime.datetime.strptime(time_from, '%Y-%m-%d %H:%M:%S') # string -> datetime
                time_from = time.mktime(time_from.timetuple()) # datetime -> unix timestamp
            else:
                time_from = time.time() - 24 * 60 * 60 # default: one day ago
            if time_till:
                time_till = datetime.datetime.strptime(time_till, '%Y-%m-%d %H:%M:%S') # string -> datetime
                time_till = time.mktime(time_till.timetuple()) # datetime -> unix timestamp
            else:
                time_till = time.time() # default: now
            for item_get_result in item_get_results:
                item_get_trend = []
                itemid = item_get_result['itemid']
                itemid_name = item_get_result['name']
                itemid_value_type = item_get_result['value_type']
                trend_get = zapi.call('trend.get', {"output": 'extend', 'itemids': itemid,
                                                    'time_from': time_from,
                                                    'time_till': time_till}, token)
                history_get_results = trend_get.get('result','')
                if history_get_results:
                    for history_get_result in history_get_results:
                        clock = history_get_result['clock']
                        value_avg = history_get_result['value_avg']
                        value_max = history_get_result['value_max']
                        value_min = history_get_result['value_min']
                        num = history_get_result['num']
                        # Format the epoch clock as a human-readable string.
                        clocktime = (datetime.datetime.fromtimestamp(int(clock))).strftime('%Y-%m-%d %H:%M:%S')
                        item_get_trend.append({"num": num, "value_min": value_min, "value_max": value_max, "value": value_avg, "clocktime": clocktime, 'name':itemid_name, "value_type": itemid_value_type})
                history.append(item_get_trend)
        return history
    except Exception as e:
        print(e)
        return history
# Fetch raw history data for multiple monitored items.
def get_item_historys(ip,item_name,time_from=None,time_till=None):
    '''
    Fetch raw history data for EVERY item matching *item_name* on host
    *ip*, one sub-list per item.
    :param ip: host name as registered in Zabbix
    :param item_name: item name to search for
    :param time_from: optional 'YYYY-mm-dd HH:MM:SS' start (default: 1h ago)
    :param time_till: optional 'YYYY-mm-dd HH:MM:SS' end (default: now)
    :return: list of per-item lists of history-point dicts; whatever was
        collected so far on error
    '''
    try:
        history = []
        zapi = ZabbixApi()
        token = zapi.authenticate()
        get_hostid= zapi.call('host.get', {"filter": {"host": ip}}, token)
        get_hostid_result = get_hostid.get('result','')
        if get_hostid_result:
            hostid = get_hostid_result[0].get("hostid", '')
            item_get = zapi.call('item.get', {"output": ['name', 'itemid', 'value_type'],
                                              'hostids': hostid,
                                              'search': {'name': item_name},
                                              'filter': {"status": "0", "error": "", "state": '0'}}, token)
            item_get_results = item_get.get('result','')
            # Normalize the time window once, before the per-item loop.
            if time_from:
                time_from = datetime.datetime.strptime(time_from, '%Y-%m-%d %H:%M:%S') # string -> datetime
                time_from = time.mktime(time_from.timetuple()) # datetime -> unix timestamp
            else:
                time_from = time.time() - 1 * 60 * 60 # default: one hour ago
            if time_till:
                time_till = datetime.datetime.strptime(time_till, '%Y-%m-%d %H:%M:%S') # string -> datetime
                time_till = time.mktime(time_till.timetuple()) # datetime -> unix timestamp
            else:
                time_till = time.time() # default: now
            for item_get_result in item_get_results:
                item_get_history = []
                itemid = item_get_result['itemid']
                itemid_name = item_get_result['name']
                itemid_value_type = item_get_result['value_type']
                history_get = zapi.call('history.get', {"output": 'extend', 'itemids': itemid,
                                                        'history': itemid_value_type,
                                                        'time_from': time_from,
                                                        'time_till': time_till}, token)
                history_get_results = history_get.get('result','')
                if history_get_results:
                    for history_get_result in history_get_results:
                        clock = history_get_result['clock']
                        value = history_get_result['value']
                        ns = history_get_result['ns']
                        # Format the epoch clock as a human-readable string.
                        clocktime = (datetime.datetime.fromtimestamp(int(clock))).strftime('%Y-%m-%d %H:%M:%S')
                        item_get_history.append({"value": value, "clocktime": clocktime, 'name':itemid_name, "value_type": itemid_value_type})
                history.append(item_get_history)
        return history
    except Exception as e:
        print(e)
        return history
class get_items_history_api(View):
    """GET endpoint returning item history (or trends) for a host as JSON.

    Query params: ip, item_name, time_from, time_till
    (timestamps formatted "%Y-%m-%d %H:%M:%S").
    Spans of one day or more are served from trends; shorter spans from
    raw history. Any parsing or lookup failure yields an empty JSON list.
    """
    def get(self, requests):
        try:
            ip = requests.GET.get('ip')
            item_name = requests.GET.get('item_name')
            time_from = requests.GET.get('time_from')
            time_till = requests.GET.get('time_till')
            # Parse each timestamp once (the original parsed them three
            # times). Raises (caught below) when missing or malformed.
            start = datetime.datetime.strptime(time_from, "%Y-%m-%d %H:%M:%S")
            end = datetime.datetime.strptime(time_till, "%Y-%m-%d %H:%M:%S")
            if (end - start).days >= 1:
                data = get_item_trends(ip, item_name, time_from, time_till)
            else:
                data = get_item_historys(ip, item_name, time_from, time_till)
            response = HttpResponse()
            response['Content-Type'] = "application/json"
            response.write(json.dumps(data, default=str).encode("UTF-8"))
            return response
        except Exception:
            # Bad/missing parameters or backend failure: respond with an
            # empty list rather than a 500, matching the original contract.
            response = HttpResponse()
            response['Content-Type'] = "application/json"
            response.write(json.dumps([], default=str).encode("UTF-8"))
            return response
#!/usr/bin/python
# -*- coding: utf-8 -*-
import hsrb_interface
import rospy
import sys
import math
import tf
import tf2_ros
import tf2_geometry_msgs
import IPython
from hsrb_interface import geometry
from geometry_msgs.msg import PoseStamped, Point, WrenchStamped
from tmc_suction.msg import (
SuctionControlAction,
SuctionControlGoal
)
import actionlib
import time
import cv2
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
from sensor_msgs.msg import Image, CameraInfo, JointState
from image_geometry import PinholeCameraModel as PCM
from il_ros_hsr.p_pi.bed_making.com import Bed_COM as COM
from il_ros_hsr.p_pi.bed_making.tensioner import Tensioner
from il_ros_hsr.core.sensors import Gripper_Torque
import il_ros_hsr.p_pi.bed_making.config_bed as cfg
import thread
from il_ros_hsr.core.sensors import RGBD, Gripper_Torque, Joint_Positions
from il_ros_hsr.core.rgbd_to_map import RGBD2Map
from table_top import TableTop
from fast_grasp_detect.data_aug.draw_cross_hair import DrawPrediction
import numpy.linalg as LA
import numpy as np
from il_ros_hsr.core.Xbox import XboxController
# Max time to wait for the suction action server result (rospy.Duration, seconds).
__SUCTION_TIMEOUT__ = rospy.Duration(20.0)
# Action-server connection timeout in seconds (not used in this file's visible code).
_CONNECTION_TIMEOUT = 10.0
class InitialSampler(object):
    """Samples randomized bed-corner targets on a table and projects them
    into the RGBD camera image for visual debugging (HSR robot, Python 2/ROS).
    """
    def __init__(self,cam):
        #topic_name = '/hsrb/head_rgbd_sensor/depth_registered/image_raw'
        # Block until valid camera intrinsics arrive from the RGBD sensor.
        not_read = True
        while not_read:
            try:
                cam_info = cam.read_info_data()
                if(not cam_info == None):
                    not_read = False
            except:
                rospy.logerr('info not recieved')
        # Pinhole camera model built from the received intrinsics.
        self.pcm = PCM()
        self.cam = cam
        self.pcm.fromCameraInfo(cam_info)
        self.br = tf.TransformBroadcaster()
        self.tl = tf.TransformListener()
        # Xbox controller: used to gate sampling on a d-pad press.
        self.xbox = XboxController()
    def debug_images(self,p_1,p_2):
        """Draw the line p_1 -> p_2 (pixel coords) on the current color image
        and display it briefly."""
        c_img = self.cam.read_color_data()
        p_1i = (int(p_1[0]),int(p_1[1]))
        p_2i = (int(p_2[0]),int(p_2[1]))
        cv2.line(c_img,p_1i,p_2i,(0,0,255),thickness = 10)
        cv2.imshow('debug',c_img)
        cv2.waitKey(300)
        #IPython.embed()
    def project_to_rgbd(self,trans):
        """Map a translation given in the map frame into RGBD camera
        coordinates; returns the 3-vector translation."""
        M_t = tf.transformations.translation_matrix(trans)
        M_R = self.get_map_to_rgbd()
        # Invert map->camera to express the point in the camera frame.
        M_cam_trans = np.matmul(LA.inv(M_R),M_t)
        return M_cam_trans[0:3,3]
    def make_projection(self,t_1,t_2):
        """Project two map-frame translations into the image and draw the
        segment between them for debugging."""
        ###GO FROM MAP TO RGBD###
        t_1 = self.project_to_rgbd(t_1)
        t_2 = self.project_to_rgbd(t_2)
        p_1 = self.pcm.project3dToPixel(t_1)
        p_2 = self.pcm.project3dToPixel(t_2)
        self.debug_images(p_1,p_2)
    def debug_broadcast(self,pose,name):
        """Continuously broadcast `pose` as TF frame `name` (never returns)."""
        while True:
            self.br.sendTransform((pose[0], pose[1], pose[2]),
                    tf.transformations.quaternion_from_euler(ai=0.0,aj=0.0,ak=0.0),
                    rospy.Time.now(),
                    name,
                    #'head_rgbd_sensor_link')
                    'rgbd_sensor_rgb_frame_map')
    def get_map_to_rgbd(self):
        """Block until the map -> rgbd_sensor_rgb_frame_map transform is
        available; return it as a 4x4 homogeneous matrix."""
        not_found = True
        while not_found:
            try:
                pose = self.tl.lookupTransform('map','rgbd_sensor_rgb_frame_map', rospy.Time(0))
                not_found = False
            except:
                rospy.logerr("waiting for pose")
        # Combine rotation (quaternion) and translation into one matrix.
        M = tf.transformations.quaternion_matrix(pose[1])
        M_t = tf.transformations.translation_matrix(pose[0])
        M[:,3] = M_t[:,3]
        return M
    def get_postion(self,name):
        """Block until map -> `name` is available; return only its translation.
        (Spelling of the method name kept as-is: callers use get_postion.)"""
        not_found = True
        while not_found:
            try:
                pose = self.tl.lookupTransform('map',name, rospy.Time(0))
                not_found = False
            except:
                rospy.logerr("waiting for pose")
        # M is computed but unused here; only the translation is returned.
        M = tf.transformations.quaternion_matrix(pose[1])
        M_t = tf.transformations.translation_matrix(pose[0])
        M[:,3] = M_t[:,3]
        return pose[0]
    def look_up_transform(self,count):
        """Find the broadcast frame named 'bed_i_<count>' and return its
        camera-frame 4x4 transform.
        NOTE(review): if no matching frame exists, M is unbound and this
        raises NameError at `return M` — confirm callers guarantee the frame.
        """
        transforms = self.tl.getFrameStrings()
        for transform in transforms:
            current_grasp = 'bed_i_'+str(count)
            if current_grasp in transform:
                print 'got here'
                pose = self.tl.lookupTransform('rgbd_sensor_rgb_frame_map',transform, rospy.Time(0))
                M = tf.transformations.quaternion_matrix(pose[1])
                M_t = tf.transformations.translation_matrix(pose[0])
                M[:,3] = M_t[:,3]
        return M
    def sample_corners(self):
        """Sample a random near ("down") and far ("up") corner point on the
        table, in the map frame, around the table's center."""
        head_up = self.get_postion("head_up")
        head_down = self.get_postion("head_down")
        bottom_up = self.get_postion("bottom_up")
        bottom_down = self.get_postion("bottom_down")
        # Get true midpoints of table edges
        # (i.e. don't assume bottom_up and head_up have the same y value
        # in case sensor image is tilted)
        middle_up = np.array([(bottom_up[0] + head_up[0])/2,
                              (bottom_up[1] + head_up[1])/2,
                              bottom_down[2]])
        middle_down = np.array([(bottom_down[0] + head_down[0])/2,
                                (bottom_down[1] + head_down[1])/2,
                                bottom_down[2]] )
        bottom_middle = np.array([(bottom_down[0] + bottom_up[0])/2,
                                  (bottom_down[1] + bottom_up[1])/2,
                                  bottom_down[2]] )
        head_middle = np.array([(head_down[0] + head_up[0])/2,
                                (head_down[1] + head_up[1])/2,
                                bottom_down[2]])
        center = np.array([(bottom_middle[0] + head_middle[0])/2,
                           (middle_down[1] + middle_up[1])/2,
                           bottom_down[2]])
        # Generate random point in sensor frame for the closer corner
        # (comment says gaussian, but np.random.uniform is a uniform draw)
        u_down = np.random.uniform(low=0.0,high=0.5)
        v_down = np.random.uniform(low=0.3,high=1.0)
        x_down = center[0] + u_down*LA.norm(center - bottom_middle)
        y_down = center[1] + v_down*LA.norm(center - middle_down)
        down_corner = (x_down, y_down, center[2])
        # Generate random point in sensor frame for the further corner
        # (mirrored offsets: subtracted instead of added)
        u_up = np.random.uniform(low=0.0,high=0.5)
        v_up = np.random.uniform(low=0.3,high=1.0)
        x_up = center[0] - u_up*LA.norm(center - bottom_middle)
        y_up = center[1] - v_up*LA.norm(center - middle_up)
        up_corner = (x_up, y_up, center[2])
        print("Here's the initial state sampled:")
        print " CENTER ", center
        print " UP CORNER ", up_corner
        print " DOWN CORNER ", down_corner
        # Daniel: this is causing some pjust to see what happens here.
        #if center[1] < 0.0 or center[2] < 0.0:
        #    raise "ROBOT TRANSFROM INCORRECT"
        if center[1] < 0.0 or center[2] < 0.0:
            print("Warning: initial state `center` is not right; ignoring for now ...")
        print("")
        return down_corner, up_corner
    def sample_initial_state(self):
        """Sample a corner pair, then display the projection until the user
        presses down on the controller d-pad; return the sampled corners."""
        down_corner, up_corner = self.sample_corners()
        button = 1.0
        # Keep redrawing the debug projection until the d-pad y-axis
        # goes below -0.1 (a press), which confirms the sample.
        while button > -0.1:
            control_state = self.xbox.getControllerState()
            d_pad = control_state['d_pad']
            button = d_pad[1]
            self.make_projection(down_corner,up_corner)
        return down_corner, up_corner
if __name__=='__main__':
    # Bring up the HSR robot interface and required resources.
    robot = hsrb_interface.Robot()
    whole_body = robot.get('whole_body')
    omni_base = robot.get('omni_base')
    com = COM()
    rgbd_map = RGBD2Map()
    cam = RGBD()
    # Move the robot into its canonical starting pose, then locate the table.
    com.go_to_initial_state(whole_body)
    tt = TableTop()
    tt.find_table(robot)
    # tt.move_to_pose(omni_base,'lower_start')
    # whole_body.move_to_joint_positions({'head_tilt_joint':-0.8})
    # Give TF / sensor streams time to settle before sampling.
    time.sleep(5)
    IS = InitialSampler(cam)
    # Endless sampling loop; terminate with Ctrl-C.
    while True:
        IS.sample_initial_state()
|
from django import forms
from .models import Testmony
class TestmonyForm(forms.ModelForm):
    """ModelForm for creating/editing a Testmony entry."""
    class Meta:
        # Expose only the user-facing fields of the Testmony model.
        model = Testmony
        fields = ("first_name", "last_name", "image1", "product", "message")
from PyObjCTools.TestSupport import TestCase, min_os_level
import objc
import GameController
class TestGCGamePad(TestCase):
    """Verify PyObjC metadata bindings for the GameController GCGamepad class."""
    @min_os_level("10.9")
    def testClasses(self):
        # GCGamepad must be exposed as a proper Objective-C class.
        self.assertIsInstance(GameController.GCGamepad, objc.objc_class)
    @min_os_level("10.9")
    def testMethods(self):
        # The valueChangedHandler property returns a block of signature v@@.
        self.assertResultIsBlock(GameController.GCGamepad.valueChangedHandler, b"v@@")
        # setValueChangedHandler_ takes a block (argument 0) with the same signature.
        self.assertArgIsBlock(
            GameController.GCGamepad.setValueChangedHandler_, 0, b"v@@"
        )
|
# Cash-register helper: read a price and a payment (both floats) and
# report either the outstanding amount ("Falta") or the change due ("Troco").
preco = float(input(" Digite o preco "))
pagamento = float(input(" Digite o pagamento "))
if pagamento >= preco:
    # Payment covers the price: give change (rounded to cents).
    troco = round(pagamento-preco,2)
    print(" Troco de", troco)
else:
    # Payment falls short: report the missing amount.
    falta = round(preco-pagamento,2)
    print(" Falta ", falta)
|
import datetime as _datetime
from time_mock import time_mocked, setup
class datetime_mocked(_datetime.datetime):
    """datetime subclass whose `now()` is driven by the mocked clock from time_mock."""
    @classmethod
    def now(cls, tz=None):
        # Use the mocked epoch time instead of the real wall clock.
        t = time_mocked()
        return cls.fromtimestamp(t, tz)
# Monkey-patch: code that resolves datetime.datetime through this module's
# `_datetime` reference now gets the mocked class.
_datetime.datetime = datetime_mocked
|
#Authors: Prince Rios and Alex Espinoza
#Last modified: 10 March 2020
#Description: This code retrieves data for a specific user from the Fortnite Tracker Network API. First, the code imports requests to fetch the
#data from the tracker network. Next, a variable is created that holds the URL of a specific profile. After this, a headers dictionary is
#created with one key whose value is the API key. Finally, the code calls requests.get with the URL and the headers dictionary to retrieve the
#data, and the 'lifeTimeStats' field of the parsed JSON response (via .json()) is printed.
import requests
# Prompt for the PSN username whose stats we want to look up.
user = input("Enter a psn user\n")
URL = f"https://api.fortnitetracker.com/v1/profile/psn/{user}"
# SECURITY NOTE(review): the API key is hard-coded in source; move it to an
# environment variable or config file and rotate this key.
headers = {'TRN-Api-Key' : 'e1331dce-bf2b-4e07-a00f-728ff529edc0'}
res = requests.get(URL, headers=headers)
# Print only the lifetime-stats section of the JSON payload
# (raises KeyError if the API returns an error document instead).
print(res.json()['lifeTimeStats'])
# Demonstrate set intersection: words common to both vocabularies.
english = {'door', 'car', 'lunar', 'era'}
spanish = {'era', 'lunar', 'hola'}
print('english: ', english)
print('spanish: ', spanish)
# `&` is the operator form of set.intersection().
both = english & spanish
print(both)
|
from Engine.UrEngine import *
def changePlayer(currentPlayer):
    """Return the opponent of `currentPlayer`.

    Player 1's opponent is 2; any other value yields 1 (mirrors the
    original if/else behaviour).
    """
    return 2 if currentPlayer == 1 else 1
def parseAction(actionInput):
    """Parse the raw user command into [action_name, argument].

    Returns ["AddPawn", ""], ["Move", <square>], or ["KO", ""] for anything
    unrecognised — including a bare "Move" with no square argument, which
    previously raised IndexError and crashed the game loop.
    """
    parsedAction = str.split(actionInput," ")
    if(parsedAction[0] == "AddPawn"):
        return ["AddPawn",""]
    # BUG FIX: guard the argument lookup; "Move" alone used to crash.
    elif(parsedAction[0] == "Move" and len(parsedAction) > 1):
        return ["Move",parsedAction[1]]
    else:
        return ["KO",""]
# create the main game
game = UrEngine()
dice = Dice()
print("Welcome to the Royal Game of Ur")
# True while the game is running.
gameIsOn = True
currentPlayer = 1
# main loop: roll, ask for an action, apply it, and pass the turn on success
while(gameIsOn):
    game.printGame()
    print("Your turn player %s"% (currentPlayer))
    rollresult = dice.roll()
    print("Roll of dice: %s" % (rollresult))
    actionWasComplete = False
    if(rollresult == 0):
        print("The roll result was 0, next player")
        # BUG FIX: the turn used to stay with the same player despite the
        # "next player" message; a 0 roll now ends the turn.
        actionWasComplete = True
    else:
        actionInput = input("Actions: - AddPawn \b -Move [board square] $> ")
        action = parseAction(actionInput)
        actionResult = ""
        if(action[0] == "AddPawn"):
            print(action)
            actionResult = game.playerAddPawn(currentPlayer,rollresult)
            # BUG FIX: this branch tested `moveResult`, which is unbound the
            # first time AddPawn runs (NameError); test the AddPawn result.
            # (Assumes playerAddPawn reports "MoveOK" on success, like
            # movePlayerPawn — confirm against the engine.)
            if (actionResult == "MoveOK"):
                actionWasComplete = True
        elif(action[0] == "Move"):
            print(action)
            # BUG FIX: the result is now stored in actionResult so the
            # "invalid command" message below no longer fires after a
            # successful move.
            actionResult = game.movePlayerPawn(rollresult,action[1],currentPlayer)
            if(actionResult == "MoveOK"):
                actionWasComplete = True
        else:
            print("action invalid")
        print("")
        if(actionResult == ""):
            print("invalid command please retry...")
    if(actionWasComplete):
        currentPlayer = changePlayer(currentPlayer)
"""
Main Application.
"""
from __future__ import absolute_import
from flask import jsonify, g, session, Flask, request, render_template
from werkzeug.utils import import_string
from unveiled.config import DEBUG, LOG_FORMAT
import logging
import unveiled.err_handlers as err_handlers
from unveiled.scheduler import init_scheduled_jobs
import os
# Logging verbosity follows the DEBUG flag from unveiled.config.
if DEBUG:
    loglevel = logging.DEBUG
else:
    loglevel = logging.INFO
# Blueprint module names under <package>.blueprints; each must expose `bp`.
api_blueprints = [
    'face_reco',
    'face_det'
]
def create_app():
    """ Create flask application.

    Builds the Flask app, registers the API blueprints, root and status
    routes, error handlers, and starts the scheduled jobs. Returns the
    configured app instance.
    """
    app = Flask(__name__)
    app.config.from_object('unveiled.config')
    # Register blueprints
    # Each name resolves to "<package>.blueprints.<name>:bp" via import_string.
    for bp_name in api_blueprints:
        bp = import_string('%s.blueprints.%s:bp' % (__package__, bp_name))
        print('Registering bp: %s' % bp_name)
        app.register_blueprint(bp)
    @app.route('/')
    def index():
        """ Root handler: landing page with GA tracking id from the env. """
        return render_template(
            'home.html',
            active_tab="tab1",
            ga_track_id=os.getenv('GA_TRACK_ID', ''),
        )
    @app.route('/api/1/status')
    def status():
        """ Status api handler: lightweight liveness probe. """
        return jsonify({
            "status": "ok"
        })
    # register error handlers
    app.register_error_handler(404, err_handlers.page_not_found)
    app.register_error_handler(403, err_handlers.page_forbidden)
    app.register_error_handler(500, err_handlers.internal_server_error)
    # init scheduler
    # NOTE(review): the scheduler starts as a side effect of app creation;
    # confirm this is intended when multiple workers each create the app.
    with app.app_context():
        init_scheduled_jobs()  # returns scheduler object
    return app
if __name__ == '__main__':
    """ Main entrypoint. """
    # Configure root logging before the app (and its scheduler) spin up.
    logging.basicConfig(level=loglevel,
                        format=LOG_FORMAT,
                        datefmt='%Y-%m-%d %H:%M:%S %z')
    app = create_app()
    # NOTE(review): app.run() is never called here; presumably the app is
    # served by an external WSGI server — confirm.
    print('Created app.')
"""Core logic for bias-correction and downscaling
Math stuff and business logic goes here. This is the "business logic".
"""
import numpy as np
import logging
from skdownscale.spatial_models import SpatialDisaggregator
import xarray as xr
from xclim import sdba, set_options
from xclim.sdba.utils import equally_spaced_nodes
from xclim.core.calendar import convert_calendar
import xesmf as xe
logger = logging.getLogger(__name__)
# Break this down into a submodule(s) if needed.
# Assume data input here is generally clean and valid.
def train_quantiledeltamapping(
    reference, historical, variable, kind, quantiles_n=100, window_n=31
):
    """Train a Quantile Delta Mapping (QDM) adjustment.

    Parameters
    ----------
    reference : xr.Dataset
        Dataset to use as model reference.
    historical : xr.Dataset
        Dataset to use as historical simulation.
    variable : str
        Name of target variable to extract from `historical` and `reference`.
    kind : {"+", "*"}
        Kind of variable. Used for QDM scaling.
    quantiles_n : int, optional
        Number of quantiles for QDM.
    window_n : int, optional
        Centered window size for day-of-year grouping.

    Returns
    -------
    xclim.sdba.adjustment.QuantileDeltaMapping
    """
    # Group by day of year with a centered window, at evenly spaced quantiles.
    doy_grouper = sdba.Grouper("time.dayofyear", window=int(window_n))
    quantile_nodes = equally_spaced_nodes(int(quantiles_n), eps=None)
    qdm = sdba.adjustment.QuantileDeltaMapping(
        kind=str(kind),
        group=doy_grouper,
        nquantiles=quantile_nodes,
    )
    qdm.train(ref=reference[variable], hist=historical[variable])
    return qdm
def adjust_quantiledeltamapping_year(
    simulation, qdm, year, variable, halfyearwindow_n=10, include_quantiles=False
):
    """Apply QDM to adjust a year within a simulation.

    Parameters
    ----------
    simulation : xr.Dataset
        Daily simulation data to be adjusted. Must have sufficient observations
        around `year` to adjust.
    qdm : xr.Dataset or sdba.adjustment.QuantileDeltaMapping
        Trained ``xclim.sdba.adjustment.QuantileDeltaMapping``, or
        Dataset representation that will be instantiate
        ``xclim.sdba.adjustment.QuantileDeltaMapping``.
    year : int
        Target year to adjust, with rolling years and day grouping.
    variable : str
        Target variable in `simulation` to adjust. Adjusted output will share the
        same name.
    halfyearwindow_n : int, optional
        Half-length of the annual rolling window to extract along either
        side of `year`.
    include_quantiles : bool, optional
        Whether or not to output quantiles (sim_q) as a coordinate on
        the bias corrected data variable in output.

    Returns
    -------
    out : xr.Dataset
        QDM-adjusted values from `simulation`. May be a lazy-evaluated future, not
        yet computed.
    """
    year = int(year)
    variable = str(variable)
    halfyearwindow_n = int(halfyearwindow_n)
    # Accept either a trained adjustment object or its Dataset serialization.
    if isinstance(qdm, xr.Dataset):
        qdm = sdba.adjustment.QuantileDeltaMapping.from_dataset(qdm)
    # Slice +/- halfyearwindow_n years around the target year, plus 15 days
    # on each end (Dec 17 .. Jan 15) to cover the 31-day day-of-year window.
    timeslice = slice(
        f"{year - halfyearwindow_n - 1}-12-17", f"{year + halfyearwindow_n + 1}-01-15"
    )
    simulation = simulation[variable].sel(
        time=timeslice
    )  # TODO: Need a check to ensure we have all the data in this slice!
    if include_quantiles:
        # include quantile information in output
        with set_options(sdba_extra_output=True):
            out = qdm.adjust(simulation, interp="nearest").sel(time=str(year))
            # make quantiles a coordinate of bias corrected output variable
            out = out["scen"].assign_coords(sim_q=out.sim_q)
    else:
        out = qdm.adjust(simulation, interp="nearest").sel(time=str(year))
    return out.to_dataset(name=variable)
def train_analogdownscaling(
    coarse_reference, fine_reference, variable, kind, quantiles_n=620, window_n=31
):
    """Train analog-inspired quantile-preserving downscaling

    Parameters
    ----------
    coarse_reference : xr.Dataset
        Dataset to use as resampled (to fine resolution) coarse reference.
    fine_reference : xr.Dataset
        Dataset to use as fine-resolution reference.
    variable : str
        Name of target variable to extract from `coarse_reference` and `fine_reference`.
    kind : {"+", "*"}
        Kind of variable. Used for creating AIQPD adjustment factors.
    quantiles_n : int, optional
        Number of quantiles for AIQPD.
    window_n : int, optional
        Centered window size for day-of-year grouping.

    Returns
    -------
    xclim.sdba.adjustment.AnalogQuantilePreservingDownscaling

    Raises
    ------
    ValueError
        If the references differ in length, or if `quantiles_n` does not
        equal (number of years) * (window length).
    """
    # AIQPD method requires that the number of quantiles equals
    # the number of days in each day group
    # e.g. 20 years of data and a window of 31 = 620 quantiles
    # check that lengths of input data are the same, then only check years for one
    if len(coarse_reference.time) != len(fine_reference.time):
        raise ValueError("coarse and fine reference data inputs have different lengths")
    # check number of years in input data (subtract 2 for the +/- 15 days on each end)
    num_years = len(np.unique(fine_reference.time.dt.year)) - 2
    if (num_years * int(window_n)) != quantiles_n:
        # BUG FIX: the old message's trailing clause was garbled and repeated
        # quantiles_n; report the required day-group size instead.
        raise ValueError(
            "number of quantiles {} must equal # of years {} * window length {}; day groups must have {} days".format(
                quantiles_n, num_years, int(window_n), num_years * int(window_n)
            )
        )
    aiqpd = sdba.adjustment.AnalogQuantilePreservingDownscaling(
        kind=str(kind),
        group=sdba.Grouper("time.dayofyear", window=int(window_n)),
        nquantiles=quantiles_n,
    )
    aiqpd.train(coarse_reference[variable], fine_reference[variable])
    return aiqpd
def adjust_analogdownscaling(simulation, aiqpd, variable):
    """Apply AIQPD to downscale bias corrected output.

    Parameters
    ----------
    simulation : xr.Dataset
        Daily bias corrected data to be downscaled.
    aiqpd : xr.Dataset or sdba.adjustment.AnalogQuantilePreservingDownscaling
        Trained ``xclim.sdba.adjustment.AnalogQuantilePreservingDownscaling``,
        or its Dataset representation, which will be re-instantiated.
    variable : str
        Target variable in `simulation` to downscale. Downscaled output will
        share the same name.

    Returns
    -------
    out : xr.Dataset
        AIQPD-downscaled values from `simulation`. May be a lazy-evaluated
        future, not yet computed.
    """
    name = str(variable)
    # Accept either a trained adjustment object or its Dataset serialization.
    if isinstance(aiqpd, xr.Dataset):
        aiqpd = sdba.adjustment.AnalogQuantilePreservingDownscaling.from_dataset(aiqpd)
    downscaled = aiqpd.adjust(simulation[name])
    return downscaled.to_dataset(name=name)
def apply_bias_correction(
    gcm_training_ds,
    obs_training_ds,
    gcm_predict_ds,
    train_variable,
    out_variable,
    method,
):
    """Bias correct input model data using the specified method.

    Uses a +/- 15 day centered day-of-year grouping. Currently only the
    "QDM" method is supported.

    Parameters
    ----------
    gcm_training_ds : Dataset
        Training model data for building the quantile map.
    obs_training_ds : Dataset
        Observation data for building the quantile map.
    gcm_predict_ds : Dataset
        Future model data to be bias corrected.
    train_variable : str
        Variable name used in the training data.
    out_variable : str
        Variable name used in the downscaled output.
    method : {"QDM"}
        Method to be used in the applied bias correction.

    Returns
    -------
    ds_predicted : xr.Dataset
        Dataset that has been bias corrected.

    Raises
    ------
    ValueError
        If `method` is not supported.
    """
    if method != "QDM":
        raise ValueError("this method is not supported")
    # Day-of-year grouper with a centered 31-day (+/- 15 day) window.
    doy_grouper = sdba.Grouper("time.dayofyear", window=31)
    qdm = sdba.adjustment.QuantileDeltaMapping(group=doy_grouper, kind="+")
    qdm.train(
        ref=obs_training_ds[train_variable], hist=gcm_training_ds[train_variable]
    )
    corrected = qdm.adjust(sim=gcm_predict_ds[train_variable])
    return corrected.to_dataset(name=out_variable)
def apply_downscaling(
    bc_ds,
    obs_climo_coarse,
    obs_climo_fine,
    train_variable,
    out_variable,
    method,
    domain_fine,
    weights_path=None,
):
    """Downscale input bias corrected data using specified method.

    Currently only the BCSD method for spatial disaggregation is
    supported.

    Parameters
    ----------
    bc_ds : Dataset
        Model data that has already been bias corrected.
    obs_climo_coarse : Dataset
        Observation climatologies at coarse resolution.
    obs_climo_fine : Dataset
        Observation climatologies at fine resolution.
    train_variable : str
        Variable name used in obs data.
    out_variable : str
        Variable name used in downscaled output.
    method : {"BCSD"}
        Method to be used in the applied downscaling.
    domain_fine : Dataset
        Domain that specifies the fine resolution grid to downscale to.
    weights_path : str or None, optional
        Path to the weights file, used for downscaling to fine resolution.

    Returns
    -------
    af_fine : xr.Dataset
        A dataset of adjustment factors at fine resolution used in downscaling.
    ds_downscaled : xr.Dataset
        A model dataset that has been downscaled from the bias correction resolution to specified domain file resolution.

    Raises
    ------
    ValueError
        If `method` is not supported.
    """
    if method == "BCSD":
        model = SpatialDisaggregator(var=train_variable)
        # Fit computes coarse-resolution adjustment factors from the climatology.
        af_coarse = model.fit(bc_ds, obs_climo_coarse, var_name=train_variable)
        # regrid adjustment factors
        # BCSD uses bilinear interpolation for both temperature and precip to
        # regrid adjustment factors
        af_fine = xesmf_regrid(af_coarse, domain_fine, "bilinear", weights_path)
        # apply adjustment factors
        predicted = model.predict(
            af_fine, obs_climo_fine[train_variable], var_name=train_variable
        )
    else:
        raise ValueError("this method is not supported")
    ds_downscaled = predicted.to_dataset(name=out_variable)
    return af_fine, ds_downscaled
def build_xesmf_weights_file(x, domain, method, filename=None):
    """Build ESMF weights file for regridding x to a global grid

    Parameters
    ----------
    x : xr.Dataset
    domain : xr.Dataset
        Domain to regrid to.
    method : str
        Method of regridding. Passed to ``xesmf.Regridder``.
    filename : optional
        Local path to output netCDF weights file.

    Returns
    -------
    outfilename : str
        Path to resulting weights file.
    """
    # Constructing the Regridder writes the weights file as a side effect.
    regridder = xe.Regridder(x, domain, method=method, filename=filename)
    return str(regridder.filename)
def _add_cyclic(ds, dim):
    """
    Adds wrap-around, appending first value to end of data for named dimension.

    Basically an xarray version of ``cartopy.util.add_cyclic_point()``.
    """
    def _wrap_first(da, d):
        # Append the first slice along `d` to the end of the array.
        return xr.concat([da, da.isel({d: 0})], dim=d)

    return ds.map(_wrap_first, keep_attrs=True, d=str(dim))
def xesmf_regrid(x, domain, method, weights_path=None, astype=None, add_cyclic=None):
    """
    Regrid a Dataset.

    Parameters
    ----------
    x : xr.Dataset
    domain : xr.Dataset
        Domain to regrid to.
    method : str
        Method of regridding. Passed to ``xesmf.Regridder``.
    weights_path : str, optional
        Local path to netCDF file of pre-calculated XESMF regridding weights.
    astype : str, numpy.dtype, or None, optional
        Typecode or data-type to which the regridded output is cast.
    add_cyclic : str, or None, optional
        Add cyclic point (aka wrap-around pixel) to given dimension before
        regridding. Useful for avoiding dateline artifacts along longitude
        in global datasets.

    Returns
    -------
    xr.Dataset
    """
    if add_cyclic:
        x = _add_cyclic(x, add_cyclic)
    # Build (or, when weights_path is given, load) the regridder for this grid pair.
    regridder = xe.Regridder(
        x,
        domain,
        method=method,
        filename=weights_path,
    )
    if astype:
        return regridder(x).astype(astype)
    return regridder(x)
def standardize_gcm(ds, leapday_removal=True):
    """
    Clean a raw GCM dataset: drop cruft dims/coords, convert precipitation
    units to mm/day, and optionally remove leap days.

    Parameters
    ----------
    ds : xr.Dataset
    leapday_removal : bool, optional
        When True, convert the calendar to "noleap" and rechunk.

    Returns
    -------
    xr.Dataset

    Raises
    ------
    ValueError
        If a "pr" variable is present with units other than "kg m-2 s-1".
    """
    # Remove cruft coordinates, variables, dims.
    cruft_vars = ("height", "member_id", "time_bnds")
    dims_to_squeeze = []
    coords_to_drop = []
    for v in cruft_vars:
        if v in ds.dims:
            dims_to_squeeze.append(v)
        elif v in ds.coords:
            coords_to_drop.append(v)
    ds_cleaned = ds.squeeze(dims_to_squeeze, drop=True).reset_coords(
        coords_to_drop, drop=True
    )
    # Cleanup time.
    # if variable is precip, need to update units to mm day-1
    if "pr" in ds_cleaned.variables:
        # units should be kg/m2/s in CMIP6 output
        if ds_cleaned["pr"].units == "kg m-2 s-1":
            # convert to mm/day: 1 kg/m2 of water = 1 mm depth; scale by seconds/day
            mmday_conversion = 24 * 60 * 60
            ds_cleaned["pr"] = ds_cleaned["pr"] * mmday_conversion
            # update units attribute
            ds_cleaned["pr"].attrs["units"] = "mm day-1"
        else:
            # we want this to fail, as pr units are something we don't expect
            raise ValueError("check units: pr units attribute is not kg m-2 s-1")
    if leapday_removal:
        # if calendar is just integers, xclim cannot understand it
        if ds.time.dtype == "int64":
            ds_cleaned["time"] = xr.decode_cf(ds_cleaned).time
        # remove leap days and update calendar
        ds_noleap = xclim_remove_leapdays(ds_cleaned)
        # rechunk, otherwise chunks are different sizes
        ds_out = ds_noleap.chunk({"time": 730, "lat": len(ds.lat), "lon": len(ds.lon)})
    else:
        ds_out = ds_cleaned
    return ds_out
def xclim_remove_leapdays(ds):
    """
    Return `ds` converted to a 365-day ("noleap") calendar, dropping leap days.

    Parameters
    ----------
    ds : xr.Dataset

    Returns
    -------
    xr.Dataset
    """
    return convert_calendar(ds, target="noleap")
def apply_wet_day_frequency_correction(ds, process):
    """
    Wet-day frequency correction for precipitation (Cannon et al. [1]):
    "pre" replaces exact zeros with tiny positive values before bias
    correction; "post" zeroes values below the threshold afterwards.

    Parameters
    ----------
    ds : xr.Dataset
    process : {"pre", "post"}

    Returns
    -------
    xr.Dataset

    Raises
    ------
    ValueError
        If `process` is neither "pre" nor "post".

    Notes
    -------
    [1] A.J. Cannon, S.R. Sobie, & T.Q. Murdock, "Bias correction of GCM precipitation by quantile mapping: How well do methods preserve changes in quantiles and extremes?", Journal of Climate, vol. 28, Issue 7, pp. 6938-6959.
    """
    threshold = 0.05  # mm/day
    low = 1e-16
    if process == "pre":
        # NOTE(review): np.random.uniform here is a single scalar draw, so
        # every zero is replaced with the SAME value; Cannon et al. draw
        # independently per element — confirm whether this is intended.
        ds_corrected = ds.where(ds != 0.0, np.random.uniform(low=low, high=threshold))
    elif process == "post":
        ds_corrected = ds.where(ds >= threshold, 0.0)
    else:
        raise ValueError("this processing option is not implemented")
    return ds_corrected
|
"""
Django settings for sciblog project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this is a list containing an empty string; Django expects a
# non-empty string — presumably the real key is injected at deploy time. Confirm.
SECRET_KEY = ['']
# Set DEBUG = False in production. Set DEBUG = True in localhost development
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog',
    'django.contrib.sites',
    'django.contrib.flatpages',
    'django.contrib.syndication',
    'django.contrib.sitemaps',
    # NOTE(review): 'libs.django-disqus.disqus' contains a hyphen, which is
    # invalid in a Python import path — confirm the app actually loads.
    'libs.django-disqus.disqus', # for comments
    'libs.ckeditor', # for managing text,images and formulas
    'libs.ckeditor_uploader',
)
SITE_ID = 1
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
    # Custom middleware that switches template dirs for mobile clients.
    'blog.middleware.MobileTemplatesMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    "django.core.context_processors.media",
    "django.core.context_processors.static",
)
#list of IPs able to see the toolbar
INTERNAL_IPS=('127.0.0.1','localhost',)
ROOT_URLCONF = 'sciblog.urls'
WSGI_APPLICATION = 'sciblog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'img')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/img/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'blog','static'),
)
# Template directory
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'blog', 'templates'),
)
# Desktop/mobile template roots consumed by MobileTemplatesMiddleware.
MOBILE_TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'blog', 'templates', 'mobile'),
)
DESKTOP_TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'blog', 'templates', 'desktop'),
)
# Disqus configuration (for managing comments)
# To install disqus http://django-disqus.readthedocs.org/en/latest/index.html
# SECURITY NOTE(review): API key committed to source control; move to an
# environment variable and rotate the key.
DISQUS_API_KEY = 'a4b0vyjnp1sE5hYt8GP7blDgoe1Y0ohfY4gNoWh8JpZCSyGvVN41JOrhpPgREWeZ'
DISQUS_WEBSITE_SHORTNAME = 'miguelgfierro'
# Http protocol with (https://) or without SSL (http://)
# NOTE: You need to have a SSL certificate installed before setting this flag to True
HTTPS = True
# Social Networks
FACEBOOK_ID = '556883141128364' #for Facebook tracking
FACEBOOK_URL = ''
TWITTER_URL = 'https://twitter.com/miguelgfierro'
TWITTER_HANDLE = 'miguelgfierro'
LINKEDIN_URL = 'https://es.linkedin.com/in/miguelgfierro'
GOOGLE_PLUS_URL = 'https://plus.google.com/+MiguelGonzalezFierro'
PINTEREST_URL = ''
INSTAGRAM_URL = ''
RSS_URL = 'http://feeds.feedburner.com/miguelgfierro'
# Google Analytics
GA_TRACKING_ID = 'UA-70996723-1'
# Ckeditor
CKEDITOR_UPLOAD_PATH = "upload/"
CKEDITOR_IMAGE_BACKEND = "pillow"
|
"""
Implementation for an undirected, weighted graph data structure. Very
similar to `graph_undirected.py` implementation.
(0)---7---(1)
/ \
1 2
/ \
(4)-------4--------(2)----3----(3)---2---(5)
"""
import random
# adjacency list representation of above graph
# { nodeA: {neighbor: edge_weight, neighbor: edge_weight},
# nodeB: {neighbor: edge_weight},
# ...,
# nodeN: {neighbor: edge_weight, ...}
# }
# Undirected graph: each edge appears under both endpoints with the same weight.
graph = {0: {1: 7, 4: 1},
         1: {0: 7, 2: 2},
         2: {1: 2, 4: 4, 3: 3},
         3: {2: 3, 5: 2},
         4: {2: 4, 0: 1},
         5: {3: 2}}
# use to test that the graph was created appropriately
def dfs(graph):
    """Iterative depth-first traversal that prints every node once.

    The start node is chosen at random; an empty graph prints nothing.
    Nodes are marked as seen when pushed, so none is visited twice.
    """
    if not graph:
        return
    start = random.choice(list(graph.keys()))
    seen = {start}
    stack = [start]
    while stack:
        node = stack.pop()
        print(node)
        for nbr in graph[node]:
            if nbr not in seen:
                seen.add(nbr)
                stack.append(nbr)
def find_shortest_paths(graph, source_node):
    """
    Dijkstra's algorithm: map every node to its shortest distance from
    `source_node`. Returns None (after printing a message) for an
    invalid source; unreachable nodes keep distance infinity.
    """
    if source_node not in graph:
        print("Invalid starting node.")
        return
    # Every node starts unvisited, at infinite distance, with no predecessor.
    unvisited = set(graph)
    from_source = {node: float("inf") for node in graph}
    prev = {node: None for node in graph}
    from_source[source_node] = 0
    while unvisited:
        # Greedy pick: the unvisited node closest to the source.
        # (A min-priority queue would make this O(log n) instead of O(n).)
        best = float("inf")
        for node, dist in from_source.items():
            if node in unvisited and dist < best:
                current, best = node, dist
        unvisited.remove(current)
        # Relax each unvisited neighbor through `current`.
        for neighbor, weight in graph[current].items():
            if neighbor in unvisited:
                candidate = from_source[current] + weight
                if candidate < from_source[neighbor]:
                    from_source[neighbor] = candidate
                    prev[neighbor] = current
    return from_source
def find_shortest_path(graph, source, target):
    """
    Find the shortest path between two nodes using Dijkstra's algorithm.

    Returns the path as a list of nodes in reverse order (target first,
    source last), an empty list when target is unreachable, or None
    (after printing a message) for endpoints not in the graph.
    """
    if source not in graph:
        print("Invalid starting node.")
        return
    elif target not in graph:
        print("Invalid target node.")
        return
    # all nodes initially unvisited; distance from source is infinity;
    # previous node in the optimal path from source is None
    unvisited = set(graph)
    from_source = {node: float("inf") for node in graph}
    prev = {node: None for node in graph}
    from_source[source] = 0
    while unvisited:
        # greedily pick the unvisited node closest to the source
        current = min(unvisited, key=from_source.get)
        # BUG FIX: stop once only unreachable nodes remain, instead of
        # re-removing a stale `current`
        if from_source[current] == float("inf"):
            break
        unvisited.remove(current)
        # found target; exit while loop
        if current == target:
            break
        # relax each unvisited neighbor
        for neighbor, edge_weight in graph[current].items():
            if neighbor in unvisited:
                temp_dist = from_source[current] + edge_weight
                if temp_dist < from_source[neighbor]:
                    from_source[neighbor] = temp_dist
                    prev[neighbor] = current
    # read shortest path from source to target by reverse iteration.
    # BUG FIX: the original used truthiness (`if prev[target]` and
    # `while target:`), which broke whenever node 0 appeared on the path;
    # compare against None explicitly.
    path = []
    if prev[target] is not None or target == source:
        while target is not None:
            path.append(target)
            target = prev[target]
    return path
if __name__ == '__main__':
    # Demo: single-pair query on the example graph defined above.
    # print(find_shortest_paths(graph, 5))
    print(find_shortest_path(graph, 1, 4))
# Generated by Django 2.1.7 on 2019-03-13 18:58
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: relax the Conference model's
    `description` and `key_dates` fields to accept blank/null values."""

    dependencies = [
        ('conference', '0002_auto_20190223_2201'),
    ]

    operations = [
        migrations.AlterField(
            model_name='conference',
            name='description',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='conference',
            name='key_dates',
            field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict, null=True),
        ),
    ]
|
import os
import contextlib
from jove.AnimationUtils import *
with open(os.devnull, 'w') as devnull:
with contextlib.redirect_stdout(devnull):
from jove.DotBashers import *
from jove.Def_PDA import *
import ipywidgets as widgets
from ipywidgets import Layout
from IPython.display import display, clear_output, Javascript, HTML
from IPython.utils import io
from traitlets import Unicode, validate, List, observe, Instance
from graphviz import Source
class AnimatePDA:
    '''
    This is the PDA animation class.
    Call it with the PDA to be animated, and also FuseEdges=True/False
    to draw the PDA with edges either fused or not.

    For producing drawings in Colab, it is important to have these in
    every cell that calls animation:

        AnimatePDA(myPDA, FuseEdges='True/False')

    followed by

        display(HTML('<link rel="stylesheet" href="//stackpath.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css"/>'))

    Then the animation works in one's own install or Colab.
    '''
    def __init__(self, m_desc,
                 FuseEdges=False,
                 max_stack=30,
                 max_width=9.0,
                 accept_color='chartreuse3',
                 reject_color='red',
                 neutral_color='dodgerblue2'):
        """Build the widget UI and animation state for a PDA.

        m_desc: Jove PDA machine description (a dict; 'q0', 'Sigma' and
            'Delta' are read by this class).
        FuseEdges: draw parallel edges fused into a single labelled edge.
        max_stack: largest stack size selectable on the slider.
        max_width: maximum drawing width forwarded to graphviz.
        accept_color / reject_color / neutral_color: graphviz color names
            for accepted, rejected and in-progress states.
        """
        # Options
        self.color_accept = accept_color
        self.color_reject = reject_color
        self.color_neutral = neutral_color
        self.max_width = max_width
        self.fuse = FuseEdges

        # PDA specific options
        self.condition = 'ACCEPT_F'
        self.stack_size = 6

        # initialize
        self.valid_input = True
        self.machine = m_desc
        self.machine_obj = dotObj_pda(self.machine, FuseEdges=FuseEdges)
        # pristine graphviz source; every animation frame restyles a copy
        self.copy_source = reformat_edge_labels(set_graph_size(self.machine_obj.source, max_width))

        # Per-step drawing sources for the currently selected path
        self.machine_steps = []
        self.feed_steps = []
        self.stack_steps = []
        self.from_nodes = self.machine['q0']
        self.to_nodes = self.machine['q0']
        self.animated = False
        self.is_back_step = False

        # Setup the widgets
        # Top row for user input
        self.user_input = widgets.Text(value='',
                                       placeholder='Sigma: {{{}}}'.format(','.join(sorted(self.machine['Sigma']))),
                                       description='Input:',
                                       layout=Layout(width='500px')
                                       )
        self.user_input.observe(self.on_input_change, names='value')
        self.generate_button = widgets.Button(description="Animate",
                                              button_style='primary',
                                              disabled=False
                                              )
        self.generate_button.on_click(self.generate_animation)
        self.acceptance_toggle = widgets.Dropdown(options=[('State', 'ACCEPT_F'), ('Stack', 'ACCEPT_S')],
                                                  description='Acceptance:',
                                                  disabled=False,
                                                  layout=Layout(width='160px')
                                                  )

        # Bottom row for player controls
        self.play_controls = widgets.Play(interval=950,
                                          value=0,
                                          min=0,
                                          max=100,
                                          step=1,
                                          description="Press play",
                                          disabled=True
                                          )
        self.play_controls.observe(self.on_play_step, names='value')
        self.speed_control = widgets.IntSlider(value=1,
                                               min=1,
                                               max=10,
                                               step=1,
                                               description='Speed:',
                                               disabled=False,
                                               continuous_update=False,
                                               orientation='horizontal',
                                               readout=True,
                                               readout_format='d'
                                               )
        self.speed_control.observe(self.on_speed_change, names='value')

        # Create the controls for stepping through the animation
        self.backward = widgets.Button(icon='step-backward',
                                       layout=Layout(width='40px'),
                                       disabled=True
                                       )
        self.forward = widgets.Button(icon='step-forward',
                                      layout=Layout(width='40px'),
                                      disabled=True
                                      )
        self.backward.on_click(self.on_backward_click)
        self.forward.on_click(self.on_forward_click)

        # set the widget to display the machine
        self.machine_display = widgets.Output()
        with self.machine_display:
            display(Source(self.copy_source))

        # set a widget to display rejected output
        self.rejection_display = widgets.Output()
        self.rejection_text = widgets.HTML(value="")
        self.reject_msg_start = '<p style="color:{}; text-align:center"><b>\'<span style="font-family:monospace">'.format(self.color_reject)
        self.reject_msg_end = '</span>\' was REJECTED</b></br>(Try running with a larger stack size or changing acceptance)</p>'

        # set the widget to display the stack
        self.stack_display = widgets.Output()
        s_state = self.set_stack_display()
        with self.stack_display:
            display(Source(s_state))
        self.stack_size_slider = widgets.IntSlider(value=self.stack_size,
                                                   min=2,
                                                   max=max_stack,
                                                   step=1,
                                                   description='Stack Size:',
                                                   disabled=False,
                                                   continuous_update=False,
                                                   orientation='horizontal',
                                                   readout=True,
                                                   readout_format='d'
                                                   )
        self.stack_size_slider.observe(self.on_stack_size_change, names='value')

        # set the widget to display the feed
        self.feed_display = widgets.Output()
        f_state, inspecting = self.generate_feed('', 0, 0, [])
        with self.feed_display:
            display(Source(f_state))

        self.path_dropdown = widgets.Dropdown(options={},
                                              value=None,
                                              description='Path:',
                                              disabled=True,
                                              layout=Layout(width='200px')
                                              )
        self.path_dropdown.observe(self.on_path_change, names='value')

        # TODO: REMOVE TESTING CODE
        self.test_output = widgets.Output()

        # arrange the widgets in the display area
        row1 = widgets.HBox([self.user_input, self.acceptance_toggle, self.generate_button])
        row2 = widgets.HBox([self.stack_size_slider])
        ms_disp = widgets.HBox([self.stack_display, self.machine_display])
        play_row = widgets.HBox([self.path_dropdown, self.play_controls, self.backward, self.forward, self.speed_control])
        w = widgets.VBox([row1, row2, self.rejection_display, ms_disp, self.feed_display, play_row, self.test_output])
        display(w)
        # playback stays disabled until an animation is generated
        self.play_controls.disabled = True
        self.forward.disabled = True
        self.backward.disabled = True
        self.speed_control.disabled = True
def on_speed_change(self, change):
self.play_controls.interval = 1000 - 50 * change['new']
def on_stack_size_change(self, change):
self.stack_size = change['new']
with self.stack_display:
clear_output(wait=True)
display(Source(self.set_stack_display()))
def on_input_change(self, change):
# check for valid user input
if not self.valid_user_input():
self.generate_button.button_style = 'danger'
self.generate_button.description = 'Invalid Input'
else:
self.generate_button.button_style = 'primary'
self.generate_button.description = 'Animate'
def on_path_change(self, change):
self.play_controls._playing = False
new_path = change['new']
# make sure the there is an existing new path
if new_path is None:
return
self.play_controls.max = new_path[0]
self.machine_steps = new_path[1]
self.stack_steps = new_path[2]
self.feed_steps = new_path[3]
self.play_controls.value = 0
def on_backward_click(self, b):
self.play_controls._playing = False
self.is_back_step = True
self.play_controls.value -= 1
def on_forward_click(self, b):
self.play_controls._playing = False
self.play_controls.value += 1
    def generate_animation(self, change):
        """Animate button callback; toggles between two modes.

        If currently animated: tear the animation down and re-enable the
        input widgets.  Otherwise: run the PDA on the user's input, build
        the per-step machine/stack/feed drawings for every accepting
        path, and enable playback (or show the rejection banner when no
        path accepts).
        """
        if self.animated:  # switching to input mode
            # enable the input controls
            self.play_controls._playing = False
            self.animated = False
            self.user_input.disabled = False
            # self.alternate_start.disabled = False
            self.stack_size_slider.disabled = False
            self.acceptance_toggle.disabled = False
            # update the button to switch between modes
            self.generate_button.description='Animate'
            self.generate_button.button_style = 'primary'
            # disable the play controls
            self.play_controls.disabled = True
            self.forward.disabled = True
            self.backward.disabled = True
            self.speed_control.disabled = True
            self.path_dropdown.disabled = True
            self.path_dropdown.index = None
            # reset all three panes to their pristine drawings
            with self.machine_display:
                clear_output(wait=True)
                display(Source(self.copy_source))
            with self.stack_display:
                clear_output(wait=True)
                display(Source(self.set_stack_display()))
            with self.feed_display:
                clear_output(wait=True)
                display(Source(self.generate_feed('', 0, 0, [])[0]))
            with self.rejection_display:
                clear_output()
        else:  # switching to play mode
            # ignore invalid input
            if not self.valid_user_input():
                return
            # disable the input controls
            self.animated = True
            self.user_input.disabled = True
            # self.alternate_start.disabled = True
            self.stack_size_slider.disabled = True
            self.acceptance_toggle.disabled = True
            self.generate_button.description='Change Input'
            # clean the current play displays
            self.feed_steps = []
            self.stack_steps = []
            self.machine_steps = []
            # find the acceptance paths; run_pda prints as it explores,
            # so capture its output to keep the notebook clean
            a = ()
            paths = []
            touched = []
            with io.capture_output() as captured:
                a, paths, touched = run_pda(self.user_input.value,
                                            self.machine,
                                            acceptance=self.acceptance_toggle.value,
                                            STKMAX=self.stack_size)
            # if there are no acceptance paths we don't have any animations to build
            if len(paths) == 0:
                self.generate_button.button_style = 'danger'
                rejected_machine = set_graph_color(self.copy_source, self.color_reject)
                with self.rejection_display:
                    self.rejection_text.value = '{}{}{}'.format(self.reject_msg_start, self.user_input.value, self.reject_msg_end)
                    display(self.rejection_text)
                with self.machine_display:
                    clear_output(wait=True)
                    display(Source(rejected_machine))
                return
            new_dropdown_options = {}
            # generate all the display steps for each path
            path_count = 1
            for p in paths:
                path_states = p[1].copy()
                # two sub-steps (at a node / choosing an edge) per state,
                # plus a final acceptance step
                max_steps = (len(path_states))*2+1
                path_states.append(p[0])
                # generate the feed display; `inspecting` threads through
                # successive generate_feed calls
                path_feed_steps = []
                inspecting = ''
                for step in range(max_steps):
                    feed_step, inspecting = self.generate_feed(inspecting, step, max_steps, path_states)
                    path_feed_steps.append(feed_step)
                # generate the machine steps
                path_obj_steps = []
                for step in range(max_steps):
                    path_obj_steps.append(self.generate_machine_steps(path_states, step, max_steps))
                # generate the stack steps
                path_stack_steps = []
                for step in range(max_steps):
                    # NOTE(review): step == max_steps is unreachable inside
                    # range(max_steps); only the else branch ever runs
                    if step == max_steps:
                        path_stack_steps.append(self.set_stack_display(path_states[-1][2]))
                    else:
                        path_stack_steps.append(self.set_stack_display(path_states[step//2][2]))
                # add the path as an option in the dropdown
                new_dropdown_options['Path {}'.format(path_count)] = (max_steps-1, path_obj_steps, path_stack_steps, path_feed_steps)
                path_count += 1
            # update the dropdown (index = 0 triggers on_path_change,
            # which loads the first path's step lists)
            self.path_dropdown.options = new_dropdown_options
            self.path_dropdown.index = 0
            # display the machine for this step
            with self.machine_display:
                clear_output(wait=True)
                display(Source(self.machine_steps[0]))
            # display the feed for this step
            with self.feed_display:
                clear_output(wait=True)
                display(Source(self.feed_steps[0]))
            # display the stack for this step
            with self.stack_display:
                clear_output(wait=True)
                display(Source(self.stack_steps[0]))
            # enable the controls
            self.backward.disabled = True
            if len(self.user_input.value) == 0:
                self.forward.disabled = True
            else:
                self.forward.disabled = False
            self.play_controls.disabled = False
            self.speed_control.disabled = False
            self.path_dropdown.disabled = False
def valid_user_input(self):
# make sure the input is valid
for c in self.user_input.value:
if c not in self.machine['Sigma']:
return False
return True
def on_play_step(self, change):
# set the step controls
if change['new'] == 0:
self.backward.disabled = True
self.forward.disabled = False
elif change['new'] == self.play_controls.max:
self.backward.disabled = False
self.forward.disabled = True
else:
self.backward.disabled = False
self.forward.disabled = False
# display the machine for this step
with self.machine_display:
clear_output(wait=True)
display(Source(self.machine_steps[change['new']]))
# display the feed for this step
with self.feed_display:
clear_output(wait=True)
display(Source(self.feed_steps[change['new']]))
# display the stack for this step
with self.stack_display:
clear_output(wait=True)
display(Source(self.stack_steps[change['new']]))
    def generate_machine_steps(self, states, step, max_step):
        """Return the styled graphviz source for one animation step.

        states: list of (state, remaining_input, stack) triples along the
            chosen path (final configuration appended last).
        Even steps highlight the node being visited; odd steps highlight
        the outgoing transition choice; the final step colors according
        to the acceptance mode.
        """
        # on first step reset start node
        if step == 0:
            self.from_nodes = self.machine['q0']
            self.to_nodes = self.machine['q0']
            #with self.test_output:
            #    print('starting from {}'.format({self.from_nodes}))
            return color_nodes(self.copy_source, {self.from_nodes}, self.color_neutral)
            # return self.set_node_display(set(self.from_nodes), self.color_neutral)
        # on the last step check for acceptance type
        if step == max_step-1:
            if self.acceptance_toggle.value == 'ACCEPT_S':
                # color whole graph green (accept-by-empty-stack)
                return set_graph_color(self.copy_source, self.color_accept)
            else:
                # color just the final node green (accept-by-final-state)
                return color_nodes(self.copy_source, {states[-1][0]}, self.color_accept)
        # primary steps we are on a node
        elif step % 2 == 0:
            self.from_nodes = states[step//2][0]
            node_display = color_nodes(self.copy_source, {self.from_nodes}, self.color_neutral)
            return node_display
        # secondary steps are choice steps
        else:
            # symbol under inspection: front of remaining input, or '' (epsilon)
            inspecting = states[step//2][1]
            if len(inspecting) == 0:
                inspecting = ''
            else:
                inspecting = inspecting[0]
            # all configurations reachable by one PDA step from here
            self.to_nodes = step_pda((self.from_nodes, inspecting, states[step//2][2]), [], self.machine)
            return self.set_choice_display(step//2, self.copy_source, states[step//2][0], states[step//2+1][0], self.to_nodes, states, self.color_neutral)
def set_choice_display(self, step, m_state, src_node, dest_node, state_set, states, color):
ap = ''''
node_set = set([s[0][0] for s in state_set])
for n in node_set:
# determine the input part of the label (either front of input or '')
inspecting = states[step][1]
if len(inspecting) == 0:
inspecting = ''
else:
inspecting = inspecting[0]
stack = states[step][2]
# determine the deltas between the src and n
transitions = []
future_stacks = []
delta_keys = self.machine['Delta'].keys()
for k in delta_keys:
if k[0] == src_node and k[1] == inspecting and stack.startswith(k[2]):
results = self.machine['Delta'][k]
for r in results:
if r[0] == n:
popping = k[2]
pushing = r[1]
future_stacks.append(pushing + stack[len(popping):])
transitions.append(replace_special('{}, {} ; {}'.format(ap if inspecting == '' else inspecting, ap if popping == '' else popping, ap if pushing == '' else pushing)))
elif inspecting != '' and k[0] == src_node and k[1] == '' and stack.startswith(k[2]):
results = self.machine['Delta'][k]
for r in results:
if r[0] == n:
popping = k[2]
pushing = r[1]
future_stacks.append(pushing + stack[len(popping):])
transitions.append(replace_special('{}, {} ; {}'.format(ap, ap if popping == '' else popping, ap if pushing == '' else pushing)))
# style the edge label
if self.fuse:
label_start = m_state.find('=', m_state.find('\t{} -> {}'.format(src_node,n)))
label_end = m_state.find(']', label_start)
replacement = m_state[label_start+1:label_end]
for t in transitions:
replacement = replacement.replace(' {}'.format(t),'<font color="{}"> {}</font>'.format(color,t))
if n!= dest_node:
replacement += ' color="{}" arrowsize=1 penwidth=1 style=dashed'.format(color)
else:
replacement += ' color="{}" arrowsize=1.5 penwidth=2'.format(color)
m_state = m_state[:label_start+1] + replacement + m_state[label_end:]
else:
for t in range(len(transitions)):
label_start = m_state.find('=', m_state.find('\t{} -> {} [label=< {}>'.format(src_node,n,transitions[t])))
label_end = m_state.find(']', label_start)
label = m_state[label_start+1:label_end]
replacement = label.replace(' {}'.format(transitions[t]),'<font color="{}"> {}</font>'.format(color,transitions[t]))
if n!= dest_node or future_stacks[t] != states[step+1][2]:
replacement += ' color="{}" arrowsize=1 penwidth=1 style=dashed'.format(color)
else:
replacement += ' color="{}" arrowsize=1.5 penwidth=2'.format(color)
m_state = m_state[:label_start+1] + replacement + m_state[label_end:]
# style the ending node
if n != dest_node:
place = m_state.find(']', m_state.find('\t{} ['.format(n)))
m_state = m_state[:place] + ' fontcolor="{}" fillcolor=white color="{}" style=dashed penwidth=1'.format(color,color) + m_state[place:]
else:
place = m_state.find(']', m_state.find('\t{} ['.format(n)))
m_state = m_state[:place] + ' fontcolor="{}" color="{}" fillcolor=white style=filled penwidth=2'.format(color,color) + m_state[place:]
return m_state
    def set_stack_display(self, contents=''):
        """Return graphviz source drawing `contents` as a record-shaped
        stack, padded with blank cells up to self.stack_size.

        contents[0] is drawn as the cell carrying the `<top>` port that
        the 'Empty' arrow points at.
        """
        on_stack = len(contents)
        stack_string = 'digraph {{\n\tgraph [rankdir=LR size={}];\n\tnode [fontsize=12 width=0.35 shape=record];\n\tstack [label="'.format(self.max_width)
        # visible stack
        elements_string = ''
        if on_stack == 0:
            # an empty stack still needs one blank cell to carry <top>
            on_stack = 1
            elements_string += '<top> '
        else:
            # build cells from the bottom of the stack upward, prepending,
            # and attach the <top> port to contents[0]
            for i in range(on_stack-1,-1,-1):
                elements_string = '{}|'.format(replace_special(contents[i])) + elements_string
                if i == 0:
                    elements_string = '<top> ' + elements_string
        # pad with blank cells for the unused capacity
        for i in range(self.stack_size - on_stack):
            elements_string = ' |' + elements_string
        # note: the adjacent '' literals inside produce label="" for Empty
        stack_string += elements_string[:-1] + '"]\n\tEmpty [width=0 penwidth=0 label="''"]\n\tEmpty -> stack:top\n}'
        return stack_string
def generate_feed(self, inspecting, step, max_steps, states):
input_string = self.user_input.value
feed_string = ''
if step == 0:
feed_string = write_feed_source('', ' ', replace_special(input_string), self.max_width)
elif step == max_steps:
current_state = states[step//2]
prev_state = states[step//2-1]
if len(current_state[1]) != len(prev_state[1]):
inspecting = ''
endpoint = len(current_state[1])+len(inspecting)
if endpoint == 0:
feed_string = write_feed_source(replace_special(input_string), ' ', '', self.max_width)
else:
feed_string = write_feed_source(replace_special(input_string[:-endpoint]),
replace_special(' ' if inspecting is '' else inspecting),
replace_special(current_state[1]), self.max_width)
else:
current_state = states[step//2]
# at a node
if step % 2 == 0:
prev_state = states[step//2-1]
if len(current_state[1]) != len(prev_state[1]):
inspecting = ''
endpoint = len(current_state[1][len(inspecting):])+len(inspecting)
if endpoint == 0:
feed_string = write_feed_source(replace_special(input_string), ' ', '', self.max_width)
else:
feed_string = write_feed_source(replace_special(input_string[:-endpoint]),
replace_special(' ' if inspecting is '' else inspecting),
replace_special(current_state[1][len(inspecting):]), self.max_width)
# picking a path
else:
left = ''
if len(current_state[1]) == 0:
inspecting = ''
else:
inspecting = current_state[1][0]
right = current_state[1][len(inspecting):]
endpoint = len(right)+len(inspecting)
if endpoint == 0:
feed_string = write_feed_source(replace_special(input_string), ' ', '', self.max_width)
else:
feed_string = write_feed_source(replace_special(input_string[:-endpoint]),
replace_special(' ' if inspecting is '' else inspecting),
replace_special(right), self.max_width)
return feed_string, inspecting
# Import-time usage hint, printed once when the module is loaded.
print(''' "help(AnimatePDA)" gives you info on how to use animations with PDA ''')
|
#!/usr/bin/python
# Format of each line is:
# date\ttime\tstore name\titem description\tcost\tmethod of payment
#
# We want elements 2 (store name) and 4 (cost)
# We need to write them out to standard output, separated by a tab
import sys
import re
# Hadoop-streaming mapper (Python 2): for each well-formed access-log
# line, emit '<url>\t1' on stdout.  The log's quoted request section
# ("METHOD URL PROTOCOL") is field 1 after splitting on double quotes.
for line in sys.stdin:
    url = ""
    # first split on double quotes; a well-formed line yields exactly
    # [prefix, request, suffix]
    data = line.strip().split('"')
    if len(data)!=3:
        # malformed log line - missing quoted request; skip it
        continue
    else:# len(data)==3:
        # request should be 'METHOD URL PROTOCOL'; the URL is token 1
        req=data[1].strip().split()
        if len(req)!=3:
            continue
        url=req[1]
        print "{0}\t{1}".format(url,"1")
|
'''
Created on Aug 27, 2016
@author: Burkhard
'''
# SECURITY WARNING(review): a real-looking Gmail address and password are
# hard-coded in plain text here.  They should be moved to environment
# variables or a secrets store, and the exposed credentials rotated.
MAIN_EMAIL = "pintchukandrey76@gmail.com" # "pinchukandreyurevich76@gmail.com"
GMAIL_PWD = "losharame76"
# presumably the sender display name -- TODO confirm against the mail code
FROM_WHO = "Prapor"
|
from django.contrib import admin
# Register your models here.
from django.contrib import admin
from .models import Students, Admins, Competitions, Notices
# Register your models here.
# Expose each model in the Django admin with the default ModelAdmin.
admin.site.register(Students)
admin.site.register(Admins)
admin.site.register(Competitions)
admin.site.register(Notices)
# coding: utf-8
# vi: ft=python
# Development flags: debug output and develop mode toggled together.
DEBUG = DEVELOP_MODE = True

SENTRY_DSN = ''

SESSION_SECRET_KEY = 'NVzLYJSMyw'
SESSION_TTL = 24 * 3600  # session lifetime in seconds (one day)

TRUSTED_HOSTS = '127.0.0.1,10.0.0.1'

ALLOW_ORIGINS = ['https://example.org', 'https://www.example.org']  # use ['*'] to allow any origin.
ALLOW_ORIGINS_REG = r"https://.*\.example\.org"  # See https://www.starlette.io/middleware/#corsmiddleware for ref

SYSTEM_USER = 'admin'
SYSTEM_USER_PASSWORD = 'admin'
ADMIN_ROLES = ['admin', 'system_admin']

PARAM_FILLUP = {
    # 'reason': 'hehe',
    'ldap_id': lambda user: user.name,
}

# DATABASE_URL = 'sqlite:///tmp/helpdesk.db'
# postgres://user:pass@localhost/dbname
# mysql://user:pass@localhost/dbname

# BUG FIX: ('st2') is just the string 'st2' -- parentheses alone do not
# make a tuple, so substring tests like 't2' in ENABLED_PROVIDERS would
# wrongly pass.  A one-element tuple needs the trailing comma.
ENABLED_PROVIDERS = ('st2',)
ST2_BASE_URL = 'https://st2.example.com'
ST2_API_KEY = None
ST2_CACERT = None
ST2_DEFAULT_PACK = ''
ST2_WORKFLOW_RUNNER_TYPES = ['action-chain', 'mistral-v2', 'orquesta']

# NOTE(review): "PRIVIDERS" looks like a typo for "PROVIDERS", but the
# name is kept because other modules may import it by this spelling.
OPENID_PRIVIDERS = {
    'keycloak': {
        'server_metadata_url': 'https://keycloak.example.com/realms/apps/.well-known/openid-configuration',
        'client_id': 'helpdesk',
        'client_secret': 'CLIENT_SECRET',
        'scope': 'openid email profile',
    },
    'google': {
        'server_metadata_url': 'https://accounts.google.com/.well-known/openid-configuration',
        'client_id': 'CLIENT_ID',
        'client_secret': 'CLIENT_SECRET',
        'scope': 'openid email profile',
        'client_kwargs': {
            'proxies': {'all': 'http://localhost:3128'},
        },
    }
}

AUTHORIZED_EMAIL_DOMAINS = ['@example.com']
def oauth_username_func(id_token):
    """Derive a local username from an OpenID id_token: prefer the
    `preferred_username` claim, else the local part of `email`."""
    preferred = id_token.get('preferred_username')
    if preferred:
        return preferred
    return id_token['email'].split('@')[0]
# base url will be used by notifications to show web links
DEFAULT_BASE_URL = 'https://example.com'

# comma-separated admin recipients and the From: address for outgoing mail
ADMIN_EMAIL_ADDRS = 'admin@example.com,ops@example.com'
FROM_EMAIL_ADDR = 'helpdesk@example.com'
NOTIFICATION_TITLE_PREFIX = '[helpdesk] '
# "module:Class" paths of notification backends
NOTIFICATION_METHODS = [
    'helpdesk.libs.notification:MailNotification',
    'helpdesk.libs.notification:WebhookNotification',
]

# presumably targets listed here bypass manual approval -- TODO confirm
AUTO_APPROVAL_TARGET_OBJECTS = []

TICKETS_PER_PAGE = 50
def avatar_url_func(email):
    """Return a protocol-relative Gravatar URL for *email*: the MD5 hex
    digest of the UTF-8 encoded, lower-cased address."""
    import hashlib
    digest = hashlib.md5(email.encode('utf-8').lower()).hexdigest()
    return '//www.gravatar.com/avatar/%s' % digest
# Action Tree Config
# action name, description/tips, st2 pack/action
# (entries below are user-facing Chinese strings -- runtime data, kept
# verbatim; the third element, the st2 pack/action, is unset here)
ACCOUNT_SUBTREE = [
    '账号相关',
    [
        # ['', '', ''],
        ['申请服务器账号/重置密码', '申请 ssh 登录服务器的账号,或者重置密码', ''],
        ['申请创建分布式文件系统用户目录', '跑分布式计算脚本常用的前置条件', ''],
        ['申请加入用户组', '', ''],
    ]
]

PACKAGE_SUBTREE = [
    '包管理相关',
    [
        # ['', '', ''],
        ['查询服务器上包版本', '可查询的信息有 ebuild 版本号、编译/部署时间,VCS 版本', ''],
        ['在部分机器上用 nobinpkg 测试包', '常用于在部分服务器上测试新版本,观察可用性与稳定性时', ''],
        ['build binpkg 并全量更新', '常用于使用 nobinpkg 测试完毕,可以上线到生产环境时', ''],
        ['将已有的 binpkg 装到指定机器', '常用于将当前稳定版本安装到之前并未部署此包的机器上时', ''],
        ['仅 build binpkg 而不安装', '此功能并不常用,请慎用,仅用于为即将被部署的包打 binpkg 时', ''],
        ['使用现有的 binpkg 全量更新', '常用于已在 binhost 上生成 binpkg 的大型软件包', ''],
        ['回滚包到指定 VCS 版本', '常用于将 9999 包回滚到某个 VCS 版本', ''],
    ]
]

# root of the action tree: label plus the two subtrees above
ACTION_TREE_CONFIG = ['功能导航', [ACCOUNT_SUBTREE, PACKAGE_SUBTREE]]
|
"""
Storefront and Metadata Serializers
"""
import logging
from rest_framework import serializers
import ozpcenter.api.listing.serializers as listing_serializers
# Get an instance of a logger
logger = logging.getLogger('ozp-center.' + str(__name__))
class StorefrontSerializer(serializers.Serializer):
    """Serializes the storefront payload: three listing collections
    (featured, recent, most_popular), each rendered via ListingSerializer."""
    featured = listing_serializers.ListingSerializer(many=True)
    recent = listing_serializers.ListingSerializer(many=True)
    most_popular = listing_serializers.ListingSerializer(many=True)
|
import os
import time

import affine
import numpy as np
import osr
from gdalconst import *
from osgeo import gdal
def path_join(filename):
    """Resolve *filename* relative to this script's directory.

    Always returns the absolute path; when the file does not exist a
    warning is printed so the caller can tell why a later open may fail.
    (Parameter renamed: it previously shadowed the builtin `str`.)
    """
    py_path = os.path.dirname(os.path.realpath(__file__))
    abs_path = os.path.join(py_path, filename)
    if not os.path.isfile(abs_path):
        print('[Alarm] File not exist:{}\n'.format(abs_path))
    return abs_path
def retrieve_pixel_value(geo_coord, data_source):
    """Return floating-point value that corresponds to given point.

    geo_coord: (x, y) in the raster's georeferenced coordinate system.
    data_source: an open GDAL dataset; band 1 is sampled.
    """
    x, y = geo_coord[0], geo_coord[1]
    # invert the affine geotransform to map world coords -> pixel indices
    forward_transform = affine.Affine.from_gdal(*data_source.GetGeoTransform())
    reverse_transform = ~forward_transform
    px, py = reverse_transform * (x, y)
    # round to the nearest pixel
    px, py = int(px + 0.5), int(py + 0.5)
    pixel_coord = px, py
    # NOTE(review): `np` (numpy) is used here but was never imported at
    # module level in the original file -- it must be imported for this
    # function to run.
    data_array = np.array(data_source.GetRasterBand(1).ReadAsArray())
    # i = data_array[0]
    # j = data_array[1]
    # array is indexed [row][col], i.e. [y][x]
    return(data_array[pixel_coord[1]][pixel_coord[0]])
# return lon/lat coordinate strings (with sampled elevation) from a kml file
def get_lon_lat(file_in, file_out, data):
    """Copy `file_in` (KML) to `file_out`, rewriting each <coordinates>
    line so every "lon,lat" token gains an elevation sampled from the
    raster `data` (+0.7 offset).  Returns the rewritten coordinate
    strings as a list, one "lon,lat,h\\n" entry per point.
    """
    kml = []
    with open(file_in, 'r', encoding='utf8') as origin:
        with open(file_out, 'w', encoding='utf8') as out:
            # num == 1 means: the previous line opened a <coordinates>
            # block, so this line holds the coordinate tokens
            num = 0
            for line in origin.readlines():
                if num == 1 :
                    out.write('\t\t\t\t\t\t\t')
                    for j in line.split():
                        if j != '</LineString>':
                            xlon = float(j.split(',')[0])
                            xlat = float(j.split(',')[1])
                            print(xlon, xlat)
                            # sampled elevation, lifted by 0.7
                            h = str(retrieve_pixel_value((xlon, xlat), data)+0.7)
                            out.write(j.split(',')[0] + ',' + j.split(',')[1] + ',' + h + ' ')
                            kml.append(j.split(',')[0] + ',' + j.split(',')[1] + ',' + h + '\n')
                    out.write('</LineString>')
                    num = 0
                else:
                    # pass every other line through unchanged
                    out.write(line)
                    if line.find('<coordinates>') > 0:
                        num = 1
    print('[OK] write kml.\n')
    return kml
def pixelcoord(x, y):
    """Returns coordinates X Y from pixel"""
    # NOTE(review): a, b, d, e, minX, minY are not defined at module
    # scope, so calling this raises NameError.  They appear to be the
    # geotransform terms computed locally inside print_dem_value(); this
    # helper needs them passed in (or otherwise provided) before use.
    xp = a * x + b * y + minX
    yp = d * x + e * y + minY
    return xp, yp
def print_dem_value(file):
    """Print (x, y, elevation) for every valid pixel of the DEM `file`,
    skipping the -32767 nodata value."""
    data = gdal.Open(file, GA_ReadOnly)
    raster = data.GetRasterBand(1)
    width = data.RasterXSize
    height = data.RasterYSize
    # geotransform: (originX, pixelW, rotX, originY, rotY, pixelH)
    gt = data.GetGeoTransform()
    minX = gt[0]
    minY = gt[3] + width*gt[4] + height*gt[5]
    maxX = gt[0] + width*gt[1] + height*gt[2]
    maxY = gt[3]
    # print ("the domain :" , "[" ,minX,";",maxX,"]","[", minY,";",maxY ,"]")
    # showing a 2D image of the topo
    # plt.imshow(data, cmap='gist_earth',extent=[minx, maxx, miny, maxy])
    # plt.show()
    # elevation 2D numpy array
    elevation = raster.ReadAsArray()
    a = gt[1]
    b = gt[2]
    d = gt[4]
    e = gt[5]
    for i in range(height):
        for j in range(width):
            # NOTE(review): i is the row (y) index but is multiplied by
            # the x-scale `a`, and the y origin used is minY rather than
            # gt[3]; rows/cols look swapped vs. the GDAL convention --
            # confirm before relying on the printed coordinates.
            xp = a * i + b * j + minX
            yp = d * i + e * j + minY
            if elevation[i][j] != -32767:
                print(xp , yp, elevation[i][j])
def write_it(file, list):
    """Write every string in *list* to *file*, overwriting any previous
    contents."""
    with open(file, 'w') as sink:
        sink.writelines(list)
def main():
    """Driver: open the hard-coded DEM GeoTIFF, then rewrite the KML's
    coordinate lines with elevations sampled from it (see get_lon_lat),
    timing the whole run."""
    start_time = time.time()
    # kml_file = path_join('t.kml')
    # kml_out = path_join('kml_add.kml')
    # dem_file = path_join('dem_410_wgs84.tif')
    # test_file = path_join('test.txt')
    # NOTE(review): absolute Windows paths are hard-coded below; the
    # path_join() helper above was presumably meant to replace them.
    f = r'C:\Users\RSLAB\Desktop\臺北市北投區行義段一小段506等\20190314_臺北市北投區行義段一小段506、507、509、510、511、514地號等六筆土地宗祠新建工程水土保持計畫_wgs84.tif'
    k0 = r'C:\Users\RSLAB\Desktop\臺北市北投區行義段一小段506等\doc.kml'
    k1 = r'C:\Users\RSLAB\Desktop\臺北市北投區行義段一小段506等\add_doc.kml'
    # print_dem_value(dem_file)
    data = gdal.Open(f, GA_ReadOnly)
    array = get_lon_lat(k0, k1, data)
    # write_it(test_file, array)
    print("--- %s seconds ---" % (time.time() - start_time))

# NOTE(review): runs on import; an `if __name__ == '__main__':` guard
# would make this module importable without side effects.
main()
'''
Created on Apr 9, 2013
@author: francis.horsman:gmail.com
'''
from newer.solver.strategies.StrategyDifficulty import StrategyDifficulty
from newer.solver.strategies.iStrategy import iStrategy
import os
import sys
import traceback
class StrategiesFactory(object):
    """Discovers, instantiates and orders solver strategy classes.

    Strategy implementations live as modules in the sibling 'impls'
    directory; each capitalised .py module is imported and the attribute
    of the same name is taken as the strategy class.  Instances are
    cached keyed by difficulty, then by name.  (Python 2 code: uses
    print statements and `except Exception, e` syntax.)
    """
    def __init__(self, model, checkAbort, solved, unsolvable):
        # callbacks handed to every strategy instance on construction
        self._model = model
        self._checkAbort = checkAbort
        self._solved = solved
        self._unsolvable = unsolvable
        self._cache = StrategiesFactory.createStrategies(StrategiesFactory.loadStrategies(), self._model, self._checkAbort, self._solved, self._unsolvable)
    def __call__(self):
        # Get the strategies pipeline list ordered by difficulty (easiest first!)
        pipeline = []
        # Python 2: dict.keys() returns a sortable list
        difficulties = self._cache.keys()
        difficulties.sort()
        for difficulty in difficulties:
            for _name, strategy in self._cache[difficulty].items():
                pipeline.append(strategy)
        return pipeline
    @staticmethod
    def createStrategies(strategies, model, checkAbort, solved, unsolvable):
        """Instantiate each strategy class, bucketing the instances by
        their reported difficulty (UNKNOWN when absent)."""
        cache = StrategyDifficulty._getStrategyDict()
        for s in strategies:
            try:
                # only instantiate actual classes
                if type(s)==type:
                    strategy = s(checkAbort, solved, unsolvable)
                    try:
                        name = strategy.name
                    except:
                        print "strategy has no name: %(S)s"%{"S":strategy}
                        raise
                    try:
                        difficulty = strategy.difficulty
                    except:
                        print "strategy has no difficulty, making 'unknown': %(S)s"%{"S":strategy}
                        difficulty = StrategyDifficulty.UNKNOWN
                    if difficulty not in cache.keys():
                        cache[difficulty] = {}
                    cache[difficulty][name] = strategy
            except Exception, _e:
                # a broken strategy must not stop the rest from loading
                traceback.print_exc()
        return cache
    @staticmethod
    def loadStrategies():
        """Import every capitalised .py module under impls/ and return
        the strategy classes, mixing in iStrategy where missing."""
        strategies = []
        path = os.path.join(os.path.dirname(__file__), "impls")
        modules = [f for f in os.listdir(path) if not os.path.isdir(f)==True
                   and os.path.splitext(f)[1]==".py"
                   and f[0].isupper()
                   # and f[0]!="X"
                   ]
        # Now load the modules from 'modules':
        sys.path.insert(0, path)
        for m in modules:
            name = os.path.basename(m)
            name = os.path.splitext(name)[0]
            strategy = StrategiesFactory.importModule(name, name)
            # ensure every strategy implements the iStrategy interface
            if iStrategy not in strategy.__bases__:
                strategy = type(name, (strategy, iStrategy, object), {})
            strategies.append(strategy)
        return strategies
    @staticmethod
    def importModule(where, what):
        """Import attribute `what` from module `where` (level -1 is the
        Python 2 implicit-relative import)."""
        _module = __import__(where, globals(), locals(), [what], -1)
        _type = getattr(_module, what)
        return _type
    @staticmethod
    def _importModuleName(name):
        """Import a dotted module path and return the leaf module."""
        mod = __import__(name)
        components = name.split('.')
        for comp in components[1:]:
            mod = getattr(mod, comp)
        return mod
if __name__ == '__main__':
    # Smoke test: build the factory with no callbacks and print the
    # ordered strategy pipeline (Python 2 print statement).
    sf = StrategiesFactory(None, None, None, None)
    for i in sf():
        print i
|
from machine import Pin
class Encoder(object):
    """Quadrature rotary encoder decoded via pin-change interrupts
    (MicroPython `machine.Pin`).

    Maintains a signed position count in `value`: one direction of
    rotation increments it, the other decrements it.

    BUG FIX: the original compared pin readings with `is 1`, i.e. object
    identity, which only works by accident through CPython's small-int
    cache and emits a SyntaxWarning on CPython 3.8+; all comparisons now
    use `==`.
    """

    def __init__(self, pin_clk, pin_dt ):
        # half-step flags: set when the other channel has seen its edge
        self.aflag = 0
        self.bflag = 0
        self._value = 0
        self.encoderA = Pin(pin_clk, mode=Pin.IN, pull=Pin.PULL_UP )
        self.encoderB = Pin(pin_dt, mode=Pin.IN, pull=Pin.PULL_UP )
        # fire on both edges of both channels
        trig = Pin.IRQ_RISING | Pin.IRQ_FALLING
        self.encoderA.irq(handler=self.pin_handlerA, trigger=trig )
        self.encoderB.irq(handler=self.pin_handlerB, trigger=trig )

    def pin_handlerA(self, pin):
        # both channels high with A's flag armed -> one clockwise step
        if ( self.encoderA.value() == 1 and self.encoderB.value() == 1 and self.aflag == 1 ):
            self._value += 1
            self.bflag = 0
            self.aflag = 0
        elif ( self.encoderA.value() == 1 ):
            self.bflag = 1

    def pin_handlerB(self, pin):
        # both channels high with B's flag armed -> one counter-clockwise step
        if ( self.encoderA.value() == 1 and self.encoderB.value() == 1 and self.bflag == 1 ):
            self._value -= 1
            self.bflag = 0
            self.aflag = 0
        # NOTE(review): this elif reads encoderA (not encoderB), mirroring
        # the original; confirm against the intended decode sequence.
        elif ( self.encoderA.value() == 1 ):
            self.aflag = 1

    @property
    def value(self):
        """Current accumulated encoder count (read-only)."""
        return self._value
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.