text stringlengths 38 1.54M |
|---|
# Toy list-manipulation demo: build a travel wish list, then whittle it down.
print('i can only go to two countries now. QAQ')
countries = ['American', 'Beww', 'Chian']
countries[2] = 'Dean'
countries.insert(0, 'Eeew')
countries.insert(2, 'Ftef')
countries.append('Gdgb')
print(countries)
# Drop four entries one at a time, announcing each removal.
for drop_index in (0, 1, 2, 0):
    print(countries.pop(drop_index) + ' Sorry, i am not going yet')
print(countries)
for destination in countries:
    print('i am coming!!! ' + destination)
del countries[0]
del countries[0]
print(countries)
|
"""
@author: Kostas Hatalis
"""
import numpy as np
def set_coverage(experiment):
    """
    Build equally spaced quantile levels (taus) on the open interval (0, 1).

    Arguments:
        experiment(dict): must hold 'N_PI', the number of prediction
            intervals to calculate; 0 means "median only".
    Returns:
        experiment(dict): same dict with 'tau' (ndarray of quantile
            levels), 'N_tau' (their count) and 'N_PI' filled in.
    """
    n_pi = experiment['N_PI']
    if n_pi == 0:
        # No intervals requested: test only the median.
        taus = np.array([0.5])
    else:
        # 2*N_PI + 1 equal sub-intervals give 2*N_PI interior boundaries.
        spacing = 1 / (2 * n_pi + 1)
        taus = np.array(np.arange(spacing, 1.0, spacing))
    # Custom tau grids can be substituted here, e.g.
    #   taus = np.arange(0.01, 1.0, 0.01)
    #   taus = np.array([0.025, 0.975])
    experiment['tau'] = taus
    experiment['N_tau'] = len(taus)
    experiment['N_PI'] = n_pi
    return experiment
"""Beam search implementation in PyTorch."""
#
#
# hyp1#-hyp1---hyp1 -hyp1
# \ /
# hyp2 \-hyp2 /-hyp2#hyp2
# / \
# hyp3#-hyp3---hyp3 -hyp3
# ========================
#
# Takes care of beams, back pointers, and scores.
# Code borrowed from PyTorch OpenNMT example
# https://github.com/pytorch/examples/blob/master/OpenNMT/onmt/Beam.py
import torch
# try:
# from layers.sentencedecoder import SentenceDecoder
# except:
# from sentencedecoder import SentenceDecoder
import torch
import torch.nn as nn
from torch.autograd import Variable
import layers.utils as utils
from layers.wordspretrained import PretrainedEmbeddings
class Beam(object):
    """Ordered beam of candidate outputs.

    Maintains the beam hypotheses, their scores, and back pointers for one
    decoded sequence. Borrowed from the PyTorch OpenNMT example.

    NOTE(review): written against a legacy PyTorch API (``Variable``,
    ``.data[0]`` indexing, and ``/`` on LongTensor behaving as integer
    division) — behavior differs on modern releases; confirm the target
    torch version before modifying.
    """
    def __init__(self, size, vocab_bosindex, vocab_eosindex, alpha, cuda=False,
                 min_length=5):
        """Initialize params.

        size: beam width (number of hypotheses kept per step).
        vocab_bosindex / vocab_eosindex: BOS/EOS token ids; EOS doubles as pad.
        alpha: length-normalization exponent used in advance().
        cuda: place tensors on GPU when True.
        min_length: stored but not referenced elsewhere in this class.
        """
        self.size = size
        self.done = False
        self.pad = vocab_eosindex
        self.bos = vocab_bosindex
        self.eos = vocab_eosindex
        self.alpha = alpha
        self.min_length = min_length
        self.tt = torch.cuda if cuda else torch
        # The score for each translation on the beam.
        self.scores = self.tt.FloatTensor(size).zero_()
        # The backpointers at each time-step.
        self.prevKs = []
        # The outputs at each time-step. Slot 0 holds BOS; the rest pad/EOS.
        self.nextYs = [self.tt.LongTensor(size).fill_(self.pad)]
        self.nextYs[0][0] = self.bos
        # The attentions (matrix) for each time. (Never appended to here.)
        self.attn = []

    # Get the outputs for the current timestep.
    def get_current_state(self):
        """Get state of beam."""
        return self.nextYs[-1]

    # Get the backpointers for the current timestep.
    def get_current_origin(self):
        """Get the backpointer to the beam at this step."""
        return self.prevKs[-1]

    # Given prob over words for every last beam `wordLk`: Compute and update
    # the beam search.
    #
    # Parameters:
    #
    # * `wordLk`- probs of advancing from the last step (K x words)
    #
    # Returns: True if beam search is complete.
    def advance(self, workd_lk):
        """Advance the beam."""
        num_words = workd_lk.size(1)
        # Sum the previous scores and normalized by length:
        # running average weighted by sentence length, exponent alpha.
        if len(self.prevKs) > 0:
            sentence_length = len(self.nextYs)
            beam_lk = workd_lk/sentence_length**self.alpha \
                + self.scores.unsqueeze(1).expand_as(workd_lk) \
                * (sentence_length-1)**self.alpha \
                / sentence_length**self.alpha
            # Don't let EOS have children: force finished hypotheses out of
            # the running by assigning a huge negative score.
            for i in range(self.nextYs[-1].size(0)):
                if self.nextYs[-1][i].data[0] == self.eos:
                    beam_lk[i] = -1e20
        else:
            # First step: every beam slot holds BOS, so only row 0 matters.
            beam_lk = workd_lk[0]
        flat_beam_lk = beam_lk.view(-1)
        bestScores, bestScoresId = flat_beam_lk.topk(self.size, 0, True, True)
        self.scores = bestScores
        # bestScoresId is flattened beam x word array, so calculate which
        # word and beam each score came from.
        # NOTE(review): relies on LongTensor '/' truncating (old torch).
        prev_k = bestScoresId / num_words
        self.prevKs.append(prev_k)
        self.nextYs.append(bestScoresId - prev_k * num_words)
        # End condition is when top-of-beam is EOS.
        if self.nextYs[-1][0].data[0] == self.eos:
            self.done = True
        return self.done

    def sort_best(self):
        """Sort the beam."""
        return torch.sort(self.scores, 0, True)

    # Get the score of the best in the beam.
    def get_best(self):
        """Get the most likely candidate."""
        scores, ids = self.sort_best()
        return scores[0], ids[0]

    # Get the score of the best in the beam.
    def get_best_k(self):
        k = self.size
        """Get the most likely candidate."""
        scores, ids = self.sort_best()
        return scores[:k], ids[:k]

    # Walk back to construct the full hypothesis.
    #
    # Parameters.
    #
    # * `k` - the position in the beam to construct.
    #
    # Returns.
    #
    # 1. The hypothesis
    def get_hyp(self, k):
        """Get hypotheses by following back pointers from the last step."""
        hyp = []
        for j in range(len(self.prevKs) - 1, -1, -1):
            hyp.append(self.nextYs[j + 1][k])
            k = self.prevKs[j][k]
        return hyp[::-1]
if __name__=="__main__":
    # Manual smoke test: build a tiny decoder, run one step of beam search.
    # NOTE(review): SentenceDecoder's import is commented out at the top of
    # this file, so this block raises NameError as written — re-enable the
    # layers.sentencedecoder import before running.
    pretrainedEmbeddings = PretrainedEmbeddings({"word_embeddings" : torch.randn(10,3),
                                                 "pretrained_embdim" : 3,
                                                 "vocabulary_size":10,
                                                 "embeddings_requires_grad": False})
    dict_args = {
        'input_dim' : 3, #pretrainedEmbeddings.pretrained_embdim
        'rnn_hdim' : 3,
        'rnn_type' : 'LSTM',
        'vocabulary_size' : pretrainedEmbeddings.vocabulary_size,
        'tie_weights' : True,
        'word_embeddings' : pretrainedEmbeddings.embeddings.weight,
        'vocabulary_bosindex': 1,
        'vocabulary_eosindex': 0,
        'pretrained_words_layer': pretrainedEmbeddings
    }
    sentenceDecoder = SentenceDecoder(dict_args)
    osequence = sentenceDecoder(Variable(torch.randn(2,3,3)), Variable(torch.randn(2,3)), Variable(torch.LongTensor([[1,1,1],[1,0,0]])))
    # Minimal stand-in vocabulary for eyeballing decoded token ids.
    class Vocab(object):
        def __init__(self):
            self.word2index = {'<eos>': 0, '<bos>': 1, 'this': 2, 'is': 3, 'a': 4, 'test': 5,
                               'of': 6, 'the': 7, 'beam': 8, 'search': 9}
            self.index2word = {0: '<eos>', 1: '<bos>', 2:'this', 3: 'is', 4: 'a', 5: 'test',
                               6: 'of', 7: 'the', 8: 'beam', 9: 'search'}
    vocab = Vocab()
    beam = Beam(3, vocab_eosindex = 0, vocab_bosindex=1, alpha=0.7, cuda=False)
    import random
    i = random.randint(0,1)
    beam.advance(osequence[i])
    beam.get_hyp(0)
|
#! -*- coding: utf-8 -*-
'''
Created on Aug 22, 2011
@author: flyxian
'''
import transmissionrpc
import globals
#globals.init_configs("test_config.yaml")
#print globals.trconfig
tc = None
#create rpc connection to transmission
def connect():
    """Open the RPC connection to Transmission and log the endpoint.

    Stores the client in the module-level `tc` for other functions to use.
    """
    global tc
    cfg = globals.trconfig
    tc = transmissionrpc.Client(cfg["host"],
                                port=cfg["port"],
                                user=cfg["user"],
                                password=cfg["password"])
    globals.write_log(0,
                      "Connected to Transmission RPC server at %s:%d"
                      % (cfg["host"], cfg["port"]))
def download_seed(seed_info):
    """Hand one seed over to Transmission for download.

    seed_info keys used: "seed" (torrent file URL), "magnet", "title", "post".
    Prefers the .torrent file when the site config says so; otherwise adds by
    URI (backup site) or magnet link. Failures are logged, not raised.
    Python 2 module: note the `except E, e` syntax below.
    """
    global tc
    try:
        if globals.site_config["torrent_file"]:
            tc.add(seed_info["seed"])
        else:
            if globals.site_config["backup_site"]:
                tc.add_uri(seed_info["seed"])
            else:
                tc.add_uri(seed_info["magnet"])
    except transmissionrpc.error.TransmissionError, e:
        # RPC rejected the torrent (duplicate, bad data, ...): log and bail.
        globals.write_log(1,
                          e.message,
                          "Error :\t%s" % seed_info["title"],
                          "\t%s" % seed_info["post"],
                          "\t%s" % seed_info["seed"][-41:],
                          "\t%s" % seed_info["magnet"][:52])
        return
    globals.write_log(0,
                      "Add :\t%s" % seed_info["title"],
                      "\t%s" % seed_info["post"],
                      "\t%s" % seed_info["seed"][-40:],
                      "\t%s" % seed_info["magnet"][:52])
if __name__ == "__main__":
    # Manual smoke test (Python 2): connect and dump the current torrent list.
    print "transmission_control: Hello world."
    connect()
    tc.list()
|
import argparse
import datetime
import os
from typing import Dict, Optional, Tuple

import dateutil.parser
import httplib2
import tzlocal
from googleapiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage

from alarm import Alarm
scopes = 'https://www.googleapis.com/auth/calendar.readonly'
client_secret_file = 'client_secret.json'
application_name = 'PiAlarm'
calendar_id = "u38uqb2rt2fr3drka35jopmsho@group.calendar.google.com"
event_id = "PiAlarm Wake"
iso_8601_suffix = "T00:00:00Z"
def get_next_alarm() -> Tuple[bool, Optional[Alarm]]:
    """Fetch the next scheduled wake event from Google Calendar.

    Returns:
        (found, alarm): found is False (with alarm None) when the calendar
        query fails or no matching event exists in the next week; otherwise
        True with the Alarm built from the event's start time.

    Note: the original annotation was the tuple literal ``(bool, Alarm)``,
    which is not a valid type hint; it is now ``Tuple[bool, Optional[Alarm]]``.
    """
    try:
        events = __get_gcal_events()
    except Exception as e:
        # Network/auth failures are reported as "no alarm" rather than raised.
        print(e)
        return False, None
    if not events:
        return False, None
    # The query is capped at maxResults=1, so events holds at most one entry;
    # it is kept as a list in case the query is ever widened.
    start_time = __parse_start_time(events[0])
    return True, __utc_to_alarm(start_time)
def __get_gcal_events() -> Dict:
    """Query the calendar for matching events in the coming week."""
    service = __get_service()
    now = datetime.datetime.now(tzlocal.get_localzone()).isoformat("T")
    horizon = str(datetime.datetime.today().date() + datetime.timedelta(weeks=1)) + iso_8601_suffix
    return __query_calendar(service, now, horizon)["items"]
def __query_calendar(service, time_start: str, time_end: str):
    """Run the calendar list query for the alarm event in [time_start, time_end]."""
    request = service.events().list(
        calendarId=calendar_id,
        timeMin=time_start,
        timeMax=time_end,
        maxResults=1,
        singleEvents=True,
        orderBy='startTime',
        q=event_id,
    )
    return request.execute()
def __parse_start_time(event: Dict) -> str:
    """Return the event start: 'dateTime' for timed events, else all-day 'date'."""
    start = event["start"]
    return start.get("dateTime", start.get("date"))
def __utc_to_alarm(utc_datetime: str) -> Alarm:
    """Convert an ISO-8601 timestamp string into an Alarm (minute precision)."""
    parsed = dateutil.parser.parse(utc_datetime)
    return Alarm(parsed.year, parsed.month, parsed.day, parsed.hour, parsed.minute)
def __get_credentials():
    """Load cached OAuth credentials, running the browserless auth flow if needed.

    Credentials are stored under ~/.credentials/calendar-python-quickstart.json.
    """
    credential_dir = os.path.join(os.path.expanduser('~'), '.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    store = Storage(os.path.join(credential_dir, 'calendar-python-quickstart.json'))
    credentials = store.get()
    if credentials and not credentials.invalid:
        return credentials
    # Nothing cached (or stale): run the OAuth flow with no CLI flags.
    flow = client.flow_from_clientsecrets(client_secret_file, scopes)
    flow.user_agent = application_name
    flags = tools.argparser.parse_args(args=[])
    if flags:
        credentials = tools.run_flow(flow, store, flags)
    return credentials
def __get_service():
    """Build an authorized Google Calendar v3 API client."""
    http = __get_credentials().authorize(httplib2.Http())
    return discovery.build('calendar', 'v3', http=http)
|
from rest_framework.response import Response
from rest_framework import status
#centralized responces for all the APIs for this app (users)
#is used for internationalization of responses
def getResponce(*args):
    """Centralized, internationalized API responses for the users app.

    args[0]: ISO language code (currently only "en").
    args[1]: message key.
    Returns the matching DRF Response; unknown language codes yield a 400
    (development-time safeguard — end users should never hit it).

    Fixes: several typos in the user-facing messages ("reqired", "atlest",
    "mach") are corrected. NOTE(review): error responses are returned with
    HTTP 200 — kept as-is since clients may depend on it, but worth revisiting.
    """
    responces = {
        "en" : {
            "login_invalid_credentials" : Response({'error': "Invalid credentials, try again"}, status=status.HTTP_200_OK),
            "signup_name_required" : Response({'error': "First & last name are required"}, status=status.HTTP_200_OK),
            "signup_no_firstname" : Response({'error': "First name is required"}, status=status.HTTP_200_OK),
            "signup_no_lastname" : Response({'error': "Last name is required"}, status=status.HTTP_200_OK),
            "signup_no_password" : Response({'error': "Input Password"}, status=status.HTTP_200_OK),
            "signup_short_password" : Response({'error': "Password must be at least 5 characters"}, status=status.HTTP_200_OK),
            "signup_password_not_match" : Response({'error': "Password does not match"}, status=status.HTTP_200_OK),
            "account_not_active" : Response({'error': "Your account is not activated, please contact admin."}, status=status.HTTP_200_OK),
            "username_unavaiable" : Response({'error': "This username is not available"}, status=status.HTTP_200_OK),
            "email_not_sent" : Response({'error': "Couldn't send email, please try again."}, status=status.HTTP_200_OK),
            "invalid_email" : Response({'error': "No account associated with this email."}, status=status.HTTP_200_OK),
        },
    }
    language = responces.get(args[0])
    if language is not None:  # ISO Code Exists
        return language[args[1]]
    # ISO code not supported, end user will never face this error,
    # just for exception handling in development.
    return Response({'error': "Invalid ISO code, supported codes are- 'en'"}, status=status.HTTP_400_BAD_REQUEST)
import os
import numpy as np
import pandas as pd
def out_result(predicted_list, gt_lst, path="./result/testset_result.csv"):
    """
    Write predictions and ground truth side by side to a CSV file.

    :param predicted_list: iterable of predicted values
    :param gt_lst: pandas Series/DataFrame of ground-truth values
        (must expose .values)
    :param path: output CSV path; parent directories are created as needed
    :return: None

    Fix: the original always created './result/' regardless of `path`, so a
    custom path into a missing directory failed — now the actual parent of
    `path` is created.
    """
    cols = ['predicted', 'groundtruth']
    arr = np.array([list(predicted_list), gt_lst.values.ravel()])
    df = pd.DataFrame(arr.T, columns=cols)
    out_dir = os.path.dirname(path) or '.'
    os.makedirs(out_dir, exist_ok=True)
    df.to_csv(path, index=False, encoding='UTF-8')
def mkdirs_if_not_exist(dir_name):
    """
    Create directory (including parents) if it does not exist.

    :param dir_name: directory path to ensure
    :return: None

    Fix: the original check-then-create (`isdir`/`exists` then `makedirs`)
    races with concurrent creators and could raise FileExistsError;
    `exist_ok=True` makes the call idempotent and race-safe.
    """
    os.makedirs(dir_name, exist_ok=True)
|
import requests
from bs4 import BeautifulSoup
import json
# The main function of this .py document is to crawl and scrap data from WWF website
# Ideally, you may need to wait for about 30s- 1min for all html information being scrapped into local file
# You only need to run this .py document once, and you can move to finalproj_part2.py for more functions
# Enjoy your trip to learn about wildlife! :)
########## ........................ ヾ(・∀・*)♪゚ ............................ ##########
########## ........................ ヾ(・∀・*)♪゚ ............................ ##########
########## ........................ ヾ(・∀・*)♪゚ ............................ ##########
# >>>>>>>>>>>>>>>>>> 1. Cral and .py <<<<<<<<<<<<<<<<<
# ------------------ 1.1 Preparation ------------------
def is_number(s):
    """Return True when *s* can be read as a number.

    Accepts anything float() parses, plus single Unicode numeric
    characters such as '½' via unicodedata.numeric().
    """
    try:
        float(s)
    except ValueError:
        pass
    else:
        return True
    import unicodedata
    try:
        unicodedata.numeric(s)
    except (TypeError, ValueError):
        return False
    return True
CACHE_FNAME="wwf_species.json"
try:
    # Warm-start from a previous crawl if the cache file exists and parses.
    cache_file = open(CACHE_FNAME,"r")
    cache_contents = cache_file.read()
    CACHE_DICTION = json.loads(cache_contents)
    cache_file.close()
except:
    # Missing or corrupt cache: start from scratch.
    # (Bare except is deliberate best-effort behavior here.)
    CACHE_DICTION={}
# ------------------ 1.2 Make Cache Storing All Species Brief Intro and Specific Url to Details Page ------------------
# ------------------ 1.2 Make Cache Storing All Species Brief Intro and Specific Url to Details Page ------------------
print ("\n ********* PART 1*********")
print ("WWF - Get Species' Info")
# ------------------ (1) "download" species information from species directory page ------------------
# Scrape the WWF landing page for the "View species" link.
baseurl_1="https://www.worldwildlife.org"
basehtml_1=requests.get(baseurl_1).text
soup_1=BeautifulSoup(basehtml_1,"html.parser")
result_1=soup_1.find_all(class_="view-all")
baseurl_2=""
for each in result_1:
    if "View species" in each.text:
        baseurl_2=each["href"]
# ------------------ (2) go inside species page with full directory ------------------
# Follow that link, then grab the species card's anchor to build the
# full-directory URL.
basehtml_2=requests.get(baseurl_2).text
soup_2=BeautifulSoup(basehtml_2,"html.parser")
result_2=soup_2.find(class_="span4 ad card-species")
add_find=result_2.find("a")
add_url=add_find["href"]
allspecies_1=baseurl_1+add_url
# ------------------ (3) get common name, scientific name, and conservation status, as well as href for specific species ------------------
# The directory is an HTML table: one <tr> per species.
basehtml=requests.get(allspecies_1).text
soup_all=BeautifulSoup(basehtml,"html.parser")
species_intro=soup_all.find("tbody")
species=species_intro.find_all("tr")
# Harvest name / scientific name / conservation status from each table row.
for each in species:
    name_each = each.find("a").text
    url_each = each.find("a")["href"]
    sci_name = each.find("em").text
    td_ls = []
    for every in each:
        if every.string != "\n":
            td_ls.append(every.string)
    conservation_status = td_ls[2]
    # BUGFIX: the cache is keyed by url_each, so membership must be tested
    # on url_each as well. The original tested name_each (never a key),
    # so the check was always True and entries were rewritten every run.
    if url_each not in CACHE_DICTION:
        CACHE_DICTION[url_each] = {"name": name_each,
                                   "scientific name": sci_name,
                                   "conservation status": conservation_status}
# ------------------ (4) check if one can go to next page ------------------
### if one can go to next page, find species information, if not, stop
next_one = soup_all.find("a", {"rel": "next"})
while next_one is not None:
    next_page = next_one["href"]
    next_url = baseurl_1 + next_page
    next_html = requests.get(next_url).text
    soup_all = BeautifulSoup(next_html, "html.parser")
    species_intro = soup_all.find("tbody")
    species = species_intro.find_all("tr")
    for each in species:
        name_each = each.find("a").text
        url_each = each.find("a")["href"]
        sci_name = each.find("em").text
        td_ls = []
        for every in each:
            if every.string != "\n":
                td_ls.append(every.string)
        conservation_status = td_ls[2]
        # BUGFIX: membership tested on url_each (the actual cache key);
        # the original tested name_each, which never matched.
        if url_each not in CACHE_DICTION:
            CACHE_DICTION[url_each] = {"name": name_each,
                                       "scientific name": sci_name,
                                       "conservation status": conservation_status}
    next_one = soup_all.find("a", {"rel": "next"})
# Persist the crawl results for part 2.
dumped_json_cache = json.dumps(CACHE_DICTION)
fw = open(CACHE_FNAME, "w")
fw.write(dumped_json_cache)
fw.close()
# >>>>>>>>>>>>>>>>>> 1. END <<<<<<<<<<<<<<<<<
########## ........................ ヾ(・∀・*)♪゚ ............................ ##########
########## ........................ ヾ(・∀・*)♪゚ ............................ ##########
########## ........................ ヾ(・∀・*)♪゚ ............................ ##########
# >>>>>>>>>>>>>>>>>> 2. Get Information from Each Details Page <<<<<<<<<<<<<<<<<
# ------------------ 2.1 define a function to find information include places, habitats, population,weight, length ------------------
def find_details(urladd):
    """Scrape one species detail page and normalize its facts into a dict.

    urladd: the species' relative URL (a key from CACHE_DICTION).
    Returns a dict whose keys always include "Place", "General Habitat",
    "Status", "Population", "Scientific Name", "Height", "Weight",
    "Length", "Habitats" — missing values are the string "None".

    WWF uses two page layouts; each branch below handles one. The bare
    excepts are deliberate best-effort: any scrape failure yields empty
    partial results rather than aborting the crawl.
    """
    detailsurl=baseurl_1+urladd
    details_ls=[]
    details_html=requests.get(detailsurl).text
    soup_all=BeautifulSoup(details_html,"html.parser")
    # find introduction — its presence signals the "rich" page layout.
    intro=soup_all.find("div",{"class":"lead gutter-bottom-6 medium-add-gutter wysiwyg"})
    more_details=soup_all.find("ul",{"class":"list-bordered list-bordered-items list-labeled"})
    print (detailsurl)
    if intro is not None:
        # find place and habitat
        try:
            place_habit_details=soup_all.find("ul",{"class":"list-data list-spaced"})
            place_habit=[] #in place-habit sequence
            li_ph=place_habit_details.find_all("li")
            for each in li_ph:
                detail=each.find(class_="lead").text
                if detail != "\n":
                    place_habit.append(detail)
            # find height, weight and details of habitats
            more_details=soup_all.find("ul",{"class":"list-data list-stats list-items"})
            mht_wt={}
            li_more=more_details.find_all("li")
            for each in li_more:
                title=each.find("strong",{"class":"hdr"}).text
                content=each.find("div",{"class":"container"}).text
                mht_wt[title]=content
        except:
            place_habit=[]
            mht_wt={}
    elif more_details is not None:
        # Alternate "labeled list" layout: no place info, only keyed stats.
        try:
            place_habit=["",""]
            mht_wt={}
            li_more=more_details.find_all("li")
            for each in li_more:
                title=each.find("strong",{"class":"label"}).text
                content=each.find("div",{"class":"container"}).text
                if title in ["POPULATION","HABITATS","HEIGHT","WEIGHT"]:
                    mht_wt[title]=content
        except:
            place_habit=[]
            mht_wt={}
    else:
        # Unrecognized layout: return only the "None" defaults below.
        place_habit=[]
        mht_wt={}
    place_habit_new=[]
    mht_wt_new={}
    ### get rid of \n — strip whitespace from every scraped value.
    for each in mht_wt:
        each_clean=mht_wt[each].strip()
        mht_wt_new[each]=each_clean
    for each in place_habit:
        each_clean=each.strip()
        place_habit_new.append(each_clean)
    # place_habit is positional: [0] = place, [1] = general habitat.
    try:
        mht_wt_new["Place"]=place_habit_new[0]
    except:
        mht_wt_new["Place"]="None"
    try:
        mht_wt_new["General Habitat"]=place_habit_new[1]
    except:
        mht_wt_new["General Habitat"]="None"
    ### check all information inside the dict and make those empty values into "None"
    for every in ["Status","Population","Scientific Name","Height","Weight","Length","Habitats","Place","General Habitat"]:
        if every not in mht_wt_new:
            mht_wt_new[every]="None"
    ### check those rough numbers and get only numbers instead of "over 800 pounds"
    return mht_wt_new
# ------------------ 2.2 save details information into a local json file in a clear structure for future use ------------------
CACHE_FNAME2 = "species_detail.json"
try:
    # Warm-start the details cache if a previous run left one behind.
    cache_file2 = open(CACHE_FNAME2, "r")
    cache_contents2 = cache_file2.read()
    CACHE_DICTION2 = json.loads(cache_contents2)
    cache_file2.close()
except:
    CACHE_DICTION2 = {}
with open(CACHE_FNAME) as more_fw:
    species_dict = json.loads(more_fw.read())
for each in species_dict:
    # BUGFIX: skip species already present in the details cache
    # (CACHE_DICTION2, keyed by name). The original tested membership in
    # species_dict — which is keyed by URL, so a name never matched — and
    # therefore re-scraped every species on every run.
    if species_dict[each]["name"] not in CACHE_DICTION2:
        result = find_details(each)
        CACHE_DICTION2[species_dict[each]["name"]] = result
dumped_json_cache2 = json.dumps(CACHE_DICTION2)
fw = open(CACHE_FNAME2, "w")
fw.write(dumped_json_cache2)
fw.close()
# >>>>>>>>>>>>>>>>>> 2. END <<<<<<<<<<<<<<<<<
########## ........................ ヾ(・∀・*)♪゚ ............................ ##########
########## ........................ ヾ(・∀・*)♪゚ ............................ ##########
########## ........................ ヾ(・∀・*)♪゚ ............................ ##########
# >>>>>>>>>>>>>>>>>> PLEASE OPEN "finalproj_part2.py" TO EXPLORE MORE <<<<<<<<<<<<<<<<<
|
import tushare as ts
from .StockTicket import *
class StockData(object):
    """Thin wrapper around the tushare pro API plus a sample StockTicket.

    SECURITY NOTE(review): the tushare API token below is hardcoded in
    source — it should be moved to an environment variable or config file
    and the committed token revoked.
    """
    def __init__(self, data):
        print('StockData:',data)
        ts.set_token('2a7e5987596b91c995bfaa15b9b0de0c3947ee7fd76d6dbc06e577d8')
        # Sample ticket for stock 300073; field meanings defined by StockTicket.
        self.tick_ = StockTicket('300073',0,0,'','')
        self.proj_ = ts.pro_api()
    def get_history_k_data(self):
        # Placeholder: historical K-line retrieval not implemented yet.
        pass
    def test(self):
        """Print-only smoke test for the interface."""
        print('stock data interface test')
|
import streamlit as st
import pandas as pd
import numpy as np
import pydeck as pdk
import plotly.graph_objects as go
import plotly.express as px
## Datasets
# Pre-cleaned CSV exports under data/; loaded once at app start-up.
crime = pd.read_csv('data/crime_cleaned.csv')
victim_donut_data = pd.read_csv('data/victims_donut_data.csv', index_col = 0)
pop_area_count = pd.read_csv('data/pop_area_count.csv', index_col = 0)
crime_bar = pd.read_csv('data/la_crime_data.csv')
crime_type = pd.read_csv('data/crime_type.csv', index_col = 0)
crimeAreaDF = pd.read_csv('data/crimeDate_crimeTypes.csv', index_col = 0)
pop_data = pd.read_csv('data/Population_HV.csv')
# Parse dates once so crime_line() can filter with Date.dt.year.
crime_type['Date'] = pd.to_datetime(crime_type.Date)
## Functions
def map(data, lat, lon, zoom):
    """Render a pydeck heatmap of crime locations into the Streamlit page.

    data: DataFrame with LON/LAT columns.
    lat, lon, zoom: initial camera position.

    NOTE(review): this shadows the builtin `map`; renaming would require
    touching the call site below, so it is only flagged here.
    """
    st.write(pdk.Deck(
        map_style="mapbox://styles/mapbox/light-v9",
        initial_view_state={
            "latitude": lat,
            "longitude": lon,
            "zoom": zoom,
            "pitch": 50,
        },
        layers=[
            pdk.Layer(
                "HeatmapLayer", #'' HexagonLayer
                data=data,
                get_position=["LON", "LAT"],
                radius=100,
                elevation_scale=20,
                elevation_range=[0, 1000],
                pickable=True,
                extruded=True,
                coverage = 1
            ),
        ]
    ))
def ring(selected):
    """Donut chart of victim ethnicity distribution for one area."""
    subset = victim_donut_data[victim_donut_data['AREA NAME'] == selected]
    # `hole` turns the pie into a donut; legend sits horizontally below.
    donut = go.Pie(labels=subset['Vict Descent written'],
                   values=subset['Count'],
                   hole=.6)
    fig = go.Figure(data=[donut],
                    layout=dict(legend=dict(orientation='h', y=-0.15)))
    return fig
def bar_chart(selected):
    """Bar chart: per-capita crime rate of one area vs. the LA total."""
    cols = ['Population (approx.)', 'Avg. yearly crime count']
    subset = pop_area_count.loc[[selected, 'Total Los Angeles'], cols]
    subset['Avg. number of reported crimes per inhabitants per year'] = (
        subset['Avg. yearly crime count'] / subset['Population (approx.)']
    )
    subset = subset.reset_index().rename(columns={'Area name': 'Area'})
    return px.bar(subset, x='Area',
                  y='Avg. number of reported crimes per inhabitants per year')
def most_affected_area(data, year):
    """Indicator figure: the area with the highest crime count in *year*.

    Fixes: the title now uses the `year` parameter (the original referenced
    the global `year_selected`, so the function lied when called with any
    other year), and the groupby is computed once instead of twice.
    """
    # Apply the year filter.
    yearly = data[data['Year'] == year]
    counts = yearly.groupby('Area Name').count().sort_values('Year', ascending=False)
    area_name = counts.index[0]
    n_crimes = counts['Year'][0]
    # Plot it.
    fig = go.Figure()
    fig.add_trace(go.Indicator(
        mode="number",
        value=n_crimes,
        number={"font": {"size": 40}},
        title={"text": f"Area with the highest number<br>of crimes in {year}<br><br><span style='font-size:1.8em;color:gray'>{area_name}</span>"}
    ))
    return fig
def crimes_occured_delta(data, year):
    """Indicator: crime count for *year* with a delta vs. the prior year.

    The delta colors are inverted on purpose: red marks an increase in
    crime, green a decrease. Fix: typos in the displayed label corrected
    ("Occured" -> "Occurred", "previuos" -> "previous").
    """
    # Define the variables.
    crimes_occur_selected = len(data[(data['Year'] == year)])
    crimes_occur_bf_selected = len(data[(data['Year'] == year - 1)])
    # Plot it.
    fig = go.Figure()
    fig.add_trace(go.Indicator(
        mode="number+delta",
        value=crimes_occur_selected,
        number={"font": {"size": 40}}
    ))
    fig.update_layout(
        template={'data': {'indicator': [{
            'title': {'text': f"Number of Occurred Crimes in {year}<br>compared to previous year"},
            'delta': {'reference': crimes_occur_bf_selected,
                      'decreasing.color': 'green',
                      'increasing.color': 'red'}}]
        }})
    return fig
def least_affected_area(data, year):
    """Indicator figure: the area with the lowest crime count in *year*.

    Fixes: the title now uses the `year` parameter (the original referenced
    the global `year_selected`), the misleading `most_affected_*` local
    names are corrected, and the groupby is computed once.
    """
    # Apply the year filter.
    yearly = data[data['Year'] == year]
    counts = yearly.groupby('Area Name').count().sort_values('Year', ascending=False)
    # Sorted descending, so the last row is the least affected area.
    area_name = counts.index[-1]
    n_crimes = counts['Year'][-1]
    # Plot it.
    fig = go.Figure()
    fig.add_trace(go.Indicator(
        mode="number",
        value=n_crimes,
        number={"font": {"size": 40}},
        title={"text": f"Area with the lowest number<br>of crimes in {year}<br><br><span style='font-size:1.8em;color:gray'>{area_name}</span>"}
    ))
    return fig
def most_affected_year(data, area):
    """Indicator figure: the year with the most recorded crimes in *area*.

    Fixes: the title now uses the `area` parameter (the original referenced
    the global `area_selected`); the redundant in-function plotly import is
    removed (go is imported at module level); groupby computed once.
    """
    # Apply the area filter.
    subset = data[data['Area Name'] == area]
    counts = subset.groupby('Year').count().sort_values('Area Name', ascending=False)
    top_year = counts.index[0]
    top_count = counts.iloc[0]['Area Name']
    # Plot it.
    fig = go.Figure()
    fig.add_trace(go.Indicator(
        mode="number",
        value=int(top_count),
        number={"font": {"size": 40}},
        title={"text": f"Year with highest number<br> of crimes in {area}<br><br><span style='font-size:1.8em;color:gray'>{top_year}</span>"}
    ))
    return fig
def population_percentage(data, area):
    """Indicator: population of *area* plus its share of the LA total.

    data layout: column 1 holds the population count, the last column the
    percentage string shown beneath the number.
    """
    row = data[data['Area name'] == area].iloc[0]
    selected_area_popu = row.iloc[1]
    selected_area_percent_popu = row.iloc[-1]
    fig = go.Figure()
    fig.add_trace(go.Indicator(
        mode="number",
        value=int(selected_area_popu),
        number={"font": {"size": 40}},
        domain={'row': 0, 'column': 1},
        title={"text": f"{area} population<br><br><span style='font-size:1.8em;color:gray'>{selected_area_percent_popu}</span><br>"}
    ))
    return fig
def barchart(year):
    """Horizontal bar chart of crime-type counts for one year."""
    counts = crime_type.groupby('Year').sum().query('Year == @year').T
    trace = dict(type='bar',
                 x=counts[year].sort_values(),
                 y=counts.index,
                 orientation='h')
    fig = go.Figure(data=trace, layout=dict(xaxis=dict(title='Count')))
    # Keep category labels readable inside the bars.
    fig.update_layout(dict(yaxis=dict(ticklabelposition="inside right")))
    fig.update_layout(autosize=False, width=2000, height=600)
    return fig
def crime_line(year, area):
    """Line chart of crime-type counts through *year* for one area.

    A few noisy series start hidden ('legendonly') and can be toggled
    back on from the legend.
    """
    subset = crime_type.query('Area == @area and Date.dt.year == @year')
    # Column names below must match the CSV exactly (typos included).
    shown = ['Agravated assault', 'Burglary', 'Burglary from vehicle',
             'Intimate partner assault', 'Simple assault',
             'Small theft (under 950$)', 'Stolen vehicle', 'Teft of identity',
             'Vandalism (felony)', 'Vandalism (misdeameanor)']
    hidden = {'Agravated assault', 'Intimate partner assault',
              'Teft of identity', 'Vandalism (felony)',
              'Vandalism (misdeameanor)'}
    traces = []
    for crime_name in shown:
        trace = dict(type='scatter',
                     x=subset['Date'],
                     y=subset[crime_name],
                     name=crime_name)
        if crime_name in hidden:
            trace['visible'] = 'legendonly'
        traces.append(trace)
    layout = dict(yaxis=dict(title='Count'), xaxis=dict(title='Date'))
    return go.Figure(data=traces, layout=layout)
## Layout
st.title('Crime in Los Angeles')
st.subheader('Interactive visual analysis of Los Angeles crimes from 2010 though 2019')
st.write('')
st.write('')
st.write('')
## Sidebar
# The year slider drives every figure in the first two rows.
year_selected = st.slider("Select a year", min(crime.Year), max(crime.Year))
crime = crime.query("Year == @year_selected")
## First Row
# Three KPI indicators side by side.
left1, middle1, right1 = st.beta_columns((0.3, 0.3, 0.3))
la_area = least_affected_area(crime_bar, year_selected)
ma_area = most_affected_area(crime_bar, year_selected)
co_delta = crimes_occured_delta(crime_bar, year_selected)
with left1:
    st.plotly_chart(la_area, use_container_width=True)
with middle1:
    st.plotly_chart(ma_area, use_container_width=True)
with right1:
    st.plotly_chart(co_delta, use_container_width=True)
## Second Row
left2, spacer, right2 = st.beta_columns((1, 0.1, 1.2))
# Center the map on the mean crime location of the selected year.
midpoint = (np.average(crime["LAT"]), np.average(crime["LON"]))
# NOTE(review): rebinding `barchart` to a figure shadows the function.
barchart = barchart(year_selected)
with left2:
    st.header("Map of Los Angeles crimes in %i" % (year_selected))
    st.write('')
    map(crime, midpoint[0], midpoint[1], 8.5)
with right2:
    st.header("Top 10 most occurent cryme types in %i" % (year_selected))
    st.plotly_chart(barchart, use_container_width=True)
# Expander Selector
# All LAPD reporting areas selectable for the per-area rows below.
areas = ['Newton',
         'Pacific',
         'Hollywood',
         'Central',
         'Northeast',
         'Hollenbeck',
         'Southwest',
         'Southeast',
         'Rampart',
         'Olympic',
         'Wilshire',
         '77th Street',
         'Harbor',
         'West LA',
         'Van Nuys',
         'West Valley',
         'Mission',
         'Topanga',
         'N Hollywood',
         'Foothill',
         'Devonshire']
expander = st.beta_expander('Select a Neighborhood')
area_selected = expander.selectbox('', areas)
st.write('')
st.write('')
st.write('')
## Third row
left3, spacer, right3 = st.beta_columns((0.3, 0.1, 1))
ma_year = most_affected_year(crime_bar, area_selected)
# Built but currently not rendered (chart call commented out below).
pop_perc = population_percentage(pop_data, area_selected)
with left3:
    st.plotly_chart(ma_year, use_container_width=True)
    # st.plotly_chart(pop_perc, use_container_width = True)
lc = crime_line(year_selected, area_selected)
with right3:
    st.header(f'Top 10 most occured cryme types in {area_selected} and their evolution through {year_selected}.')
    st.plotly_chart(lc, use_container_width=True)
## Fourth Row
left4, spacer, right4 = st.beta_columns((1, 0.1, 1))
# NOTE(review): rebinding `ring` shadows the function of the same name.
ring = ring(area_selected)
bar = bar_chart(area_selected)
with left4:
    st.header(f'Crime rate in {area_selected} compared to the Los Angeles average')
    st.plotly_chart(bar, use_container_width=True)
with right4:
    st.header(f'Ethnicity distribution in {area_selected}')
    st.plotly_chart(ring, use_container_width=True)
|
# Fahrenheit-to-Celsius converter: read a temperature, convert, print it.
fahrenheit = input("Insira a temperatura em Fahrenheit: ")
celsius = 5 * (float(fahrenheit) - 32) / 9
print(" Temperatura em Celsius:", celsius)
|
#!/usr/bin/env python
# coding: utf-8
# In[10]:
# Build the first 10 Fibonacci numbers and print them.
fibonacci = [1, 1]  # designated first two items of the series
# Fix: the original used a list comprehension purely for its side effect
# (building a throwaway list of None); a plain loop states the intent.
for i in range(8):  # change 8 to extend the series further
    fibonacci.append(fibonacci[i] + fibonacci[i + 1])
print(fibonacci)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.ActivityConsultInfo import ActivityConsultInfo
class AlipayMarketingCampaignUserVoucherConsultResponse(AlipayResponse):
    """Response model for alipay.marketing.campaign.user.voucher.consult.

    Wraps the `activity_consult_list` field, coercing raw dicts into
    ActivityConsultInfo instances on assignment (generated-SDK pattern).
    """
    def __init__(self):
        super(AlipayMarketingCampaignUserVoucherConsultResponse, self).__init__()
        # None until a response has been parsed; then a list of ActivityConsultInfo.
        self._activity_consult_list = None

    @property
    def activity_consult_list(self):
        return self._activity_consult_list

    @activity_consult_list.setter
    def activity_consult_list(self, value):
        # Accept a list of either ActivityConsultInfo objects or raw dicts;
        # dicts are converted so consumers always see model instances.
        if isinstance(value, list):
            self._activity_consult_list = list()
            for i in value:
                if isinstance(i, ActivityConsultInfo):
                    self._activity_consult_list.append(i)
                else:
                    self._activity_consult_list.append(ActivityConsultInfo.from_alipay_dict(i))

    def parse_response_content(self, response_content):
        """Parse the gateway payload and populate activity_consult_list if present."""
        response = super(AlipayMarketingCampaignUserVoucherConsultResponse, self).parse_response_content(response_content)
        if 'activity_consult_list' in response:
            self.activity_consult_list = response['activity_consult_list']
|
import glob
from .. import nptipsyreader
import numpy as np
import pdb
import matplotlib.pyplot as plt
import matplotlib as mpl
def averageden():
    """Compute per-snapshot stellar density stats from local *.gtp files and
    scatter-plot density vs. halo mass for late-time (a > 0.5) snapshots.

    Side effects: reads ./\*.gtp plus the matching tipsy param file, then
    saves and shows 'densities_mass.png'.

    Fixes: removed a leftover pdb.set_trace() inside the loop (stopped every
    iteration); replaced np.float (removed in NumPy >= 1.20) with float();
    savefig now precedes show() so the PNG is not written from a cleared
    figure.
    """
    gtpfiles = glob.glob('*.gtp')
    gtpfiles.sort()
    avgden = np.zeros(len(gtpfiles), dtype='float')
    medianden = np.zeros(len(gtpfiles), dtype='float')
    a = np.zeros(len(gtpfiles), dtype='float')
    # Param file shares the first four dot-separated name components.
    tipsyfile = ('.').join(gtpfiles[0].split('.')[0:4])
    tipsy = nptipsyreader.Tipsy(tipsyfile)
    tipsy._read_param()
    plt.clf()
    # 13 distinct colors; next(colors) raises StopIteration if more than 13
    # snapshots end up plotted.
    colors = iter(mpl.cm.gist_rainbow(np.linspace(0, 1, 13)))
    for i in range(len(gtpfiles)):
        gtp = nptipsyreader.Tipsy(gtpfiles[i])
        gtp._read()
        mass = gtp.star['mass']
        radius = gtp.star['eps']
        # Density from the softening length used as an effective radius.
        den = mass / (4 / 3. * np.pi * radius ** 3)
        avgden[i] = np.mean(den)
        medianden[i] = np.median(den)
        a[i] = gtp.t
        if (gtp.t > 1 / 2.):
            # Only halos above 1 Msol (in simulation units * dMsolUnit).
            highmass = mass * float(tipsy.paramfile['dMsolUnit']) > 1.
            plt.scatter(mass[highmass] * float(tipsy.paramfile['dMsolUnit']),
                        den[highmass], color=next(colors),
                        label='{:.2f}'.format(gtp.t))
    plt.xscale('log')
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.5)
    plt.xlabel('M$_{h}$ [M$_{\odot}$]', fontsize=15)
    plt.ylabel('rho crit', fontsize=15)
    plt.title('cosmo6.25PLK Halo Densities', fontsize='large')
    # Save before show(): some backends clear the figure on show, which
    # previously wrote an empty PNG.
    plt.savefig('densities_mass.png')
    plt.show()
|
# Solution of;
# Project Euler Problem 489: Common factors between two sequences
# https://projecteuler.net/problem=489
#
# Let G(a, b) be the smallest non-negative integer n for which gcd(n3 + b, (n
# + a)3 + b) is maximized. For example, G(1, 1) = 5 because gcd(n3 + 1, (n +
# 1)3 + 1) reaches its maximum value of 7 for n = 5, and is smaller for 0 ≤ n
# < 5. Let H(m, n) = ∑ G(a, b) for 1 ≤ a ≤ m, 1 ≤ b ≤ n. You are given H(5, 5)
# = 128878 and H(10, 10) = 32936544. Find H(18, 1900).
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    """Placeholder solver for problem 489 -- not implemented yet."""
    return None
if __name__ == '__main__':
    # Benchmark harness: time `dummy` with input n over i iterations,
    # tagged with the Project Euler problem id.
    n = 1000
    i = 10000
    prob_id = 489
    timed.caller(dummy, n, i, prob_id)
|
# Set the module language before anything else is loaded.
import wrap_py
from wrap_py import _transl
_transl.set_lang("ru_RU")
# Translator for module strings.
from wrap_py._transl import translator as _
# Translate the window title in place.
wrap_py.app.set_title(_(wrap_py.app.get_title()))
# Configure wrap_py -- presumably installs friendlier error reporting;
# confirm against the wrap_py documentation.
wrap_py.make_nice_errors()
# Prepare the sprite data source and register it; preloading disabled.
import wds_files_general
ds = wds_files_general.source
wrap_py.site.sprite_data_sources.append(ds)
wrap_py.site.sprite_data_preload = False
# Start in multithreaded mode.
wrap_py.init() |
from sys import stdin,stdout
import heapq
def dijk(grid, costs):
    """Minimax Dijkstra over the grid.

    Fills `costs` in place: costs[y][x] becomes the smallest possible
    maximum cell value along any path from column 0 to (x, y).  Cells
    still holding -1 are treated as unvisited.
    """
    rows = len(grid)
    cols = len(grid[0])
    moves = ((0, 1), (1, 0), (-1, 0), (0, -1))  # (dy, dx)
    heap = []
    # Every cell of column 0 is a starting point with its own value.
    for row in range(rows):
        costs[row][0] = grid[row][0]
        heap.append((grid[row][0], 0, row))
    heapq.heapify(heap)
    while heap:
        bottleneck, x, y = heapq.heappop(heap)
        for dy, dx in moves:
            nx = x + dx
            ny = y + dy
            if 0 <= nx < cols and 0 <= ny < rows and costs[ny][nx] == -1:
                candidate = max(bottleneck, grid[ny][nx])
                costs[ny][nx] = candidate
                heapq.heappush(heap, (candidate, nx, ny))
def main():
    """Read an r x c grid from stdin and print the cheapest achievable
    maximum-cell cost of a path from any cell in column 0 to any cell
    in the last column."""
    r,c = map(int, stdin.readline().split())
    grid = []
    for _ in range(r):
        grid.append([int(x) for x in stdin.readline().split()])
    # -1 marks "not settled yet" for dijk's sweep.
    costs = [[-1]*c for x in range(r)]
    dijk(grid,costs)
    # print(grid)
    # print(costs)
    print(min([costs[y][c-1] for y in range(r)]))

if __name__ == "__main__":
    main()
|
from numpy import cos, sin, sqrt, arctan, array
import cv2 as cv
class Point:
    """A 2-D point with integer (pixel) coordinates.

    Coordinates are re-rounded after every mutation so the point can be
    passed straight to OpenCV drawing calls.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.round_point()

    def distance(self, point):
        """Euclidean distance to another point."""
        return sqrt((self.x - point.x)**2 + (self.y - point.y)**2)

    def translate_x(self, x):
        self.x += x
        self.round_point()

    def translate_y(self, y):
        self.y += y
        self.round_point()

    def update(self, x, y, theta):
        """Translate by (x, y), then rotate by theta radians."""
        self.translate_x(x)
        self.translate_y(y)
        self.rotate(theta)

    def rotate(self, theta):
        """Rotate about the origin by theta radians.

        BUG FIX: the y computation previously read the already-updated
        self.x, skewing every rotation; both components must be derived
        from the pre-rotation coordinates.
        """
        old_x, old_y = self.x, self.y
        self.x = old_x * cos(theta) - old_y * sin(theta)
        self.y = old_x * sin(theta) + old_y * cos(theta)
        self.round_point()

    def to_cylindrical(self):
        # NOTE(review): arctan(y/x) divides by zero when x == 0 and loses
        # the quadrant; arctan2 would be safer -- confirm before changing.
        r = sqrt(self.x**2 + self.y**2)
        theta = arctan(self.y/self.x)
        return (r, theta)

    def from_cylindrical(self, r, theta):
        """Set cartesian coordinates from polar (r, theta)."""
        self.x = r * cos(theta)
        self.y = r * sin(theta)
        self.round_point()

    def round_point(self):
        """Snap both coordinates to the nearest integer."""
        self.x = round(self.x)
        self.y = round(self.y)

    def to_tuple(self):
        return (self.x, self.y)

    def draw_point(self, frame):
        """Draw this point on an OpenCV image."""
        cv.circle(frame, self.to_tuple(), radius=1,
                  color=(255, 0, 255), thickness=1)

    def __str__(self):
        return "X: {} Y: {} \n".format(self.x, self.y)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MemberDataUI.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MemberData(object):
    """pyuic5-generated layout for the member-data dialog.

    NOTE: regenerating MemberDataUI.ui overwrites this file; do not
    hand-edit widget geometry here.
    """

    def setupUi(self, MemberData):
        """Create and position every widget on the MemberData dialog."""
        MemberData.setObjectName("MemberData")
        MemberData.setWindowModality(QtCore.Qt.ApplicationModal)
        MemberData.resize(300, 600)
        font = QtGui.QFont()
        font.setFamily("Consolas")
        MemberData.setFont(font)
        # Save button at the bottom of the dialog.
        self.saveInOutButton = QtWidgets.QPushButton(MemberData)
        self.saveInOutButton.setGeometry(QtCore.QRect(100, 550, 100, 40))
        self.saveInOutButton.setObjectName("saveInOutButton")
        # Hire-date caption and editor.
        self.label_2 = QtWidgets.QLabel(MemberData)
        self.label_2.setGeometry(QtCore.QRect(100, 409, 100, 25))
        font = QtGui.QFont()
        font.setFamily("Consolas")
        font.setPointSize(16)
        self.label_2.setFont(font)
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName("label_2")
        # Leave-date caption and editor.
        self.label_3 = QtWidgets.QLabel(MemberData)
        self.label_3.setGeometry(QtCore.QRect(100, 480, 100, 25))
        font = QtGui.QFont()
        font.setFamily("Consolas")
        font.setPointSize(16)
        self.label_3.setFont(font)
        self.label_3.setAlignment(QtCore.Qt.AlignCenter)
        self.label_3.setObjectName("label_3")
        self.inDateEdit = QtWidgets.QDateTimeEdit(MemberData)
        self.inDateEdit.setGeometry(QtCore.QRect(50, 440, 200, 24))
        self.inDateEdit.setAlignment(QtCore.Qt.AlignCenter)
        self.inDateEdit.setObjectName("inDateEdit")
        self.outDateEdit = QtWidgets.QDateTimeEdit(MemberData)
        self.outDateEdit.setGeometry(QtCore.QRect(50, 510, 200, 24))
        self.outDateEdit.setAlignment(QtCore.Qt.AlignCenter)
        self.outDateEdit.setObjectName("outDateEdit")
        # Member name headline at the top.
        self.nameLabel = QtWidgets.QLabel(MemberData)
        self.nameLabel.setGeometry(QtCore.QRect(75, 10, 150, 31))
        font = QtGui.QFont()
        font.setFamily("Consolas")
        font.setPointSize(20)
        font.setBold(True)
        font.setWeight(75)
        self.nameLabel.setFont(font)
        self.nameLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.nameLabel.setObjectName("nameLabel")
        # E-mail caption and line edit.
        self.label_4 = QtWidgets.QLabel(MemberData)
        self.label_4.setGeometry(QtCore.QRect(100, 60, 100, 25))
        font = QtGui.QFont()
        font.setFamily("Consolas")
        font.setPointSize(16)
        self.label_4.setFont(font)
        self.label_4.setAlignment(QtCore.Qt.AlignCenter)
        self.label_4.setObjectName("label_4")
        self.emailEdit = QtWidgets.QLineEdit(MemberData)
        self.emailEdit.setGeometry(QtCore.QRect(50, 90, 200, 24))
        self.emailEdit.setAlignment(QtCore.Qt.AlignCenter)
        self.emailEdit.setObjectName("emailEdit")
        # Bank-card caption and line edit.
        self.bankCardEdit = QtWidgets.QLineEdit(MemberData)
        self.bankCardEdit.setGeometry(QtCore.QRect(50, 160, 200, 24))
        self.bankCardEdit.setAlignment(QtCore.Qt.AlignCenter)
        self.bankCardEdit.setObjectName("bankCardEdit")
        self.label_5 = QtWidgets.QLabel(MemberData)
        self.label_5.setGeometry(QtCore.QRect(100, 130, 100, 25))
        font = QtGui.QFont()
        font.setFamily("Consolas")
        font.setPointSize(16)
        self.label_5.setFont(font)
        self.label_5.setAlignment(QtCore.Qt.AlignCenter)
        self.label_5.setObjectName("label_5")
        # Department 1 caption and line edit.
        self.department_1_Edit = QtWidgets.QLineEdit(MemberData)
        self.department_1_Edit.setGeometry(QtCore.QRect(50, 300, 200, 24))
        self.department_1_Edit.setAlignment(QtCore.Qt.AlignCenter)
        self.department_1_Edit.setObjectName("department_1_Edit")
        self.label_6 = QtWidgets.QLabel(MemberData)
        self.label_6.setGeometry(QtCore.QRect(100, 270, 100, 25))
        font = QtGui.QFont()
        font.setFamily("Consolas")
        font.setPointSize(16)
        self.label_6.setFont(font)
        self.label_6.setAlignment(QtCore.Qt.AlignCenter)
        self.label_6.setObjectName("label_6")
        # Job title caption and line edit.
        self.professionEdit = QtWidgets.QLineEdit(MemberData)
        self.professionEdit.setGeometry(QtCore.QRect(50, 230, 200, 24))
        self.professionEdit.setAlignment(QtCore.Qt.AlignCenter)
        self.professionEdit.setObjectName("professionEdit")
        self.label_7 = QtWidgets.QLabel(MemberData)
        self.label_7.setGeometry(QtCore.QRect(100, 200, 100, 25))
        font = QtGui.QFont()
        font.setFamily("Consolas")
        font.setPointSize(16)
        self.label_7.setFont(font)
        self.label_7.setAlignment(QtCore.Qt.AlignCenter)
        self.label_7.setObjectName("label_7")
        # Department 2 caption and line edit.
        self.department_2_Edit = QtWidgets.QLineEdit(MemberData)
        self.department_2_Edit.setGeometry(QtCore.QRect(50, 370, 200, 24))
        self.department_2_Edit.setAlignment(QtCore.Qt.AlignCenter)
        self.department_2_Edit.setObjectName("department_2_Edit")
        self.label_8 = QtWidgets.QLabel(MemberData)
        self.label_8.setGeometry(QtCore.QRect(100, 340, 100, 25))
        font = QtGui.QFont()
        font.setFamily("Consolas")
        font.setPointSize(16)
        self.label_8.setFont(font)
        self.label_8.setAlignment(QtCore.Qt.AlignCenter)
        self.label_8.setObjectName("label_8")
        # Small "*" required-field markers next to e-mail / bank-card.
        self.label = QtWidgets.QLabel(MemberData)
        self.label.setGeometry(QtCore.QRect(254, 98, 10, 10))
        self.label.setObjectName("label")
        self.label_9 = QtWidgets.QLabel(MemberData)
        self.label_9.setGeometry(QtCore.QRect(255, 169, 10, 10))
        self.label_9.setObjectName("label_9")
        self.retranslateUi(MemberData)
        QtCore.QMetaObject.connectSlotsByName(MemberData)

    def retranslateUi(self, MemberData):
        """Install the (Chinese) display strings on every widget."""
        _translate = QtCore.QCoreApplication.translate
        MemberData.setWindowTitle(_translate("MemberData", "员工信息"))
        self.saveInOutButton.setText(_translate("MemberData", "保存"))
        self.label_2.setText(_translate("MemberData", "入职时间"))
        self.label_3.setText(_translate("MemberData", "离职时间"))
        self.nameLabel.setText(_translate("MemberData", "李林聪"))
        self.label_4.setText(_translate("MemberData", "邮箱"))
        self.label_5.setText(_translate("MemberData", "银行卡号"))
        self.label_6.setText(_translate("MemberData", "部门1"))
        self.label_7.setText(_translate("MemberData", "职务"))
        self.label_8.setText(_translate("MemberData", "部门2"))
        self.label.setText(_translate("MemberData", "*"))
        self.label_9.setText(_translate("MemberData", "*"))
|
from __future__ import print_function
from xml.dom import minidom
import json
import jsonpickle
import sys
class Point:
    """Integer 2-D coordinate used for glyph offsets."""

    x = 0
    y = 0

    def __init__(self, x, y):
        self.x, self.y = x, y
class Rect:
    """Integer rectangle (position plus size) inside the font atlas."""

    x = 0
    y = 0
    w = 0
    h = 0

    def __init__(self, x, y, w, h):
        self.x, self.y = x, y
        self.w, self.h = w, h
class Char:
    """One glyph parsed from a <Char> XML element."""

    width = 0
    offset = Point(0, 0)
    rect = Rect(0, 0, 0, 0)
    code = ' '

    def __init__(self, element):
        attrs = element.attributes
        self.width = attrs['width'].value
        self.rect = self.parseRect(attrs['rect'].value)
        self.offset = self.parseOffset(attrs['offset'].value)
        self.code = attrs['code'].value

    def parseRect(self, rectStr):
        """Parse "x y w h" into a Rect."""
        parts = rectStr.split()
        return Rect(int(parts[0]), int(parts[1]), int(parts[2]), int(parts[3]))

    def parseOffset(self, pointStr):
        """Parse "x y" into a Point."""
        parts = pointStr.split()
        return Point(int(parts[0]), int(parts[1]))
class Font:
    """Font metadata parsed from a <Font> XML element plus its glyphs."""

    size = 0
    family = ''
    height = 0
    style = ''
    chars = []

    def __init__(self, element):
        attrs = element.attributes
        self.size = attrs['size'].value
        self.family = attrs['family'].value
        self.height = attrs['height'].value
        self.style = attrs['style'].value
        # Fresh per-instance list so glyphs are never shared between fonts.
        self.chars = []

    def addChar(self, c):
        """Append one parsed glyph."""
        self.chars.append(c)
# Command line: <script> NAME  -- reads NAME.xml, writes NAME.json.
if len(sys.argv) <= 1:
    print('insufficient arguments')
    sys.exit(0)
infilename = sys.argv[1] + '.xml'
outfilename = sys.argv[1] + '.json'
xmldoc = minidom.parse(infilename)
# The single <Font> element carries the metadata; each <Char> is a glyph.
f = Font(xmldoc.getElementsByTagName('Font')[0])
chars = xmldoc.getElementsByTagName('Char')
for c in chars:
    ch = Char(c)
    f.addChar(ch)
outfile = open(outfilename, 'w+')
print(json.dumps(json.loads(jsonpickle.encode(f, unpicklable=False)), indent=4), file=outfile) |
# Generated by Django 2.0.5 on 2018-05-30 18:57
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the unused `duration` field from the Sprint model."""

    dependencies = [
        ('board', '0004_board_team'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='sprint',
            name='duration',
        ),
    ]
|
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# In[2]:
# Load the ball-by-ball deliveries and the per-match summary datasets.
data1 = pd.read_csv("deliveries.csv")
data2 = pd.read_csv("matches.csv")
# In[3]:
data1.head()
data1.columns
# In[4]:
data1.shape
# In[5]:
data2.head()
# In[6]:
data2.shape
# In[7]:
categorical_data1 = data1.dtypes[data1.dtypes == "object"].index
print(categorical_data1)
# In[8]:
data2.info()
# In[9]:
data1.apply(lambda x:sum(x.isnull()))
# In[10]:
# Missing-value cleanup: fill deliveries NaNs with the string '0'
# (used later to mean "no dismissal on this ball").
data1 = data1.fillna('0')
# In[11]:
data1.apply(lambda x:sum(x.isnull()))
# In[12]:
data2.apply(lambda x:sum(x.isnull()))
# In[13]:
data2 =data2.drop('umpire3',axis =1)
# In[14]:
data2['umpire1'].value_counts()
# In[15]:
# Impute missing umpires/cities with the most frequent values.
data2['umpire1'] =data2['umpire1'].fillna('HDPK Dharmasena ')
# In[16]:
data2['umpire2'].value_counts()
# In[17]:
data2['umpire2']=data2['umpire2'].fillna("SJA Taufel")
# In[18]:
data2['city'].value_counts()
# In[19]:
data2['city']=data2['city'].fillna("Mumbai")
# In[20]:
data1['match_id']
# In[21]:
data2['season'].value_counts()
# In[22]:
sns.countplot(x=data2['season'], data=data2)
# In[23]:
data2['city'].value_counts()
# In[24]:
plt.figure(figsize=(15,7))
sns.countplot(x=data2['city'], data=data2)
plt.xticks(rotation = 'vertical')
# In[25]:
data2['toss_winner'].value_counts()
# In[26]:
plt.figure(figsize=(15,7))
sns.countplot(x=data2['toss_winner'], data=data2)
plt.xticks(rotation = 'vertical')
# In[27]:
data2['result'].value_counts()
# In[28]:
data2['city'].value_counts().plot(kind='bar', color='blue')
# In[29]:
data2['toss_winner'].value_counts()
# In[30]:
data2['winner'].value_counts()
# In[31]:
plt.figure(figsize=(15,7))
sns.countplot(x=data2['toss_winner'],hue=data2['toss_decision'],data=data2)
plt.xticks(rotation='vertical')
# In[32]:
plt.figure(figsize=(15,7))
sns.countplot(x=data2['winner'],hue=data2['toss_decision'],data=data2)
plt.xticks(rotation='vertical')
# In[33]:
# Pie chart: share of "bat" vs "field" toss decisions.
plt.figure(figsize=(12,7))
temp=data2['toss_decision'].value_counts()
sizes = (np.array((temp / temp.sum())*100))
plt.pie(sizes, labels=(np.array(temp.index)),colors=['lightgreen', 'lightblue'],
autopct='%1.1f%%',shadow=True, startangle=90,explode=(0,0.03))
plt.title("Toss decision percentage")
plt.show()
# In[34]:
# Pie chart: how often the toss winner also wins the match.
plt.figure(figsize=(12,7))
temp=data2[data2['toss_winner']==data2['winner']]
sizes = (len(temp),(data2.shape[0]-len(temp)))
labels = ['toss_winner wins match','toss_winner loses match']
plt.pie(sizes, labels=labels,colors=['yellow', 'pink'],
autopct='%1.2f%%',shadow=True, startangle=90,explode=(0,0.03))
plt.title("toss wins vs toss loss")
plt.show()
# In[35]:
# NOTE(review): temp1 = data2 aliases the frame (no copy), so the new
# column is also added to data2; the chained .loc assignment below may
# trigger SettingWithCopyWarning -- confirm this mutation is intended.
temp1 = data2
temp1['Toss_Winner_is_Match_Winner'] = 'no'
temp1['Toss_Winner_is_Match_Winner'].loc[data2['toss_winner']==data2['winner']] = 'yes'
plt.figure(figsize=(15,7))
sns.countplot(x='toss_winner', hue='Toss_Winner_is_Match_Winner', data=temp1)
plt.xticks(rotation='vertical')
plt.show()
# In[36]:
temp1['Toss_Winner_is_Match_Winner'].value_counts()
# In[37]:
# Join per-match season info onto every delivery for bowling analysis.
bowlers = data2[['id','season']].merge(data1, right_on='match_id',left_on='id',how='left').drop('id',axis=1)
bowlers.head()
# In[38]:
bowlers.info()
# In[39]:
# '0' is the fillna sentinel from In[10]: keep only real dismissals.
total_wickets = bowlers[bowlers.dismissal_kind !='0']
total_wickets['dismissal_kind'].value_counts()
# In[40]:
plt.figure(figsize=(12,7))
sns.countplot(x=total_wickets['dismissal_kind'],data=total_wickets)
plt.xticks(rotation='vertical')
# In[41]:
matches_played_byteams=pd.concat([data2['team1'],data2['team2']])
# In[42]:
matches_played_byteams.head()
# In[43]:
matches_played_byteams=matches_played_byteams.value_counts().reset_index()
matches_played_byteams.columns=['Team','Total Matches']
matches_played_byteams['wins']=data2['winner'].value_counts().reset_index()['winner']
matches_played_byteams.set_index('Team',inplace=True)
# In[44]:
# NOTE(review): plt.Bar / plt.Layout do not exist in matplotlib -- this
# cell looks like pasted plotly code (go.Bar / go.Layout) and will raise
# AttributeError when run; confirm which library was intended.
trace1 = plt.Bar(x=matches_played_byteams.index,
y=matches_played_byteams['Total Matches'],
name='Total Matches')
trace2 = plt.Bar(x=matches_played_byteams.index,
y=matches_played_byteams['wins'],
name='Matches Won')
data = [trace1, trace2]
layout = plt.Layout(barmode='stack')
# In[45]:
plt.figure(figsize=(15,6))
temp = sns.countplot(x='season',data=total_wickets)
for i in temp.patches:
    temp.annotate(format(i.get_height()),(i.get_x()+.20, i.get_height()),fontsize=15)
# In[46]:
total_wickets['bowler'].value_counts()
# In[47]:
# Top twenty wicket-taking bowlers.
plt.figure(figsize=(25,16))
temp = total_wickets['bowler'].value_counts()[:20].plot(kind='bar', color=sns.color_palette('autumn',10))
for i in temp.patches:
    temp.annotate(format(i.get_height()),(i.get_x()+.20, i.get_height()),fontsize=15)
# In[48]:
batsmen = data2[['id','season']].merge(data1, right_on='match_id',left_on='id',how='left').drop('id',axis=1)
batsmen.head()
# In[65]:
# Top ten run scorers.
temp = batsmen.groupby('batsman')['batsman_runs'].sum().reset_index()
temp = temp.sort_values('batsman_runs', ascending=False)[:10]
temp.reset_index(drop=True)
# In[66]:
temp = temp.plot(kind='bar', x='batsman', y='batsman_runs', width=0.8, color=sns.color_palette('summer',20))
for i in temp.patches:
    temp.annotate(format(i.get_height()),(i.get_x()+0.20, i.get_height()),fontsize=15)
fig=plt.gcf()
fig.set_size_inches(14,6)
plt.show()
# In[67]:
total_wickets.columns
# In[78]:
# Total runs scored per season.
temp = batsmen.groupby('season')['total_runs'].sum()
temp.head()
# In[79]:
temp = temp.plot(kind='bar', x='season', y='total_runs', width=0.8, color=sns.color_palette('summer',20))
for i in temp.patches:
    temp.annotate(format(i.get_height()),(i.get_x()+0.20, i.get_height()),fontsize=15)
fig=plt.gcf()
fig.set_size_inches(14,6)
plt.show()
# In[83]:
# Fours: every delivery whose batsman_runs equals 4 (string compare --
# fillna('0') above made the column object-typed).
boundary = ['4']
fours = batsmen[batsmen['batsman_runs'].isin(boundary)]
fours['batsman'].value_counts()[:10]
# In[91]:
plt.figure(figsize=(25,16))
temp = fours['batsman'].value_counts()[:10].plot(kind='bar', color=sns.color_palette('autumn',10))
for i in temp.patches:
    temp.annotate(format(i.get_height()),(i.get_x()+.20, i.get_height()),fontsize=15)
# In[93]:
plt.figure(figsize=(25,16))
temp=sns.countplot(x=fours['season'],data=fours)
for i in temp.patches:
    temp.annotate(format(i.get_height()),(i.get_x()+.20, i.get_height()),fontsize=15)
# In[95]:
boundary = ['6']
sixes = batsmen[batsmen['batsman_runs'].isin(boundary)]
sixes['batsman'].value_counts()[:10]
# In[96]:
plt.figure(figsize=(25,16))
temp = sixes['batsman'].value_counts()[:10].plot(kind='bar', color=sns.color_palette('autumn',10))
for i in temp.patches:
    temp.annotate(format(i.get_height()),(i.get_x()+.20, i.get_height()),fontsize=15)
# In[97]:
plt.figure(figsize=(25,16))
temp=sns.countplot(x=sixes['season'],data=sixes)
for i in temp.patches:
    temp.annotate(format(i.get_height()),(i.get_x()+.20, i.get_height()),fontsize=15)
# In[101]:
# Per-season run totals from sixes (a) and fours (b), merged on season.
a=sixes.groupby("season")["batsman_runs"].agg(lambda four : four.sum()).reset_index()
b=fours.groupby("season")["batsman_runs"].agg(lambda six: six.sum()).reset_index()
boundaries=a.merge(b,left_on='season',right_on='season',how='left')
# In[99]:
boundaries.head()
# In[102]:
boundaries.plot(x='batsman_runs_x', y='batsman_runs_y', marker='o')
# In[103]:
boundaries.set_index('season')[['batsman_runs_x','batsman_runs_y']].plot(marker='o',color=['red','green'])
fig=plt.gcf()
fig.set_size_inches(10,6)
plt.show()
|
import random
class Color:
    """UNO card colors as integer constants; Wild matches everything."""
    Red = 0
    Yellow = 1
    Blue = 2
    Green = 3
    Wild = 4
    # One-letter display codes, indexed by the color value above.
    Str = ["R", "Y", "B", "G", "W"]
class Card:
    """A single UNO card: a number (or "+2"/"+4") plus a Color value."""

    def __init__(self, number, color):
        self.number = number
        self.color = color

    def matches(self, check):
        """True if this card can legally be played on *check*."""
        return self.number == check.number \
            or self.color == check.color \
            or self.color == Color.Wild \
            or check.color == Color.Wild

    def ToString(self):
        """Render as e.g. "5R" or "+2B"."""
        return str(self.number) + Color.Str[self.color]

    # BUG FIX: documented as a static method but the @staticmethod
    # decorator was missing, so calling it on an instance would have
    # passed the instance as `string`.
    @staticmethod
    def ToCard(string):
        """Parse "5R" / "+2B"-style text back into a Card."""
        if string[0] == "+":
            return Card("+" + string[1], Color.Str.index(string[2]))
        else:
            return Card(int(string[0]), Color.Str.index(string[1]))
class Deck:
    """The UNO draw pile; `active` is the current face-up card."""

    def __init__(self):
        cards = []
        # BUG FIX: the color list was [Red, Blue, Green, Red] -- Red was
        # duplicated and Yellow never appeared in the deck at all.
        for col in [Color.Red, Color.Yellow, Color.Blue, Color.Green]:
            for i in range(10):  # number cards 0-9
                cards.append(Card(i, col))
            for i in range(2):   # two "+2" cards per color
                cards.append(Card("+2", col))
        for i in range(4):
            cards.append(Card("0", Color.Wild))
        for i in range(4):
            cards.append(Card("+4", Color.Wild))
        self.cards = cards
        self.shuffle()
        self.active = self.draw()

    def shuffle(self):
        """Shuffle the pile in place (stdlib Fisher-Yates instead of the
        previous hand-rolled 2000 random swaps)."""
        random.shuffle(self.cards)

    def print_active(self):
        print(self.active.ToString())

    def draw(self):
        """Remove and return the top card."""
        return self.cards.pop()

    # For debugging
    def print(self):
        for card in self.cards:
            print(card.number, card.color)
class Player:
    """A player's hand of UNO cards drawn from a shared deck."""

    def __init__(self, deck):
        # Starting hand: seven cards off the top of the deck.
        self.hand = [deck.draw() for _ in range(7)]

    def draw(self, deck):
        """Take one more card from the deck."""
        self.hand.append(deck.draw())

    def print_hand(self):
        """Print the hand as space-separated card codes."""
        print("".join(card.ToString() + " " for card in self.hand))

    def has(self, check):
        """Return the index of *check* in the hand, or -1 when absent."""
        for position, card in enumerate(self.hand):
            if str(card.number) == str(check.number) and str(card.color) == str(check.color):
                return position
        return -1

    def play(self, card, deck):
        """Try to play *card*: -1 = not held, -2 = no match,
        0 = played, 1 = played and won."""
        position = self.has(card)
        if position == -1:
            return -1
        if not card.matches(deck.active):
            return -2
        deck.active = card
        del self.hand[position]
        return 1 if self.check_win() else 0

    def check_win(self):
        """True once the hand is empty."""
        return not self.hand
# Interactive single-player loop: show the hand and active card, then
# accept "Draw" or "Play XX" (XX = card code like 5R or +2B).
deck = Deck()
#deck.print()
player = Player(deck)
playing = True
while playing:
    print("Your hand:")
    player.print_hand()
    print("\nActive card:")
    deck.print_active()
    action = input("\nDraw or Play XX: ")
    pieces = action.split(" ")
    if pieces[0] == "Draw":
        player.draw(deck)
    elif pieces[0] == "Play":
        result = player.play(Card.ToCard(pieces[1]), deck)
        #print("Result", result)
        if result == 1:
            print("\n--- YOU WIN! ---")
            playing = False
        if result == -1:
            print("\n\n--- You don't have that card... ---")
        if result == -2:
            print("\n\n--- That card is not a match... ---")
    else:
        print("\n\n--- Unknown action :( ---")
    print()
|
from distutils.core import setup, Extension
from Cython.Distutils import build_ext
import numpy
import subprocess
import os
# Locate the active interpreter so its include/ directory can be added
# to the compile path.
python_root = subprocess.Popen("which python", shell=True, stdout=subprocess.PIPE
).stdout.read().decode().strip()
print(python_root)
python_root = os.path.join(os.path.split(python_root)[0], '..')
libdr = ['/usr/local/lib']
incdr = [numpy.get_include(), '/usr/local/include/', os.path.join(python_root, 'include')]
# incdr = [numpy.get_include(), '/usr/local/include/', '/data/anaconda2/envs/py3/bin/../include']
# Two Cython extensions: the OpenCV conversion helper and the KCF tracker.
ext = [
Extension('cvt', ['python/cvt.pyx'],
language='c++',
extra_compile_args=['-std=c++11'],
include_dirs=incdr,
library_dirs=libdr,
libraries=['opencv_core']),
Extension('KCF', ['python/KCF.pyx', 'src/kcftracker.cpp', 'src/fhog.cpp'],
language='c++',
extra_compile_args=['-std=c++11'],
include_dirs=incdr,
library_dirs=libdr,
libraries=['opencv_core', 'opencv_imgproc'])
]
setup(
name='KCFcpp',
version='0.0.1',
cmdclass={'build_ext': build_ext},
ext_modules=ext
)
# python setup.py build_ext --inplace
|
# if_sampple07.py
# Prompt for a positive integer (user-facing strings are Japanese).
num = input('正の整数を入力してください:')
num = int(num)
if num > 0:
    # Report whether the positive number is even or odd.
    if num % 2 == 0:
        print('正の偶数')
    else:
        print('正の奇数')
    print('処理を終了します')
else:
    # Non-positive input: ask for a positive number.
    print('正の数を入力してください')
|
from django.conf.urls import url
from django.urls import path, re_path
from . import views
# URL namespace for reversing ('pitanja:pitanja').
app_name = 'pitanja'
urlpatterns = [
    # /index/
    # NOTE(review): ObjavaView is passed without .as_view() -- valid only
    # if it is a function-based view; confirm in views.py.
    path('', views.ObjavaView, name='pitanja'),
]
|
'''statSaukip_1.0
Updates:
- range of rows is automatic
Problems:
- не достаточно доступа для забора файлов с сервера этой прогой; тогда сначала нужно перенести интересующий год к себе на комп
'''
from csv import reader
from openpyxl import load_workbook, Workbook
from os import getcwd, listdir, chdir, remove, makedirs
from os.path import exists, isdir
def csv_to_xlsx(fileName):
    """Convert one semicolon-delimited CSV into an .xlsx file.

    The output file name is taken from characters 3:5 of cell F2
    (the month number) and written into the current directory.
    """
    with open(fileName, newline='') as csvfile:
        wb = Workbook()
        ws = wb.active
        ws.title = 'convert'
        CSVreader = reader(csvfile, delimiter=';', quotechar='|')
        for row in CSVreader:
            ws.append([''.join(cell) for cell in row])
        wb.save(r'{}\{}.xlsx'.format(getcwd(), ws['F2'].value[3:5:]))
        # BUG FIX: was `wb.close` -- a bare attribute access that never
        # actually called close().
        wb.close()
        print('Конвертация {} успешно завершена'.format(fileName))
def getInfo(fileName):
    """Return [year, stand number] read from cells F2 / A2 of the sheet."""
    ws = load_workbook(filename=fileName, data_only=True).active
    return [ws['F2'].value[6:10:], ws['A2'].value]
def getHead(fileName):
    """Return the header row values (cells A1..AB1) of the workbook."""
    ws = load_workbook(filename=fileName, data_only=True).active
    return [cell.value for cell in ws['A1':'AB1'][0]]
def getData(wb):
    """Return every data row (2..max) of the active sheet as value tuples."""
    ws = wb.active
    return list(ws.iter_rows(min_row=2, min_col=1, max_row=ws.max_row,
                             max_col=ws.max_column, values_only=True))
# Create the working folder for results.
workFolder = r'{}\result'.format(getcwd())
if not exists(workFolder):
    makedirs(workFolder)
# Find all source CSVs for the requested year.
yyyy = input('Введите год: ')
csvRoot = r'{}\{}'.format(getcwd(), yyyy)
months = listdir(csvRoot)
csvFiles = []
fileType = input('''1 - объединить статистику
2 - объединить расход
иное - выход
Что выбираем? ->''')
# BUG FIX: an invalid menu choice used to reach a bare `sys.exit` inside
# the loop -- `sys` was never imported (NameError) and the attribute was
# never called anyway.  Abort explicitly before scanning the folders.
if fileType not in ('1', '2'):
    raise SystemExit
for month in months:
    if isdir(r'{}\{}'.format(csvRoot, month)):
        if fileType == '1':
            csvFiles.append(r'{}\{}\Статистика.csv'.format(csvRoot, month))
        else:
            csvFiles.append(r'{}\{}\Расход.csv'.format(csvRoot, month))
# Convert every CSV to xlsx inside the working folder.
chdir(workFolder)
for file in csvFiles:
    csv_to_xlsx(file)
# Collect the produced xlsx files.
xlsxFiles = listdir(workFolder)
print('Объединяем файлы: \n{}'.format(xlsxFiles))
# Build the combined result workbook, named after stand number and year.
year = getInfo(xlsxFiles[0])[0]
stendNumber = getInfo(xlsxFiles[0])[1]
wbr = Workbook()
wsr = wbr.active
wsr.title = "stend_{}".format(stendNumber)
# Header once, then all data rows from every monthly file.
head = getHead(xlsxFiles[0])
wsr.append(head)
for fileName in xlsxFiles:
    wb = load_workbook(filename=fileName, data_only=True)
    data = getData(wb)
    for row in data:
        wsr.append(row)
    wb.close()  # BUG FIX: was `wb.close` without parentheses
# Save the result, remove intermediate files, report success.
resultFileName = 'stat_{}_{}.xlsx'.format(stendNumber, year)
wbr.save(resultFileName)
wbr.close()  # BUG FIX: was `wbr.close` without parentheses
for file in xlsxFiles:
    remove(file)
print('Формирование файла статистики успешно завершено: {}'.format(resultFileName))
input()
|
import json
# A JSON document held in a Python string literal.
d='''{"Name":"Ram",
"Class":"IV",
"Age":9 }'''
# json.loads parses the JSON text into a Python dict (despite the name).
python_string=json.loads(d)
print(python_string) |
#gerar 16 sub-chaves de tamanho 48
def GenerateSubkeys(k):
    """Derive the 48-bit DES round keys from the 64-bit key string.

    Pipeline: PC-1 selection, split into halves, per-round left
    rotations, then PC-2 on each rejoined pair.

    NOTE(review): InitialShift also yields the unshifted initial pair,
    so this returns 17 keys rather than the 16 the header comment
    promises -- confirm which is intended.
    """
    permuted = InitialPermutation(k)      # PC-1 bit selection
    left, right = DivideKey(permuted)
    halves = InitialShift(left, right)    # rotated half pairs
    return [FinalPermutation(a + b) for a, b in halves]  # PC-2 per pair
def DivideKey(k):
    """Split the key string into equal left/right halves.

    BUG FIX: `len(k)/2` is a float in Python 3 and slicing with it
    raises TypeError; integer division (//) restores the Python 2
    behavior.
    """
    mid = len(k) // 2
    return (k[:mid], k[mid:])
def InitialPermutation(k):
    """DES PC-1: select and reorder 56 of the 64 key bits."""
    pertable = [57, 49, 41, 33, 25, 17, 9,
                1, 58, 50, 42, 34, 26, 18,
                10, 2, 59, 51, 43, 35, 27,
                19, 11, 3, 60, 52, 44, 36,
                63, 55, 47, 39, 31, 23, 15,
                7, 62, 54, 46, 38, 30, 22,
                14, 6, 61, 53, 45, 37, 29,
                21, 13, 5, 28, 20, 12, 4]
    # Table positions are 1-based.
    return ''.join(k[pos - 1] for pos in pertable)
def FinalPermutation(k):
    """DES PC-2: compress the rejoined 56-bit halves to a 48-bit round key."""
    pertable = [14, 17, 11, 24, 1, 5,
                3, 28, 15, 6, 21, 10,
                23, 19, 12, 4, 26, 8,
                16, 7, 27, 20, 13, 2,
                41, 52, 31, 37, 47, 55,
                30, 40, 51, 45, 33, 48,
                44, 49, 39, 56, 34, 53,
                46, 42, 50, 36, 29, 32]
    # Table positions are 1-based.
    return ''.join(k[pos - 1] for pos in pertable)
def InitialShift(c, d):
    """Produce the per-round rotated half pairs for the 16 DES rounds.

    Rounds 1, 2, 9 and 16 rotate each half once; every other round
    rotates twice.  The unshifted input pair is kept as element 0, so
    the returned list holds 17 pairs.
    """
    pairs = [(c, d)]
    half_c = ListtoString(list(c))
    half_d = ListtoString(list(d))
    single_shift_rounds = {0, 1, 8, 15}
    for round_no in range(16):
        shifts = 1 if round_no in single_shift_rounds else 2
        for _ in range(shifts):
            half_c = LeftShift(list(half_c))
            half_d = LeftShift(list(half_d))
        pairs.append((half_c, half_d))
    return pairs
def LeftShift(l):
    """Rotate the list one position to the left (in place) and return
    the rotated content as a string."""
    l[:] = l[1:] + l[:1]
    return ListtoString(l)
def StringtoList(s):
    """Explode a string into the list of its characters."""
    return list(s)
def ListtoString(l):
    """Concatenate the str() of every element into one string."""
    return "".join(str(element) for element in l)
#print(GenerateSubkeys("0001001100110100010101110111100110011011101111001101111111110001"))
#####################################para testar com exemplo
def equalsString(s1, s2):
    """Return whether the two strings are equal.

    Simplified from an if/else that returned True/False literals --
    the comparison already yields the boolean.
    """
    return s1 == s2
def checkKeys(k1, k2):
    """Compare two key lists element by element; True where they agree."""
    return [equalsString(k1[i], k2[i]) for i in range(len(k1))]
def deleteSpace(s):
    """Return a copy of the string list with every space removed."""
    return [entry.replace(' ', '') for entry in s]
stringList=['000110110000001011101111111111000111000001110010', '011110011010111011011001110110111100100111100101', '010101011111110010001010010000101100111110011001', '011100101010110111010110110110110011010100011101', '011111001110110000000111111010110101001110101000', '011000111010010100111110010100000111101100101111', '111011001000010010110111111101100001100010111100', '111101111000101000111010110000010011101111111011', '111000001101101111101011111011011110011110000001', '101100011111001101000111101110100100011001001111', '001000010101111111010011110111101101001110000110', '011101010111000111110101100101000110011111101001', '100101111100010111010001111110101011101001000001', '010111110100001110110111111100101110011100111010', '101111111001000110001101001111010011111100001010', '110010110011110110001011000011100001011111110101']
#print(checkKeys(stringList,GenerateSubkeys("0001001100110100010101110111100110011011101111001101111111110001")))
#print(GenerateSubkeys("0001001100110100010101110111100110011011101111001101111111110001"))
######################################
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 25 17:52:55 2021
@author: zijie
"""
import gurobipy as gp
from gurobipy import GRB
# Tiny binary MIP (the Gurobi quick-start example).
m = gp.Model("mip1")
# Three binary decision variables.
x = m.addVar(vtype=GRB.BINARY, name="x")
y = m.addVar(vtype=GRB.BINARY, name="y")
z = m.addVar(vtype=GRB.BINARY, name="z")
# Maximize x + y + 2z subject to the two constraints below.
m.setObjective(x + y + 2*z, GRB.MAXIMIZE)
m.addConstr(x + 2 * y + 3 * z <= 4, "c0")
m.addConstr(x + y >= 1, "c1")
m.optimize()
# Report the chosen variable values and the objective.
for v in m.getVars():
    print('%s %g' % (v.varName, v.x))
print('Obj: %g' % m.objVal)
|
"""
There is a problem with your keyboard: it randomly writes symbols when you are typing a text. You need to clean
up the text by removing all symbols.
Task:
Take a text that includes some random symbols and translate it into a text that has none of them. The resulting
text should only include letters and numbers.
Input Format:
A string with random symbols.
Output Format:
A string of the text with all the symbols removed.
Sample Input:
#l$e%ts go @an#d@@ g***et #l#unch$$$
Sample Output:
lets go and get lunch
"""
import re
# Read one line of noisy text from the user.
# BUG FIX: the variable was named `list`, shadowing the builtin.
text = input()
# Keep word characters (letters/digits/underscore) and whitespace;
# every other symbol is dropped.  (\d is redundant inside \w but kept
# so the pattern's behavior is unchanged.)
pattern = r"[\w\s\d]"
kept = re.findall(pattern, text)
print("".join(kept))
|
'''
Created on Nov 27, 2020
@author: ian
'''
from Piece import Piece
from Square import Color
def createPieces(color):
    """Build the 21 polyomino pieces of the given color.

    Each spec is (cell coordinates, width, height); the piece order and
    geometry are identical to the previous copy-pasted version (the dead
    commented-out addSymmetry calls were dropped).  Every piece gets its
    permutations computed via Piece.permute.
    """
    specs = [
        # 1
        ([(0, 0)], 1, 1),
        # 2
        ([(0, 0), (1, 0)], 2, 1),
        # I3
        ([(0, 0), (1, 0), (2, 0)], 3, 1),
        # I4
        ([(0, 0), (1, 0), (2, 0), (3, 0)], 4, 1),
        # I5
        ([(0, 0), (1, 0), (2, 0), (3, 0), (4, 0)], 5, 1),
        # L4
        ([(0, 0), (0, 1), (1, 0), (2, 0)], 3, 2),
        # L5
        ([(0, 0), (0, 1), (1, 0), (2, 0), (3, 0)], 4, 2),
        # Y
        ([(0, 0), (1, 0), (1, 1), (2, 0), (3, 0)], 4, 2),
        # N
        ([(0, 0), (1, 0), (1, 1), (2, 1), (3, 1)], 4, 2),
        # Z4
        ([(0, 0), (1, 0), (1, 1), (2, 1)], 3, 2),
        # Z5
        ([(0, 0), (1, 0), (1, 1), (1, 2), (2, 2)], 3, 3),
        # Square, O
        ([(0, 0), (1, 0), (1, 1), (0, 1)], 2, 2),
        # +, X
        ([(0, 1), (1, 1), (2, 1), (1, 2), (1, 0)], 3, 3),
        # T4
        ([(0, 0), (1, 0), (2, 0), (1, 1)], 3, 2),
        # T5
        ([(0, 0), (1, 0), (2, 0), (1, 1), (1, 2)], 3, 3),
        # V3
        ([(0, 0), (0, 1), (1, 0)], 2, 2),
        # V5
        ([(0, 0), (0, 1), (1, 0), (0, 2), (2, 0)], 3, 3),
        # U
        ([(0, 0), (0, 1), (1, 0), (2, 0), (2, 1)], 3, 2),
        # W
        ([(0, 0), (1, 0), (1, 1), (2, 1), (2, 2)], 3, 3),
        # P
        ([(0, 0), (1, 0), (1, 1), (0, 1), (0, 2)], 2, 3),
        # F
        ([(0, 0), (0, 1), (1, 1), (2, 1), (1, 2)], 3, 3),
    ]
    pieces = []
    for points, w, h in specs:
        p = Piece(points, w, h, color)
        p.permutations = p.permute(Piece.duplicate)
        pieces.append(p)
    return pieces
if __name__ == "__main__":
    # Quick sanity check: report how many distinct placements exist in total.
    pieces = createPieces(Color.BLUE)
    total_permutations = sum(len(piece.permutations) for piece in pieces)
    print(total_permutations)
|
# 1
# 3
# A 10
# B 7
# C 5
T = int(input())
#숫자입력받기
for t in range(1, T+1):
N = int(input())
doc = ''
for j in range(N):
Ci, Ki = input().split()
#알파벳 Ci와 알파벳의 연속된 개수 Ki를 .split()으로 공백을 기준으로 나눠준다
Ki = int(Ki)
while True:
if Ki <= 0:
break
#알파벳 개수가 0이거나 그보다 작으면 멈춤
Ki -= 1
doc += Ci
#Ki가 1씩 줄때마다 Ci인 알파벳 하나씩 더함
print('#{}'.format(t))
count = 1
for c in doc:
print(c, end='')
if count % 10 == 0:
print()
count += 1
print()
|
from __future__ import print_function
import unittest
from pytraj import io as mdio
from pytraj.utils import eq, aa_eq
class Test(unittest.TestCase):
    def test_0(self):
        """Masses looked up by atomic number must match Topology.mass."""
        from pytraj.core import mass_atomic_number_dict, mass_element_dict
        top = mdio.load_topology("./data/tz2.parm7")
        masses = [mass_atomic_number_dict[atom.atomic_number] for atom in top]
        aa_eq(masses, top.mass, 2)
# Run the test suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
|
from importlib import import_module
from django.apps import AppConfig as BaseAppConfig
class AppConfig(BaseAppConfig):
    """Django application configuration for the ``neuronit`` project."""
    name = "neuronit"
    def ready(self):
        # Import signal receivers once the app registry is fully populated,
        # so their signal connections are registered exactly once.
        import_module("neuronit.receivers")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-10 04:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: reorder Topic's default ordering and update Chapter.title metadata."""
    dependencies = [
        ('subject', '0005_topic_ifshow'),
    ]
    operations = [
        # Topics now sort by reproduce count, then id, then creation date.
        migrations.AlterModelOptions(
            name='topic',
            options={'ordering': ['reproduce', 'id', 'create_date'], 'verbose_name': '主题', 'verbose_name_plural': '主题'},
        ),
        migrations.AlterField(
            model_name='chapter',
            name='title',
            field=models.CharField(help_text='标题', max_length=64, verbose_name='标题'),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 4 16:45:25 2019
@author: danie
"""
from consolemenu import *
from consolemenu.items import *
lista_tipos= []
lista_juegos = []
def mainMenu():
    """Build and display the top-level store menu."""
    menu = ConsoleMenu("Tienda de Videojuegos", "Menu")
    for item in (FunctionItem("Inventario", imprimirInventario),
                 FunctionItem("Editar Inventario", editMenu)):
        menu.append_item(item)
    menu.show()
    menu.exit()
def editMenu():
    """Build and display the inventory-editing submenu."""
    menu = ConsoleMenu("Tienda de Videojuegos", "Menu de Edición")
    for item in (FunctionItem("Crear Registro", menuCrearRegistro),
                 FunctionItem("Modificar Registro", menuEditarRegistro),
                 FunctionItem("Eliminar Registro", menuEliminarRegistro)):
        menu.append_item(item)
    menu.show()
def menuEditarRegistro():
    """Prompt for a game's IDV and let the user modify one of its fields.

    Prints an error message when no record matches the given identifier.
    """
    exist = False
    # Bug fix: the prompt previously said "Eliminar" (delete) in the edit flow.
    text = input('Ingrese el IDV del Videojuego a Modificar: ')
    for juego in lista_juegos:
        if juego.getIDV() == text:
            print("\nCampos posibles de modificar:\n1:Nombre\n2:Clasificacion\n3:Desarrollador\n")
            tipo = input('Ingrese el tipo de campo por modificar: ')
            dato = input('Ingrese el nuevo valor del campo: ')
            # modificarRegistro mutates the object in place and persists to disk;
            # re-assigning it back into the list (as before) was a no-op.
            juego.modificarRegistro(tipo, dato)
            print("\nRegistro Modificado")
            exist = True
    if not exist:
        print("El identificador ingresado no existe en los registros")
def menuCrearRegistro():
    """Prompt for the data of a new game, register it and persist the inventory."""
    print("\nLista de Generos:\n")
    for tipo in lista_tipos:
        print(tipo[0] + ":" + tipo[1] + "\n")
    text = input('Seleccione el tipo de genero del videojuego que desea ingresar: ')
    name = input('Ingrese el nombre del videojuego: ')
    clasificacion = input('Ingrese la clasificacion del videojuego: ')
    desarrollador = input('Ingrese el desarrollador del videojuego: ')
    juego = Videojuego(text, lista_tipos[int(text) - 1][1], len(lista_juegos) + 1,
                       name, clasificacion, desarrollador)
    # Bug fix: append before writing — the file was previously written *before*
    # the new record was added, so new games never reached juegos.txt.
    lista_juegos.append(juego)
    escribirArchivo()
    print("\nRegistro Ingresado")
def menuEliminarRegistro():
    """Prompt for a game's IDV and remove the matching record(s).

    Prints an error message when no record matches the given identifier.
    """
    exist = False
    text = input('Ingrese el IDV del Videojuego a Eliminar: ')
    # Bug fix: iterate over a snapshot — removing from a list while iterating
    # it skips the element that follows each removal.
    for juego in list(lista_juegos):
        if juego.getIDV() == text:
            lista_juegos.remove(juego)
            juego.eleminarRegistro()  # rewrites the file without this record
            print("\nRegistro Eliminado")
            exist = True
    if not exist:
        print("El identificador ingresado no existe en los registros")
def escribirArchivo():
    """Overwrite ./juegos.txt with one serialized line per registered game."""
    path = './juegos.txt'
    # 'with' guarantees the file is closed even if a write fails
    # (the previous version leaked the handle on error).
    with open(path, mode="w") as archivo:
        for juego in lista_juegos:
            archivo.write(str(juego.lineaAImprimir()))
def imprimirInventario():
    """Print every registered game to the console."""
    print("\nLista de VideoJuegos:\n")
    for registro in lista_juegos:
        print(registro)
def cargarDatos():
    """Populate lista_tipos and lista_juegos from their respective data files.

    Each line of tipos.txt is "IDT,genero"; each line of juegos.txt is
    "IDV,IDT,nombre,clasificacion,desarrollador". Prints "Error" (and keeps
    going) when either file cannot be read.
    """
    try:
        # 'with' closes the file even when a parse error is raised mid-loop.
        with open("./tipos.txt") as archivo:
            for linea in archivo.readlines():
                linea = linea.split(",")
                lista_tipos.append([linea[0], linea[1]])
    except Exception:
        print("Error")
    try:
        with open("./juegos.txt") as archivo:
            for linea in archivo.readlines():
                linea = linea.split(",")
                IDT = linea[1]
                # Bug fix: the generator variable used to shadow lista_tipos
                # itself ("for lista_tipos in lista_tipos"), which worked only
                # by accident and clobbered readability.
                if any(IDT in tipo for tipo in lista_tipos):
                    # NOTE(review): creation uses int(text) - 1 as the genre
                    # index but this loader does not subtract 1 — confirm which
                    # offset matches the file format.
                    juego = Videojuego(IDT, lista_tipos[int(linea[1])][1], linea[0],
                                       linea[2], linea[3], linea[4])
                    lista_juegos.append(juego)
    except Exception:
        print("Error")
class TipoDeJuego:
    """A game genre: an identifier (IDT) plus a human-readable name."""

    # Fix: the original class-level annotations "__IDT: None" used None as a
    # type and created no attributes; the real state lives on the instance.
    def __init__(self, IDT, genero):
        self.__IDT = IDT
        self.__genero = genero

    def getIDT(self):
        """Return the genre identifier."""
        return self.__IDT

    def __str__(self):
        return f"IDT: {self.__IDT}\n" + f"Genero: {self.__genero}\n"
class Videojuego(TipoDeJuego):
    """A registered game: genre data plus its own id and descriptive fields."""
    __IDV = None
    __nombre = None
    __clasificacion = None
    __desarrollador = None

    def __init__(self, IDT, genero, IDV, nombre, clasificacion, desarrollador):
        super().__init__(IDT, genero)
        self.__IDV = IDV
        self.__nombre = nombre
        self.__clasificacion = clasificacion
        self.__desarrollador = desarrollador

    def getIDV(self):
        """Return the game identifier."""
        return self.__IDV

    def eleminarRegistro(self):
        """Persist the inventory after this record was removed from lista_juegos."""
        escribirArchivo()

    def modificarRegistro(self, tipo, dato):
        """Update one field (1=nombre, 2=clasificacion, 3=desarrollador) and persist."""
        if tipo == "1":
            self.__nombre = dato
        elif tipo == "2":
            self.__clasificacion = dato
        elif tipo == "3":
            self.__desarrollador = dato
        escribirArchivo()

    def lineaAImprimir(self):
        """Serialize this record as the comma-separated line stored in juegos.txt."""
        return f"{self.__IDV}," + self.getIDT() + f",{self.__nombre},{self.__clasificacion},{self.__desarrollador}"

    def __str__(self):
        return (super().__str__()
                + f"IDV: {self.__IDV}\n"
                + f"Nombre: {self.__nombre}\n"
                + f"Clasificacion: {self.__clasificacion}\n"
                + f"Desarrollador: {self.__desarrollador}\n")
# Entry point: load persisted genre/game data, then show the interactive menu.
cargarDatos()
mainMenu()
# -*- coding: ISO-8859-1 -*-
#
# generated by wxGlade 0.9.3 on Thu Jun 27 21:45:40 2019
#
import sys
import traceback
import wx
import wx.ribbon as RB
from About import About
from Adjustments import Adjustments
from Alignment import Alignment
from BufferView import BufferView
from CameraInteface import CameraInterface
from Controller import Controller
from DefaultModules import *
from DeviceManager import DeviceManager
from EngraveProperty import EngraveProperty
from ImageProperty import ImageProperty
from JobInfo import JobInfo
from JobSpooler import JobSpooler
from Kernel import *
from Keymap import Keymap
from LaserOperation import *
from LaserRender import LaserRender, swizzlecolor
from Navigation import Navigation
from OperationPreprocessor import OperationPreprocessor
from PathProperty import PathProperty
from Preferences import Preferences
from RasterProperty import RasterProperty
from RotarySettings import RotarySettings
from Settings import Settings
from Shutdown import Shutdown
from TextProperty import TextProperty
from UsbConnect import UsbConnect
from ZMatrix import ZMatrix
from icons import *
from svgelements import *
"""
Laser software for the Stock-LIHUIYU laserboard.
MeerK40t (pronounced MeerKat) is a built-from-the-ground-up MIT licensed
open-source laser cutting software. See https://github.com/meerk40t/meerk40t
for full details.
wxMeerK40t is the primary gui addon for MeerK40t. It requires wxPython for the interface.
The Transformations work in Windows for wxPython 4.0+ and OSX/Linux wxPython 4.1+.
"""
# Mils (thousandths of an inch) per millimetre; used to convert bed sizes.
MILS_IN_MM = 39.3701
MEERK40T_VERSION = "0.5.2"
MEERK40T_ISSUES = "https://github.com/meerk40t/meerk40t/issues"
MEERK40T_WEBSITE = "https://github.com/meerk40t/meerk40t"
class IdInc:
    """Monotonic wx widget-id generator starting just above wx.ID_HIGHEST."""

    def __init__(self):
        self.id_highest_value = wx.ID_HIGHEST

    def new(self):
        """Return the next unused widget id."""
        self.id_highest_value += 1
        return self.id_highest_value
# Unique widget/menu ids for every toolbar button and menu entry.
idinc = IdInc()
ID_MAIN_TOOLBAR = idinc.new()
ID_ADD_FILE = idinc.new()
ID_OPEN = idinc.new()
ID_SAVE = idinc.new()
ID_NAV = idinc.new()
ID_USB = idinc.new()
ID_CONTROLLER = idinc.new()
ID_PREFERENCES = idinc.new()
ID_DEVICES = idinc.new()
ID_JOB = idinc.new()
ID_SPOOLER = idinc.new()
ID_CUT_CONFIGURATION = idinc.new()
ID_SELECT = idinc.new()
ID_MENU_NEW = idinc.new()
ID_MENU_OPEN_PROJECT = idinc.new()
ID_MENU_RECENT_PROJECT = idinc.new()
ID_MENU_IMPORT = idinc.new()
ID_MENU_SAVE = idinc.new()
ID_MENU_SAVE_AS = idinc.new()
ID_MENU_EXIT = idinc.new()
ID_MENU_ZOOM_OUT = idinc.new()
ID_MENU_ZOOM_IN = idinc.new()
ID_MENU_ZOOM_SIZE = idinc.new()
# draw_mode bit flags: 1 fill, 2 grids, 4 guides, 8 laserpath, 16 writer_position, 32 selection
ID_MENU_HIDE_FILLS = idinc.new()
ID_MENU_HIDE_GUIDES = idinc.new()
ID_MENU_HIDE_GRID = idinc.new()
ID_MENU_HIDE_STROKES = idinc.new()
ID_MENU_HIDE_LASERPATH = idinc.new()
ID_MENU_HIDE_RETICLE = idinc.new()
ID_MENU_HIDE_SELECTION = idinc.new()
ID_MENU_SCREEN_REFRESH = idinc.new()
ID_MENU_SCREEN_ANIMATE = idinc.new()
ID_MENU_HIDE_IMAGE = idinc.new()
ID_MENU_HIDE_PATH = idinc.new()
ID_MENU_HIDE_TEXT = idinc.new()
ID_MENU_ALIGNMENT = idinc.new()
ID_MENU_ABOUT = idinc.new()
ID_MENU_KEYMAP = idinc.new()
ID_MENU_DEVICE_MANAGER = idinc.new()
ID_MENU_PREFERENCES = idinc.new()
ID_MENU_SETTINGS = idinc.new()
ID_MENU_ROTARY = idinc.new()
ID_MENU_NAVIGATION = idinc.new()
ID_MENU_CONTROLLER = idinc.new()
ID_MENU_CAMERA = idinc.new()
ID_MENU_USB = idinc.new()
ID_MENU_SPOOLER = idinc.new()
ID_MENU_JOB = idinc.new()
ID_MENU_TREE = idinc.new()
ID_MENU_WEBPAGE = idinc.new()
ID_CUT_TREE = idinc.new()
ID_CUT_BURN_BUTTON = idinc.new()
# Shorthand for the wx translation hook used by every user-visible string.
_ = wx.GetTranslation
# (code, display name, wx language constant) for each supported UI language.
# NOTE(review): the French/Spanish display names look mojibake (file is
# declared ISO-8859-1) — verify they render as "français"/"español".
supported_languages = (('en', u'English', wx.LANGUAGE_ENGLISH),
                       ('fr', u'franšais', wx.LANGUAGE_FRENCH),
                       ('de', u'Deutsch', wx.LANGUAGE_GERMAN),
                       ('es', u'espa˝ol', wx.LANGUAGE_SPANISH))
class MeerK40t(wx.Frame):
"""
MeerK40t main window
"""
    def __init__(self, *args, **kwds):
        """Build the main window: tree + scene panel, ribbon, menus, bindings.

        The kernel is attached later via set_kernel(); until then self.kernel
        is None and most handlers guard against that.
        """
        # begin wxGlade: MeerK40t.__init__
        kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_FRAME_STYLE
        wx.Frame.__init__(self, *args, **kwds)
        self.DragAcceptFiles(True)
        # Left-hand element tree and the main drawing scene.
        self.tree = wx.TreeCtrl(self, wx.ID_ANY, style=wx.TR_MULTIPLE | wx.TR_HIDE_ROOT | wx.TR_HAS_BUTTONS)
        self.scene = wx.Panel(self, style=wx.EXPAND | wx.WANTS_CHARS)
        self.scene.SetDoubleBuffered(True)
        # Ribbon bar with the toolbar and windows panels.
        self._ribbon = RB.RibbonBar(self, style=RB.RIBBON_BAR_DEFAULT_STYLE
                                                | RB.RIBBON_BAR_SHOW_PANEL_EXT_BUTTONS)
        home = RB.RibbonPage(self._ribbon, wx.ID_ANY, _("Examples"), icons8_opened_folder_50.GetBitmap())
        toolbar_panel = RB.RibbonPanel(home, wx.ID_ANY, _("Toolbar"),
                                       style=RB.RIBBON_PANEL_NO_AUTO_MINIMISE | RB.RIBBON_PANEL_EXT_BUTTON)
        toolbar = RB.RibbonToolBar(toolbar_panel, ID_MAIN_TOOLBAR)
        self.toolbar = toolbar
        toolbar.AddTool(ID_OPEN, icons8_opened_folder_50.GetBitmap(), "")  # "Open",
        toolbar.AddTool(ID_SAVE, icons8_save_50.GetBitmap(), "")
        toolbar.AddTool(ID_JOB, icons8_laser_beam_52.GetBitmap(), "")
        windows_panel = RB.RibbonPanel(home, wx.ID_ANY, _("Windows"), icons8_opened_folder_50.GetBitmap())
        windows = RB.RibbonButtonBar(windows_panel)
        windows.AddButton(ID_NAV, _("Navigation"), icons8_move_32.GetBitmap(), "")
        windows.AddButton(ID_USB, _("Usb"), icons8_usb_connector_50.GetBitmap(), "")
        windows.AddButton(ID_SPOOLER, _("Spooler"), icons8_route_50.GetBitmap(), "")
        windows.AddButton(ID_CONTROLLER, _("Controller"), icons8_connected_50.GetBitmap(), "")
        windows.AddButton(ID_PREFERENCES, _("Preferences"), icons8_administrative_tools_50.GetBitmap(), "")
        windows.AddButton(ID_DEVICES, _("Devices"), icons8_manager_50.GetBitmap(), "")
        self._ribbon.Realize()
        self.CenterOnScreen()
        # Menu Bar
        self.main_menubar = wx.MenuBar()
        wxglade_tmp_menu = wx.Menu()
        wxglade_tmp_menu.Append(ID_MENU_NEW, _("New"), "")
        wxglade_tmp_menu.Append(ID_MENU_OPEN_PROJECT, _("Open Project"), "")
        wxglade_tmp_menu.Append(ID_MENU_IMPORT, _("Import File"), "")
        wxglade_tmp_menu.AppendSeparator()
        wxglade_tmp_menu.Append(ID_MENU_SAVE, _("Save"), "")
        wxglade_tmp_menu.Append(ID_MENU_SAVE_AS, _("Save As"), "")
        wxglade_tmp_menu.AppendSeparator()
        wxglade_tmp_menu.Append(ID_MENU_EXIT, _("Exit"), "")
        self.main_menubar.Append(wxglade_tmp_menu, _("File"))
        wxglade_tmp_menu = wx.Menu()
        wxglade_tmp_menu.Append(ID_MENU_ZOOM_OUT, _("Zoom Out"), "")
        wxglade_tmp_menu.Append(ID_MENU_ZOOM_IN, _("Zoom In"), "")
        wxglade_tmp_menu.Append(ID_MENU_ZOOM_SIZE, _("Zoom To Size"), "")
        wxglade_tmp_menu.AppendSeparator()
        # Check items mirror the draw_mode bit flags (see set_kernel).
        wxglade_tmp_menu.Append(ID_MENU_HIDE_GRID, _("Hide Grid"), "", wx.ITEM_CHECK)
        wxglade_tmp_menu.Append(ID_MENU_HIDE_GUIDES, _("Hide Guides"), "", wx.ITEM_CHECK)
        wxglade_tmp_menu.Append(ID_MENU_HIDE_PATH, _("Hide Paths"), "", wx.ITEM_CHECK)
        wxglade_tmp_menu.Append(ID_MENU_HIDE_IMAGE, _("Hide Images"), "", wx.ITEM_CHECK)
        wxglade_tmp_menu.Append(ID_MENU_HIDE_TEXT, _("Hide Text"), "", wx.ITEM_CHECK)
        wxglade_tmp_menu.Append(ID_MENU_HIDE_FILLS, _("Hide Fills"), "", wx.ITEM_CHECK)
        wxglade_tmp_menu.Append(ID_MENU_HIDE_STROKES, _("Hide Strokes"), "", wx.ITEM_CHECK)
        wxglade_tmp_menu.Append(ID_MENU_HIDE_LASERPATH, _("Hide Laserpath"), "", wx.ITEM_CHECK)
        wxglade_tmp_menu.Append(ID_MENU_HIDE_RETICLE, _("Hide Reticle"), "", wx.ITEM_CHECK)
        wxglade_tmp_menu.Append(ID_MENU_HIDE_SELECTION, _("Hide Selection"), "", wx.ITEM_CHECK)
        wxglade_tmp_menu.Append(ID_MENU_SCREEN_REFRESH, _("Do Not Refresh"), "", wx.ITEM_CHECK)
        wxglade_tmp_menu.Append(ID_MENU_SCREEN_ANIMATE, _("Do Not Animate"), "", wx.ITEM_CHECK)
        self.main_menubar.Append(wxglade_tmp_menu, _("View"))
        wxglade_tmp_menu = wx.Menu()
        wxglade_tmp_menu.Append(ID_MENU_PREFERENCES, _("Preferences"), "")
        wxglade_tmp_menu.Append(ID_MENU_SETTINGS, _("Settings"), "")
        wxglade_tmp_menu.Append(ID_MENU_ROTARY, _("Rotary Settings"), "")
        wxglade_tmp_menu.Append(ID_MENU_KEYMAP, _("Keymap Settings"), "")
        wxglade_tmp_menu.Append(ID_MENU_DEVICE_MANAGER, _("Device Manager"), "")
        wxglade_tmp_menu.Append(ID_MENU_ALIGNMENT, _("Alignment Ally"), "")
        wxglade_tmp_menu.Append(ID_MENU_CAMERA, _("Camera"), "")
        wxglade_tmp_menu.Append(ID_MENU_NAVIGATION, _("Navigation"), "")
        wxglade_tmp_menu.Append(ID_MENU_CONTROLLER, _("Controller"), "")
        wxglade_tmp_menu.Append(ID_MENU_USB, _("USB"), "")
        wxglade_tmp_menu.Append(ID_MENU_SPOOLER, _("Job Spooler"), "")
        wxglade_tmp_menu.Append(ID_MENU_JOB, _("Execute Job"), "")
        self.main_menubar.Append(wxglade_tmp_menu, _("Windows"))
        wxglade_tmp_menu = wx.Menu()
        wxglade_tmp_menu.Append(ID_MENU_WEBPAGE, _("Webpage"), "")
        wxglade_tmp_menu.Append(ID_MENU_ABOUT, _("About"), "")
        self.main_menubar.Append(wxglade_tmp_menu, _("Help"))
        self.SetMenuBar(self.main_menubar)
        # Menu Bar end
        self.Bind(wx.EVT_MENU, self.on_click_new, id=ID_MENU_NEW)
        self.Bind(wx.EVT_MENU, self.on_click_open, id=ID_MENU_OPEN_PROJECT)
        self.Bind(wx.EVT_MENU, self.on_click_open, id=ID_MENU_IMPORT)
        self.Bind(wx.EVT_MENU, self.on_click_save, id=ID_MENU_SAVE)
        self.Bind(wx.EVT_MENU, self.on_click_save_as, id=ID_MENU_SAVE_AS)
        self.Bind(wx.EVT_MENU, self.on_click_exit, id=ID_MENU_EXIT)
        self.Bind(wx.EVT_MENU, self.on_click_zoom_out, id=ID_MENU_ZOOM_OUT)
        self.Bind(wx.EVT_MENU, self.on_click_zoom_in, id=ID_MENU_ZOOM_IN)
        self.Bind(wx.EVT_MENU, self.on_click_zoom_size, id=ID_MENU_ZOOM_SIZE)
        # toggle_draw_mode(bit) returns a handler that flips that draw_mode bit.
        self.Bind(wx.EVT_MENU, self.toggle_draw_mode(0x0004), id=ID_MENU_HIDE_GRID)
        self.Bind(wx.EVT_MENU, self.toggle_draw_mode(0x0002), id=ID_MENU_HIDE_GUIDES)
        self.Bind(wx.EVT_MENU, self.toggle_draw_mode(0x0400), id=ID_MENU_HIDE_PATH)
        self.Bind(wx.EVT_MENU, self.toggle_draw_mode(0x0800), id=ID_MENU_HIDE_IMAGE)
        self.Bind(wx.EVT_MENU, self.toggle_draw_mode(0x1000), id=ID_MENU_HIDE_TEXT)
        self.Bind(wx.EVT_MENU, self.toggle_draw_mode(0x0001), id=ID_MENU_HIDE_FILLS)
        self.Bind(wx.EVT_MENU, self.toggle_draw_mode(0x0008), id=ID_MENU_HIDE_LASERPATH)
        self.Bind(wx.EVT_MENU, self.toggle_draw_mode(0x0010), id=ID_MENU_HIDE_RETICLE)
        self.Bind(wx.EVT_MENU, self.toggle_draw_mode(0x0020), id=ID_MENU_HIDE_SELECTION)
        self.Bind(wx.EVT_MENU, self.toggle_draw_mode(0x0040), id=ID_MENU_HIDE_STROKES)
        self.Bind(wx.EVT_MENU, self.toggle_draw_mode(0x0100), id=ID_MENU_SCREEN_REFRESH)
        self.Bind(wx.EVT_MENU, self.toggle_draw_mode(0x0200), id=ID_MENU_SCREEN_ANIMATE)
        self.Bind(wx.EVT_MENU, lambda v: self.kernel.open_window("About"), id=ID_MENU_ABOUT)
        self.Bind(wx.EVT_MENU, lambda v: self.kernel.open_window("Alignment"), id=ID_MENU_ALIGNMENT)
        self.Bind(wx.EVT_MENU, lambda v: self.kernel.open_window("CameraInterface"), id=ID_MENU_CAMERA)
        self.Bind(wx.EVT_MENU, lambda v: self.kernel.open_window("DeviceManager"), id=ID_MENU_DEVICE_MANAGER)
        self.Bind(wx.EVT_MENU, lambda v: self.kernel.open_window("Keymap"), id=ID_MENU_KEYMAP)
        self.Bind(wx.EVT_MENU, lambda v: self.kernel.open_window("Preferences"), id=ID_MENU_PREFERENCES)
        self.Bind(wx.EVT_MENU, lambda v: self.kernel.open_window("Settings"), id=ID_MENU_SETTINGS)
        self.Bind(wx.EVT_MENU, lambda v: self.kernel.open_window("Rotary"), id=ID_MENU_ROTARY)
        self.Bind(wx.EVT_MENU, lambda v: self.kernel.open_window("Navigation"), id=ID_MENU_NAVIGATION)
        self.Bind(wx.EVT_MENU, lambda v: self.kernel.open_window("Controller"), id=ID_MENU_CONTROLLER)
        self.Bind(wx.EVT_MENU, lambda v: self.kernel.open_window("UsbConnect"), id=ID_MENU_USB)
        self.Bind(wx.EVT_MENU, lambda v: self.kernel.open_window("JobSpooler"), id=ID_MENU_SPOOLER)
        self.Bind(wx.EVT_MENU, lambda v: self.kernel.open_window("JobInfo").set_operations(self.kernel.operations),
                  id=ID_MENU_JOB)
        self.Bind(wx.EVT_MENU, self.launch_webpage, id=ID_MENU_WEBPAGE)
        toolbar.Bind(RB.EVT_RIBBONTOOLBAR_CLICKED, self.on_click_open, id=ID_OPEN)
        toolbar.Bind(RB.EVT_RIBBONTOOLBAR_CLICKED, self.on_click_save, id=ID_SAVE)
        toolbar.Bind(RB.EVT_RIBBONTOOLBAR_CLICKED,
                     lambda v: self.kernel.open_window("JobInfo").set_operations(self.kernel.operations), id=ID_JOB)
        windows.Bind(RB.EVT_RIBBONBUTTONBAR_CLICKED, lambda v: self.kernel.open_window("UsbConnect"), id=ID_USB)
        windows.Bind(RB.EVT_RIBBONBUTTONBAR_CLICKED, lambda v: self.kernel.open_window("Navigation"), id=ID_NAV)
        windows.Bind(RB.EVT_RIBBONBUTTONBAR_CLICKED, lambda v: self.kernel.open_window("Controller"), id=ID_CONTROLLER)
        windows.Bind(RB.EVT_RIBBONBUTTONBAR_CLICKED, lambda v: self.kernel.open_window("Preferences"),
                     id=ID_PREFERENCES)
        windows.Bind(RB.EVT_RIBBONBUTTONBAR_CLICKED, lambda v: self.kernel.open_window("DeviceManager"), id=ID_DEVICES)
        windows.Bind(RB.EVT_RIBBONBUTTONBAR_CLICKED, lambda v: self.kernel.open_window("JobSpooler"), id=ID_SPOOLER)
        self.main_statusbar = self.CreateStatusBar(3)
        # end wxGlade
        self.Bind(wx.EVT_DROP_FILES, self.on_drop_file)
        # Scene/interaction state.
        self.previous_position = None
        self.matrix = Matrix()
        self.previous_window_position = None
        self.previous_scene_position = None
        self.popup_window_position = None
        self.popup_scene_position = None
        self._Buffer = None
        self.screen_refresh_is_requested = True
        self.screen_refresh_is_running = False
        self.background_brush = wx.Brush("Grey")
        self.renderer = None
        self.grid = None
        self.guide_lines = None
        # Ring buffer of the last 1000 laser travel segments (start, end points).
        self.laserpath = [[0, 0] for i in range(1000)], [[0, 0] for i in range(1000)]
        self.laserpath_index = 0
        self.mouse_move_function = self.move_pan
        self.working_file = None
        self.__set_properties()
        self.__do_layout()
        self.set_buffer()
        self.selection_pen = wx.Pen()
        self.selection_pen.SetColour(wx.BLUE)
        self.selection_pen.SetWidth(25)
        self.selection_pen.SetStyle(wx.PENSTYLE_SHORT_DASH)
        self.scene.Bind(wx.EVT_PAINT, self.on_paint)
        self.scene.Bind(wx.EVT_ERASE_BACKGROUND, self.on_erase)
        self.scene.Bind(wx.EVT_MOTION, self.on_mouse_move)
        self.scene.Bind(wx.EVT_MOUSEWHEEL, self.on_mousewheel)
        self.scene.Bind(wx.EVT_MIDDLE_DOWN, self.on_mouse_middle_down)
        self.scene.Bind(wx.EVT_MIDDLE_UP, self.on_mouse_middle_up)
        self.scene.Bind(wx.EVT_LEFT_DCLICK, self.on_mouse_double_click)
        self.scene.Bind(wx.EVT_RIGHT_DOWN, self.on_right_mouse_down)
        self.scene.Bind(wx.EVT_RIGHT_UP, self.on_right_mouse_up)
        self.scene.Bind(wx.EVT_LEFT_DOWN, self.on_left_mouse_down)
        self.scene.Bind(wx.EVT_LEFT_UP, self.on_left_mouse_up)
        self.scene.Bind(wx.EVT_ENTER_WINDOW, lambda event: self.scene.SetFocus())  # Focus follows mouse.
        self.tree.Bind(wx.EVT_ENTER_WINDOW, lambda event: self.tree.SetFocus())  # Focus follows mouse.
        self.scene.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.Bind(wx.EVT_CLOSE, self.on_close, self)
        self.fps_job = None
        self.kernel = None
        self.root = None  # RootNode value, must have kernel for init.
        self.device_listening = None
        self.background = None
    def notify_change(self):
        """Signal listeners that the element tree must be rebuilt."""
        self.kernel.signal('rebuild_tree', 0)
    def add_language_menu(self):
        """Append a Languages menu (radio items) when a ./locale folder exists.

        Languages whose translation folder is missing are shown but disabled;
        index 0 (English) stays enabled regardless.
        """
        if os.path.exists('./locale'):
            wxglade_tmp_menu = wx.Menu()
            i = 0
            for lang in supported_languages:
                language_code, language_name, language_index = lang
                m = wxglade_tmp_menu.Append(wx.ID_ANY, language_name, "", wx.ITEM_RADIO)
                # Pre-check the currently configured language.
                if i == self.kernel.language:
                    m.Check(True)
                self.Bind(wx.EVT_MENU, self.kernel.gui.language_to(i), id=m.GetId())
                if not os.path.exists('./locale/%s' % language_code) and i != 0:
                    m.Enable(False)
                i += 1
            self.main_menubar.Append(wxglade_tmp_menu, _("Languages"))
    def set_kernel(self, kernel):
        """Attach the application kernel and finish window initialization.

        Registers default settings, boots the kernel, starts the scene
        refresh cron job, and syncs the View menu check items with draw_mode.
        """
        self.kernel = kernel
        kernel.setting(int, "draw_mode", 0)  # 1 fill, 2 grids, 4 guides, 8 laserpath, 16 writer_position, 32 selection
        kernel.setting(int, "window_width", 1200)
        kernel.setting(int, "window_height", 600)
        kernel.setting(float, "units_convert", MILS_IN_MM)
        kernel.setting(str, "units_name", 'mm')
        kernel.setting(int, "units_marks", 10)
        kernel.setting(int, "units_index", 0)
        kernel.setting(bool, "mouse_zoom_invert", False)
        kernel.setting(int, 'fps', 40)
        kernel.setting(int, "bed_width", 320)  # Default Value
        kernel.setting(int, "bed_height", 220)  # Default Value
        self.listen_scene()
        # Guard against a zero/negative frame rate from stored settings.
        if kernel.fps <= 0:
            kernel.fps = 60
        self.renderer = LaserRender(kernel)
        self.root = RootNode(kernel, self)
        kernel.setting(wx.App, 'root', self.root)
        kernel.root = self.root
        # Enforce a sane minimum window size.
        if kernel.window_width < 300:
            kernel.window_width = 300
        if kernel.window_height < 300:
            kernel.window_height = 300
        kernel.add_control("Transform", self.open_transform_dialog)
        kernel.add_control("Path", self.open_path_dialog)
        kernel.add_control("FPS", self.open_fps_dialog)
        kernel.add_control("Speedcode-Gear-Force", self.open_speedcode_gear_dialog)
        kernel.add_control("Home and Dot", self.run_home_and_dot_test)
        self.SetSize((kernel.window_width, kernel.window_height))
        bedwidth = kernel.bed_width
        bedheight = kernel.bed_height
        self.kernel.boot()
        # Zoom the scene so the whole bed (in mils) is visible.
        self.focus_viewport_scene((0, 0, bedwidth * MILS_IN_MM, bedheight * MILS_IN_MM), 0.1)
        self.fps_job = self.kernel.cron.add_job(self.refresh_scene, interval=1.0 / float(kernel.fps))
        self.add_language_menu()
        # Mirror each draw_mode bit into its View menu check item.
        m = self.GetMenuBar().FindItemById(ID_MENU_HIDE_FILLS)
        m.Check(self.kernel.draw_mode & 0x0001 != 0)
        m = self.GetMenuBar().FindItemById(ID_MENU_HIDE_GUIDES)
        m.Check(self.kernel.draw_mode & 0x0002 != 0)
        m = self.GetMenuBar().FindItemById(ID_MENU_HIDE_GRID)
        m.Check(self.kernel.draw_mode & 0x0004 != 0)
        m = self.GetMenuBar().FindItemById(ID_MENU_HIDE_LASERPATH)
        m.Check(self.kernel.draw_mode & 0x0008 != 0)
        m = self.GetMenuBar().FindItemById(ID_MENU_HIDE_RETICLE)
        m.Check(self.kernel.draw_mode & 0x0010 != 0)
        m = self.GetMenuBar().FindItemById(ID_MENU_HIDE_SELECTION)
        m.Check(self.kernel.draw_mode & 0x0020 != 0)
        m = self.GetMenuBar().FindItemById(ID_MENU_HIDE_STROKES)
        m.Check(self.kernel.draw_mode & 0x0040 != 0)
        m = self.GetMenuBar().FindItemById(ID_MENU_SCREEN_REFRESH)
        m.Check(self.kernel.draw_mode & 0x0100 != 0)
        m = self.GetMenuBar().FindItemById(ID_MENU_SCREEN_ANIMATE)
        m.Check(self.kernel.draw_mode & 0x0200 != 0)
        m = self.GetMenuBar().FindItemById(ID_MENU_HIDE_PATH)
        m.Check(self.kernel.draw_mode & 0x0400 != 0)
        m = self.GetMenuBar().FindItemById(ID_MENU_HIDE_IMAGE)
        m.Check(self.kernel.draw_mode & 0x0800 != 0)
        m = self.GetMenuBar().FindItemById(ID_MENU_HIDE_TEXT)
        m.Check(self.kernel.draw_mode & 0x1000 != 0)
        self.on_size(None)
        self.Bind(wx.EVT_SIZE, self.on_size)
        self.space_changed(0)
        self.default_keymap()
        self.Bind(wx.EVT_TREE_BEGIN_DRAG, self.root.on_drag_begin_handler, self.tree)
        self.Bind(wx.EVT_TREE_END_DRAG, self.root.on_drag_end_handler, self.tree)
        self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.root.on_item_activated, self.tree)
        self.Bind(wx.EVT_TREE_SEL_CHANGED, self.root.on_item_changed, self.tree)
        self.Bind(wx.EVT_TREE_ITEM_RIGHT_CLICK, self.root.on_item_right_click, self.tree)
    def set_fps(self, fps):
        """Change the scene refresh rate: retire the old cron job, start a new one.

        A value of 0 is coerced to 1 to avoid a zero refresh interval.
        """
        if fps == 0:
            fps = 1
        # Setting times to 0 retires the currently scheduled job.
        self.fps_job.times = 0
        self.kernel.fps = fps
        self.fps_job = self.kernel.cron.add_job(self.refresh_scene, interval=1.0 / float(self.kernel.fps))
def on_element_update(self, *args):
"""
Called by 'element_property_update' when the properties of an element are changed.
:param args:
:return:
"""
if self.root is not None:
self.root.on_element_update(*args)
    def on_rebuild_tree_request(self, *args):
        """
        Called by 'rebuild_tree' change. Rebuilds the tree and refreshes the scene.
        :param args: signal payload (ignored).
        :return:
        """
        self.root.rebuild_tree()
        self.request_refresh()
    def on_refresh_scene(self, *args):
        """
        Called by 'refresh_scene' change. Requests a scene redraw.
        :param args: signal payload (ignored).
        :return:
        """
        self.request_refresh()
def on_usb_error(self, value):
dlg = wx.MessageDialog(None, _("All attempts to connect to USB have failed."),
_("Usb Connection Problem."), wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
    def on_usb_status(self, value):
        """Show the USB status string in status-bar field 0."""
        if self.kernel is not None:
            self.main_statusbar.SetStatusText(_("Usb: %s") % value, 0)
    def on_pipe_state(self, value):
        """Show the controller thread state in status-bar field 1."""
        if self.kernel is not None:
            self.main_statusbar.SetStatusText(_("Controller: %s") % self.kernel.get_text_thread_state(value), 1)
    def on_spooler_state(self, value):
        """Show the spooler thread state in status-bar field 2."""
        if self.kernel is not None:
            self.main_statusbar.SetStatusText(_("Spooler: %s") % self.kernel.get_text_thread_state(value), 2)
def on_interpreter_mode(self, state):
if state == 0:
self.background_brush = wx.Brush("Grey")
else:
self.background_brush = wx.Brush("Red")
self.request_refresh_for_animation()
def on_background_signal(self, background):
if isinstance(background, str):
return # Assumed color.
if isinstance(background, int):
return # Assumed color.
self.background = background
self.request_refresh()
    def on_device_switch(self, device):
        """Swap signal listeners from the previous device to the new one."""
        self.unlisten_device()
        self.listen_device(device)
    def listen_device(self, device):
        """Subscribe this window to a device's pipe/spooler/interpreter signals.

        Detaches from any previously listened device first. A None device
        only records that nothing is being listened to.
        """
        if self.device_listening is not None:
            self.unlisten_device()
        self.device_listening = device
        if device is not None:
            device.listen('pipe;error', self.on_usb_error)
            device.listen('pipe;usb_status', self.on_usb_status)
            device.listen('pipe;thread', self.on_pipe_state)
            device.listen('spooler;thread', self.on_spooler_state)
            device.listen('interpreter;position', self.update_position)
            device.listen('interpreter;mode', self.on_interpreter_mode)
            device.listen('bed_size', self.bed_changed)
    def unlisten_device(self):
        """Unsubscribe from the currently listened device, if any (mirror of listen_device)."""
        if self.device_listening is None:
            return  # Can't unlisten to nothing, ---
        device = self.device_listening
        if device is not None:
            device.unlisten('pipe;error', self.on_usb_error)
            device.unlisten('pipe;usb_status', self.on_usb_status)
            device.unlisten('pipe;thread', self.on_pipe_state)
            device.unlisten('spooler;thread', self.on_spooler_state)
            device.unlisten('interpreter;position', self.update_position)
            device.unlisten('interpreter;mode', self.on_interpreter_mode)
            device.unlisten('bed_size', self.bed_changed)
        self.device_listening = None
    def listen_scene(self):
        """Subscribe to kernel-wide signals that affect the scene and tree."""
        self.kernel.listen("background", self.on_background_signal)
        self.kernel.listen("device", self.on_device_switch)
        self.kernel.listen('rebuild_tree', self.on_rebuild_tree_request)
        self.kernel.listen('refresh_scene', self.on_refresh_scene)
        self.kernel.listen("element_property_update", self.on_element_update)
        self.kernel.listen("units", self.space_changed)
        self.kernel.listen("selected_elements", self.selection_changed)
    def unlisten_scene(self):
        """Unsubscribe from kernel-wide signals (exact mirror of listen_scene)."""
        self.kernel.unlisten("background", self.on_background_signal)
        self.kernel.unlisten("device", self.on_device_switch)
        self.kernel.unlisten('rebuild_tree', self.on_rebuild_tree_request)
        self.kernel.unlisten('refresh_scene', self.on_refresh_scene)
        self.kernel.unlisten("element_property_update", self.on_element_update)
        self.kernel.unlisten("units", self.space_changed)
        self.kernel.unlisten("selected_elements", self.selection_changed)
    def on_close(self, event):
        """Detach listeners, run the shutdown window, and stop the cron scheduler."""
        self.unlisten_device()
        self.unlisten_scene()
        self.kernel.open_window('Shutdown')
        self.kernel.mark_window_closed('MeerK40t')
        self.kernel.cron.stop()
        event.Skip()  # Call destroy as regular.
def __set_properties(self):
# begin wxGlade: MeerK40t.__set_properties
self.SetTitle(_("MeerK40t v%s") % MEERK40T_VERSION)
self.main_statusbar.SetStatusWidths([-1] * self.main_statusbar.GetFieldsCount())
_icon = wx.NullIcon
_icon.CopyFromBitmap(icon_meerk40t.GetBitmap())
self.SetIcon(_icon)
# statusbar fields
main_statusbar_fields = ["Status"]
for i in range(len(main_statusbar_fields)):
self.main_statusbar.SetStatusText(main_statusbar_fields[i], i)
    def __do_layout(self):
        """Arrange ribbon on top, then tree (1/6) beside the scene (5/6)."""
        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add(self._ribbon, 1, wx.EXPAND, 0)
        widget_sizer = wx.BoxSizer(wx.HORIZONTAL)
        widget_sizer.Add(self.tree, 1, wx.EXPAND, 0)
        widget_sizer.Add(self.scene, 5, wx.ALL | wx.EXPAND, 2)
        main_sizer.Add(widget_sizer, 5, wx.EXPAND, 0)
        self.SetSizer(main_sizer)
        # main_sizer.Fit(self)
        self.Layout()
def load(self, pathname):
results = self.kernel.load(pathname)
if results is not None:
elements, pathname, basename = results
self.kernel.classify(elements)
return True
return False
    def on_drop_file(self, event):
        """
        Drop file handler
        Accepts multiple files drops. Unrecognized files are reported in a
        single error dialog; any accepted file triggers a tree rebuild.
        """
        accepted = 0
        rejected = 0
        rejected_files = []
        for pathname in event.GetFiles():
            if self.load(pathname):
                accepted += 1
            else:
                rejected += 1
                rejected_files.append(pathname)
        if rejected != 0:
            reject = "\n".join(rejected_files)
            err_msg = _("Some files were unrecognized:\n%s") % reject
            dlg = wx.MessageDialog(None, err_msg, _('Error encountered'), wx.OK | wx.ICON_ERROR)
            dlg.ShowModal()
            dlg.Destroy()
        if accepted != 0:
            self.root.notify_tree_data_change()
    def on_paint(self, event):
        """Blit the pre-rendered off-screen buffer to the scene panel."""
        try:
            wx.BufferedPaintDC(self.scene, self._Buffer)
        except RuntimeError:
            # The underlying C++ widget may already be destroyed during shutdown.
            pass
def set_buffer(self):
width, height = self.scene.ClientSize
if width <= 0:
width = 1
if height <= 0:
height = 1
self._Buffer = wx.Bitmap(width, height)
def on_size(self, event):
if self.kernel is None:
return
self.Layout()
self.set_buffer()
self.kernel.window_width, self.kernel.window_height = self.Size
self.guide_lines = None
self.request_refresh()
def update_position(self, pos):
    """Record a laser move into the circular laserpath buffer and animate.

    :param pos: 4-tuple (x0, y0, x1, y1); laserpath holds parallel start/end lists.
    """
    self.laserpath[0][self.laserpath_index][0] = pos[0]
    self.laserpath[0][self.laserpath_index][1] = pos[1]
    self.laserpath[1][self.laserpath_index][0] = pos[2]
    self.laserpath[1][self.laserpath_index][1] = pos[3]
    # Ring buffer: wrap the write index so old segments are overwritten.
    self.laserpath_index += 1
    self.laserpath_index %= len(self.laserpath[0])
    self.request_refresh_for_animation()
def space_changed(self, units):
    """Signal listener: units changed; invalidate the cached grid and redraw."""
    self.grid = None
    self.on_size(None)
def bed_changed(self, size):
    """Signal listener: bed dimensions changed; invalidate the cached grid and redraw."""
    self.grid = None
    self.on_size(None)
def selection_changed(self, selection):
    """Signal listener: the element selection changed; redraw the scene."""
    self.request_refresh()
def on_erase(self, event):
    """Erase-background handler: intentionally a no-op; drawing happens via the buffered paint."""
    pass
def request_refresh_for_animation(self):
    """Called on the various signals trying to animate the screen."""
    # draw_mode bit 0x0200 suppresses animated refreshes.
    if self.kernel.draw_mode & 0x0200 == 0:
        self.request_refresh()
def request_refresh(self):
    """Request an update to the scene."""
    # draw_mode bit 0x0100 suppresses all refreshes. The flag is picked up by
    # refresh_scene() on the scheduler thread.
    if self.kernel.draw_mode & 0x0100 == 0:
        self.screen_refresh_is_requested = True
def refresh_scene(self):
    """Called by the Scheduler at a given the specified framerate."""
    # Coalesce requests: only schedule a UI redraw when one is pending and
    # none is currently in flight.
    if self.screen_refresh_is_requested and not self.screen_refresh_is_running:
        self.screen_refresh_is_running = True
        wx.CallAfter(self.refresh_in_ui)
def refresh_in_ui(self):
    """Called by refresh_scene() in the UI thread."""
    if self.kernel is None:
        return
    self.update_buffer_ui_thread()
    self.scene.Refresh()
    # Clear both flags so the scheduler can accept the next request.
    self.screen_refresh_is_requested = False
    self.screen_refresh_is_running = False
def update_buffer_ui_thread(self):
    """Performs the redraw of the data in the UI thread."""
    dc = wx.MemoryDC()
    dc.SelectObject(self._Buffer)
    dc.SetBackground(self.background_brush)
    dc.Clear()
    gc = wx.GraphicsContext.Create(dc)
    # Scene content is drawn under the current view matrix...
    gc.SetTransform(wx.GraphicsContext.CreateMatrix(gc, ZMatrix(self.matrix)))
    font = wx.Font(14, wx.SWISS, wx.NORMAL, wx.BOLD)
    gc.SetFont(font, wx.BLACK)
    self.on_draw_scene(gc)
    # ...while interface overlays (guides, reticle) use window coordinates.
    gc.SetTransform(wx.GraphicsContext.CreateMatrix(gc, ZMatrix()))
    self.on_draw_interface(gc)
    gc.Destroy()
    del dc
def on_matrix_change(self):
    """View matrix changed: invalidate the cached guide lines."""
    self.guide_lines = None
def scene_matrix_reset(self):
    """Reset the scene view matrix to identity."""
    self.matrix.reset()
    self.on_matrix_change()
def scene_post_scale(self, sx, sy=None, ax=0, ay=0):
    """Post-multiply a scale (about anchor ax, ay) onto the view matrix."""
    self.matrix.post_scale(sx, sy, ax, ay)
    self.on_matrix_change()
def scene_post_pan(self, px, py):
    """Post-multiply a translation onto the view matrix."""
    self.matrix.post_translate(px, py)
    self.on_matrix_change()
def scene_post_rotate(self, angle, rx=0, ry=0):
    """Post-multiply a rotation (about rx, ry) onto the view matrix."""
    self.matrix.post_rotate(angle, rx, ry)
    self.on_matrix_change()
def scene_pre_scale(self, sx, sy=None, ax=0, ay=0):
    """Pre-multiply a scale (about anchor ax, ay) onto the view matrix."""
    self.matrix.pre_scale(sx, sy, ax, ay)
    self.on_matrix_change()
def scene_pre_pan(self, px, py):
    """Pre-multiply a translation onto the view matrix."""
    self.matrix.pre_translate(px, py)
    self.on_matrix_change()
def scene_pre_rotate(self, angle, rx=0, ry=0):
    """Pre-multiply a rotation (about rx, ry) onto the view matrix."""
    self.matrix.pre_rotate(angle, rx, ry)
    self.on_matrix_change()
def get_scale_x(self):
    """Return the view matrix's current x scale factor."""
    return self.matrix.value_scale_x()
def get_scale_y(self):
    """Return the view matrix's current y scale factor."""
    return self.matrix.value_scale_y()
def get_skew_x(self):
    """Return the view matrix's current x skew component."""
    return self.matrix.value_skew_x()
def get_skew_y(self):
    """Return the view matrix's current y skew component."""
    return self.matrix.value_skew_y()
def get_translate_x(self):
    """Return the view matrix's current x translation."""
    return self.matrix.value_trans_x()
def get_translate_y(self):
    """Return the view matrix's current y translation."""
    return self.matrix.value_trans_y()
def on_mousewheel(self, event):
    """Wheel handler: zoom the scene in/out about the cursor position."""
    rotation = event.GetWheelRotation()
    if self.kernel.mouse_zoom_invert:
        rotation = -rotation
    mouse = event.GetPosition()
    if rotation > 1:
        self.scene_post_scale(1.1, 1.1, mouse[0], mouse[1])
    elif rotation < -1:
        self.scene_post_scale(0.9, 0.9, mouse[0], mouse[1])
    self.request_refresh()
def on_mouse_middle_down(self, event):
    """Middle button down: begin a pan drag (capture mouse, record anchor)."""
    self.SetCursor(wx.Cursor(wx.CURSOR_HAND))
    self.scene.CaptureMouse()
    self.previous_window_position = event.GetPosition()
    self.previous_scene_position = self.convert_window_to_scene(self.previous_window_position)
def on_mouse_middle_up(self, event):
    """Middle button up: end the pan drag (release capture, clear anchors)."""
    if self.scene.HasCapture():
        self.SetCursor(wx.Cursor(wx.CURSOR_ARROW))
        self.scene.ReleaseMouse()
    self.previous_window_position = None
    self.previous_scene_position = None
def on_left_mouse_down(self, event):
    """Left button down: select the element under the cursor and start a move drag."""
    self.SetCursor(wx.Cursor(wx.CURSOR_HAND))
    self.scene.CaptureMouse()
    self.previous_window_position = event.GetPosition()
    self.previous_scene_position = self.convert_window_to_scene(self.previous_window_position)
    self.root.set_selected_by_position(self.previous_scene_position)
    # Subsequent mouse-move events translate the selection rather than pan.
    self.mouse_move_function = self.move_selected
    self.request_refresh()
def on_left_mouse_up(self, event):
    """Left button up: finish the move drag; revert drag behavior back to panning."""
    if self.scene.HasCapture():
        self.SetCursor(wx.Cursor(wx.CURSOR_ARROW))
        self.scene.ReleaseMouse()
    self.previous_window_position = None
    self.previous_scene_position = None
    self.mouse_move_function = self.move_pan
def on_mouse_double_click(self, event):
    """Double-click: select the element under the cursor and open its property window."""
    position = event.GetPosition()
    position = self.convert_window_to_scene(position)
    self.root.set_selected_by_position(position)
    self.root.activate_selected_node()
def move_pan(self, wdx, wdy, sdx, sdy):
    """Drag action: pan the view by the window-space delta (wdx, wdy)."""
    self.scene_post_pan(wdx, wdy)
    self.request_refresh()
def move_selected(self, wdx, wdy, sdx, sdy):
    """Drag action: move the selected elements by the scene-space delta (sdx, sdy)."""
    self.root.move_selected(sdx, sdy)
    self.request_refresh()
def on_mouse_move(self, event):
    """Mouse move: while dragging, feed window/scene deltas to the active drag action."""
    if not event.Dragging():
        return
    else:
        self.SetCursor(wx.Cursor(wx.CURSOR_HAND))
    if self.previous_window_position is None:
        # Drag did not start inside the scene; nothing to do.
        return
    pos = event.GetPosition()
    window_position = pos.x, pos.y
    scene_position = self.convert_window_to_scene([window_position[0], window_position[1]])
    sdx = (scene_position[0] - self.previous_scene_position[0])
    sdy = (scene_position[1] - self.previous_scene_position[1])
    wdx = (window_position[0] - self.previous_window_position[0])
    wdy = (window_position[1] - self.previous_window_position[1])
    # Either move_pan or move_selected, depending on which button started the drag.
    self.mouse_move_function(wdx, wdy, sdx, sdy)
    self.previous_window_position = window_position
    self.previous_scene_position = scene_position
def on_right_mouse_down(self, event):
    """Right button down: select the element under the cursor and pop up its context menu."""
    self.popup_window_position = event.GetPosition()
    self.popup_scene_position = self.convert_window_to_scene(self.popup_window_position)
    self.root.set_selected_by_position(self.popup_scene_position)
    selected = self.root.selected_elements
    if not selected:
        return
    self.root.create_menu(self, selected[0])
def on_right_mouse_up(self, event):
    """Right button up: restore the default arrow cursor."""
    self.SetCursor(wx.Cursor(wx.CURSOR_ARROW))
def default_keymap(self):
    """Install the default key bindings into the kernel keymap.

    Each entry maps a wx keycode to a MappedKey whose command string is later
    parsed and dispatched by execute_string_action().
    """
    self.kernel.keymap[wx.WXK_ESCAPE] = MappedKey("escape", "window Adjustments")
    self.kernel.keymap[wx.WXK_RIGHT] = MappedKey("right", "move right 1mm")
    self.kernel.keymap[wx.WXK_LEFT] = MappedKey("left", "move left 1mm")
    self.kernel.keymap[wx.WXK_UP] = MappedKey("up", "move up 1mm")
    self.kernel.keymap[wx.WXK_DOWN] = MappedKey("down", "move down 1mm")
    # Digits 1-5 recall stored positions (see execute_set_position_action).
    self.kernel.keymap[ord('1')] = MappedKey('1', "set_position 1")
    self.kernel.keymap[ord('2')] = MappedKey('2', "set_position 2")
    self.kernel.keymap[ord('3')] = MappedKey('3', "set_position 3")
    self.kernel.keymap[ord('4')] = MappedKey('4', "set_position 4")
    self.kernel.keymap[ord('5')] = MappedKey('5', "set_position 5")
    self.kernel.keymap[wx.WXK_F4] = MappedKey('F4', "window CameraInterface")
    self.kernel.keymap[wx.WXK_F6] = MappedKey('F6', "window JobSpooler")
    self.kernel.keymap[wx.WXK_F7] = MappedKey('F7', "window Controller")
    self.kernel.keymap[wx.WXK_F8] = MappedKey('F8', "control Path")
    self.kernel.keymap[wx.WXK_F9] = MappedKey('F9', "control Transform")
def execute_string_action(self, action, *args):
    """Dispatch a parsed keymap command to its handler.

    :param action: command verb ('move', 'move_to', 'set_position', 'window', 'control').
    :param args: remaining tokens from the command string.
    """
    device = self.kernel.device
    if device is None:
        return
    spooler = device.spooler
    dispatch = {
        'move': lambda: spooler.send_job(self.execute_move_action(*args)),
        'move_to': lambda: spooler.send_job(self.execute_move_to_action(*args)),
        'set_position': lambda: self.execute_set_position_action(*args),
        'window': lambda: self.execute_open_window_action(*args),
        'control': lambda: self.execute_execute_control(*args),
    }
    handler = dispatch.get(action)
    if handler is not None:
        handler()
def execute_execute_control(self, *args):
    """Run a named kernel control routine (args[0] is the control's name)."""
    self.kernel.execute(args[0])
def execute_open_window_action(self, *args):
    """Open a registered kernel window by name (args[0]); unknown names are ignored."""
    window_name = args[0]
    if window_name in self.kernel.windows:
        self.kernel.open_window(window_name)
def execute_set_position_action(self, index):
    """Store the laser's current position under digit key `index`.

    Rebinds that key to a 'move_to x y' command recalling today's position.
    """
    x = self.kernel.device.current_x
    y = self.kernel.device.current_y
    self.kernel.keymap[ord(index)] = MappedKey(index, "move_to %d %d" % (x, y))
def execute_move_action(self, direction, amount):
    """Build a spooler job performing a relative rapid move.

    :param direction: one of 'right', 'left', 'up', 'down'.
    :param amount: a length string (e.g. '1mm') resolved at 1000 ppi.
    :return: generator function yielding the incremental-move command sequence.
    """
    min_dim = min(self.kernel.window_width, self.kernel.window_height)
    distance = Length(amount).value(ppi=1000.0, relative_length=min_dim)
    dx, dy = {
        'right': (distance, 0),
        'left': (-distance, 0),
        'up': (0, -distance),
        'down': (0, distance),
    }.get(direction, (0, 0))

    def move():
        yield COMMAND_SET_INCREMENTAL
        yield COMMAND_RAPID_MOVE, (dx, dy)
        yield COMMAND_SET_ABSOLUTE
    return move
def execute_move_to_action(self, position_x, position_y):
    """Build a spooler job performing an absolute rapid move to (x, y)."""
    def move():
        yield COMMAND_RAPID_MOVE, (int(position_x), int(position_y))
    return move
def on_key_press(self, event):
    """Key handler: look up the keycode in the kernel keymap and run its command."""
    keycode = event.GetKeyCode()
    # Modifier branches are placeholders; modifiers are currently ignored.
    if event.ControlDown():
        pass
    if event.AltDown():
        pass
    if event.ShiftDown():
        pass
    if event.MetaDown():
        pass
    if keycode in self.kernel.keymap:
        action = self.kernel.keymap[keycode].command
        args = str(action).split(' ')
        self.execute_string_action(*args)
def focus_on_elements(self):
    """Zoom/pan the view to fit the bounding box of the current selection."""
    bbox = self.root.bounds
    if bbox is None:
        return
    self.focus_viewport_scene(bbox)
    self.request_refresh()
def focus_position_scene(self, scene_point):
    """Center the view on a scene point while preserving the current zoom level."""
    window_width, window_height = self.scene.ClientSize
    # Remember current zoom before resetting the matrix.
    scale_x = self.get_scale_x()
    scale_y = self.get_scale_y()
    self.scene_matrix_reset()
    self.scene_post_pan(-scene_point[0], -scene_point[1])
    self.scene_post_scale(scale_x, scale_y)
    self.scene_post_pan(window_width / 2.0, window_height / 2.0)
def focus_viewport_scene(self, new_scene_viewport, buffer=0.0, lock=True):
    """Fit the view to a scene-space box [left, top, right, bottom].

    :param new_scene_viewport: bounding box to bring into view.
    :param buffer: fractional margin added on every side.
    :param lock: if True, preserve aspect ratio (uniform scale); else stretch.
    """
    window_width, window_height = self.scene.ClientSize
    left = new_scene_viewport[0]
    top = new_scene_viewport[1]
    right = new_scene_viewport[2]
    bottom = new_scene_viewport[3]
    viewport_width = right - left
    viewport_height = bottom - top
    left -= viewport_width * buffer
    right += viewport_width * buffer
    top -= viewport_height * buffer
    bottom += viewport_height * buffer
    # Degenerate (zero-size) boxes get an arbitrary fixed zoom of 100.
    if right == left:
        scale_x = 100
    else:
        scale_x = window_width / float(right - left)
    if bottom == top:
        scale_y = 100
    else:
        scale_y = window_height / float(bottom - top)
    cx = ((right + left) / 2)
    cy = ((top + bottom) / 2)
    self.matrix.reset()
    self.matrix.post_translate(-cx, -cy)
    if lock:
        scale = min(scale_x, scale_y)
        if scale != 0:
            self.matrix.post_scale(scale)
    else:
        if scale_x != 0 and scale_y != 0:
            self.matrix.post_scale(scale_x, scale_y)
    self.matrix.post_translate(window_width / 2.0, window_height / 2.0)
def convert_scene_to_window(self, position):
    """Map a scene-space position to window coordinates via the view matrix."""
    point = self.matrix.point_in_matrix_space(position)
    return point[0], point[1]
def convert_window_to_scene(self, position):
    """Map a window position to scene coordinates via the inverse view matrix."""
    point = self.matrix.point_in_inverse_space(position)
    return point[0], point[1]
def calculate_grid(self):
    """Compute grid line endpoints across the bed and cache them in self.grid.

    self.grid becomes (starts, ends) parallel point lists, or None when the
    configured unit step is zero (no drawable grid).
    """
    source = self.kernel.device if self.kernel.device is not None else self.kernel
    kernel = self.kernel
    wmils = source.bed_width * MILS_IN_MM
    hmils = source.bed_height * MILS_IN_MM
    step = kernel.units_convert * kernel.units_marks
    starts = []
    ends = []
    if step == 0:
        self.grid = None
        return starts, ends
    # Vertical lines every `step` mils.
    x = 0.0
    while x < wmils:
        starts.append((x, 0))
        ends.append((x, hmils))
        x += step
    # Horizontal lines every `step` mils.
    y = 0.0
    while y < hmils:
        starts.append((0, y))
        ends.append((wmils, y))
        y += step
    self.grid = starts, ends
def on_draw_grid(self, gc):
    """Draw the bed grid, computing the line segments lazily.

    Fix: calculate_grid() leaves self.grid as None when the unit step is
    zero; previously unpacking that None raised a TypeError. Now we simply
    skip drawing in that case.
    """
    if self.grid is None:
        self.calculate_grid()
    if self.grid is None:
        return
    starts, ends = self.grid
    gc.StrokeLineSegments(starts, ends)
def on_draw_guides(self, gc):
    """Draw the ruler tick marks and unit labels along the window edges."""
    w, h = self.Size
    p = self.kernel
    # Pixels per configured unit at the current zoom.
    scaled_conversion = p.units_convert * self.matrix.value_scale_x()
    if scaled_conversion == 0:
        return
    wpoints = w / 15.0
    hpoints = h / 15.0
    points = min(wpoints, hpoints)
    # tweak the scaled points into being useful.
    # points = scaled_conversion * round(points / scaled_conversion * 10.0) / 10.0
    # Round tick spacing to one significant figure of the unit value.
    points = scaled_conversion * float('{:.1g}'.format(points / scaled_conversion))
    sx, sy = self.convert_scene_to_window([0, 0])
    if points == 0:
        return
    # Phase-align ticks with the scene origin.
    offset_x = sx % points
    offset_y = sy % points
    starts = []
    ends = []
    x = offset_x
    length = 50
    font = wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD)
    gc.SetFont(font, wx.BLACK)
    while x < w:
        starts.append((x, 0))
        ends.append((x, length))
        starts.append((x, h))
        ends.append((x, h - length))
        mark_point = (x - sx) / scaled_conversion
        if round(mark_point * 1000) == 0:
            mark_point = 0.0  # prevents -0
        # Vertical labels, rotated a quarter turn (`tau` is a module-level
        # constant — presumably 2*pi; confirm at import site).
        gc.DrawText("%g %s" % (mark_point, p.units_name), x, 0, -tau / 4)
        x += points
    y = offset_y
    while y < h:
        starts.append((0, y))
        ends.append((length, y))
        starts.append((w, y))
        ends.append((w - length, y))
        mark_point = (y - sy) / scaled_conversion
        if round(mark_point * 1000) == 0:
            mark_point = 0.0  # prevents -0
        gc.DrawText("%g %s" % (mark_point + 0, p.units_name), 0, y + 0)
        y += points
    gc.StrokeLineSegments(starts, ends)
def on_draw_interface(self, gc):
    """Draw window-space overlays: guides (draw_mode bit 2) and the laser-position reticle (bit 16)."""
    pen = wx.Pen(wx.BLACK)
    pen.SetWidth(1)
    pen.SetCap(wx.CAP_BUTT)
    gc.SetPen(pen)
    if self.kernel.draw_mode & 2 == 0:
        self.on_draw_guides(gc)
    if self.kernel.draw_mode & 16 == 0:
        # Draw Reticle
        gc.SetPen(wx.RED_PEN)
        gc.SetBrush(wx.TRANSPARENT_BRUSH)
        try:
            x = self.kernel.device.current_x
            y = self.kernel.device.current_y
            x, y = self.convert_scene_to_window([x, y])
            gc.DrawEllipse(x - 5, y - 5, 10, 10)
        except AttributeError:
            # No device attached (device is None); skip the reticle.
            pass
def on_draw_bed(self, gc):
    """Draw the machine bed: white rectangle, or the background bitmap if one is set."""
    source = self.kernel.device if self.kernel.device is not None else self.kernel
    bed_w = source.bed_width * MILS_IN_MM
    bed_h = source.bed_height * MILS_IN_MM
    if self.background is None:
        gc.SetBrush(wx.WHITE_BRUSH)
        gc.DrawRectangle(0, 0, bed_w, bed_h)
    else:
        gc.DrawBitmap(self.background, 0, 0, bed_w, bed_h)
def on_draw_selection(self, gc, draw_mode):
    """Draw Selection Box"""
    bounds = self.root.bounds
    if bounds is not None:
        # Keep line width and font size constant on screen regardless of zoom.
        linewidth = 3.0 / self.matrix.value_scale_x()
        self.selection_pen.SetWidth(linewidth)
        font = wx.Font(14.0 / self.matrix.value_scale_x(), wx.SWISS, wx.NORMAL, wx.BOLD)
        gc.SetFont(font, wx.BLACK)
        gc.SetPen(self.selection_pen)
        gc.SetBrush(wx.BLACK_BRUSH)
        x0, y0, x1, y1 = bounds
        center_x = (x0 + x1) / 2.0
        center_y = (y0 + y1) / 2.0
        # Crosshair lines from the axes to the box, then the box outline.
        gc.StrokeLine(center_x, 0, center_x, y0)
        gc.StrokeLine(0, center_y, x0, center_y)
        gc.StrokeLine(x0, y0, x1, y0)
        gc.StrokeLine(x1, y0, x1, y1)
        gc.StrokeLine(x1, y1, x0, y1)
        gc.StrokeLine(x0, y1, x0, y0)
        # draw_mode bit 128 suppresses the dimension labels.
        if draw_mode & 128 == 0:
            p = self.kernel
            conversion, name, marks, index = p.units_convert, p.units_name, p.units_marks, p.units_index
            gc.DrawText("%.1f%s" % (y0 / conversion, name), center_x, y0)
            gc.DrawText("%.1f%s" % (x0 / conversion, name), x0, center_y)
            gc.DrawText("%.1f%s" % ((y1 - y0) / conversion, name), x1, center_y)
            gc.DrawText("%.1f%s" % ((x1 - x0) / conversion, name), center_x, y1)
def on_draw_laserpath(self, gc, draw_mode):
    """Draw the recorded laser travel path segments in blue."""
    gc.SetPen(wx.BLUE_PEN)
    starts, ends = self.laserpath
    gc.StrokeLineSegments(starts, ends)
def on_draw_scene(self, gc):
    """Draw scene-space content: bed, grid (bit 4), elements, selection (bit 32), laser path (bit 8)."""
    self.on_draw_bed(gc)
    gc.SetPen(wx.BLACK_PEN)
    if self.kernel.draw_mode & 4 == 0:
        self.on_draw_grid(gc)
    pen = wx.Pen(wx.BLACK)
    pen.SetWidth(1)
    pen.SetCap(wx.CAP_BUTT)
    gc.SetPen(pen)
    if self.kernel is None:
        return
    self.renderer.render(gc, self.kernel.draw_mode)
    if self.kernel.draw_mode & 32 == 0:
        self.on_draw_selection(gc, self.kernel.draw_mode)
    if self.kernel.draw_mode & 8 == 0:
        self.on_draw_laserpath(gc, self.kernel.draw_mode)
def on_click_new(self, event):  # wxGlade: MeerK40t.<event_handler>
    """New project: discard working file, elements, operations, and file nodes."""
    self.working_file = None
    kernel = self.kernel
    kernel.elements = []
    kernel.operations = []
    kernel.filenodes = {}
    self.request_refresh()
    kernel.signal('rebuild_tree', 0)
def on_click_open(self, event):  # wxGlade: MeerK40t.<event_handler>
    """Show an open-file dialog and load the chosen file."""
    # This code should load just specific project files rather than all importable formats.
    files = self.kernel.load_types()
    with wx.FileDialog(self, _("Open"), wildcard=files,
                       style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
        if fileDialog.ShowModal() == wx.ID_CANCEL:
            return  # the user changed their mind
        pathname = fileDialog.GetPath()
        self.load(pathname)
def on_click_save(self, event):
    """Save to the current working file, or delegate to save-as when none exists."""
    if self.working_file is None:
        self.on_click_save_as(event)
        return
    self.kernel.save(self.working_file)
def on_click_save_as(self, event):
    """Show a save-file dialog, force an .svg suffix, save, and remember the path."""
    files = self.kernel.save_types()
    with wx.FileDialog(self, "Save Project", wildcard=files,
                       style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) as fileDialog:
        if fileDialog.ShowModal() == wx.ID_CANCEL:
            return  # the user changed their mind
        pathname = fileDialog.GetPath()
        if not pathname.lower().endswith('.svg'):
            pathname += '.svg'
        self.kernel.save(pathname)
        self.working_file = pathname
def on_click_exit(self, event):  # wxGlade: MeerK40t.<event_handler>
    """Menu exit: close the main frame."""
    self.Close()
def on_click_zoom_out(self, event):  # wxGlade: MeerK40t.<event_handler>
    """
    Zoomout button press
    """
    # Zoom about the center of the scene client area.
    m = self.scene.ClientSize / 2
    self.scene_post_scale(1.0 / 1.5, 1.0 / 1.5, m[0], m[1])
    self.request_refresh()
def on_click_zoom_in(self, event):  # wxGlade: MeerK40t.<event_handler>
    """
    Zoomin button press
    """
    # Zoom about the center of the scene client area.
    m = self.scene.ClientSize / 2
    self.scene_post_scale(1.5, 1.5, m[0], m[1])
    self.request_refresh()
def on_click_zoom_size(self, event):  # wxGlade: MeerK40t.<event_handler>
    """
    Zoom size button press: fit the view to the selected elements.
    """
    self.focus_on_elements()
def toggle_draw_mode(self, bits):
    """
    Build an event handler that toggles the given draw-mode bit(s).

    :param bits: bitmask to XOR into kernel.draw_mode on each invocation.
    :return: handler callable suitable for a menu/ribbon binding.
    """
    def _toggle(event):
        self.kernel.draw_mode ^= bits
        self.request_refresh()
    return _toggle
def open_speedcode_gear_dialog(self):
    """Prompt for a forced gear value (0-4); anything else clears the override."""
    dlg = wx.TextEntryDialog(self, _("Enter Forced Gear"), _("Gear Entry"), '')
    dlg.SetValue('')
    if dlg.ShowModal() == wx.ID_OK:
        value = dlg.GetValue()
        if value in ('0', '1', '2', '3', '4'):
            self.kernel._stepping_force = int(value)
        else:
            self.kernel._stepping_force = None
    dlg.Destroy()
def open_fps_dialog(self):
    """Prompt for an integer FPS limit; non-numeric input is ignored."""
    dlg = wx.TextEntryDialog(self, _("Enter FPS Limit"), _("FPS Limit Entry"), '')
    dlg.SetValue('')
    if dlg.ShowModal() == wx.ID_OK:
        fps = dlg.GetValue()
        try:
            self.set_fps(int(fps))
        except ValueError:
            pass
    dlg.Destroy()
def open_transform_dialog(self):
    """Prompt for an SVG transform string and apply it to all elements.

    '$x'/'$y' in the entered text are substituted with the laser's current
    position before parsing. Identity transforms produce a warning dialog.
    """
    dlg = wx.TextEntryDialog(self, _(
        "Enter SVG Transform Instruction e.g. 'scale(1.49, 1, $x, $y)', rotate, translate, etc..."),
                             _("Transform Entry"), '')
    dlg.SetValue('')
    if dlg.ShowModal() == wx.ID_OK:
        p = self.kernel.device
        m = str(dlg.GetValue())
        m = m.replace('$x', str(p.current_x))
        m = m.replace('$y', str(p.current_y))
        mx = Matrix(m)
        # 39.37 mils per mm: bed dimensions converted from mm to mils.
        wmils = p.bed_width * 39.37
        hmils = p.bed_height * 39.37
        mx.render(ppi=1000, width=wmils, height=hmils)
        if mx.is_identity():
            dlg.Destroy()
            dlg = wx.MessageDialog(None, _("The entered command does nothing."),
                                   _("Non-Useful Matrix."), wx.OK | wx.ICON_WARNING)
            result = dlg.ShowModal()
            dlg.Destroy()
        else:
            for element in self.kernel.elements:
                try:
                    element *= mx
                except AttributeError:
                    # Element type does not support matrix multiplication.
                    pass
            self.kernel.signal('rebuild_tree', 0)
def open_path_dialog(self):
    """Prompt for SVG path data; add the resulting blue-stroked path to the project."""
    dlg = wx.TextEntryDialog(self, _("Enter SVG Path Data"), _("Path Entry"), '')
    dlg.SetValue('')
    if dlg.ShowModal() == wx.ID_OK:
        path = Path(dlg.GetValue())
        path.stroke = 'blue'
        p = abs(path)
        self.kernel.elements.append(p)
        self.kernel.classify(p)
        self.kernel.signal("rebuild_tree", 0)
    dlg.Destroy()
def run_home_and_dot_test(self):
    """Spool a calibration job: 25 cycles of home, move, brief laser pulse, re-home."""
    # NOTE(review): signaling rebuild_tree here looks unrelated to the job —
    # confirm whether it is intentional.
    self.kernel.signal("rebuild_tree", 0)

    def home_dot_test():
        for i in range(25):
            yield COMMAND_SET_ABSOLUTE
            yield COMMAND_MODE_DEFAULT
            yield COMMAND_HOME
            yield COMMAND_WAIT_BUFFER_EMPTY
            yield COMMAND_RAPID_MOVE, (3000, 3000)
            yield COMMAND_LOCK
            yield COMMAND_WAIT_BUFFER_EMPTY
            yield COMMAND_LASER_ON
            yield COMMAND_WAIT, 0.05
            yield COMMAND_LASER_OFF
            yield COMMAND_LOCK
            yield COMMAND_WAIT_BUFFER_EMPTY
        yield COMMAND_HOME
        yield COMMAND_WAIT_BUFFER_EMPTY
    self.kernel.device.spooler.send_job(home_dot_test)
def launch_webpage(self, event):  # wxGlade: MeerK40t.<event_handler>
    """
    Launch webpage: open the MeerK40t website in the default browser.
    :param event:
    :return:
    """
    import webbrowser
    webbrowser.open(MEERK40T_WEBSITE, new=0, autoraise=True)
# Tree node type identifiers used by Node/RootNode to distinguish the three
# top-level branches (Operations / Elements / Files) and their children.
# Child types are parent branch type + 1 (see RootNode.build_tree).
NODE_ROOT = 0
NODE_OPERATION_BRANCH = 10
NODE_OPERATION = 11
NODE_OPERATION_ELEMENT = 12
NODE_ELEMENTS_BRANCH = 20
NODE_ELEMENT = 21
NODE_FILES_BRANCH = 30
NODE_FILE_FILE = 31
NODE_FILE_ELEMENT = 32
class Node(list):
    """
    Generic Node Type for use with RootNode

    Creating the object registers the position in the tree according to the parent and root.
    Deleting the object deregisters the node in the tree.
    """

    def __init__(self, node_type, data_object, parent, root, pos=None, name=None):
        """Create a node wrapping data_object and register it in the wx tree.

        :param node_type: one of the NODE_* type constants.
        :param data_object: the wrapped operation/element/key object.
        :param parent: parent Node (children are stored via list inheritance).
        :param root: the RootNode owning the tree widget and lookup tables.
        :param pos: optional insertion index within the parent's tree item.
        :param name: display name; defaults to str(data_object).
        """
        list.__init__(self)
        self.parent = parent
        self.root = root
        self.object = data_object
        if name is None:
            self.name = str(data_object)
        else:
            self.name = name
        # Truncate over-long display names to at most 27 characters.
        # Fix: the previous check (>= 27 with a [:28] slice) made 27-char
        # names *longer* by appending '...' to the full name.
        if len(self.name) > 27:
            self.name = self.name[:24] + '...'
        self.type = node_type
        parent.append(self)
        self.filepath = None
        try:
            self.bounds = data_object.bbox()
        except AttributeError:
            # Object has no bbox (e.g. operations, file keys).
            self.bounds = None
        parent_item = parent.item
        tree = root.tree
        if pos is None:
            item = tree.AppendItem(parent_item, self.name)
        else:
            item = tree.InsertItem(parent_item, pos, self.name)
        self.item = item
        # tree_lookup maps id(object) -> all nodes wrapping that object, since
        # one object may appear under several branches.
        if id(data_object) in self.root.tree_lookup:
            self.root.tree_lookup[id(data_object)].append(self)
        else:
            self.root.tree_lookup[id(data_object)] = [self]
        tree.SetItemData(self.item, self)
        try:
            # Tint the tree entry with the element's stroke color, if any.
            stroke = data_object.values[SVG_ATTR_STROKE]
            color = wx.Colour(swizzlecolor(Color(stroke).value))
            tree.SetItemTextColour(item, color)
        except (AttributeError, KeyError, TypeError):
            pass
        self.set_icon()
        root.notify_added(self)

    def __str__(self):
        return "Node(%s, %d)" % (str(self.item), self.type)

    def __repr__(self):
        return "Node(%d, %s, %s, %s)" % (self.type, str(self.object), str(self.parent), str(self.root))

    def update_name(self):
        """Refresh the display name (and stroke color) from the wrapped object."""
        self.name = str(self.object)
        # Same truncation rule (and fix) as in __init__.
        if len(self.name) > 27:
            self.name = self.name[:24] + '...'
        self.root.tree.SetItemText(self.item, self.name)
        try:
            stroke = self.object.values[SVG_ATTR_STROKE]
            color = wx.Colour(swizzlecolor(Color(stroke).value))
            self.root.tree.SetItemTextColour(self.item, color)
        except AttributeError:
            pass

    def remove_node(self):
        """Recursively deregister this node (and children) from tree and lookups."""
        for q in self:
            q.remove_node()
        root = self.root
        links = root.tree_lookup[id(self.object)]
        links.remove(self)
        self.parent.remove(self)
        try:
            root.tree.Delete(self.item)
        except RuntimeError:
            # Tree widget already destroyed (e.g. during shutdown).
            return
        root.notify_removed(self)
        self.item = None
        self.parent = None
        self.root = None
        self.type = -1

    def move_node(self, new_parent, pos=None):
        """Re-create this node's tree item under new_parent, preserving its look and data."""
        tree = self.root.tree
        item = self.item
        image = tree.GetItemImage(item)
        data = tree.GetItemData(item)
        color = tree.GetItemTextColour(item)
        tree.Delete(item)
        if pos is None:
            self.item = tree.AppendItem(new_parent.item, self.name)
        else:
            self.item = tree.InsertItem(new_parent.item, pos, self.name)
        item = self.item
        tree.SetItemImage(item, image)
        tree.SetItemData(item, data)
        tree.SetItemTextColour(item, color)

    def __eq__(self, other):
        # Identity equality: list equality would compare children instead.
        return other is self

    def set_icon(self, icon=None):
        """Set the tree icon: the given bitmap, or a rendered thumbnail for images/paths."""
        root = self.root
        item = self.item
        data_object = self.object
        tree = root.tree
        if icon is None:
            if isinstance(data_object, SVGImage):
                image = self.root.renderer.make_thumbnail(data_object, width=20, height=20)
                image_id = self.root.tree_images.Add(bitmap=image)
                tree.SetItemImage(item, image=image_id)
            if isinstance(data_object, Path):
                image = self.root.renderer.make_raster(data_object, data_object.bbox(), width=20, height=20,
                                                       bitmap=True)
                if image is not None:
                    image_id = self.root.tree_images.Add(bitmap=image)
                    tree.SetItemImage(item, image=image_id)
                    tree.Update()
        else:
            image_id = self.root.tree_images.Add(bitmap=icon)
            tree.SetItemImage(item, image=image_id)

    def center(self):
        """Return the (x, y) center of this node's bounds, or None when unavailable."""
        try:
            bounds = self.bounds
            return (bounds[2] + bounds[0]) / 2.0, (bounds[3] + bounds[1]) / 2.0
        except Exception:
            # Deliberate best-effort: bounds may be None or malformed.
            return None

    def bbox(self):
        """Return the bounding box of the wrapped object."""
        return OperationPreprocessor.bounding_box(self.object)

    def objects_of_children(self, types):
        """Yield every wrapped object of the given type(s) in this subtree."""
        if isinstance(self.object, types):
            yield self.object
        for q in self:
            for o in q.objects_of_children(types):
                yield o

    def contains_path(self):
        """Return True if any node in this subtree wraps a Path."""
        if isinstance(self.object, Path):
            return True
        for q in self:
            if q.contains_path():
                return True
        return False

    def contains_image(self):
        """Return True if any node in this subtree wraps an SVGImage."""
        if isinstance(self.object, SVGImage):
            return True
        for q in self:
            if q.contains_image():
                return True
        return False

    def contains_text(self):
        """Return True if any node in this subtree wraps an SVGText."""
        if isinstance(self.object, SVGText):
            return True
        for q in self:
            if q.contains_text():
                return True
        return False
class RootNode(list):
""""Nodes are the presentation layer used to wrap the LaserOperations and the SVGElement classes. Stored in the
kernel. This is to allow nested structures beyond the flat structure of the actual data. It serves to help with
menu creation, name, drag and drop, bounding box cache, tree element updates.
The tree is structured with three main sub-elements of the RootNode, these are the Operations, the Elements, and
the files.
The Operations each contain a list of elements which they run in order and are stored within actual operations.
Elements store the graphics elements stored within the scene. The Elements are a list of elements stored in their
    desired order. This structure should reflect those changes back to the structure in the kernel.
Deleting an element from the tree should remove that element from any operation using it.
Deleting an operation should make no change to the elements structure.
All the nodes store a reference to their given tree item. So that a determination can be made when those items have
changed and provide piecemeal updates to the tree rather than recreating the entire thing.
"""
def __init__(self, kernel, gui):
    """Build the root of the presentation tree and populate it from the kernel.

    :param kernel: project kernel holding operations, elements, and file nodes.
    :param gui: main frame providing the wx tree widget and renderer.
    """
    list.__init__(self)
    # Root is its own parent/root so Node code can treat it uniformly.
    self.root = self
    self.parent = self
    self.object = "Project"
    self.name = "Project"
    self.semi_selected = []     # tree items tinted cyan (active selection)
    self.highlighted = []       # tree items tinted yellow (related items)
    self.type = NODE_ROOT
    self.kernel = kernel
    self.gui = gui
    self.tree = gui.tree
    self.renderer = gui.renderer
    self.bounds = None
    self.selected_elements = []
    self.selected_operations = []
    self.item = None
    self.dragging_node = None
    self.dragging_parent = None
    self.tree_images = None
    self.tree_lookup = None
    self.node_elements = None
    self.node_operations = None
    self.node_files = None
    self.rebuild_tree()
def highlight_select(self, item):
    """Mark a tree item as highlighted (yellow background)."""
    if item not in self.highlighted:
        self.highlighted.append(item)
        self.tree.SetItemBackgroundColour(item, wx.YELLOW)
def highlight_unselect(self):
    """Clear all highlighted items and reset the selection lists."""
    self.set_selected_elements(None)
    self.set_selected_operations(None)
    for item in self.highlighted:
        self.tree.SetItemBackgroundColour(item, wx.WHITE)
    self.highlighted.clear()
def highlight_select_all(self, objects):
    """Highlight every tree item in `objects`."""
    for e in objects:
        self.highlight_select(e)
def semi_select(self, item):
    """Mark a tree item as semi-selected (cyan) and add its object to the selection."""
    if item not in self.semi_selected:
        self.semi_selected.append(item)
        self.tree.SetItemBackgroundColour(item, wx.CYAN)
        node = self.tree.GetItemData(item)
        if node.type == NODE_ELEMENT:
            self.selected_elements.append(node.object)
        elif node.type == NODE_OPERATION:
            self.selected_operations.append(node.object)
def semi_unselect(self):
    """Clear all semi-selected items and reset the selection lists."""
    self.set_selected_elements(None)
    self.set_selected_operations(None)
    for item in self.semi_selected:
        self.tree.SetItemBackgroundColour(item, wx.WHITE)
    self.semi_selected.clear()
def semi_select_all(self, objects):
    """Semi-select every tree item in `objects`."""
    for e in objects:
        self.semi_select(e)
def rebuild_tree(self):
    """Discard and rebuild the entire tree from kernel operations/elements/files."""
    self.semi_selected.clear()
    self.highlighted.clear()
    self.tree.DeleteAllItems()
    self.tree_images = wx.ImageList()
    self.tree_images.Create(width=20, height=20)
    self.tree_lookup = {}
    self.tree.SetImageList(self.tree_images)
    self.item = self.tree.AddRoot(self.name)
    # Operations branch.
    self.node_operations = Node(NODE_OPERATION_BRANCH, self.kernel.operations, self, self, name=_("Operations"))
    self.node_operations.set_icon(icons8_laser_beam_20.GetBitmap())
    self.build_tree(self.node_operations, self.kernel.operations)
    for n in self.node_operations:
        if isinstance(n.object, RasterOperation):
            n.set_icon(icons8_direction_20.GetBitmap())
        else:
            n.set_icon(icons8_laser_beam_20.GetBitmap())
    # Elements branch.
    self.node_elements = Node(NODE_ELEMENTS_BRANCH, self.kernel.elements, self, self, name=_("Elements"))
    self.node_elements.set_icon(icons8_vector_20.GetBitmap())
    self.build_tree(self.node_elements, self.kernel.elements)
    # Files branch.
    self.node_files = Node(NODE_FILES_BRANCH, self.kernel.filenodes, self, self, name=_("Files"))
    self.node_files.set_icon(icons8_file_20.GetBitmap())
    self.build_tree(self.node_files, self.kernel.filenodes)
    for n in self.node_files:
        n.set_icon(icons8_file_20.GetBitmap())
    self.tree.ExpandAll()
def build_tree(self, parent_node, objects):
    """Recursively create child Nodes for a list or dict of objects.

    Child node types are the parent's type + 1 (see NODE_* constants).
    """
    if isinstance(objects, list):
        for obj in objects:
            node = Node(parent_node.type + 1, obj, parent_node, self)
            self.build_tree(node, obj)
    elif isinstance(objects, dict):
        # Dict keys become file nodes; their values become children.
        for obj_key, obj_value in objects.items():
            node = Node(parent_node.type + 1, obj_key, parent_node, self)
            node.filepath = obj_key
            if not isinstance(obj_value, (list, dict)):
                obj_value = [obj_value]
            self.build_tree(node, obj_value)
def notify_added(self, node):
    """Hook called when a Node is created; intentionally a no-op here."""
    pass
def notify_removed(self, node):
    """Hook called when a Node is removed; intentionally a no-op here."""
    pass
def notify_tree_data_change(self):
    """Signal that tree-backing data changed; triggers a full rebuild."""
    self.kernel.signal("rebuild_tree", 0)
def notify_tree_data_cleared(self):
    """Signal that tree-backing data was cleared; triggers a full rebuild."""
    self.kernel.signal("rebuild_tree", 0)
def on_element_update(self, *args):
    """Signal listener: an element changed; refresh every tree node wrapping it."""
    element = args[0]
    try:
        nodes = self.tree_lookup[id(element)]
        for node in nodes:
            node.update_name()
    except KeyError:
        # Element is not (or no longer) represented in the tree.
        pass
def set_selected_elements(self, selected):
    """Replace the element selection (clearing any operation selection) and notify.

    :param selected: None to clear, a single element, or a list of elements.
    """
    self.selected_operations.clear()
    self.selected_elements.clear()
    if selected is not None:
        items = selected if isinstance(selected, list) else [selected]
        self.selected_elements.extend(items)
    self.selection_updated()
def set_selected_operations(self, selected):
    """Replace the operation selection (clearing any element selection) and notify.

    :param selected: None to clear, a single operation, or a list of operations.
    """
    self.selected_operations.clear()
    self.selected_elements.clear()
    if selected is not None:
        items = selected if isinstance(selected, list) else [selected]
        self.selected_operations.extend(items)
    self.selection_updated()
def selection_updated(self):
    """Broadcast the current operation/element selection and refresh bounds."""
    self.kernel.signal("selected_ops", self.selected_operations)
    self.kernel.signal("selected_elements", self.selected_elements)
    self.selection_bounds_updated()
def selection_bounds_updated(self):
    """Recompute and broadcast the bounding box of the selected elements."""
    self.bounds = OperationPreprocessor.bounding_box(self.selected_elements)
    self.kernel.signal("selected_bounds", self.bounds)
def activate_selected_node(self):
    """Open the property window for the first selected element, if any."""
    selected = self.selected_elements
    if selected:
        self.activated_object(selected[0])
def move_selected(self, dx, dy):
    """Translate every selected element by (dx, dy) and shift the cached bounds.

    :param dx: x offset in scene units.
    :param dy: y offset in scene units.

    Fix: guard against self.bounds being None (possible when selected
    elements have no bounding box) — previously `b[0]` raised a TypeError.
    """
    if not self.selected_elements:
        return
    for obj in self.selected_elements:
        obj.transform.post_translate(dx, dy)
    b = self.bounds
    if b is not None:
        self.bounds = [b[0] + dx, b[1] + dy, b[2] + dx, b[3] + dy]
    self.kernel.signal("selected_bounds", self.bounds)
def on_drag_begin_handler(self, event):
    """
    Drag handler begin for the tree.
    :param event:
    :return:
    """
    self.dragging_node = None
    drag_item = event.GetItem()
    node_data = self.tree.GetItemData(drag_item)
    # Branch headers and file nodes are not draggable.
    if node_data.type == NODE_ELEMENTS_BRANCH or node_data.type == NODE_OPERATION_BRANCH or \
            node_data.type == NODE_FILES_BRANCH or node_data.type == NODE_FILE_ELEMENT or node_data.type == NODE_FILE_FILE:
        event.Skip()
        return
    self.dragging_node = node_data
    event.Allow()
def on_drag_end_handler(self, event):
    """
    Drag end handler for the tree.

    Applies the drop according to (drag type, drop type):
    - element -> operation: prepend element to the operation.
    - element -> element: reorder within the shared parent, or move across parents.
    - element -> operation element: insert before the drop target in that op.
    - element -> operations branch: auto-classify the element into operations.
    - op element -> operation: move the element to the end of the target op.
    - op element -> op element: reorder/move, mirroring the element case.
    - operation -> operation: reorder the operations list.
    Reordering uses a None-placeholder-then-compact pattern so indices stay
    valid while inserting.
    :param event:
    :return:
    """
    if self.dragging_node is None:
        event.Skip()
        return
    drag_node = self.dragging_node
    self.dragging_node = None
    drop_item = event.GetItem()
    if drop_item is None:
        event.Skip()
        return
    if drop_item.ID is None:
        event.Skip()
        return
    drop_node = self.tree.GetItemData(drop_item)
    if drop_node is None or drop_node == drag_node:
        event.Skip()
        return
    if drag_node.type == NODE_ELEMENT:
        if drop_node.type == NODE_OPERATION:
            # Dragging element into operation adds that element to the op.
            drop_node.object.insert(0, drag_node.object)
            self.notify_tree_data_change()
            event.Allow()
            return
        elif drop_node.type == NODE_ELEMENT:
            # Dragging element into element.
            if drag_node.parent is drop_node.parent:
                # Dragging and dropping within the same parent puts insert on other side.
                drag_index = drag_node.parent.index(drag_node)
                drag_node.parent.object[drag_index] = None
                drop_index = drop_node.parent.index(drop_node)
                if drag_index > drop_index:
                    drop_node.parent.object.insert(drop_index, drag_node.object)
                else:
                    drop_node.parent.object.insert(drop_index + 1, drag_node.object)
            else:
                drag_index = drag_node.parent.index(drag_node)
                drag_node.parent.object[drag_index] = None
                drop_index = drop_node.parent.index(drop_node)
                drop_node.parent.object.insert(drop_index, drag_node.object)
            # Compact: remove the None placeholder left at the old position.
            nodes = [n for n in drag_node.parent.object if n is not None]
            drag_node.parent.object.clear()
            drag_node.parent.object.extend(nodes)
            self.notify_tree_data_change()
            event.Allow()
            return
        elif drop_node.type == NODE_OPERATION_ELEMENT:
            drop_index = drop_node.parent.object.index(drop_node.object)
            drop_node.parent.object.insert(drop_index, drag_node.object)
            event.Allow()
            self.notify_tree_data_change()
            return
        elif drop_node.type == NODE_OPERATION_BRANCH:
            obj = drag_node.object
            self.kernel.classify(obj)
            event.Allow()
            self.notify_tree_data_change()
            # NOTE(review): no return here, so control also reaches the final
            # event.Skip() after event.Allow() — confirm this is intended.
    elif drag_node.type == NODE_OPERATION_ELEMENT:
        if drop_node.type == NODE_OPERATION:
            # Dragging from op element to operation.
            drag_index = drag_node.parent.index(drag_node)
            drag_node.parent.object[drag_index] = None
            drop_node.object.append(drag_node.object)
            nodes = [op_elem for op_elem in drag_node.parent.object if op_elem is not None]
            drag_node.parent.object.clear()
            drag_node.parent.object.extend(nodes)
            event.Allow()
            self.notify_tree_data_change()
            return
        if drop_node.type == NODE_OPERATION_ELEMENT:
            if drag_node.parent is drop_node.parent:
                # Dragging and dropping within the same parent puts insert on other side.
                drag_index = drag_node.parent.index(drag_node)
                drag_node.parent.object[drag_index] = None
                drop_index = drop_node.parent.index(drop_node)
                if drag_index > drop_index:
                    drop_node.parent.object.insert(drop_index, drag_node.object)
                else:
                    drop_node.parent.object.insert(drop_index + 1, drag_node.object)
            else:
                drag_index = drag_node.parent.index(drag_node)
                drag_node.parent.object[drag_index] = None
                drop_index = drop_node.parent.index(drop_node)
                drop_node.parent.object.insert(drop_index, drag_node.object)
            nodes = [n for n in drag_node.parent.object if n is not None]
            drag_node.parent.object.clear()
            drag_node.parent.object.extend(nodes)
            event.Allow()
            self.notify_tree_data_change()
            return
    elif drag_node.type == NODE_OPERATION:
        if drop_node.type == NODE_OPERATION:
            # Dragging operation to different operation.
            ops = drop_node.parent
            drop_pos = ops.index(drop_node)
            drag_pos = ops.index(drag_node)
            ops.object[drag_pos] = None
            if drag_pos > drop_pos:
                ops.object.insert(drop_pos, drag_node.object)
            else:
                ops.object.insert(drop_pos + 1, drag_node.object)
            nodes = [n for n in ops.object if n is not None]
            ops.object.clear()
            ops.object.extend(nodes)
            event.Allow()
            self.notify_tree_data_change()
            return
        elif drop_node.type == NODE_OPERATION_BRANCH:
            # Dragging operation to op branch.
            pass
    event.Skip()
    # Do not allow images added to engrave or cut operations
    # Group dragged into group, creates subgroup.
    # LaserOperation Elements dragged from one LaserOperation to another.
def on_item_right_click(self, event):
    """
    Handle a right-click on a tree item by popping up its context menu.

    :param event: wx tree event carrying the clicked item.
    :return:
    """
    clicked = event.GetItem()
    if clicked is None:
        return
    self.root.create_menu(self.gui, self.tree.GetItemData(clicked))
    event.Skip()
def on_item_activated(self, event):
    """
    Handle a double-click on a tree item: launch the property window
    associated with the underlying object.

    :param event: wx tree event carrying the activated item.
    :return:
    """
    self.activated_node(self.tree.GetItemData(event.GetItem()))
def activated_node(self, node):
    """Open the property window for *node*'s object; no-op for None."""
    if node is None:
        return
    self.activated_object(node.object)
def activated_object(self, obj):
    """
    Open the property window appropriate to *obj*'s concrete type.

    The isinstance chain is order-sensitive: the specific element types
    (Path, SVGText, SVGImage) must be tested before the SVGElement base
    class, and the specific operation types before LaserOperation.

    :param obj: element or operation object to edit.
    """
    if isinstance(obj, RasterOperation):
        self.kernel.open_window("RasterProperty").set_operation(obj)
    elif isinstance(obj, (CutOperation, EngraveOperation)):
        self.kernel.open_window("EngraveProperty").set_operation(obj)
    elif isinstance(obj, Path):
        self.kernel.open_window("PathProperty").set_element(obj)
    elif isinstance(obj, SVGText):
        self.kernel.open_window("TextProperty").set_element(obj)
    elif isinstance(obj, SVGImage):
        self.kernel.open_window("ImageProperty").set_element(obj)
    elif isinstance(obj, SVGElement):
        # Generic fallback for any other SVG element type.
        self.kernel.open_window("PathProperty").set_element(obj)
    elif isinstance(obj, LaserOperation):
        # Fallback for operation types not matched above.
        self.kernel.open_window("EngraveProperty").set_operation(obj)
def on_item_changed(self, event):
    """
    Tree menu item is changed. Modify the selection.

    Expands the raw wx tree selection to related nodes: selecting a branch
    selects its members, and selecting an element also semi-selects every
    other tree node linked to the same underlying object.

    :param event: wx tree selection event.
    :return:
    """
    item = event.GetItem()
    node = self.tree.GetItemData(item)
    if node is None:
        return
    # Reset previous selection markers before applying the new ones.
    self.semi_unselect()
    self.highlight_unselect()
    self.semi_select_all(self.tree.GetSelections())
    if node.type == NODE_ELEMENTS_BRANCH:
        # Selecting the elements branch semi-selects every element node.
        for n in self.node_elements:
            self.semi_select(n.item)
        self.gui.request_refresh()
        self.selection_updated()
        return
    elif node.type == NODE_OPERATION:
        # Selecting an operation highlights its child op-element nodes.
        for n in node:
            self.highlight_select(n.item)
        self.gui.request_refresh()
        self.selection_updated()
        return
    elif node.type == NODE_FILE_FILE:
        # Selecting a file semi-selects every tree link of its objects.
        for n in node:
            obj = n.object
            links = self.tree_lookup[id(obj)]
            for link in links:
                self.semi_select(link.item)
        self.gui.request_refresh()
        self.selection_updated()
        return
    elif node.type == NODE_OPERATION_ELEMENT:
        obj = node.object
        if len(self.semi_selected) != 1:
            return  # If this is a multi-selection event, do not select other op_elements
        links = self.tree_lookup[id(obj)]
        for link in links:
            self.semi_select(link.item)
        self.selection_updated()
        return
    elif node.type == NODE_ELEMENT:
        # Semi-select every tree link of each selected element's object.
        for item in self.tree.GetSelections():
            node = self.tree.GetItemData(item)
            obj = node.object
            links = self.tree_lookup[id(obj)]
            for link in links:
                self.semi_select(link.item)
        self.selection_updated()
        return
    self.gui.request_refresh()
    self.selection_updated()
    event.Allow()
def set_selected_by_position(self, position):
    """
    Select the topmost element whose bounding box contains *position*.

    If the current selection bounds already contain the position, the
    existing selection is kept.

    :param position: (x, y) scene coordinate.
    """
    if self.selected_elements is None:
        return
    if self.bounds is not None and self.contains(self.bounds, position):
        # Select by position aborted: position lies within current bounds.
        return
    self.selected_elements.clear()
    for element in reversed(self.kernel.elements):
        box = element.bbox()
        if box is None:
            continue
        if self.contains(box, position):
            self.set_selected_elements(element)
            return
    self.selection_updated()
def contains(self, box, x, y=None):
    """
    Test whether point (x, y) lies within the axis-aligned bounds *box*.

    :param box: (xmin, ymin, xmax, ymax) bounds.
    :param x: x coordinate, or an (x, y) pair when y is omitted.
    :param y: y coordinate, optional.
    :return: True if the point is inside (inclusive), else False.
    """
    if y is None:
        x, y = x
    within_x = box[0] <= x <= box[2]
    within_y = box[1] <= y <= box[3]
    return within_x and within_y
def create_menu(self, gui, node):
    """
    Create menu items. This is used for both the scene and the tree to create menu items.

    Bug fix: the degree sign in the two rotate submenus was mojibake ("░");
    it is restored to "°".

    :param gui: Gui used to create menu items.
    :param node: The Node clicked on for the generated menu.
    :return:
    """
    if node is None:
        return
    if isinstance(node, SVGElement):
        # If this is called with an SVGElement rather than a Node. Convert them.
        match_object = node
        node = None
        for element in self.node_elements:
            if element.object is match_object:
                node = element
                break
        if node is None:
            return
    menu = wx.Menu()
    if isinstance(node, RootNode):
        return
    t = node.type
    # Other semi-selected nodes of the same type take part in multi-item actions.
    selections = [self.tree.GetItemData(e) for e in self.semi_selected]
    selections = [s for s in selections if s.type == t]
    if t == NODE_OPERATION:
        gui.Bind(wx.EVT_MENU, self.menu_execute(node),
                 menu.Append(wx.ID_ANY, _("Execute Job"), "", wx.ITEM_NORMAL))
    if t in (NODE_OPERATION_BRANCH, NODE_FILES_BRANCH, NODE_ELEMENTS_BRANCH, NODE_OPERATION):
        gui.Bind(wx.EVT_MENU, self.menu_clear_all(node),
                 menu.Append(wx.ID_ANY, _("Clear All"), "", wx.ITEM_NORMAL))
    if t in (NODE_OPERATION, NODE_ELEMENT, NODE_FILE_FILE, NODE_OPERATION_ELEMENT):
        gui.Bind(wx.EVT_MENU, self.menu_remove(node),
                 menu.Append(wx.ID_ANY, _("Remove: %s") % str(node.name)[:10], "", wx.ITEM_NORMAL))
    if t in (NODE_ELEMENT, NODE_OPERATION_ELEMENT) and len(selections) > 1:
        gui.Bind(wx.EVT_MENU, self.menu_remove_multi(node),
                 menu.Append(wx.ID_ANY, _("Remove: %d objects") % len(selections), "", wx.ITEM_NORMAL))
    if t in (NODE_OPERATION, NODE_ELEMENTS_BRANCH, NODE_OPERATION_BRANCH) and len(node) > 1:
        gui.Bind(wx.EVT_MENU, self.menu_reverse_order(node),
                 menu.Append(wx.ID_ANY, _("Reverse Layer Order"), "", wx.ITEM_NORMAL))
    if t == NODE_ROOT:
        pass
    elif t == NODE_OPERATION_BRANCH:
        pass
    elif t == NODE_ELEMENTS_BRANCH:
        gui.Bind(wx.EVT_MENU, self.menu_reclassify_operations(node),
                 menu.Append(wx.ID_ANY, _("Reclassify Operations"), "", wx.ITEM_NORMAL))
    elif t == NODE_FILES_BRANCH:
        pass
    elif t == NODE_OPERATION:
        # Conversion entries are placeholders; they are disabled below.
        operation_convert_submenu = wx.Menu()
        for name in ("Raster", "Engrave", "Cut"):
            menu_op = operation_convert_submenu.Append(wx.ID_ANY, _("Convert %s") % name, "", wx.ITEM_NORMAL)
            gui.Bind(wx.EVT_MENU, self.menu_convert_operation(node, name), menu_op)
            menu_op.Enable(False)
        for name in ("ZDepth_Raster", "Multishade_Raster", "Wait-Step_Raster"):
            menu_op = operation_convert_submenu.Append(wx.ID_ANY, _("Convert %s") % name, "", wx.ITEM_NORMAL)
            gui.Bind(wx.EVT_MENU, self.menu_convert_operation(node, name), menu_op)
            menu_op.Enable(False)
        menu.AppendSubMenu(operation_convert_submenu, _("Convert Operation"))
        duplicate_menu = wx.Menu()
        gui.Bind(wx.EVT_MENU, self.menu_passes(node, 1),
                 duplicate_menu.Append(wx.ID_ANY, _("Add 1 pass."), "", wx.ITEM_NORMAL))
        for i in range(2, 10):
            gui.Bind(wx.EVT_MENU, self.menu_passes(node, i),
                     duplicate_menu.Append(wx.ID_ANY, _("Add %d passes.") % i, "", wx.ITEM_NORMAL))
        menu.AppendSubMenu(duplicate_menu, _("Passes"))
        if isinstance(node.object, RasterOperation):
            raster_step_menu = wx.Menu()
            for i in range(1, 10):
                menu_item = raster_step_menu.Append(wx.ID_ANY, _("Step %d") % i, "", wx.ITEM_RADIO)
                gui.Bind(wx.EVT_MENU, self.menu_raster_step_operation(node, i), menu_item)
                step = float(node.object.raster_step)
                if i == step:
                    # Pre-check the radio item matching the current step.
                    menu_item.Check(True)
            menu.AppendSubMenu(raster_step_menu, _("Step"))
            gui.Bind(wx.EVT_MENU, self.menu_raster(node),
                     menu.Append(wx.ID_ANY, _("Make Raster Image"), "", wx.ITEM_NORMAL))
    elif t == NODE_FILE_FILE:
        if node.filepath is not None:
            name = os.path.basename(node.filepath)
            gui.Bind(wx.EVT_MENU, self.menu_reload(node),
                     menu.Append(wx.ID_ANY, _("Reload %s") % name, "", wx.ITEM_NORMAL))
    elif t == NODE_ELEMENT:
        duplicate_menu = wx.Menu()
        for i in range(1, 10):
            gui.Bind(wx.EVT_MENU, self.menu_duplicate(node, i),
                     duplicate_menu.Append(wx.ID_ANY, _("Make %d copies.") % i, "", wx.ITEM_NORMAL))
        menu.AppendSubMenu(duplicate_menu, _("Duplicate"))
        gui.Bind(wx.EVT_MENU, self.menu_reset(node),
                 menu.Append(wx.ID_ANY, _("Reset User Changes"), "", wx.ITEM_NORMAL))
        path_scale_sub_menu = wx.Menu()
        for i in range(1, 25):
            gui.Bind(wx.EVT_MENU, self.menu_scale(node, 6.0 / float(i)),
                     path_scale_sub_menu.Append(wx.ID_ANY, _("Scale %.0f%%") % (600.0 / float(i)), "",
                                                wx.ITEM_NORMAL))
        menu.AppendSubMenu(path_scale_sub_menu, _("Scale"))
        path_rotate_sub_menu = wx.Menu()
        for i in range(2, 13):
            angle = Angle.turns(1.0 / float(i))
            gui.Bind(wx.EVT_MENU, self.menu_rotate(node, 1.0 / float(i)),
                     path_rotate_sub_menu.Append(wx.ID_ANY, _(u"Rotate turn/%d, %.0f°") % (i, angle.as_degrees),
                                                 "",
                                                 wx.ITEM_NORMAL))
        for i in range(2, 13):
            angle = Angle.turns(1.0 / float(i))
            gui.Bind(wx.EVT_MENU, self.menu_rotate(node, -1.0 / float(i)),
                     path_rotate_sub_menu.Append(wx.ID_ANY,
                                                 _(u"Rotate turn/%d, -%.0f°") % (i, angle.as_degrees), "",
                                                 wx.ITEM_NORMAL))
        menu.AppendSubMenu(path_rotate_sub_menu, _("Rotate"))
        gui.Bind(wx.EVT_MENU, self.menu_reify(node),
                 menu.Append(wx.ID_ANY, _("Reify User Changes"), "", wx.ITEM_NORMAL))
        if isinstance(node.object, Path):
            gui.Bind(wx.EVT_MENU, self.menu_subpath(node),
                     menu.Append(wx.ID_ANY, _("Break Subpaths"), "", wx.ITEM_NORMAL))
        if isinstance(node.object, SVGImage):
            raster_step_menu = wx.Menu()
            for i in range(1, 10):
                menu_item = raster_step_menu.Append(wx.ID_ANY, _("Step %d") % i, "", wx.ITEM_RADIO)
                gui.Bind(wx.EVT_MENU, self.menu_raster_step_image(node, i), menu_item)
                if 'raster_step' in node.object.values:
                    step = float(node.object.values['raster_step'])
                else:
                    step = 1.0
                if i == step:
                    m = node.object.transform
                    # NOTE(review): 'or' looks suspicious here (likely meant 'and');
                    # kept as-is to preserve behavior — confirm with original author.
                    if m.a == step or m.b == 0.0 or m.c == 0.0 or m.d == step:
                        menu_item.Check(True)
            menu.AppendSubMenu(raster_step_menu, _("Step"))
            gui.Bind(wx.EVT_MENU, self.menu_raster_actualize(node),
                     menu.Append(wx.ID_ANY, _("Actualize Pixels"), "", wx.ITEM_NORMAL))
            gui.Bind(wx.EVT_MENU, self.menu_dither(node),
                     menu.Append(wx.ID_ANY, _("Dither to 1 bit"), "", wx.ITEM_NORMAL))
            raster_zdepth_menu = wx.Menu()
            for i in range(2, 10):
                menu_item = raster_zdepth_menu.Append(wx.ID_ANY, _("Divide Into %d Images") % i, "", wx.ITEM_NORMAL)
                gui.Bind(wx.EVT_MENU, self.menu_raster_zdepth(node, i), menu_item)
            menu.AppendSubMenu(raster_zdepth_menu, _("ZDepth Divide"))
        if isinstance(node.object, SVGText):
            gui.Bind(wx.EVT_MENU, self.menu_convert_text(node),
                     menu.Append(wx.ID_ANY, _("Convert to Raster"), "", wx.ITEM_NORMAL))
    if menu.MenuItemCount != 0:
        gui.PopupMenu(menu)
        menu.Destroy()
def menu_raster_step_operation(self, node, step_value):
    """
    Build a handler that changes the raster step value of an operation.

    :param node: tree node wrapping a RasterOperation.
    :param step_value: new raster step value.
    :return: event handler closure.
    """

    def specific(event):
        operation = node.object
        if isinstance(operation, RasterOperation):
            operation.raster_step = step_value
        self.kernel.signal("element_property_update", node.object)

    return specific
def menu_raster_step_image(self, node, step_value):
    """
    Build a handler that changes the raster step of an image element.

    The transform is rewritten as a pure scale-by-step, preserving only
    the translation component, so the image renders at the new step size.

    :param node: tree node wrapping the image element.
    :param step_value: new raster step value.
    :return: event handler closure.
    """

    def specific(event):
        image_obj = node.object
        image_obj.values[VARIABLE_NAME_RASTER_STEP] = str(step_value)
        matrix = image_obj.transform
        offset_x, offset_y = matrix.e, matrix.f
        image_obj.transform = Matrix.scale(float(step_value), float(step_value))
        image_obj.transform.post_translate(offset_x, offset_y)
        self.kernel.signal("element_property_update", node.object)
        self.root.gui.request_refresh()

    return specific
def menu_raster_actualize(self, node):
    """
    Build a handler that makes the raster image native at the current
    scale by baking rotation, scaling, skewing etc. into the pixel data.

    :param node: tree node wrapping an SVGImage.
    :return: event handler closure.
    """

    def specific(event):
        target = node.object
        if not isinstance(target, SVGImage):
            return
        OperationPreprocessor.make_actual(target)
        node.bounds = None
        node.set_icon()
        self.selection_bounds_updated()
        self.kernel.signal('rebuild_tree', 0)

    return specific
def menu_dither(self, node):
    """
    Build a handler that dithers a raster element down to a 1-bit image.

    Fully transparent RGBA pixels are flattened to white first so they do
    not dither to black.

    :param node: tree node wrapping an SVGImage.
    :return: event handler closure.
    """

    def specific(event):
        target = node.object
        if isinstance(target, SVGImage):
            img = target.image
            if img.mode == 'RGBA':
                px = img.load()
                width, height = img.size
                for row in range(height):
                    for col in range(width):
                        if px[col, row][3] == 0:
                            px[col, row] = (255, 255, 255, 255)
            target.image = img.convert("1")
            target.cache = None
        self.kernel.signal('rebuild_tree', 0)

    return specific
def menu_raster_zdepth(self, node, divide=7):
    """
    Subdivides an image into a zdepth image set.

    Each of the *divide* output images keeps only the pixels whose gray
    value falls in its brightness band: darker pixels become black,
    brighter ones white, and in-band pixels are rescaled across 0..255
    before the 1-bit conversion.

    :param node: SVGImage node.
    :param divide: number of brightness bands / output images.
    :return: zdepth function
    """

    def specific(event):
        element = node.object
        if not isinstance(element, SVGImage):
            return
        adding_elements = []
        if element.image.mode != 'RGBA':
            element.image = element.image.convert('RGBA')
        band = 255 / divide  # brightness width of each band (float)
        for i in range(0, divide):
            threshold_min = i * band
            threshold_max = threshold_min + band
            image_element = copy(element)
            image_element.image = image_element.image.copy()
            if OperationPreprocessor.needs_actualization(image_element):
                OperationPreprocessor.make_actual(image_element)
            img = image_element.image
            new_data = img.load()
            width, height = img.size
            for y in range(height):
                for x in range(width):
                    pixel = new_data[x, y]
                    if pixel[3] == 0:
                        # Fully transparent pixels render as white.
                        new_data[x, y] = (255, 255, 255, 255)
                        continue
                    gray = (pixel[0] + pixel[1] + pixel[2]) / 3.0
                    if threshold_min >= gray:
                        new_data[x, y] = (0, 0, 0, 255)
                    elif threshold_max < gray:
                        new_data[x, y] = (255, 255, 255, 255)
                    else:  # threshold_min <= grey < threshold_max
                        # Stretch the in-band gray value across 0..255.
                        v = gray - threshold_min
                        v *= divide
                        v = int(round(v))
                        new_data[x, y] = (v, v, v, 255)
            image_element.image = image_element.image.convert('1')
            adding_elements.append(image_element)
        self.kernel.elements.extend(adding_elements)
        self.kernel.classify(adding_elements)
        self.set_selected_elements(None)
        self.kernel.signal('rebuild_tree', 0)

    return specific
def menu_raster(self, node):
    """
    Convert a vector element into a raster element.

    Renders the operation's child elements into one image sized at
    1/raster_step resolution, scales it back up by the step, and replaces
    the operation's children with the rendered image.

    :param node: tree node wrapping a RasterOperation.
    :return: event handler closure.
    """

    def specific(event):
        renderer = self.renderer
        child_objects = list(node.objects_of_children(SVGElement))
        bounds = OperationPreprocessor.bounding_box(child_objects)
        if bounds is None:
            return None
        step = float(node.object.raster_step)
        xmin, ymin, xmax, ymax = bounds
        # NOTE(review): width/height are floats here — presumably
        # make_raster accepts non-integer sizes; confirm.
        image = renderer.make_raster(child_objects, bounds, width=(xmax - xmin) / step, height=(ymax - ymin) / step)
        image_element = SVGImage(image=image)
        image_element.transform.post_scale(step, step)
        image_element.transform.post_translate(xmin, ymin)
        image_element.values['raster_step'] = step
        self.kernel.elements.append(image_element)
        # Replace the operation's children with the rendered image.
        node.object.clear()
        self.build_tree(self.node_elements, image_element)
        node.object.append(image_element)
        self.selection_bounds_updated()
        self.kernel.signal('rebuild_tree', 0)

    return specific
def menu_reify(self, node):
    """
    Build a handler that reifies every selected element so its transform
    is applied directly to the underlying object.

    :param node: clicked node (selection-wide action).
    :return: event handler closure.
    """

    def specific(event):
        for selected in self.selected_elements:
            selected.reify()
            selected.cache = None
        self.kernel.signal('rebuild_tree', 0)

    return specific
def menu_reset(self, node):
    """
    Build a handler that resets the transform of every selected element.

    :param node: clicked node (selection-wide action).
    :return: event handler closure.
    """

    def specific(event):
        for selected in self.selected_elements:
            selected.transform.reset()
        self.selection_bounds_updated()
        self.gui.request_refresh()

    return specific
def menu_rotate(self, node, value):
    """
    Build a handler that rotates the selected elements about the center of
    the parent's bounding box.

    :param node: clicked node; its parent supplies the rotation center.
    :param value: rotation amount in turns (converted to radians here).
    :return: event handler closure.
    """
    radians = value * tau

    def specific(event):
        bounds = OperationPreprocessor.bounding_box(node.parent)
        center_x = (bounds[0] + bounds[2]) / 2.0
        center_y = (bounds[1] + bounds[3]) / 2.0
        for selected in self.selected_elements:
            selected.transform.post_rotate(radians, center_x, center_y)
        self.selection_bounds_updated()
        self.kernel.signal('rebuild_tree', 0)

    return specific
def menu_scale(self, node, value):
    """
    Build a handler that scales the selected elements uniformly about the
    center of the current selection bounds.

    :param node: clicked node (selection-wide action).
    :param value: uniform scale factor.
    :return: event handler closure.
    """

    def specific(event):
        xmin, ymin, xmax, ymax = self.bounds
        center_x = (xmin + xmax) / 2.0
        center_y = (ymin + ymax) / 2.0
        for selected in self.selected_elements:
            selected.transform.post_scale(value, value, center_x, center_y)
        self.selection_bounds_updated()
        self.kernel.signal('rebuild_tree', 0)

    return specific
def menu_reload(self, node):
    """
    Build a handler that reloads the node's file from disk.

    :param node: tree node carrying a filepath.
    :return: event handler closure.
    """

    def specific(event):
        self.gui.load(node.filepath)

    return specific
def menu_remove_multi(self, remove_node):
    """
    Build a handler that removes every semi-selected node of the same type
    as *remove_node* from the scene.

    :param remove_node: the node that was right-clicked.
    :return: event handler closure.
    """

    def specific(event):
        node = remove_node
        selections = [self.tree.GetItemData(e) for e in self.semi_selected]
        selections = [s for s in selections if s.type == node.type]
        if node.type == NODE_ELEMENT:
            # Removing element can only have 1 copy.
            removed_objects = self.selected_elements
            for e in removed_objects:
                self.kernel.elements.remove(e)
            # Strip removed objects out of every operation; operations that
            # become empty are marked None and filtered out afterwards.
            for i in range(len(self.kernel.operations)):
                elems = [e for e in self.kernel.operations[i] if e not in removed_objects]
                self.kernel.operations[i].clear()
                self.kernel.operations[i].extend(elems)
                if len(self.kernel.operations[i]) == 0:
                    self.kernel.operations[i] = None
            ops = [op for op in self.kernel.operations if op is not None]
            self.kernel.operations.clear()
            self.kernel.operations.extend(ops)
        elif node.type == NODE_OPERATION_ELEMENT:
            # Operation_element can occur many times in the same operation node.
            modified = []
            # NOTE: 'node' is deliberately re-bound by this loop.
            for node in selections:
                index = node.parent.index(node)
                op = node.parent.object
                if index == -1:
                    continue
                op[index] = None  # mark for removal without shifting indices
                if op not in modified:
                    modified.append(op)
            for s in modified:
                op_elems = [op_elem for op_elem in s if op_elem is not None]
                s.clear()
                s.extend(op_elems)
        self.set_selected_elements(None)
        self.kernel.signal('rebuild_tree', 0)

    return specific
def menu_remove(self, remove_node):
    """
    Build a handler that removes *remove_node* — and, for elements, the
    entire current selection — from the scene.

    :param remove_node: the node that was right-clicked.
    :return: event handler closure.
    """

    def specific(event):
        node = remove_node
        if node.type == NODE_ELEMENT:
            # Removing element can only have 1 copy.
            # All selected elements are removed.
            removed_objects = self.selected_elements
            for e in removed_objects:
                self.kernel.elements.remove(e)
            # Strip removed objects out of every operation; operations that
            # become empty are marked None and filtered out afterwards.
            for i in range(len(self.kernel.operations)):
                elems = [e for e in self.kernel.operations[i] if e not in removed_objects]
                self.kernel.operations[i].clear()
                self.kernel.operations[i].extend(elems)
                if len(self.kernel.operations[i]) == 0:
                    self.kernel.operations[i] = None
            ops = [op for op in self.kernel.operations if op is not None]
            self.kernel.operations.clear()
            self.kernel.operations.extend(ops)
        elif node.type == NODE_OPERATION:
            # Removing operation can only have 1 copy.
            self.kernel.operations.remove(node.object)
        elif node.type == NODE_FILE_FILE:
            # Removing file can only have 1 copy.
            del self.kernel.filenodes[node.filepath]
        elif node.type == NODE_OPERATION_ELEMENT:
            # Operation_element can occur many times in the same operation node.
            index = node.parent.index(node)
            op = node.parent.object
            if index == -1:
                # Node not found by identity; fall back to removal by object.
                op.remove(node.object)
            else:
                del op[index]
        self.set_selected_elements(None)
        self.kernel.signal('rebuild_tree', 0)

    return specific
def menu_duplicate(self, node, copies):
    """
    Build a handler that duplicates every selected element *copies* times.

    :param node: clicked node (selection-wide action).
    :param copies: number of copies to add per element.
    :return: event handler closure.
    """

    def specific(event):
        duplicates = [copy(original) for original in list(self.selected_elements) * copies]
        self.kernel.elements.extend(duplicates)
        self.kernel.classify(duplicates)
        self.set_selected_elements(None)
        self.kernel.signal('rebuild_tree', 0)

    return specific
def menu_passes(self, node, copies):
    """
    Build a handler that adds *copies* extra passes to an operation by
    re-appending its current element list.

    :param node: tree node wrapping the operation.
    :param copies: number of additional passes.
    :return: event handler closure.
    """

    def specific(event):
        operation = node.object
        operation.extend(list(operation) * copies)
        self.kernel.signal('rebuild_tree', 0)

    return specific
def menu_subpath(self, node):
    """
    Build a handler that breaks each selected path into its subpaths and
    adds the pieces as new elements.

    :param node: clicked node (selection-wide action).
    :return: event handler closure.
    """

    def specific(event):
        for selected in self.selected_elements:
            flattened = abs(selected)
            pieces = [Path(subpath) for subpath in flattened.as_subpaths()]
            self.kernel.elements.extend(pieces)
        self.kernel.signal('rebuild_tree', 0)
        self.set_selected_elements(None)

    return specific
def menu_execute(self, node):
    """
    Build a handler that opens the JobInfo window for the currently
    selected operations.

    :param node: clicked node (selection-wide action).
    :return: event handler closure.
    """

    def open_jobinfo_window(event):
        window = self.kernel.open_window("JobInfo")
        window.set_operations(self.selected_operations)

    return open_jobinfo_window
def menu_reverse_order(self, node):
    """
    Build a handler that reverses the order of the node's object list.

    :param node: branch or operation node to reverse.
    :return: event handler closure.
    """

    def specific(event):
        node.object.reverse()
        self.kernel.signal('rebuild_tree', 0)

    return specific
def menu_clear_all(self, node):
    """
    Build a handler that clears all children of the given branch node.

    For the elements branch this also strips the cleared elements out of
    every operation and drops operations that become empty.

    :param node: branch node to clear.
    :return: event handler closure.
    """

    def specific(event):
        if node.type == NODE_ELEMENTS_BRANCH:
            elements = self.kernel.elements
            for i in range(len(self.kernel.operations)):
                self.kernel.operations[i] = [e for e in self.kernel.operations[i]
                                             if e not in elements]
                if len(self.kernel.operations[i]) == 0:
                    self.kernel.operations[i] = None
            # NOTE(review): rebinding kernel.operations differs from the
            # clear()/extend() pattern used elsewhere in this file — confirm
            # nothing else holds a reference to the old list.
            self.kernel.operations = [op for op in self.kernel.operations
                                      if op is not None]
        node.object.clear()
        self.selection_bounds_updated()
        self.kernel.signal('rebuild_tree', 0)

    return specific
def menu_reclassify_operations(self, node):
    """
    Build a handler that discards all operations and reclassifies every
    element from scratch.

    :param node: node whose root supplies the kernel.
    :return: event handler closure.
    """

    def specific(event):
        kernel = node.root.kernel
        kernel.operations.clear()
        kernel.classify(kernel.elements)
        self.kernel.signal('rebuild_tree', 0)

    return specific
def menu_convert_operation(self, node, name):
    """
    Build a handler for converting an operation to another kind.

    Not implemented yet; the returned handler always raises.

    :param node: operation node to convert.
    :param name: target operation kind.
    :return: event handler closure raising NotImplementedError.
    """

    def specific(event):
        raise NotImplementedError

    return specific
def menu_convert_text(self, node):
    """
    Build a handler for converting a text element to raster.

    Not implemented yet; the returned handler always raises.

    :param node: text node to convert.
    :return: event handler closure raising NotImplementedError.
    """

    def specific(event):
        raise NotImplementedError

    return specific
class MappedKey:
    """
    Associates a keyboard key with the command string it triggers.
    """

    def __init__(self, key, command):
        # Key identifier string.
        self.key = key
        # Command executed when the key fires.
        self.command = command

    def __str__(self):
        return self.key
class wxMeerK40t(Module, wx.App):
    """
    wxMeerK40t is the wx.App main class and a qualified Module for the MeerK40t kernel.

    Running MeerK40t without the wxMeerK40t gui is both possible and reasonable. This should not change the way the
    underlying code runs. It should just be a series of frames held together with the kernel.
    """

    def __init__(self):
        wx.App.__init__(self, 0)
        Module.__init__(self)
        self.locale = None
        self.kernel = None

    def OnInit(self):
        return True

    def initialize(self, kernel, name=None):
        """
        Register this app with the kernel, install translation hooks, and
        register every GUI window the application can open.

        :param kernel: MeerK40t kernel to attach to.
        :param name: unused, kept for the Module interface.
        """
        kernel.setting(wx.App, 'gui', self)  # Registers self as kernel.gui
        kernel.add_window("MeerK40t", MeerK40t)
        self.kernel = kernel
        _ = wx.GetTranslation
        wx.Locale.AddCatalogLookupPathPrefix('locale')
        kernel.run_later = wx.CallAfter
        kernel.translation = wx.GetTranslation
        kernel.set_config(wx.Config("MeerK40t"))
        kernel.setting(int, 'language', None)
        kernel.add_window('Shutdown', Shutdown)
        kernel.add_window('PathProperty', PathProperty)
        kernel.add_window('TextProperty', TextProperty)
        kernel.add_window('ImageProperty', ImageProperty)
        kernel.add_window('RasterProperty', RasterProperty)
        kernel.add_window('EngraveProperty', EngraveProperty)
        kernel.add_window('Controller', Controller)
        kernel.add_window("Preferences", Preferences)
        kernel.add_window("CameraInterface", CameraInterface)
        kernel.add_window("Settings", Settings)
        kernel.add_window("Rotary", RotarySettings)
        kernel.add_window("Alignment", Alignment)
        kernel.add_window("About", About)
        kernel.add_window("DeviceManager", DeviceManager)
        kernel.add_window("Keymap", Keymap)
        kernel.add_window("UsbConnect", UsbConnect)
        kernel.add_window("Navigation", Navigation)
        # Bug fix: "Controller" was registered twice; the duplicate removed.
        kernel.add_window("JobSpooler", JobSpooler)
        kernel.add_window("JobInfo", JobInfo)
        kernel.add_window("BufferView", BufferView)
        kernel.add_window("Adjustments", Adjustments)
        kernel.add_control("Delete Settings", self.clear_control)
        language = kernel.language
        if language is not None and language != 0:
            self.language_to(language)(None)
        self.kernel.open_window("MeerK40t")

    def clear_control(self):
        """Delete all persisted settings and shut the kernel down."""
        if self.kernel.config is not None:
            self.kernel.config.DeleteAll()
            self.kernel.config = None
        self.kernel.shutdown()

    def shutdown(self, kernel):
        """Detach from the kernel and deregister this module."""
        self.kernel = None
        del kernel.modules['MeerK40t']

    def language_swap(self, lang):
        """Switch language immediately and reopen the main window."""
        self.language_to(lang)(None)
        self.kernel.open_window("MeerK40t")

    def language_to(self, lang):
        """
        Returns a function to change the language to the language specified.

        :param lang: language to switch to
        :return:
        """

        def update_language(event):
            """
            Update language to the requested language.
            """
            language_code, language_name, language_index = supported_languages[lang]
            self.kernel.language = lang
            if self.locale:
                # The old locale must have no outside references before deletion.
                assert sys.getrefcount(self.locale) <= 2
                del self.locale
            self.locale = wx.Locale(language_index)
            if self.locale.IsOk():
                self.locale.AddCatalog('meerk40t')
            else:
                self.locale = None
            self.kernel.signal('language', (lang, language_code, language_name, language_index))

        return update_language
# end of class MeerK40tGui
def handleGUIException(exc_type, exc_value, exc_traceback):
    """
    Handler for errors. Save error to a file, and create dialog.

    Installed as sys.excepthook below so uncaught GUI exceptions are
    printed, logged to a timestamped file, and shown in a dialog.

    :param exc_type:
    :param exc_value:
    :param exc_traceback:
    :return:
    """
    err_msg = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
    print(err_msg)
    try:
        import datetime
        filename = "MeerK40t-{date:%Y-%m-%d_%H_%M_%S}.txt".format(date=datetime.datetime.now())
        print(_("Saving Log: %s") % filename)
        with open(filename, "w") as file:
            # Crash logs are not translated.
            file.write("MeerK40t crash log. Version: %s\n" % MEERK40T_VERSION)
            file.write("Please report to: %s\n\n" % MEERK40T_ISSUES)
            file.write(err_msg)
        # NOTE(review): prints the file object's repr; looks like leftover
        # debug output — confirm intent.
        print(file)
    except:  # I already crashed once, if there's another here just ignore it.
        pass
    dlg = wx.MessageDialog(None, err_msg, _('Error encountered'), wx.OK | wx.ICON_ERROR)
    dlg.ShowModal()
    dlg.Destroy()


# Route all uncaught exceptions through the handler above.
sys.excepthook = handleGUIException
|
# 자물쇠와 열쇠
#
# https://programmers.co.kr/learn/courses/30/lessons/60059
# 풀이 실패
from copy import deepcopy
def rotate_key(key):
    """
    Return *key* rotated 90 degrees counter-clockwise.

    :param key: M x M matrix.
    :return: new M x M matrix (rows are fresh lists).
    """
    # Transpose, then reverse the rows: ret[M-1-c][r] == key[r][c].
    return [list(row) for row in zip(*key)][::-1]
def solution(key, lock):
    """
    Lock-and-key puzzle (programmers.co.kr 60059).

    The key (M x M binary matrix) may be rotated in 90-degree steps and
    translated anywhere over the lock (N x N binary matrix). The lock opens
    when every cell of the lock area equals exactly 1: every 0-hole filled
    by a key tooth and no 1-pin collided with one.

    Bug fixes versus the original attempt: the success test used the
    builtin ``open`` (always truthy, so it always returned True), and the
    verification loops scanned ``max(M, R)..min(M+N, R+1)`` with ``R`` used
    for both axes instead of the full lock area.

    :param key: M x M list of 0/1 rows.
    :param lock: N x N list of 0/1 rows.
    :return: True if some rotation/translation opens the lock, else False.
    """
    N = len(lock)
    M = len(key)
    L = N + 2 * M
    # Pad the board so the key can overhang every edge of the lock.
    board = [[0] * L for _ in range(L)]
    for r in range(N):
        for c in range(N):
            board[M + r][M + c] = lock[r][c]

    def _rotated(k):
        # 90-degree rotation; direction is irrelevant since all four
        # rotations are tried.
        return [list(row) for row in zip(*k)][::-1]

    variants = [key]
    for _ in range(3):
        variants.append(_rotated(variants[-1]))

    for k in variants:
        for R in range(M + N):
            for C in range(M + N):
                # XOR the key in: a lock cell is 1 afterwards exactly when
                # a hole met a tooth or a pin stayed untouched.
                for r in range(M):
                    for c in range(M):
                        board[R + r][C + c] ^= k[r][c]
                if all(board[r][c]
                       for r in range(M, M + N)
                       for c in range(M, M + N)):
                    return True
                # Undo before the next offset (XOR is its own inverse).
                for r in range(M):
                    for c in range(M):
                        board[R + r][C + c] ^= k[r][c]
    return False
# Example from the problem statement; a rotated key opens this lock.
# (A stray "|" token at the end of the original print line was removed.)
key = [[0, 0, 0],
       [1, 0, 0],
       [0, 1, 1]]
lock = [[1, 1, 1],
        [1, 1, 0],
        [1, 0, 1]]
print(solution(key, lock))  # expected: True
"""Tests with explicit examples.
"""
import numpy as onp
from hypothesis import given, settings
from hypothesis import strategies as st
from utils import assert_arrays_close, assert_transforms_close, sample_transform
import jaxlie
@settings(deadline=None)
@given(_random_module=st.random_module())
def test_se2_translation(_random_module):
    """A zero-rotation SE(2) transform applied to its own translation doubles it."""
    t = onp.random.randn(2)
    transform = jaxlie.SE2.from_xy_theta(*t, theta=0.0)
    assert_arrays_close(transform @ t, t * 2)
@settings(deadline=None)
@given(_random_module=st.random_module())
def test_se3_translation(_random_module):
    """A zero-rotation SE(3) transform applied to its own translation doubles it."""
    t = onp.random.randn(3)
    transform = jaxlie.SE3.from_rotation_and_translation(
        rotation=jaxlie.SO3.identity(),
        translation=t,
    )
    assert_arrays_close(transform @ t, t * 2)
def test_se2_rotation():
    """A quarter-turn SE(2) rotation maps +x to +y."""
    quarter_turn = jaxlie.SO2.from_radians(onp.pi / 2.0)
    T_w_b = jaxlie.SE2.from_rotation_and_translation(
        rotation=quarter_turn,
        translation=onp.zeros(2),
    )
    point_body = onp.array([1.0, 0.0])
    point_world = onp.array([0.0, 1.0])
    assert_arrays_close(T_w_b @ point_body, point_world)
def test_se3_rotation():
    """A quarter-turn roll in SE(3) maps +y to +z."""
    roll_quarter = jaxlie.SO3.from_rpy_radians(onp.pi / 2.0, 0.0, 0.0)
    T_w_b = jaxlie.SE3.from_rotation_and_translation(
        rotation=roll_quarter,
        translation=onp.zeros(3),
    )
    point_body = onp.array([0.0, 1.0, 0.0])
    point_world = onp.array([0.0, 0.0, 1.0])
    assert_arrays_close(T_w_b @ point_body, point_world)
def test_so3_xyzw_basic():
    """The identity xyzw quaternion must produce the identity SO(3) element."""
    identity_quat = onp.array([0, 0, 0, 1])
    assert_transforms_close(
        jaxlie.SO3.from_quaternion_xyzw(identity_quat),
        jaxlie.SO3.identity(),
    )
@settings(deadline=None)
@given(_random_module=st.random_module())
def test_se3_compose(_random_module):
    """SE(3) composition must agree with 4x4 homogeneous matrix multiplication."""
    T1 = sample_transform(jaxlie.SE3)
    T2 = sample_transform(jaxlie.SE3)
    product_matrix = T1.as_matrix() @ T2.as_matrix()
    composed = T1 @ T2
    assert_arrays_close(product_matrix, composed.as_matrix())
    assert_transforms_close(jaxlie.SE3.from_matrix(product_matrix), composed)
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
from builtins import str
from builtins import range
from builtins import object
from qgis.PyQt.QtCore import *
from qgis.PyQt.QtGui import *
from qgis.PyQt.uic import *
from qgis.core import *
from qgis.utils import *
from qgis.gui import *
from ProjektImport import *
from osgeo import gdal, ogr
from osgeo.gdalconst import *
from gui_geologie import *
class GeologieDialog(QtWidgets.QDialog, Ui_frmGeologie):
def __init__(self,iface,pfad = None,vogisPfad = None):
    """
    Dialog for loading geology layers into QGIS.

    :param iface: QGIS interface object.
    :param pfad: base path to the geology data sets.
    :param vogisPfad: path to the VoGIS installation.
    """
    QtWidgets.QDialog.__init__(self)
    Ui_frmGeologie.__init__(self)
    self.iface = iface
    # Set up the user interface from Designer.
    self.setupUi(self)
    self.pfad = pfad
    # Exclusivity is set in code: the Designer setting is lost on code generation.
    self.checkButtonsGroup.setExclusive(True)
    self.checkButtonsGroup2.setExclusive(True)
    self.checkButtonsGroup5.setExclusive(False)
    # Legend interface was used to check/uncheck layers; dropped in QGIS 3.
    #self.leginterface = self.iface.legendInterface()
    self.vogisPfad = vogisPfad
#************************************************************************************************
# load_raster()
#************************************************************************************************
def load_raster(self, path, basename, button_text):
    """
    Load a raster layer and record it in self.layerliste.

    Skips loading when a layer with the same display name already exists
    in the project (matching by name only, which is sufficient here).

    :param path: file path of the raster data set.
    :param basename: internal layer name.
    :param button_text: display name shown for the layer.
    """
    # Has this layer been loaded before? We only compare by name.
    if len(QgsProject.instance().mapLayersByName(button_text)) < 1:
        layer = QgsRasterLayer(path, basename)
    else:
        return
    Lyr = rastername()  # custom record type
    if not layer.isValid():
        # Bug fix: the original message used "/n" instead of "\n" newlines.
        QtWidgets.QMessageBox.warning(
            None, "Fehler beim laden des Themas",
            "Thema:%s \nPfad: %s\nFehler:%s " % (button_text, path, str(layer.lastError())))
    else:
        Lyr.anzeigename = button_text
        Lyr.rasterobjekt = layer
        self.layerliste.append(Lyr)
#************************************************************************************************
# accept()
#************************************************************************************************
def accept(self):
rlayer = []
self.layerliste = [] #leere Liste, wird mit unserem ind. Datentyp gefüllt werden
projekt = ProjektImport(self.iface)
mc=self.iface.mapCanvas()
ext = mc.extent()
mc.setRenderFlag(False)
#layercount = QgsMapLayerRegistry.instance().count()
layercount = len(QgsProject.instance().layerTreeRoot().findLayers())
#-------------------------------------
# Lasche: Allgemein
#-------------------------------------
if (self.tabWidget.currentIndex() == 0):
buttoncount = 0
for button in self.checkButtonsGroup.buttons():
if button.isChecked():
buttoncount = + 1
if ("Geologische Karte Vorarlberg (GBA, 2007)" in button.text()):
projekt.importieren(self.pfad + "/Geologische_Karte/Vlbg/Geologischekarte_GBA/geologischekarte.qgs",None,None,None,None,"Geologische Karte Vorarlberg (GBA, 2007)")
elif ("Geologische_Tektonische Karte (GBA, 1998)" in button.text()):
projekt.importieren(self.pfad + "/Geologische_Karte/Vlbg/Geotektonischekarte_GBA/geotektonischekarte.qgs",None,None,None,None,"Geologische_Tektonische Karte (GBA, 1998)")
elif ("Bohrprofile" in button.text()):
projekt.importieren(self.pfad + "/Bohrungen/Vlbg/Bohrprofil/bohrprofil.qgs",)
elif ("Ereigniskataster" in button.text()):
projekt.importieren(self.pfad + "/Ereigniskataster/Vlbg/ereigniskataster.qgs",)
elif ("Geologische Detailuntersuchungen" in button.text()):
projekt.importieren(self.pfad + "/Geologie_Detailuntersuchungen/Vlbg/Geologie_Detailuntersuchungen/Geologie_Detailuntersuchungen.qgs",None,None,None,None,"Geologische Detailuntersuchungen")
elif ("Geologische Karte (Richter)" in button.text()):
projekt.importieren(self.pfad + "/Geologische_Karte/Vlbg/Geologischekarte_Richter/geologischekarte_richter.qgs",None,None,None,None, "Geologische Karte (Richter)")
elif ("Geologie Rheintal (Starck, 1970)" in button.text()):
projekt.importieren(self.pfad + "/Geologische_Karte/Rheintal/Geologie_Starck/Geologie_Starck.qgs",None,None,None,None,None)
elif ("Geomorphologische Karten (Uni Amsterdam)" in button.text()):
aaa = 23
elif ("Gefahrenhinweiskarte (GBA, 2006)" in button.text()):
projekt.importieren(self.pfad + "/Georisiko_Karte/Vlbg/Gefahrenhinweiskarte/Gefahrenhinweiskarte.qgs",None,None,None,None,"Gefahrenhinweiskarte (GBA, 2006)")
elif ("Georisken Montafon (Bertle, 1995)" in button.text()):
projekt.importieren(self.pfad + "/Georisiko_Karte/Montafon/Georisikokarte_Bertle/georisken.qgs",None,None,None,None,None)
elif ("Geotop-Inventar" in button.text()):
projekt.importieren(self.pfad + "/Geologische_Karte/Vlbg/Geotopinventar/geotopinventar.qgs",None,None,None,None,"Geotop-Inventar")
elif ("Grundwasser-Chemismus Rheintal (Starck)" in button.text()):
projekt.importieren(self.pfad + "/Geologische_Karte/Rheintal/Grundwasser_Starck/gwch_Starck.qgs",None,None,None,None,None)
elif ("Grundwasser-Schichten_Linien Rheintal (nur VIIa)" in button.text()):
projekt.importieren(self.pfad + "/Geologische_Karte/Rheintal/grundwasser_schichtenlinien.qgs",None,None,None,None,"Grundwasser-Schichten_Linien Rheintal (nur VIIa)")
elif (("Historische Übersichtskarte (Schmidt 1839-1841") in button.text()):
projekt.importieren(self.pfad + "/Geologische_Karte/Vlbg/Schmidt_1839/schmidt1839.qgs",None,None,None,None,("Historische Übersichtskarte (Schmidt 1839-1841"))
else:
QtWidgets.QMessageBox.warning(None, "Thema nicht vorhanden", "<P><FONT SIZE='16' COLOR='#800000'>%s</FONT></P>" %(button.text()))
#Warnung wenn keine Themen ausgewählt wurden
if buttoncount == 0:
QtWidgets.QMessageBox.warning(None, "Keine Themen ausgewaehlt", "<P><FONT SIZE='10' COLOR='#B00000'>Keine Themen ausgewaehlt !</FONT></P>")
#-------------------------------------
# Lasche: Geologische Gebietskarten
#-------------------------------------
if (self.tabWidget.currentIndex() == 1):
buttoncount = 0
for button in self.checkButtonsGroup2.buttons():
if button.isChecked():
buttoncount = + 1
if ("Übersichtskarten" in button.text()):
projekt.importieren(self.pfad + "/Geologische_Karte/Vlbg/Karten_Uebersicht/geologie_uebersicht.qgs",)
elif ("Arlberggebiet (GBA, 1932)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Vlbg/geo_Arlberggebiet.ecw","geo_Arlberggebiet",button.text())
elif ("Bezau (GBA, Manuskript)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Vlbg/geo_Bezau.ecw","geo_Bezau",button.text())
elif ("Bregenz (GBA, 1982)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Vlbg/geo_Bregenz.ecw","geo_Bregenz",button.text())
elif ("Dornbirn Nord (GBA, 1994)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Vlbg/geo_Dornbirn_nord.ecw","geo_Dornbirn_nord",button.text())
elif ("Dornbirn Süd (GBA, 1982)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Vlbg/geo_Dornbirn_sued.ecw","geo_Dornbirn_sued",button.text())
elif ("Flexenpass (Doert und Helmcke, 1975)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Vlbg/Geo_Flexenpass.ecw","Geo_Flexenpass",button.text())
elif ("Heiterwand (Tirol) (GBA, 1932)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Tirol/geo_Heiterwand.ecw","geo_Heiterwand",button.text())
elif ("Klostertal (Helmcke, 1972)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Vlbg/Geo_Klostertal.ecw","Geo_Klostertal",button.text())
elif ("Klostertaler Alpen (GBA, 1932)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Vlbg/geo_Klostertaleralpen.ecw","geo_Klostertaleralpen",button.text())
elif ("Liechtenstein (RFL, 1985)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Vlbg/geo_Liechtenstein.ecw","geo_Liechtenstein",button.text())
elif ("Mittelberg (GBA, 1990)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Vlbg/geo_Mittelberg.ecw","geo_Mittelberg",button.text())
elif ("Parseiergruppe (Tirol) (GBA, 1932)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Tirol/geo_Parseiergruppe.ecw","geo_Parseiergruppe",button.text())
elif ("Partenen Ost (GBA, 1980)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Vlbg/geo_Partenen_ost.ecw","geo_Partenen_ost",button.text())
elif ("Partenen West (GBA, 1980)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Vlbg/geo_Partenen_west.ecw","geo_Partenen_west",button.text())
elif ("Rätikon (GBA)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Vlbg/geo_Raetikon.ecw","geo_Raetikon",button.text())
elif ("Schönenbach" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Vlbg/geo_Schoenenbach.ecw","geo_Schoenenbach",button.text())
elif ("Stuben (GBA, 1937)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Vlbg/geo_Stuben.ecw","geo_Stuben",button.text())
elif ("Sulzberg (GBA, 1984)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Vlbg/geo_Sulzberg.ecw","geo_Partenen_west",button.text())
elif ("Vorderwald (Muheim, 1934)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Vlbg/Geo_Vorderwald.ecw","Geo_Vorderwald",button.text())
elif ("Walgau (GBA, 1967)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Vlbg/geo_Walgau.ecw","geo_Walgau",button.text())
elif ("Walsertal (Otte und Helmcke)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Karte/Vlbg/Geo_Walsertal.ecw","Geo_Walsertal",button.text())
else:
QtWidgets.QMessageBox.warning(None, "Thema nicht vorhanden", "<P><FONT SIZE='16' COLOR='#800000'>%s</FONT></P>" %(button.text()))
#Warnung wenn keine Themen ausgewählt wurden
if buttoncount == 0:
QtWidgets.QMessageBox.warning(None, "Keine Themen ausgewaehlt", "<P><FONT SIZE='10' COLOR='#B00000'>Keine Themen ausgewaehlt !</FONT></P>")
#-------------------------------------
# Lasche: Geologische Detailkarten
#-------------------------------------
if (self.tabWidget.currentIndex() == 2):
buttoncount = 0
for button in self.checkButtonsGroup3.buttons():
if button.isChecked():
buttoncount = + 1
if ("Ausser Montafon (Bertha, 1978)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Detailkarte/Vlbg/geo_Ausser_Montafon.ecw","geo_Ausser_Montafon",button.text())
elif ("Dalaas (Koehler, 1977)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Detailkarte/Vlbg/geo_Dalaas.ecw","geo_Dalaas",button.text())
elif ("Davenna (Kasper, 1990)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Detailkarte/Vlbg/geo_Davenna.ecw","geo_Davenna",button.text())
elif ("Firstkette (Golde, 1993)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Detailkarte/Vlbg/geo_Firstkette.ecw","geo_Firstkette",button.text())
elif ("Gafadura (Post, 1996)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Detailkarte/Vlbg/geo_Gafadura.ecw","geo_Gafadura",button.text())
elif ("Gargellen (Bertle, 1972)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Detailkarte/Vlbg/geo_Gargellen.ecw","geo_Gargellen",button.text())
elif ("Gopfberg (Oberhauser M., 1993)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Detailkarte/Vlbg/geo_Gopfberg.ecw","geo_Gopfberg",button.text())
elif ("Rätikon östlich (Steinacher, 2004)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Detailkarte/Vlbg/geo_Raetikon_st1.ecw","geo_Raetikon_st1",button.text())
self.load_raster(self.pfad + "/Geologische_Detailkarte/Vlbg/geo_Raetikon_st2.ecw","geo_Raetikon_st2",button.text())
elif ("Rätikon östlich (Mayerl, 2005)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Detailkarte/Vlbg/geo_Raetikon_ma.ecw","geo_Raetikon_ma",button.text())
elif ("Sibratsgfäll (Haak, 1995)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Detailkarte/Vlbg/geo_Sibratsgfaell.ecw","geo_Sibratsgfaell",button.text())
elif ("Tschagguns - Mauren (Bertle, 1995)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Detailkarte/Vlbg/geo_Tschagguns_Mauren.ecw","geo_Tschagguns_Mauren",button.text())
elif ("Tschöppa (Bertle, 1992)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Detailkarte/Vlbg/geo_Tschoeppa.ecw","geo_Tschoeppa",button.text())
elif ("Winterstaude (Oberhauser)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Detailkarte/Vlbg/geo_Winterstaude_ka.ecw","geo_Winterstaude_ka",button.text())
elif ("Winterstaude (Alexander)" in button.text()):
self.load_raster(self.pfad + "/Geologische_Detailkarte/Vlbg/geo_Winterstaude_ka.ecw","geo_Winterstaude_ka",button.text())
else:
QtWidgets.QMessageBox.warning(None, "Thema nicht vorhanden", "<P><FONT SIZE='16' COLOR='#800000'>%s</FONT></P>" %(button.text()))
#Warnung wenn keine Themen ausgewählt wurden
if buttoncount == 0:
QtWidgets.QMessageBox.warning(None, "Keine Themen ausgewaehlt", "<P><FONT SIZE='10' COLOR='#B00000'>Keine Themen ausgewaehlt !</FONT></P>")
#-------------------------------------
# Lasche: Georisiko-Kraten (AGK)
#-------------------------------------
if (self.tabWidget.currentIndex() == 3):
buttoncount = 0
for button in self.checkButtonsGroup4.buttons():
if button.isChecked():
buttoncount = + 1
# Geologie
if ("Alberschwende" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Geol_Alberschwende.ecw","Geol_Alberschwende",button.text())
elif ("Au" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Geol_Au.ecw","Geol_Au",button.text())
elif ("Faschina" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Geol_Faschina.ecw","Geol_Faschina",button.text())
elif ("Hochtannberg/Arlberg" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Geol_Hochtannberg_Arlberg.ecw","Geol_Hochtannberg_Arlberg",button.text())
elif ("Sibratsgfäll" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Geol_Sibratsgfaell.ecw","Geol_Sibratsgfaell",button.text())
# Rutschung
elif ("Alberschwende" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Georisk_Rutschung_Alberschwende.ecw","Georisk_Rutschung_Alberschwende",button.text())
elif ("Faschina" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Georisk_Rutschung_Faschina.ecw","Georisk_Rutschung_Faschina",button.text())
elif ("Hochtannberg" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Georisk_Rutschung_Hochtannberg.ecw","Georisk_Rutschung_Hochtannberg",button.text())
elif ("Schoppernau" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Georisk_Rutschung_Schoppernau.ecw","Georisk_Rutschung_Schoppernau",button.text())
elif ("Walgau" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Georisk_Rutschung_Walgau.ecw","Georisk_Rutschung_Walgau",button.text())
# Steinschlag
elif ("Hochtannberg" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Georisk_Steinschlag_Hochtannberg.ecw","Georisk_Steinschlag_Hochtannberg",button.text())
elif ("Klostertal" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Georisk_Steinschlag_Klostertal.ecw","Georisk_Steinschlag_Klostertal",button.text())
elif ("Mellau" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Georisk_Steinschlag_Mellau.ecw","Georisk_Steinschlag_Mellau",button.text())
elif ("Schröcken" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Georisk_Steinschlag_Schroecken.ecw","Georisk_Steinschlag_Schroecken",button.text())
elif ("Walgau" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Georisk_Steinschlag_Walgau.ecw","Georisk_Steinschlag_Walgau",button.text())
# Geotechnik
elif ("Alberschwende Nord" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Geotech_Alberschwende_N.ecw","Geotech_Alberschwende_N",button.text())
elif ("Alberschwende Süd" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Geotech_Alberschwende_S.ecw","Geotech_Alberschwende_S",button.text())
elif ("Au" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Geotech_Au.ecw","Geotech_Au",button.text())
elif ("Flexenpass" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Geotech_Flexenpass.ecw","Geotech_Flexenpass",button.text())
elif ("Ippacherwald" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Geotech_Ippacherwald.ecw","Geotech_Ippacherwald",button.text())
elif ("Lech" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Geotech_Lech.ecw","Geotech_Lech",button.text())
elif ("Mellau" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Geotech_Lech.ecw","Geotech_Lech",button.text())
elif ("Schoppernau" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Geotech_Schoppernau.ecw","Geotech_Schoppernau",button.text())
elif ("Schröcken" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Geotech_Schroecken.ecw","Geotech_Schroecken",button.text())
elif ("Schwarzachtobel" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Geotech_Schwarzachtobel.ecw","Geotech_Schwarzachtobel",button.text())
elif ("Sibratsgfäll" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Geotech_Sibratsgfaell.ecw","Geotech_Sibratsgfaell",button.text())
elif ("Warth" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Geotech_Warth.ecw","Geotech_Warth",button.text())
elif ("Warth/Saloberkopf" in button.text()):
self.load_raster(self.pfad + "/Georisiko_Karte/Vlbg/Geotech_Warth_Saloberkopf.ecw","Geotech_Warth_Saloberkopf",button.text())
else:
QtWidgets.QMessageBox.warning(None, "Thema nicht vorhanden", "<P><FONT SIZE='16' COLOR='#800000'>%s</FONT></P>" %(button.text()))
#Warnung wenn keine Themen ausgewählt wurden
if buttoncount == 0:
QtWidgets.QMessageBox.warning(None, "Keine Themen ausgewaehlt", "<P><FONT SIZE='10' COLOR='#B00000'>Keine Themen ausgewaehlt !</FONT></P>")
#-------------------------------------
# Lasche: Geomorpholigie UNI Amsterdam
#-------------------------------------
if (self.tabWidget.currentIndex() == 4):
buttoncount = 0
for button in self.checkButtonsGroup5.buttons():
if button.isChecked():
buttoncount = + 1
if ("Geomorph. Legende (Orginal)" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/Geomorphologische_Legende_Original.tif","Geomorphologische_Legende_Original",button.text())
elif ("Geomorph. Legende (Deutsch)" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/Geomorphologische_Legende.tif","Geomorphologische_Legende",button.text())
elif ("Blatt Au:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_au.tif","geomorph_au",button.text())
elif ("Blatt Bartholomäberg:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_bartholomaeberg.tif","geomorph_bartholomaeberg",button.text())
elif ("Blatt Bezau:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_bezau.tif","geomorph_bezau",button.text())
elif ("Blatt Bizau:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_bizau.tif","geomorph_bizau",button.text())
elif ("Blatt Brand-Nord:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_brand-nord.tif","geomorph_brand-nord",button.text())
elif ("Blatt Brand-Süd:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_brand-sued.tif","geomorph_brand-sued",button.text())
elif ("Blatt Damüls:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_damuels.tif","geomorph_damuels",button.text())
elif ("Blatt Damülser Mittagsspitze:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_damuelser-mittagsspitze.tif","geomorph_damuels",button.text())
elif ("Blatt Diedamskopf:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_diedamskopf.tif","geomorph_diedamskopf",button.text())
elif ("Blatt Dunza-Tschengla:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_dunza-tschengla.tif","geomorph_dunza-tschengla",button.text())
elif ("Blatt Fundelkopf:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_fund-kopf.tif","geomorph_fund-kopf",button.text())
elif ("Blatt Gampberg:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_gampberg.tif","geomorph_gampberg",button.text())
elif ("Blatt Gurtis:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_gurtis.tif","geomorph_gurtis",button.text())
elif ("Blatt Klaus-Weiler:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_fund-kopf.tif","geomorph_fund-kopf",button.text())
elif ("Blatt Hopfreben:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_hopfreben.tif","geomorph_hopfreben",button.text())
elif ("Blatt Ludesch:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_ludesch.tif","geomorph_ludesch",button.text())
elif ("Blatt Marul:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_marul.tif","geomorph_marul",button.text())
elif ("Blatt Mellau:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_mellau.tif","geomorph_mellau",button.text())
elif ("Blatt Mellenspitze:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_mellenspitze.tif","geomorph_mellenspitze",button.text())
elif ("Blatt Mittleres Silbertal:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_mittleres-silbertal.tif","geomorph_mittleres-silbertal",button.text())
elif ("Blatt Nenzinger Himmel:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_nenzinger-himmel.tif","geomorph_nenzinger-himmel",button.text())
elif ("Blatt Rellstal-Golm:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_rellstal-golm.tif","geomorph_rellstal-golm",button.text())
elif ("Blatt Rellstal-Zimba:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_rellstal-zimba.tif","geomorph_rellstal-zimba",button.text())
elif ("Blatt Satteins:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_satteins.tif","geomorph_satteins",button.text())
elif ("Blatt Schnepfau:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_schnepfau.tif","geomorph_schnepfau",button.text())
elif ("Blatt Schnifis:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/Geomorph_Schnifis.tif","Geomorph_Schnifis",button.text())
elif ("Blatt Schoppernau:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_schoppernau.tif","geomorph_schoppernau",button.text())
elif ("Blatt Schönenbach:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_schoenenbach.tif","geomorph_schoenenbach",button.text())
elif ("Blatt Silbertal:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_silbertal.tif","geomorph_silbertal",button.text())
elif ("Blatt Sonntag:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_sonntag.tif","geomorph_sonntag",button.text())
elif ("Blatt St. Gallenkirch:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_st_gallenkirch.tif","geomorph_st_gallenkirch",button.text())
elif ("Blatt Zitterklapfen:" in button.text()):
self.load_raster(self.pfad + "/Geomorphologische_Karte/Vlbg/geomorph_zitterklapfen.tif","geomorph_zitterklapfen",button.text())
else:
QtWidgets.QMessageBox.warning(None, "Thema nicht vorhanden", "<P><FONT SIZE='16' COLOR='#800000'>%s</FONT></P>" %(button.text()))
#--------------------------------------------------------------------------
# Max-Extent der Layers ermitteln wenn keine Layer zuvor geladen wurden.
#--------------------------------------------------------------------------
if layercount == 0:
xmin = 999999999999.9
xmax = -999999999999.9
ymin = 999999999999.9
ymax = -999999999999.9
for i in range(len(rlayer)):
a = rlayer[i].extent()
if a.xMinimum() < xmin:
xmin = a.xMinimum()
if a.xMaximum() > xmax:
xmax = a.xMaximum()
if a.yMinimum() < ymin:
ymin = a.yMinimum()
if a.yMaximum() > ymax:
ymax = a.yMaximum()
#QtGui.QMessageBox.about(None, "Computed Extent", "<FONT SIZE='12' COLOR='#0000A0'>X: %s %s Y: %s %s</FONT></P>" %(xmin , xmax, ymin, ymax))
#-----------------------------------------------------------------
# Max-Extent stzen wenn keine Layer zuvor geladen wurden.
#-----------------------------------------------------------------
if layercount == 0:
rect = QgsRectangle(xmin, xmax, ymin , ymax )
mc.setExtent(rect)
gruppenname = ""
if (self.tabWidget.currentIndex() == 4): #Geoelogie Allgemein
gruppenname = "Geomorphologie UNI Amsterdam"
elif (self.tabWidget.currentIndex() == 3): #Geoelogie Allgemein
gruppenname = "Georisiko Karten"
elif (self.tabWidget.currentIndex() == 2): #Geoelogie Allgemein
gruppenname = "Geologische Detailkarten"
elif (self.tabWidget.currentIndex() == 1): #Geoelogie Allgemein
gruppenname = "Geologische Gebietskarten"
elif (self.tabWidget.currentIndex() == 0): #Geoelogie Allgemein
gruppenname = "Geologie Allgemein"
gruppe_vorhanden = False
legendroot = QgsProject.instance().layerTreeRoot()
# Raster Layer(s) instanzieren: Dazu die Layerliste durchlaufen
for i in range(len(self.layerliste)):
#initialisieren
self.einzelliste = self.layerliste[i] #gibt #ind. datentyp zurück!
QgsProject.instance().addMapLayer(self.einzelliste.rasterobjekt)
#wenn Gruppenlayer nicht vorhanden ist, anlegen
index = legendroot.findGroup(gruppenname)
if index == None:
#grp = self.leginterface.addGroup(gruppenname,0) #so hat die Gruppe das QGIS spez. Aussehen
index = legendroot.insertGroup(-1,gruppenname)
kindi = QgsProject.instance().layerTreeRoot().findLayer(self.einzelliste.rasterobjekt.id())
zwtsch = kindi.clone()
index.insertChildNode(-1,zwtsch)
#QtGui.QMessageBox.about(None, "Gruppe vorhanden", str(kindi))
kindi.parent().removeChildNode(kindi)
index.setExpanded(False)
if type(self.einzelliste.rasterobjekt) is QgsRasterLayer: #nur Raster werden in der Legende nach unten geschoben
anzeigename = self.einzelliste.anzeigename
self.einzelliste.rasterobjekt.setName(anzeigename)
mc.setRenderFlag(True)
#************************************************************************************************
# clicked()
#
# Funktion fuer die Info-Buttons die verschiedene legenden-PDF's laden
#************************************************************************************************
def clicked(self):
button = self.sender()
if button is None or not isinstance(button, QPushButton):
return
if button.objectName() == "Button_Legend_Geologie_2007":
os.startfile(self.pfad + "/Geologische_Karte/Vlbg/Geologischekarte_GBA/GeologischeKarte_2007_Legende.pdf")
elif button.objectName() == "Button_Legend_Tektonisch_1998":
os.startfile(self.pfad + "/Geologische_Karte/Vlbg/Geotektonischekarte_GBA/GeotektonischeKarte_1998_Legende.pdf")
elif button.objectName() == "Profilschintt_Vorarlberg":
os.startfile(self.pfad + "/Geologische_Karte/Vlbg/Geologie_Profilschnitt.pdf")
elif button.objectName() == "Button_Legend_Geomorpg_Orig":
os.startfile(self.pfad + "//Geomorphologische_Karte/Vlbg/Geomorphologische_Legende_Original.pdf")
elif button.objectName() == "Button_Legend_Geomorph_Deutsch":
os.startfile(self.pfad + "/Geomorphologische_Karte/Vlbg/Geomorphologische_Legende.pdf")
#************************************************************************************************
# doGeomorphologie_Amsterdam()
#
# Funktion fuer das Subwindow fuer die Geomorphologische Karten (Uni Amsterdam)
#************************************************************************************************
def doGeomorphologie_Amsterdam(self):
Geomorphologie = Geomorphologie_Amsterdam(self.iface,vogisPfad +"Blattschnitte/Vlbg/Blattschnitte.qgs")
Geomorphologie.exec_() #ACHTUNG: wird kein self.iface.mainWindow() als parent übergeben brauchts exec
#sondt müßte der parent dann für die Initialisierung von QDialog verwendet werden
#this class is nothing more than a
#struct-like container: we want to bundle the layer object (type QgsMapLayer)
#and its display name into a single data type
class rastername(object):
    """Struct-like record pairing a raster layer object with its display name."""

    def __init__(self):
        # Placeholder layer object; the loader replaces it with a real layer.
        self.rasterobjekt = QgsRasterLayer()
        # BUG FIX: the original assigned the *type* `str` (a class object, not
        # a string) as the default. Use an empty string so the attribute always
        # holds a str value; callers overwrite it before use anyway.
        self.anzeigename = ""
# Task-state catalogue keyed by numeric state id:
#   1 -> base task shown everywhere; 2 -> subtask visible only in the subtask menu.
STATES = {
    state_id: {'title': title, 'descr': descr}
    for state_id, title, descr in (
        (1, 'Base task', 'Simple task'),
        (2, 'Subtask', 'Sub task, visible only in subtask menu.'),
    )
}
|
from django.shortcuts import render, redirect
from .models import Reflection, Submission, Question, QuestionSubmission, User
from django.utils import timezone, dateformat
from datetime import datetime
from django.contrib.auth.models import User
def home(request):
    """Render the home page with today's reflection and the current user's
    submission for it; either may be None when the record does not exist.
    """
    user = request.user
    try:
        # NOTE(review): timezone.now() returns a full datetime; if
        # Reflection.date is a DateField this relies on Django's value
        # coercion -- confirm, otherwise an exact-datetime match will
        # almost never succeed.
        reflection = Reflection.objects.get(date=timezone.now())
        submission = reflection.submission_set.get(user=user)
    except Reflection.DoesNotExist:
        # No reflection scheduled today -> nothing to show at all.
        reflection = None
        submission = None
    except Submission.DoesNotExist:
        # Reflection exists but this user has not submitted yet.
        submission = None
    return render(request, "reflections/base.html", {"reflection": reflection, "submission": submission})
def submit_reflection(request, id):
    """Create a Submission for reflection *id* from the POSTed form.

    POST fields named "question-<question_id>" are stored as
    QuestionSubmission rows; all other POST keys are ignored.
    Raises Reflection.DoesNotExist / Question.DoesNotExist for bad ids
    (same as the original).
    """
    reflection = Reflection.objects.get(id=id)
    submission = reflection.submission_set.create(user=request.user)
    for key, value in request.POST.items():
        if key.startswith("question-"):
            question_id = int(key.split("-")[1])
            question = Question.objects.get(id=question_id)
            # The reverse related manager sets `question` automatically; the
            # original also passed question= explicitly, which was redundant.
            question.questionsubmission_set.create(
                submission=submission, answer=value
            )
    return redirect("reflections:home")
def admin_view(request):
    """Render the admin overview: every user, every submission, and today's
    reflection (None when no reflection exists for today)."""
    try:
        todays_reflection = Reflection.objects.get(date=timezone.now())
    except Reflection.DoesNotExist:
        todays_reflection = None
    context = {
        "users": User.objects.all(),
        "reflection": todays_reflection,
        "submissions": Submission.objects.all(),
    }
    return render(request, "reflections/admin_view.html", context)
def submission_detail(request):
    """Show the current user's submission for today's reflection.

    Unlike home(), no DoesNotExist fallback is provided here: a missing
    reflection or submission propagates as an exception.
    """
    todays_reflection = Reflection.objects.get(date=timezone.now())
    own_submission = Submission.objects.get(user=request.user, reflection=todays_reflection)
    context = {"reflection": todays_reflection, "submission": own_submission}
    return render(request, "reflections/submission_detail.html", context)
def individual_feedback(request, id):
    """Save the POSTed "individual_feedback" text onto submission *id*.

    Raises KeyError when the POST field is missing and
    Submission.DoesNotExist for an unknown id (same as the original).
    """
    # The original also fetched today's Reflection here but never used it;
    # that dead database query is removed.
    submission = Submission.objects.get(id=id)
    submission.feedback = request.POST["individual_feedback"]
    submission.save()
    return redirect("reflections:home")
|
import discord;
from discord.ext import commands;
import json;
#LEVELING SYSTEM
#CONFIG
# Bot configuration is loaded once at import time.
# NOTE(review): absolute, user-specific path -- consider making it configurable.
with open(r"C:\Users\antho\Desktop\Saitama\Config.json", "r") as f:
    config = json.load(f);
class LevelSystem(commands.Cog):
    """Cog exposing a `level` command that shows a member's level and XP.

    BUG FIX: the original defined two commands both named ``level`` (one
    taking a member argument, one not). Because both were attributes of the
    same class body, the second definition silently replaced the first, so
    the member-argument variant was unreachable. They are merged into a
    single command whose member argument is optional.
    """

    def __init__(self, client):
        self.client = client

    @commands.command(pass_context = True)
    async def level(self, ctx, user: discord.Member = None):
        # Default to the command author when no member is mentioned.
        if user is None:
            user = ctx.author
        # User database: JSON mapping str(user.id) -> {"level": ..., "experience": ...}
        with open(config["userDatabasePath"], "r") as f:
            users = json.load(f)
        emb = discord.Embed(title = "~{}'s L E V E L~".format(user.name), description = "__")
        emb.set_author(name = config["name"], icon_url = config["profilePic"])
        emb.set_footer(text = config["defaultFooter"])
        emb.set_thumbnail(url = user.avatar_url)
        emb.add_field(name = "Level", value = "LVL " + str(users[f"{user.id}"]["level"]))
        emb.add_field(name = "Experience", value = str(users[f"{user.id}"]["experience"]) + " XP")
        await ctx.send(embed = emb)
# Entry point used by discord.py's extension loader (client.load_extension).
def setup(client):
    client.add_cog(LevelSystem(client));
N = int(raw_input())
array = list(map(int,raw_input().split(' ')))
negative = 0
positive = 0
zeros = 0
for i in range(N):
if array[i] < 0:
negative += 1
elif array[i] > 0:
positive += 1
else:
zeros += 1
print '%.6f' % (positive/float(N))
print '%.6f' % (negative/float(N))
print '%.6f' % (zeros/float(N))
|
from math import exp
import math
# PLCOm2012 model (Tammemagi,NEJM,2013)
# Author Kevin ten Haaf
# Organization Erasmus Medical Center Rotterdam
# Last adjusted: April 25, 2017
def execute(info):
    """Compute the PLCOm2012 six-year lung-cancer risk (Tammemagi, NEJM, 2013).

    Parameters
    ----------
    info : dict
        Keys: age, edLevel, bmi, copd, hxLungCancer, famHxCanc, race,
        cigsPerDay, smokDurat, yrsQuit. Values may be strings; they are
        coerced below.

    Returns
    -------
    dict with "result" (risk percentage rounded to 2 decimals) and
    "interpretation" (human-readable sentence).
    """
    age = int(info['age'])
    ed_level = info['edLevel']
    bmi = float(info['bmi'])
    copd = int(info['copd'])
    hx_lung_cancer = int(info['hxLungCancer'])
    fam_hx_canc = int(info['famHxCanc'])
    race = int(info['race'])
    # Current smoker when no years-since-quitting value is given.
    # NOTE(review): a truthy *string* "0" would also count as quit -- the
    # callers here pass numbers, but confirm upstream input types.
    smoker_status = 0 if info['yrsQuit'] else 1
    cigs_per_day = int(info['cigsPerDay'])
    smok_durat = int(info['smokDurat'])
    yrs_quit = int(info['yrsQuit'])

    # Model coefficients; index 0 is a placeholder so indices line up with
    # the published parameter numbering: age, edLevel, bmi, copd, personal
    # history, family history, smoking status, cigs/day, duration, cessation.
    coeffs = [-1, 0.0778868, -0.0812744, -0.0274194, 0.3553063, 0.4589971,
              0.587185, 0.2597431, -1.822606, 0.0317321, -0.0308572]
    race_coeffs = [-1, 0, 0.3944778, -0.7434744, -0.466585, 0, 1.027152]

    model_constant = -4.532506

    # Cigarettes/day enters the model as (cigs/10)^-1 centred on 0.4021541613;
    # zero consumption uses the centring value alone (avoids division by zero).
    if cigs_per_day:
        cigs_term = math.pow((cigs_per_day / 10.0), -1) - 0.4021541613
    else:
        cigs_term = 0 - 0.4021541613

    # Contributions are summed in the published order (kept identical to the
    # original left-to-right addition so float results match bit-for-bit).
    contributions = [
        model_constant,
        (age - 62.0) * coeffs[1],
        (ed_level - 4.0) * coeffs[2],
        (bmi - 27.0) * coeffs[3],
        copd * coeffs[4],
        hx_lung_cancer * coeffs[5],
        fam_hx_canc * coeffs[6],
        smoker_status * coeffs[7],
        cigs_term * coeffs[8],
        (smok_durat - 27.0) * coeffs[9],
        (yrs_quit - 10.0) * coeffs[10],
        race_coeffs[race],
    ]
    log_odds = sum(contributions)

    # Logistic transform -> percentage, rounded to two decimals.
    six_year_probability_pct = round(100 * exp(log_odds) / (1 + exp(log_odds)), 2)
    interpretation = "This individual's six year probability of developing lung cancer is " + str(float(six_year_probability_pct)) + "%."
    return {"result": six_year_probability_pct, "interpretation": interpretation}
def test():
    """Spot-check execute() against three known input/output vectors.

    Returns "ok." when every vector matches, "error." on the first mismatch.
    """
    cases = [
        ({"age":0,"edLevel":0,"bmi":0,"copd":0,"hxLungCancer":0,"famHxCanc":0,"race":0,"cigsPerDay":0,"smokDurat":0,"yrsQuit":0},
         {'interpretation': "This individual's six year probability of developing lung cancer is 0.01%.", 'result': 0.01}),
        ({"age":70,"edLevel":0,"bmi":0,"copd":0,"hxLungCancer":1,"famHxCanc":1,"race":0,"cigsPerDay":0,"smokDurat":0,"yrsQuit":0},
         {'interpretation': "This individual's six year probability of developing lung cancer is 8.68%.", 'result': 8.68}),
        ({"age":80,"edLevel":2,"bmi":0,"copd":1,"hxLungCancer":1,"famHxCanc":1,"race":0,"cigsPerDay":15,"smokDurat":0,"yrsQuit":0},
         {'interpretation': "This individual's six year probability of developing lung cancer is 6.93%.", 'result': 6.93}),
    ]
    for inputs, expected in cases:
        if execute(inputs) != expected:
            return "error."
    return "ok."
|
import math
import os
# pip install PyGithub. Lib operates on remote github to get issues
from github import Github
import re
import argparse
# pip install GitPython. Lib operates on local repo to get commits
import git as local_git
from google.cloud import translate
# Unicode-range patterns used to spot non-English text in issue/commit bodies.
CHINESE_CHAR_PATTERN = re.compile("[\u4e00-\u9fff]+")  # CJK Unified Ideographs
# NOTE(review): range starts at Hangul Jamo (\u3131) but ends at \ucb4c, which
# falls mid Hangul-syllable block (syllables run to \ud7a3) -- confirm coverage.
KOREAN_CHAR_PATTERN = re.compile("[\u3131-\ucb4c]+")
JAPANESS_CHAR_PATTERN = re.compile("[\u3040-\u30ff\u3400-\u4dbf\u4e00-\u9fff\uf900-\ufaff\uff66-\uff9f]+")  # kana plus kanji-compatible ranges
EURPO_CHAR_PATTERN = re.compile("[\u00c0-\u017e]+")  # accented Latin (Latin-1 Supplement / Extended-A)
# NOTE(review): EURPO_CHAR_PATTERN is defined but not included here -- confirm intent.
LANG_PATTERN = [CHINESE_CHAR_PATTERN, KOREAN_CHAR_PATTERN, JAPANESS_CHAR_PATTERN]
# Matches any character outside ASCII alphanumerics and whitespace (punctuation included).
NONE_ENGLISH_PATTERN = re.compile("[^a-zA-Z0-9\s]+")
# Google Cloud Translate client, created at import time (requires credentials).
translator = translate.Client()
def sentence_contains_chinese(sentence: str) -> bool:
    """Return True when *sentence* holds at least one character from the CJK block."""
    match = CHINESE_CHAR_PATTERN.search(sentence)
    return match is not None
def sentence_contains_foreign_lang(sentence: str) -> bool:
    """Return True when *sentence* has any non-alphanumeric, non-whitespace character.

    NOTE(review): punctuation also triggers this check, so "foreign" here
    effectively means "not plain ASCII alphanumeric" -- confirm intended.
    """
    return NONE_ENGLISH_PATTERN.search(sentence) is not None
def translate_long_sentence(sentence, partition_size=14000):
    """
    Translate *sentence* into English, splitting it into chunks that fit the
    translation API's size limit.

    A chunk whose translation fails is kept verbatim, so the function never
    raises because of the remote service.
    :param sentence: text to translate
    :param partition_size: maximum characters per translation request
    :return: translated chunks joined by single spaces
    """
    translated_parts = []
    chunk_count = math.ceil(len(sentence) / partition_size)
    for idx in range(chunk_count):
        chunk = sentence[idx * partition_size: (idx + 1) * partition_size]
        try:
            translated = translator.translate(chunk)["translatedText"]
        except Exception as e:
            print("Exception when translating sentence {}, exception is {}".format(chunk, e))
            translated = chunk
        translated_parts.append(translated)
    return " ".join(translated_parts)
def translate_intermingual_sentence(sentence: str) -> str:
    """
    Translate the non-English fragments of a mixed-language string.

    The input is split on whitespace; every token containing characters
    outside plain ASCII alphanumerics has its punctuation replaced by spaces
    and is run through the translator, while plain English tokens pass
    through unchanged.
    :param sentence: possibly mixed-language text
    :return: space-joined, English-only version of the input
    """
    out_tokens = []
    for token in sentence.split():
        if not sentence_contains_foreign_lang(token):
            out_tokens.append(token)
            continue
        cleaned = re.sub("[^\w]+", " ", token)
        out_tokens.append(translate_long_sentence(cleaned))
    return " ".join(out_tokens)
class MyIssue:
    """A GitHub issue flattened to a single CSV row."""

    def __init__(self, issue_id, content, create_time, close_time):
        self.issue_id = issue_id        # issue number on GitHub
        self.content = content          # list of text pieces: title, body, comment bodies
        self.create_time = create_time  # datetime the issue was opened
        self.close_time = close_time    # datetime the issue was closed (or None)

    def __str__(self):
        """Render as one CSV line: ``issue_id,content,closed_at,created_at``.

        Commas, carriage returns and newlines inside the content are squashed
        to single spaces so the row stays on one line. Fix: the original
        implementation filtered ``None`` entries by reassigning
        ``self.content``, mutating the object from inside ``__str__``; this
        version uses a local list so rendering has no side effects.
        """
        parts = [piece for piece in self.content if piece is not None]
        content_str = re.sub("[,\r\n]+", " ", "\n".join(parts))
        return "{},{},{},{}\n".format(self.issue_id, content_str, self.close_time, self.create_time)
class MyCommit:
    """A git commit flattened to a single CSV row."""

    def __init__(self, commit_id, summary, diffs, commit_time):
        self.commit_id = commit_id      # commit SHA
        self.summary = summary          # one-line commit message
        self.diffs = diffs              # iterable of diff lines
        self.commit_time = commit_time  # committed datetime

    def __str__(self):
        """Render as one CSV line: ``commit_id,summary,diff,commit_time``.

        Commas, carriage returns and newlines in the summary and in the
        joined diff text are squashed to single spaces.
        """
        flat_summary = re.sub("[,\r\n]+", " ", self.summary)
        flat_diffs = re.sub("[,\r\n]+", " ", " ".join(self.diffs))
        return "{},{},{},{}\n".format(self.commit_id, flat_summary, flat_diffs, self.commit_time)
class RepoCollector:
    """Harvest issues, commits and issue-commit links from one GitHub repo.

    Writes ``git_projects/<repo>/issue.csv``, ``commit.csv`` and ``links.csv``,
    and optionally translated copies under ``translated_data/``.
    """

    def __init__(self, user_name, passwd, download_path, repo_path, do_translation):
        self.user_name = user_name          # GitHub login
        self.passwd = passwd                # GitHub password/token
        self.download_path = download_path  # local dir where repos are cloned
        self.repo_path = repo_path          # "owner/name" slug on GitHub
        self.do_translate = do_translation  # translate non-English text when True

    def run(self):
        """Fetch issues via the API, commits via a local clone, then link and translate.

        Heavy I/O: network (GitHub API, git clone, translation API) plus CSV files.
        """
        git = Github(self.user_name, self.passwd)
        # Forces authentication early so bad credentials fail before any work.
        git.get_user()
        translate_project_flag = self.do_translate
        # SHA of git's well-known empty tree; used as diff base for root commits.
        EMPTY_TREE_SHA = "4b825dc642cb6eb9a060e54bf8d69288fbee4904"
        output_dir = os.path.join("git_projects", self.repo_path)
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)
        issue_dict = dict()  # NOTE(review): never used below -- dead variable?
        repo = git.get_repo(self.repo_path)
        issues = repo.get_issues(state="all")
        issue_file_path = os.path.join(output_dir, "issue.csv")
        ### TMP fix for data set -- remove it after 2019-8-4###
        # Reads the highest previously-translated issue id so re-runs only
        # re-fetch issues up to that id. Assumes translated_data/issue.csv
        # already exists and its second line starts with the max id.
        tarns_csv_file = os.path.join(output_dir, "translated_data", "issue.csv")
        with open(tarns_csv_file, encoding='utf8') as fin:
            max_id = int(fin.readlines()[1].split(",")[0])
        print("creating issue.csv")
        with open(issue_file_path, "w", encoding='utf8') as fout:
            fout.write("issue_id,issue_content,closed_at,created_at\n")
            for issue in issues:
                issue_number = issue.number
                # Skip issues newer than the already-translated snapshot.
                if issue_number>max_id:
                    continue
                print(issue_number)
                content = []
                content.append(issue.title)
                content.append(issue.body)
                issue_close_time = issue.closed_at
                issue_create_time = issue.created_at
                for comment in issue.get_comments():
                    content.append(comment.body)
                myissue = MyIssue(issue_number, content, issue_create_time, issue_close_time)
                fout.write(str(myissue))
        ### TMP fix for data set -- remove it after 2019-8-4###
        # if not os.path.isfile(issue_file_path):
        #     print("creating issue.csv")
        #     with open(issue_file_path, "w", encoding='utf8') as fout:
        #         fout.write("issue_id,issue_content,closed_at,created_at\n")
        #         for issue in issues:
        #             issue_number = issue.number
        #             if issue_number>max_id:
        #                 continue
        #             print(issue_number)
        #             content = []
        #             content.append(issue.title)
        #             content.append(issue.body)
        #             issue_close_time = issue.closed_at
        #             issue_create_time = issue.created_at
        #             for comment in issue.get_comments():
        #                 content.append(comment.body)
        #             myissue = MyIssue(issue_number, content, issue_create_time, issue_close_time)
        #             fout.write(str(myissue))
        # Clone the repo locally (SSH) if we don't already have it.
        repo_url = "git@github.com:{}.git".format(self.repo_path)
        repo_name = repo_url.split("/")[1]
        clone_path = os.path.join(self.download_path, repo_name)
        if not os.path.exists(clone_path):
            local_git.Repo.clone_from(repo_url, clone_path, branch='master')
        local_repo = local_git.Repo(clone_path)
        commit_file_path = os.path.join(output_dir, "commit.csv")
        if not os.path.isfile(commit_file_path):
            print("creating commit.csv...")
            with open(commit_file_path, 'w', encoding="utf8") as fout:
                fout.write("commit_id,commit_summary, commit_diff,commit_time\n")
                for i, commit in enumerate(local_repo.iter_commits()):
                    print("commit #{}".format(i))
                    id = commit.hexsha
                    summary = commit.summary
                    create_time = commit.committed_datetime
                    # Root commits have no parents; diff against the empty tree.
                    parent = commit.parents[0] if commit.parents else EMPTY_TREE_SHA
                    differs = set()
                    for diff in commit.diff(parent, create_patch=True):
                        diff_lines = str(diff).split("\n")
                        for diff_line in diff_lines:
                            # NOTE(review): Python precedence parses this as
                            # "+..." OR ("-..." AND no '@'), so added lines are
                            # kept even when they contain '@'. The symmetric
                            # "(+ or -) and no '@'" was probably intended.
                            if diff_line.startswith("+") or diff_line.startswith("-") and '@' not in diff_line:
                                differs.add(diff_line)
                    commit = MyCommit(id, summary, differs, create_time)
                    fout.write(str(commit))
        # Extract links from the commits: a commit whose summary mentions
        # "#<n>" is linked to issue <n>.
        with open(os.path.join(output_dir, "links.csv"), 'w', encoding='utf8') as fout, \
                open(issue_file_path, encoding='utf8') as issue_in, \
                open(commit_file_path, encoding='utf8') as commit_in:
            issue_ids = set()
            fout.write("issue_id,commit_id\n")
            for line in issue_in:
                issue_ids.add(line.split(',')[0])
            for line in commit_in:
                summary = line.split(',')[1]
                commit_id = line.split(",")[0]
                res = re.search('#\d+', summary)
                if res is not None:
                    linked_issue_id = res.group(0)
                    issue_id = linked_issue_id.strip("#")
                    # Unknown ids are only warned about; the link row is still written.
                    if issue_id not in issue_ids:
                        print("{} is not in the issue file".format(issue_id))
                    fout.write("{},{}\n".format(issue_id, commit_id))
        # Translate the commit and issue
        trans_out_dir = os.path.join(output_dir, "translated_data")
        if not os.path.isdir(trans_out_dir):
            os.mkdir(trans_out_dir)
        trans_issue_file_path = os.path.join(trans_out_dir, "issue.csv")
        trans_commit_file_path = os.path.join(trans_out_dir, "commit.csv")
        # issue_token_file_path = os.path.join(output_dir, "clean_token_data", "issue.csv")
        # commit_token_file_path = os.path.join(output_dir, "clean_token_data", "commit.csv")
        if translate_project_flag is True:
            print("Translating issue...")
            partition_size = 14000
            # Resume support: reuse any rows that were already translated.
            if os.path.isfile(trans_issue_file_path):
                with open(trans_issue_file_path, 'r', encoding='utf8') as fin:
                    translatedLines = fin.readlines()
            else:
                translatedLines = []
            with open(trans_issue_file_path, 'w', encoding='utf8') as fout, open(issue_file_path,
                                                                                 encoding='utf8') as fin:
                for i, line in enumerate(fin):
                    if i == 0:
                        fout.write(line)  # header row passes through untranslated
                        continue
                    print(i)
                    if i < len(translatedLines):
                        trans_line = translatedLines[i].strip("\n\t\r")
                        fout.write(trans_line + "\n")
                    else:
                        # NOTE(review): issue.csv rows have 4 columns
                        # (issue_id,content,closed_at,created_at) but this
                        # unpacks exactly 3 -- looks like it raises ValueError
                        # on a fresh row; confirm against real data.
                        issue_id, issue_content, issue_close_time = line.strip("\n\t\r").split(",")
                        translated_issue_content = translate_intermingual_sentence(issue_content)
                        fout.write("{},{},{}\n".format(issue_id, translated_issue_content, issue_close_time))
            print("Translate commit...")
            if os.path.isfile(trans_commit_file_path):
                with open(trans_commit_file_path, 'r', encoding='utf8') as fin:
                    translatedLines = fin.readlines()
            else:
                translatedLines = []
            with open(trans_commit_file_path, 'w', encoding='utf8') as fout, open(commit_file_path,
                                                                                  encoding='utf8') as fin:
                for i, line in enumerate(fin):
                    if i == 0:
                        fout.write(line)  # header row passes through untranslated
                        continue
                    print(i)
                    if i < len(translatedLines):
                        trans_line = translatedLines[i].strip("\n\t\r")
                        fout.write(trans_line + "\n")
                    else:
                        commit_id, commit_summary, commit_content, commit_time = line.strip("\n\t\r").split(",")
                        translated_commit_summary = translate_intermingual_sentence(commit_summary)
                        # Cap the diff text at 400 whitespace tokens before translating.
                        commit_content = " ".join(commit_content.split()[:400])
                        translated_commit_content = translate_intermingual_sentence(commit_content)
                        fout.write(
                            "{},{},{},{}\n".format(commit_id, translated_commit_summary, translated_commit_content,
                                                   commit_time))
if __name__ == "__main__":
    # CLI entry point: collect issues/commits for each repo named with -r.
    arg_parser = argparse.ArgumentParser("Github script")
    flag_specs = [
        ("-u", {"help": "user name"}),
        ("-p", {"help": "password"}),
        ("-d", {"help": "download path"}),
        ("-r", {"nargs": "+", "help": "repo path in github, a list of repo path can be passed"}),
        ("-t", {"action": "store_true", "help": "boolean value determine whether do translation"}),
    ]
    for flag, options in flag_specs:
        arg_parser.add_argument(flag, **options)
    cli_args = arg_parser.parse_args()
    for target_repo in cli_args.r:
        print("Processing repo: {}".format(target_repo))
        collector = RepoCollector(cli_args.u, cli_args.p, cli_args.d, target_repo, cli_args.t)
        collector.run()
|
#import numpy
import numpy as np
from scipy import signal
def process():
    """Compute a frequency-weighted overall RMS value from tri-axial samples.

    Embeds a 1/3-octave-band filter bank (ANSI S1.11-style band placement,
    Butterworth SOS filters from scipy.signal) and applies it per axis, then
    combines the weighted band RMS values.

    NOTE(review): the script section iterates over ``dataset``, which is not
    defined in this function or anywhere visible in the module -- as written
    a call raises NameError. Also, the final combined value on the last line
    before ``return 1`` is computed and discarded; presumably it should be
    returned or stored. Confirm both against the original intent.
    """
    #"""
    # Public names of the embedded filter-bank mini-library.
    __all__ = ['octavefilter', 'getansifrequencies', 'normalizedfreq']
    def _buttersosfilter(freq, freq_d, freq_u, fs, order, factor, show=0):
        """Build one bandpass Butterworth SOS filter per band (``show`` unused)."""
        # Initialize coefficients matrix
        sos = [[[]] for i in range(len(freq))]
        # Generate coefficients for each frequency band
        for idx, (lower, upper) in enumerate(zip(freq_d, freq_u)):
            # Downsampling to improve filter coefficients
            fsd = fs / factor[idx] # New sampling rate
            # Butterworth Filter with SOS coefficients
            sos[idx] = signal.butter(
                N = order,
                Wn = np.array([lower, upper]) / (fsd / 2),
                btype = 'bandpass',
                analog = False,
                output = 'sos')
        return sos
    def _genfreqs(limits, fraction, fs):
        """Return (center, lower, upper) band frequencies valid below fs/2."""
        # Generate frequencies
        freq, freq_d, freq_u = getansifrequencies(fraction, limits)
        # Remove outer frequency to prevent filter error (fs/2 < freq)
        freq, freq_d, freq_u = _deleteouters(freq, freq_d, freq_u, fs)
        return freq, freq_d, freq_u
    def normalizedfreq(fraction):
        """Return the standard nominal center frequencies for *fraction* (only 3 supported)."""
        predefined = {
            3: _thirdoctave(),
        }
        return predefined[fraction]
    def _thirdoctave():
        # IEC 61260 - 1 - 2014 (added 12.5, 16, 20 Hz)
        return [4, 5, 6.3, 8, 10, 12.5, 16, 20, 25, 31.5, 40, 50, 63, 80, 100,
                125, 160, 200, 250, 315, 400, 500, 630, 800, 1000, 1250, 1600, 2000]
    def _deleteouters(freq, freq_d, freq_u, fs):
        """Drop bands whose upper edge exceeds the Nyquist frequency fs/2."""
        idx = np.asarray(np.where(np.array(freq_u) > fs / 2))
        # NOTE(review): ``any(idx[0])`` is False when the only offending band
        # index is 0, so band 0 would never be deleted -- likely should be
        # ``idx.size > 0``. Confirm.
        if any(idx[0]):
            freq = np.delete(freq, idx).tolist()
            freq_d = np.delete(freq_d, idx).tolist()
            freq_u = np.delete(freq_u, idx).tolist()
        return freq, freq_d, freq_u
    def getansifrequencies(fraction, limits=None):
        """Generate ANSI S1.11 band center/edge frequencies within *limits*."""
        if limits is None:
            limits = [4, 2000]
        # Octave ratio g (ANSI s1.11, 3.2, pg. 2)
        g = 10 ** (3 / 10) # Or g = 2
        # Reference frequency (ANSI s1.11, 3.4, pg. 2)
        fr = 1000
        # Get starting index 'x' and first center frequency
        x = _initindex(limits[0], fr, g, fraction)
        freq = _ratio(g, x, fraction) * fr
        # Get each frequency until reach maximum frequency
        freq_x = 0
        while freq_x * _bandedge(g, fraction) < limits[1]:
            # Increase index
            x = x + 1
            # New frequency
            freq_x = _ratio(g, x, fraction) * fr
            # Store new frequency
            freq = np.append(freq, freq_x)
        # Get band-edges
        freq_d = freq / _bandedge(g, fraction)
        freq_u = freq * _bandedge(g, fraction)
        return freq.tolist(), freq_d.tolist(), freq_u.tolist()
    def _initindex(f, fr, g, b):
        """Return the integer band index whose center frequency is nearest *f*."""
        if b % 2: # ODD ('x' solve from ANSI s1.11, eq. 3)
            return np.round(
                (b * np.log(f / fr) + 30 * np.log(g)) / np.log(g)
            )
        else: # EVEN ('x' solve from ANSI s1.11, eq. 4)
            return np.round(
                (2 * b * np.log(f / fr) + 59 * np.log(g)) / (2 * np.log(g))
            )
    def _ratio(g, x, b):
        """Center-frequency ratio for band index *x* at bandwidth 1/b octave."""
        if b % 2: # ODD (ANSI s1.11, eq. 3)
            return g ** ((x - 30) / b)
        else: # EVEN (ANSI s1.11, eq. 4)
            return g ** ((2 * x - 59) / (2 * b))
    def _bandedge(g, b):
        # Band-edge ratio (ANSI s1.11, 3.7, pg. 3)
        return g ** (1 / (2 * b))
    def _downsamplingfactor(freq, fs):
        """Per-band decimation factor keeping the band below the new Nyquist (10% guard)."""
        guard = 0.10
        factor = (np.floor((fs / (2+guard)) / np.array(freq))).astype('int')
        for idx in range(len(factor)):
            # Factor between 1<factor<50
            factor[idx] = max(min(factor[idx], 50), 1)
        return factor
    def octavefilter(x, fs, fraction=3, order=6, limits=None, show=0, sigbands =0):
        """Return per-band RMS of signal *x* (``show`` and ``sigbands`` are unused)."""
        # Generate frequency array
        freq, freq_d, freq_u = _genfreqs(limits, fraction, fs)
        # Calculate the downsampling factor (array of integers with size [freq])
        factor = _downsamplingfactor(freq_u, fs)
        # Get SOS filter coefficients (3D - matrix with size: [freq,order,6])
        sos = _buttersosfilter(freq, freq_d, freq_u, fs, order, factor, show)
        # Create array with SPL for each frequency band
        spl = np.zeros([len(freq)])
        for idx in range(len(freq)):
            # Decimate, bandpass-filter, then take the RMS of the band signal.
            sd = signal.decimate(x, factor[idx])
            y = signal.sosfilt(sos[idx], sd)
            spl[idx] = np.sqrt(np.mean(np.square(y)))
        return spl.tolist()
    # ---- script section: split samples into axes, filter, weight, combine ----
    x , y, z = [], [], []
    A_hw_rms_x, A_hw_rms_y, A_hw_rms_z = [], [], []
    fs = 4500
    # Per-band weighting curve; one weight per 1/3-octave band (4 Hz..2 kHz).
    octave_weights = [0.375, 0.545, 0.727, 0.873, 0.951, 0.958, 0.896, 0.782, 0.647, 0.519, 0.411, 0.324, 0.256, 0.202, 0.160, 0.127, 0.101, 0.0799, 0.0634, 0.0503, 0.0398, 0.0314, 0.0245, 0.0186, 0.0135, 0.00894, 0.00536, 0.00295]
    # NOTE(review): ``dataset`` is undefined here (see function docstring).
    # Assumes each element is an (x, y, z) sample triple -- confirm.
    for element in dataset:
        x.append(element[0])
        y.append(element[1])
        z.append(element[2])
    # Filter (get spectra and signal in bands)
    A_hi_rms_x = octavefilter(x, fs=fs, fraction=3, order=30, limits=[4, 2000], show=0)
    A_hi_rms_y = octavefilter(y, fs=fs, fraction=3, order=30, limits=[4, 2000], show=0)
    A_hi_rms_z = octavefilter(z, fs=fs, fraction=3, order=30, limits=[4, 2000], show=0)
    # Apply the band weights per axis.
    for i in range(len(octave_weights)):
        A_hw_rms_x.append(octave_weights[i] * A_hi_rms_x[i])
        A_hw_rms_y.append(octave_weights[i] * A_hi_rms_y[i])
        A_hw_rms_z.append(octave_weights[i] * A_hi_rms_z[i])
    arr_x = np.array(A_hw_rms_x)
    arr_y = np.array(A_hw_rms_y)
    arr_z = np.array(A_hw_rms_z)
    # Per-axis weighted RMS over all bands.
    xx = np.sqrt(np.sum(np.square(arr_x)))
    yy = np.sqrt(np.sum(np.square(arr_y)))
    zz = np.sqrt(np.sum(np.square(arr_z)))
    # NOTE(review): combined vector magnitude is computed but discarded.
    np.sqrt(np.sum(xx**2 + yy**2 + zz**2))
    #"""
    return 1
from ascii_table import Table
from qasm.bridge.config.QuantumComputerConfig import QuantumComputerConfig
from qasm.helpers.Util import SortedDictionary
from qasm.commands.Command import Command
class Show(Command):
    """Config subcommand that prints the quantum computer configuration."""

    def run(self):
        """
        Print the current quantum computer config as an ASCII table.
        :return: (None)
        """
        settings = SortedDictionary(QuantumComputerConfig.get_config())
        print("\nQuantum Computer Config")
        print(Table([settings.names(), settings.values()]))
|
#!/usr/bin/python3
"""1-pack_web_static module"""
from os.path import isfile
from datetime import datetime
from fabric.api import local
def do_pack():
    """Generates a .tgz archive from the contents of web_static folder of
    AriBnB Clone repo

    Returns: Archive path, otherwise False
    """
    # Timestamped archive name keeps successive packs from clobbering each other.
    ct = datetime.now().strftime("%Y%m%d%H%M%S")
    archive = "versions/web_static_{}.tgz".format(ct)
    local("mkdir -p versions")
    local("tar -cvzf {} web_static".format(archive))
    if isfile(archive):
        return archive
    # Fix: honor the documented contract -- return False explicitly instead of
    # falling off the end and implicitly returning None.
    return False
|
# coding: utf-8
# flake8: noqa
"""
SevOne API Documentation
Supported endpoints by the new RESTful API # noqa: E501
OpenAPI spec version: 2.1.18, Hash: db562e6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from swagger_client.api.alerts_api import AlertsApi
from swagger_client.api.api_keys_api import ApiKeysApi
from swagger_client.api.application_api import ApplicationApi
from swagger_client.api.authentication_api import AuthenticationApi
from swagger_client.api.background_tasks_api import BackgroundTasksApi
from swagger_client.api.countries_and_timezones_api import CountriesAndTimezonesApi
from swagger_client.api.device_group_rules_api import DeviceGroupRulesApi
from swagger_client.api.device_groups_api import DeviceGroupsApi
from swagger_client.api.device_types_api import DeviceTypesApi
from swagger_client.api.devices_api import DevicesApi
from swagger_client.api.discovery_api import DiscoveryApi
from swagger_client.api.dynamic_plugin_api import DynamicPluginApi
from swagger_client.api.indicators_api import IndicatorsApi
from swagger_client.api.maintenance_windows_api import MaintenanceWindowsApi
from swagger_client.api.metadata_api import MetadataApi
from swagger_client.api.metadata_attribute_api import MetadataAttributeApi
from swagger_client.api.metadata_namespace_api import MetadataNamespaceApi
from swagger_client.api.net_flow_api import NetFlowApi
from swagger_client.api.object_group_api import ObjectGroupApi
from swagger_client.api.object_group_rules_api import ObjectGroupRulesApi
from swagger_client.api.objects_api import ObjectsApi
from swagger_client.api.peers_api import PeersApi
from swagger_client.api.permissions_api import PermissionsApi
from swagger_client.api.plugins_api import PluginsApi
from swagger_client.api.policies_api import PoliciesApi
from swagger_client.api.report_attachments_api import ReportAttachmentsApi
from swagger_client.api.report_attachments_alerts_api import ReportAttachmentsAlertsApi
from swagger_client.api.report_attachments_device_groups_api import ReportAttachmentsDeviceGroupsApi
from swagger_client.api.report_attachments_devices_api import ReportAttachmentsDevicesApi
from swagger_client.api.report_attachments_flow_falcon_api import ReportAttachmentsFlowFalconApi
from swagger_client.api.report_attachments_group_metrics_api import ReportAttachmentsGroupMetricsApi
from swagger_client.api.report_attachments_metadata_api import ReportAttachmentsMetadataApi
from swagger_client.api.report_attachments_object_groups_api import ReportAttachmentsObjectGroupsApi
from swagger_client.api.report_attachments_objects_api import ReportAttachmentsObjectsApi
from swagger_client.api.report_attachments_performance_metrics_api import ReportAttachmentsPerformanceMetricsApi
from swagger_client.api.report_attachments_status_map_api import ReportAttachmentsStatusMapApi
from swagger_client.api.report_attachments_telephony_api import ReportAttachmentsTelephonyApi
from swagger_client.api.report_attachments_top_n_api import ReportAttachmentsTopNApi
from swagger_client.api.report_attachments_topology_api import ReportAttachmentsTopologyApi
from swagger_client.api.reports_api import ReportsApi
from swagger_client.api.roles_api import RolesApi
from swagger_client.api.run_report_attachments_api import RunReportAttachmentsApi
from swagger_client.api.status_map_images_api import StatusMapImagesApi
from swagger_client.api.status_maps_api import StatusMapsApi
from swagger_client.api.tags_api import TagsApi
from swagger_client.api.top_n_views_api import TopNViewsApi
from swagger_client.api.topology_api import TopologyApi
from swagger_client.api.users_api import UsersApi
from swagger_client.api.utils_api import UtilsApi
from swagger_client.api.work_hours_api import WorkHoursApi
# import ApiClient
from swagger_client.api_client import ApiClient
from swagger_client.configuration import Configuration
# import models into sdk package
from swagger_client.models.aggregation_selection_setting import AggregationSelectionSetting
from swagger_client.models.alert_attachment_aggregation import AlertAttachmentAggregation
from swagger_client.models.alert_attachment_create_dto import AlertAttachmentCreateDto
from swagger_client.models.alert_attachment_data_dto import AlertAttachmentDataDto
from swagger_client.models.alert_attachment_dto import AlertAttachmentDto
from swagger_client.models.alert_attachment_filters import AlertAttachmentFilters
from swagger_client.models.alert_attachment_filters_schema import AlertAttachmentFiltersSchema
from swagger_client.models.alert_attachment_request_dto_v1 import AlertAttachmentRequestDtoV1
from swagger_client.models.alert_attachment_resource import AlertAttachmentResource
from swagger_client.models.alert_attachment_resource_v1 import AlertAttachmentResourceV1
from swagger_client.models.alert_attachment_response_dto_v1 import AlertAttachmentResponseDtoV1
from swagger_client.models.alert_attachment_result_dto import AlertAttachmentResultDto
from swagger_client.models.alert_attachment_settings import AlertAttachmentSettings
from swagger_client.models.alert_attachment_settings_v1 import AlertAttachmentSettingsV1
from swagger_client.models.alert_attachment_visualization import AlertAttachmentVisualization
from swagger_client.models.alert_attachment_visualization_v1 import AlertAttachmentVisualizationV1
from swagger_client.models.alert_clear_dto import AlertClearDto
from swagger_client.models.alert_create_dto import AlertCreateDto
from swagger_client.models.alert_dto import AlertDto
from swagger_client.models.alert_filter_dto import AlertFilterDto
from swagger_client.models.alert_flow_falcon_dto import AlertFlowFalconDto
from swagger_client.models.alert_report_response_dto import AlertReportResponseDto
from swagger_client.models.alert_setting import AlertSetting
from swagger_client.models.api_info import ApiInfo
from swagger_client.models.api_key_dto import ApiKeyDto
from swagger_client.models.api_key_request_dto import ApiKeyRequestDto
from swagger_client.models.attachment_dto import AttachmentDto
from swagger_client.models.attachment_filter_details import AttachmentFilterDetails
from swagger_client.models.attachment_filters import AttachmentFilters
from swagger_client.models.attribute_dto import AttributeDto
from swagger_client.models.attribute_filter_dto import AttributeFilterDto
from swagger_client.models.attribute_values import AttributeValues
from swagger_client.models.background_task import BackgroundTask
from swagger_client.models.csv_setting import CSVSetting
from swagger_client.models.capacity_threshold import CapacityThreshold
from swagger_client.models.column_setting import ColumnSetting
from swagger_client.models.connection_dto import ConnectionDto
from swagger_client.models.connection_request_dto import ConnectionRequestDto
from swagger_client.models.constraint_dto import ConstraintDto
from swagger_client.models.create_device_request_dto import CreateDeviceRequestDto
from swagger_client.models.create_link_data import CreateLinkData
from swagger_client.models.custom_work_hour import CustomWorkHour
from swagger_client.models.data_aggregation_setting import DataAggregationSetting
from swagger_client.models.data_point_dto import DataPointDto
from swagger_client.models.data_presentation_setting import DataPresentationSetting
from swagger_client.models.device_alerts_dto import DeviceAlertsDto
from swagger_client.models.device_attachment_filters_schema import DeviceAttachmentFiltersSchema
from swagger_client.models.device_description import DeviceDescription
from swagger_client.models.device_discovery_dto import DeviceDiscoveryDto
from swagger_client.models.device_discovery_filter import DeviceDiscoveryFilter
from swagger_client.models.device_dto import DeviceDto
from swagger_client.models.device_filter import DeviceFilter
from swagger_client.models.device_group_dto import DeviceGroupDto
from swagger_client.models.device_group_filter import DeviceGroupFilter
from swagger_client.models.device_group_permission_dto import DeviceGroupPermissionDto
from swagger_client.models.device_group_request_dto import DeviceGroupRequestDto
from swagger_client.models.device_group_rule_dto import DeviceGroupRuleDto
from swagger_client.models.device_groups_request_dto import DeviceGroupsRequestDto
from swagger_client.models.device_groups_request_dto_v1 import DeviceGroupsRequestDtoV1
from swagger_client.models.device_groups_resource import DeviceGroupsResource
from swagger_client.models.device_groups_resource_v1 import DeviceGroupsResourceV1
from swagger_client.models.device_groups_response_dto import DeviceGroupsResponseDto
from swagger_client.models.device_groups_response_dto_v1 import DeviceGroupsResponseDtoV1
from swagger_client.models.device_groups_visualization import DeviceGroupsVisualization
from swagger_client.models.device_groups_visualization_v1 import DeviceGroupsVisualizationV1
from swagger_client.models.device_indicator_dto import DeviceIndicatorDto
from swagger_client.models.device_object_dto import DeviceObjectDto
from swagger_client.models.device_object_group_map_filter import DeviceObjectGroupMapFilter
from swagger_client.models.device_object_group_mapping import DeviceObjectGroupMapping
from swagger_client.models.device_object_id import DeviceObjectId
from swagger_client.models.device_object_request_dto import DeviceObjectRequestDto
from swagger_client.models.device_object_update_request_dto import DeviceObjectUpdateRequestDto
from swagger_client.models.device_tag_dto import DeviceTagDto
from swagger_client.models.device_type_dto import DeviceTypeDto
from swagger_client.models.device_type_request_dto import DeviceTypeRequestDto
from swagger_client.models.device_type_response_dto import DeviceTypeResponseDto
from swagger_client.models.device_type_response_dto_v1 import DeviceTypeResponseDtoV1
from swagger_client.models.device_update_request_dto import DeviceUpdateRequestDto
from swagger_client.models.devices_request_dto import DevicesRequestDto
from swagger_client.models.devices_request_dto_v1 import DevicesRequestDtoV1
from swagger_client.models.devices_resource import DevicesResource
from swagger_client.models.devices_resource_v1 import DevicesResourceV1
from swagger_client.models.devices_response_dto import DevicesResponseDto
from swagger_client.models.devices_response_dto_v1 import DevicesResponseDtoV1
from swagger_client.models.devices_settings import DevicesSettings
from swagger_client.models.devices_settings_v1 import DevicesSettingsV1
from swagger_client.models.devices_visualization import DevicesVisualization
from swagger_client.models.devices_visualization_v1 import DevicesVisualizationV1
from swagger_client.models.discovery_request_dto import DiscoveryRequestDto
from swagger_client.models.dynamic_plugin_field_dto import DynamicPluginFieldDto
from swagger_client.models.dynamic_plugin_manager_request_dto import DynamicPluginManagerRequestDto
from swagger_client.models.dynamic_plugin_manager_response_dto import DynamicPluginManagerResponseDto
from swagger_client.models.dynamic_plugin_request_dto import DynamicPluginRequestDto
from swagger_client.models.dynamic_plugin_response_dto import DynamicPluginResponseDto
from swagger_client.models.endpoint_dto import EndpointDto
from swagger_client.models.field_description import FieldDescription
from swagger_client.models.filter_data_store_details import FilterDataStoreDetails
from swagger_client.models.filter_operation_details import FilterOperationDetails
from swagger_client.models.filter_schema_details import FilterSchemaDetails
from swagger_client.models.filter_value import FilterValue
from swagger_client.models.flow_device_mapping_dto import FlowDeviceMappingDto
from swagger_client.models.flow_falcon_attachment_dto import FlowFalconAttachmentDto
from swagger_client.models.flow_falcon_attachment_filters_schema import FlowFalconAttachmentFiltersSchema
from swagger_client.models.flow_falcon_attachment_response_dto import FlowFalconAttachmentResponseDto
from swagger_client.models.flow_falcon_columns_setting import FlowFalconColumnsSetting
from swagger_client.models.flow_falcon_drill_down_dto import FlowFalconDrillDownDto
from swagger_client.models.flow_falcon_filter import FlowFalconFilter
from swagger_client.models.flow_falcon_group import FlowFalconGroup
from swagger_client.models.flow_falcon_interface import FlowFalconInterface
from swagger_client.models.flow_falcon_performance_metrics_request_dto import FlowFalconPerformanceMetricsRequestDto
from swagger_client.models.flow_falcon_report_request_dto import FlowFalconReportRequestDto
from swagger_client.models.flow_falcon_report_response_dto import FlowFalconReportResponseDto
from swagger_client.models.flow_falcon_request_dto import FlowFalconRequestDto
from swagger_client.models.flow_falcon_resolution_setting import FlowFalconResolutionSetting
from swagger_client.models.flow_falcon_resource import FlowFalconResource
from swagger_client.models.flow_falcon_response_dto_v1 import FlowFalconResponseDtoV1
from swagger_client.models.flow_falcon_setting import FlowFalconSetting
from swagger_client.models.flow_falcon_setting_v1 import FlowFalconSettingV1
from swagger_client.models.flow_falcon_settings import FlowFalconSettings
from swagger_client.models.flow_falcon_settings_v1 import FlowFalconSettingsV1
from swagger_client.models.flow_falcon_template_setting import FlowFalconTemplateSetting
from swagger_client.models.flow_falcon_template_setting_v1 import FlowFalconTemplateSettingV1
from swagger_client.models.flow_falcon_view import FlowFalconView
from swagger_client.models.flow_falcon_view_indicators_dto import FlowFalconViewIndicatorsDto
from swagger_client.models.flow_falcon_visualization import FlowFalconVisualization
from swagger_client.models.flow_falcon_visualization_v1 import FlowFalconVisualizationV1
from swagger_client.models.flow_interface_dto import FlowInterfaceDto
from swagger_client.models.graph_bar_setting import GraphBarSetting
from swagger_client.models.graph_line_setting import GraphLineSetting
from swagger_client.models.graph_pie_setting import GraphPieSetting
from swagger_client.models.graph_radial_setting import GraphRadialSetting
from swagger_client.models.graph_stacked_bar_setting import GraphStackedBarSetting
from swagger_client.models.graph_stacked_line_setting import GraphStackedLineSetting
from swagger_client.models.group_metrics_data import GroupMetricsData
from swagger_client.models.group_metrics_indicator_types import GroupMetricsIndicatorTypes
from swagger_client.models.group_metrics_indicator_types_v1 import GroupMetricsIndicatorTypesV1
from swagger_client.models.group_metrics_request_dto import GroupMetricsRequestDto
from swagger_client.models.group_metrics_request_dto_v1 import GroupMetricsRequestDtoV1
from swagger_client.models.group_metrics_resource import GroupMetricsResource
from swagger_client.models.group_metrics_resource_v1 import GroupMetricsResourceV1
from swagger_client.models.group_metrics_response_dto import GroupMetricsResponseDto
from swagger_client.models.group_metrics_response_dto_v1 import GroupMetricsResponseDtoV1
from swagger_client.models.group_metrics_run_report_request_dto import GroupMetricsRunReportRequestDto
from swagger_client.models.group_metrics_run_report_response_dto import GroupMetricsRunReportResponseDto
from swagger_client.models.group_metrics_run_report_result_dto import GroupMetricsRunReportResultDto
from swagger_client.models.group_metrics_settings_dto import GroupMetricsSettingsDto
from swagger_client.models.group_metrics_settings_dto_v1 import GroupMetricsSettingsDtoV1
from swagger_client.models.group_metrics_visualization import GroupMetricsVisualization
from swagger_client.models.group_metrics_visualization_v1 import GroupMetricsVisualizationV1
from swagger_client.models.incorporate_response import IncorporateResponse
from swagger_client.models.indicator_data_dto import IndicatorDataDto
from swagger_client.models.indicator_description import IndicatorDescription
from swagger_client.models.indicator_dto import IndicatorDto
from swagger_client.models.indicator_request_dto import IndicatorRequestDto
from swagger_client.models.indicator_type_dto import IndicatorTypeDto
from swagger_client.models.indicator_type_dto_v1 import IndicatorTypeDtoV1
from swagger_client.models.indicator_type_request_dto import IndicatorTypeRequestDto
from swagger_client.models.indicator_type_request_dto_v1 import IndicatorTypeRequestDtoV1
from swagger_client.models.internal_object_dto import InternalObjectDto
from swagger_client.models.link_data import LinkData
from swagger_client.models.logging_level import LoggingLevel
from swagger_client.models.maintenance_window_device_dto import MaintenanceWindowDeviceDto
from swagger_client.models.maintenance_window_device_group_dto import MaintenanceWindowDeviceGroupDto
from swagger_client.models.maintenance_window_filter_dto import MaintenanceWindowFilterDto
from swagger_client.models.map_image_dto import MapImageDto
from swagger_client.models.map_setting import MapSetting
from swagger_client.models.mapped_device_group_entity_dto import MappedDeviceGroupEntityDto
from swagger_client.models.mapstringobject import Mapstringobject
from swagger_client.models.mapstringstring import Mapstringstring
from swagger_client.models.metadata_attachment_request_dto import MetadataAttachmentRequestDto
from swagger_client.models.metadata_attachment_request_dto_v1 import MetadataAttachmentRequestDtoV1
from swagger_client.models.metadata_attachment_resource import MetadataAttachmentResource
from swagger_client.models.metadata_attachment_resource_v1 import MetadataAttachmentResourceV1
from swagger_client.models.metadata_attachment_response_dto import MetadataAttachmentResponseDto
from swagger_client.models.metadata_attachment_response_dto_v1 import MetadataAttachmentResponseDtoV1
from swagger_client.models.metadata_attachment_visualization import MetadataAttachmentVisualization
from swagger_client.models.metadata_attachment_visualization_v1 import MetadataAttachmentVisualizationV1
from swagger_client.models.namespace_dto import NamespaceDto
from swagger_client.models.net_flow_aggregation_template_dto import NetFlowAggregationTemplateDto
from swagger_client.models.net_flow_application_dto import NetFlowApplicationDto
from swagger_client.models.net_flow_device_dto import NetFlowDeviceDto
from swagger_client.models.net_flow_device_filter_dto import NetFlowDeviceFilterDto
from swagger_client.models.net_flow_direction_dto import NetFlowDirectionDto
from swagger_client.models.net_flow_field_dto import NetFlowFieldDto
from swagger_client.models.net_flow_field_filter_dto import NetFlowFieldFilterDto
from swagger_client.models.net_flow_filter_create_dto import NetFlowFilterCreateDto
from swagger_client.models.net_flow_filter_dto import NetFlowFilterDto
from swagger_client.models.net_flow_filter_entity_create_dto import NetFlowFilterEntityCreateDto
from swagger_client.models.net_flow_filter_entity_dto import NetFlowFilterEntityDto
from swagger_client.models.net_flow_interface_dto import NetFlowInterfaceDto
from swagger_client.models.net_flow_interface_filter_dto import NetFlowInterfaceFilterDto
from swagger_client.models.net_flow_modes_dto import NetFlowModesDto
from swagger_client.models.net_flow_protocol_dto import NetFlowProtocolDto
from swagger_client.models.net_flow_subnet_category_create_dto import NetFlowSubnetCategoryCreateDto
from swagger_client.models.net_flow_subnet_category_dto import NetFlowSubnetCategoryDto
from swagger_client.models.net_flow_subnet_create_dto import NetFlowSubnetCreateDto
from swagger_client.models.net_flow_subnet_dto import NetFlowSubnetDto
from swagger_client.models.net_flow_view_category_dto import NetFlowViewCategoryDto
from swagger_client.models.net_flow_view_filter_dto import NetFlowViewFilterDto
from swagger_client.models.netflow_device_alerts_dto import NetflowDeviceAlertsDto
from swagger_client.models.netflow_reporting_column_dto import NetflowReportingColumnDto
from swagger_client.models.node_alert import NodeAlert
from swagger_client.models.node_data import NodeData
from swagger_client.models.node_dto import NodeDto
from swagger_client.models.node_request_dto import NodeRequestDto
from swagger_client.models.object_attachment_request_dto import ObjectAttachmentRequestDto
from swagger_client.models.object_attachment_request_dto_v1 import ObjectAttachmentRequestDtoV1
from swagger_client.models.object_attachment_resource import ObjectAttachmentResource
from swagger_client.models.object_attachment_resource_v1 import ObjectAttachmentResourceV1
from swagger_client.models.object_attachment_response_dto import ObjectAttachmentResponseDto
from swagger_client.models.object_attachment_response_dto_v1 import ObjectAttachmentResponseDtoV1
from swagger_client.models.object_attachment_settings import ObjectAttachmentSettings
from swagger_client.models.object_attachment_settings_v1 import ObjectAttachmentSettingsV1
from swagger_client.models.object_attachment_visualization import ObjectAttachmentVisualization
from swagger_client.models.object_attachment_visualization_v1 import ObjectAttachmentVisualizationV1
from swagger_client.models.object_data_dto import ObjectDataDto
from swagger_client.models.object_description import ObjectDescription
from swagger_client.models.object_filter import ObjectFilter
from swagger_client.models.object_group_attachment_request_dto import ObjectGroupAttachmentRequestDto
from swagger_client.models.object_group_attachment_request_dto_v1 import ObjectGroupAttachmentRequestDtoV1
from swagger_client.models.object_group_attachment_resource import ObjectGroupAttachmentResource
from swagger_client.models.object_group_attachment_resource_v1 import ObjectGroupAttachmentResourceV1
from swagger_client.models.object_group_attachment_response_dto import ObjectGroupAttachmentResponseDto
from swagger_client.models.object_group_attachment_response_dto_v1 import ObjectGroupAttachmentResponseDtoV1
from swagger_client.models.object_group_attachment_visualization import ObjectGroupAttachmentVisualization
from swagger_client.models.object_group_attachment_visualization_v1 import ObjectGroupAttachmentVisualizationV1
from swagger_client.models.object_group_dto import ObjectGroupDto
from swagger_client.models.object_group_filter_dto import ObjectGroupFilterDto
from swagger_client.models.object_group_request_dto import ObjectGroupRequestDto
from swagger_client.models.object_group_rule_dto import ObjectGroupRuleDto
from swagger_client.models.object_type_dto import ObjectTypeDto
from swagger_client.models.object_type_dto_v1 import ObjectTypeDtoV1
from swagger_client.models.object_type_request_dto import ObjectTypeRequestDto
from swagger_client.models.object_type_request_dto_v1 import ObjectTypeRequestDtoV1
from swagger_client.models.page_and_sort_options import PageAndSortOptions
from swagger_client.models.pager_alert_dto import PagerAlertDto
from swagger_client.models.pager_attachment_dto import PagerAttachmentDto
from swagger_client.models.pager_attribute_dto import PagerAttributeDto
from swagger_client.models.pager_connection_dto import PagerConnectionDto
from swagger_client.models.pager_constraint_dto import PagerConstraintDto
from swagger_client.models.pager_device_discovery_dto import PagerDeviceDiscoveryDto
from swagger_client.models.pager_device_dto import PagerDeviceDto
from swagger_client.models.pager_device_group_dto import PagerDeviceGroupDto
from swagger_client.models.pager_device_group_permission_dto import PagerDeviceGroupPermissionDto
from swagger_client.models.pager_device_group_rule_dto import PagerDeviceGroupRuleDto
from swagger_client.models.pager_device_object_dto import PagerDeviceObjectDto
from swagger_client.models.pager_device_object_group_mapping import PagerDeviceObjectGroupMapping
from swagger_client.models.pager_device_type_response_dto import PagerDeviceTypeResponseDto
from swagger_client.models.pager_device_type_response_dto_v1 import PagerDeviceTypeResponseDtoV1
from swagger_client.models.pager_flow_device_mapping_dto import PagerFlowDeviceMappingDto
from swagger_client.models.pager_indicator_dto import PagerIndicatorDto
from swagger_client.models.pager_maintenance_window_device_dto import PagerMaintenanceWindowDeviceDto
from swagger_client.models.pager_map_image_dto import PagerMapImageDto
from swagger_client.models.pager_namespace_dto import PagerNamespaceDto
from swagger_client.models.pager_net_flow_aggregation_template_dto import PagerNetFlowAggregationTemplateDto
from swagger_client.models.pager_net_flow_device_dto import PagerNetFlowDeviceDto
from swagger_client.models.pager_net_flow_field_dto import PagerNetFlowFieldDto
from swagger_client.models.pager_net_flow_interface_dto import PagerNetFlowInterfaceDto
from swagger_client.models.pager_net_flow_view_category_dto import PagerNetFlowViewCategoryDto
from swagger_client.models.pager_node_dto import PagerNodeDto
from swagger_client.models.pager_object_group_dto import PagerObjectGroupDto
from swagger_client.models.pager_object_group_rule_dto import PagerObjectGroupRuleDto
from swagger_client.models.pager_peer_dto import PagerPeerDto
from swagger_client.models.pager_plugin_dto import PagerPluginDto
from swagger_client.models.pager_plugin_indicator_type_dto import PagerPluginIndicatorTypeDto
from swagger_client.models.pager_plugin_indicator_type_dto_v1 import PagerPluginIndicatorTypeDtoV1
from swagger_client.models.pager_plugin_object_type_dto import PagerPluginObjectTypeDto
from swagger_client.models.pager_plugin_object_type_dto_v1 import PagerPluginObjectTypeDtoV1
from swagger_client.models.pager_policy_dto import PagerPolicyDto
from swagger_client.models.pager_report_dto import PagerReportDto
from swagger_client.models.pager_report_folder_dto import PagerReportFolderDto
from swagger_client.models.pager_role_permission_dto import PagerRolePermissionDto
from swagger_client.models.pager_status_map_dto import PagerStatusMapDto
from swagger_client.models.pager_tag_indicator_types_dto import PagerTagIndicatorTypesDto
from swagger_client.models.pager_tags_dto import PagerTagsDto
from swagger_client.models.pager_top_n_view_dto import PagerTopNViewDto
from swagger_client.models.pager_user_dto import PagerUserDto
from swagger_client.models.pager_user_role_dto import PagerUserRoleDto
from swagger_client.models.pager_work_hours_group_dto import PagerWorkHoursGroupDto
from swagger_client.models.pairlongint import Pairlongint
from swagger_client.models.pairlonglong import Pairlonglong
from swagger_client.models.password_dto import PasswordDto
from swagger_client.models.peer_dto import PeerDto
from swagger_client.models.peer_status import PeerStatus
from swagger_client.models.performance_metrics_data_dto import PerformanceMetricsDataDto
from swagger_client.models.performance_metrics_dto import PerformanceMetricsDto
from swagger_client.models.performance_metrics_group import PerformanceMetricsGroup
from swagger_client.models.performance_metrics_group_v1 import PerformanceMetricsGroupV1
from swagger_client.models.performance_metrics_indicator import PerformanceMetricsIndicator
from swagger_client.models.performance_metrics_indicator_types import PerformanceMetricsIndicatorTypes
from swagger_client.models.performance_metrics_indicator_types_v1 import PerformanceMetricsIndicatorTypesV1
from swagger_client.models.performance_metrics_indicator_v1 import PerformanceMetricsIndicatorV1
from swagger_client.models.performance_metrics_request_dto import PerformanceMetricsRequestDto
from swagger_client.models.performance_metrics_request_dto_v1 import PerformanceMetricsRequestDtoV1
from swagger_client.models.performance_metrics_resource import PerformanceMetricsResource
from swagger_client.models.performance_metrics_resource_v1 import PerformanceMetricsResourceV1
from swagger_client.models.performance_metrics_response_dto import PerformanceMetricsResponseDto
from swagger_client.models.performance_metrics_response_dto_v1 import PerformanceMetricsResponseDtoV1
from swagger_client.models.performance_metrics_result_dto import PerformanceMetricsResultDto
from swagger_client.models.performance_metrics_settings import PerformanceMetricsSettings
from swagger_client.models.performance_metrics_settings_v1 import PerformanceMetricsSettingsV1
from swagger_client.models.performance_metrics_visualization import PerformanceMetricsVisualization
from swagger_client.models.performance_metrics_visualization_v1 import PerformanceMetricsVisualizationV1
from swagger_client.models.plugin_dto import PluginDto
from swagger_client.models.plugin_indicator_type_dto import PluginIndicatorTypeDto
from swagger_client.models.plugin_indicator_type_dto_v1 import PluginIndicatorTypeDtoV1
from swagger_client.models.plugin_indicator_type_filter_dto import PluginIndicatorTypeFilterDto
from swagger_client.models.plugin_indicator_type_request_dto import PluginIndicatorTypeRequestDto
from swagger_client.models.plugin_indicator_type_request_dto_v1 import PluginIndicatorTypeRequestDtoV1
from swagger_client.models.plugin_info import PluginInfo
from swagger_client.models.plugin_object_type_dto import PluginObjectTypeDto
from swagger_client.models.plugin_object_type_dto_v1 import PluginObjectTypeDtoV1
from swagger_client.models.plugin_object_type_filter_dto import PluginObjectTypeFilterDto
from swagger_client.models.plugin_object_type_request_dto import PluginObjectTypeRequestDto
from swagger_client.models.plugin_object_type_request_dto_v1 import PluginObjectTypeRequestDtoV1
from swagger_client.models.policy_dto import PolicyDto
from swagger_client.models.raw_data_setting import RawDataSetting
from swagger_client.models.raw_data_setting_v1 import RawDataSettingV1
from swagger_client.models.raw_data_settings import RawDataSettings
from swagger_client.models.raw_data_settings_v1 import RawDataSettingsV1
from swagger_client.models.report_data_dto import ReportDataDto
from swagger_client.models.report_dto import ReportDto
from swagger_client.models.report_folder_dto import ReportFolderDto
from swagger_client.models.report_request_dto import ReportRequestDto
from swagger_client.models.reporting_link_data import ReportingLinkData
from swagger_client.models.response_entity import ResponseEntity
from swagger_client.models.result_limit_setting import ResultLimitSetting
from swagger_client.models.result_limit_setting_v1 import ResultLimitSettingV1
from swagger_client.models.result_node import ResultNode
from swagger_client.models.role import Role
from swagger_client.models.role_filter_dto import RoleFilterDto
from swagger_client.models.role_permission_dto import RolePermissionDto
from swagger_client.models.schedule_instance_dto import ScheduleInstanceDto
from swagger_client.models.severity import Severity
from swagger_client.models.sign_in_response_dto import SignInResponseDto
from swagger_client.models.source_fields_setting import SourceFieldsSetting
from swagger_client.models.status_map_attachment_request_dto import StatusMapAttachmentRequestDto
from swagger_client.models.status_map_attachment_request_dto_v1 import StatusMapAttachmentRequestDtoV1
from swagger_client.models.status_map_attachment_resource import StatusMapAttachmentResource
from swagger_client.models.status_map_attachment_resource_v1 import StatusMapAttachmentResourceV1
from swagger_client.models.status_map_attachment_response_dto import StatusMapAttachmentResponseDto
from swagger_client.models.status_map_attachment_response_dto_v1 import StatusMapAttachmentResponseDtoV1
from swagger_client.models.status_map_attachment_visualization import StatusMapAttachmentVisualization
from swagger_client.models.status_map_attachment_visualization_v1 import StatusMapAttachmentVisualizationV1
from swagger_client.models.status_map_dto import StatusMapDto
from swagger_client.models.status_map_request_dto import StatusMapRequestDto
from swagger_client.models.table_setting import TableSetting
from swagger_client.models.tag_indicator_types_dto import TagIndicatorTypesDto
from swagger_client.models.tags_dto import TagsDto
from swagger_client.models.telephony_attachment_aggregation import TelephonyAttachmentAggregation
from swagger_client.models.telephony_attachment_aggregation_v1 import TelephonyAttachmentAggregationV1
from swagger_client.models.telephony_attachment_request_dto import TelephonyAttachmentRequestDto
from swagger_client.models.telephony_attachment_request_dto_v1 import TelephonyAttachmentRequestDtoV1
from swagger_client.models.telephony_attachment_response_dto import TelephonyAttachmentResponseDto
from swagger_client.models.telephony_attachment_response_dto_v1 import TelephonyAttachmentResponseDtoV1
from swagger_client.models.telephony_attachment_settings import TelephonyAttachmentSettings
from swagger_client.models.telephony_attachment_settings_v1 import TelephonyAttachmentSettingsV1
from swagger_client.models.telephony_attachment_visualization import TelephonyAttachmentVisualization
from swagger_client.models.telephony_attachment_visualization_v1 import TelephonyAttachmentVisualizationV1
from swagger_client.models.telephony_setting import TelephonySetting
from swagger_client.models.time_range import TimeRange
from swagger_client.models.time_range_dto import TimeRangeDto
from swagger_client.models.time_range_v1 import TimeRangeV1
from swagger_client.models.time_setting import TimeSetting
from swagger_client.models.time_setting_v1 import TimeSettingV1
from swagger_client.models.time_settings import TimeSettings
from swagger_client.models.timespan_between import TimespanBetween
from swagger_client.models.timestamp_description import TimestampDescription
from swagger_client.models.timezone_dto import TimezoneDto
from swagger_client.models.token import Token
from swagger_client.models.top_n_aggregation_setting import TopNAggregationSetting
from swagger_client.models.top_n_data_dto import TopNDataDto
from swagger_client.models.top_n_extra_indicator import TopNExtraIndicator
from swagger_client.models.top_n_request_dto import TopNRequestDto
from swagger_client.models.top_n_request_dto_v1 import TopNRequestDtoV1
from swagger_client.models.top_n_resource import TopNResource
from swagger_client.models.top_n_resource_v1 import TopNResourceV1
from swagger_client.models.top_n_response_dto import TopNResponseDto
from swagger_client.models.top_n_response_dto_v1 import TopNResponseDtoV1
from swagger_client.models.top_n_result_dto import TopNResultDto
from swagger_client.models.top_n_run_report_request_dto import TopNRunReportRequestDto
from swagger_client.models.top_n_run_report_result_dto import TopNRunReportResultDto
from swagger_client.models.top_n_setting import TopNSetting
from swagger_client.models.top_n_setting_v1 import TopNSettingV1
from swagger_client.models.top_n_settings import TopNSettings
from swagger_client.models.top_n_settings_v1 import TopNSettingsV1
from swagger_client.models.top_n_view_dto import TopNViewDto
from swagger_client.models.top_n_visualization import TopNVisualization
from swagger_client.models.top_n_visualization_v1 import TopNVisualizationV1
from swagger_client.models.top_n_work_hours_setting import TopNWorkHoursSetting
from swagger_client.models.topology_attachment_dto import TopologyAttachmentDto
from swagger_client.models.topology_attachment_filters import TopologyAttachmentFilters
from swagger_client.models.topology_attachment_request_dto import TopologyAttachmentRequestDto
from swagger_client.models.topology_attachment_resource import TopologyAttachmentResource
from swagger_client.models.topology_attachment_response_dto import TopologyAttachmentResponseDto
from swagger_client.models.topology_attachment_result_dto import TopologyAttachmentResultDto
from swagger_client.models.topology_attachment_settings import TopologyAttachmentSettings
from swagger_client.models.topology_layout import TopologyLayout
from swagger_client.models.topology_visualization import TopologyVisualization
from swagger_client.models.unit_info_dto import UnitInfoDto
from swagger_client.models.units_setting import UnitsSetting
from swagger_client.models.user_dto import UserDto
from swagger_client.models.user_filter_dto import UserFilterDto
from swagger_client.models.user_preferences_dto import UserPreferencesDto
from swagger_client.models.user_request_dto import UserRequestDto
from swagger_client.models.user_role_dto import UserRoleDto
from swagger_client.models.visualization_csv_setting import VisualizationCsvSetting
from swagger_client.models.visualization_table_setting import VisualizationTableSetting
from swagger_client.models.visualization_table_setting_v1 import VisualizationTableSettingV1
from swagger_client.models.work_hours_group_dto import WorkHoursGroupDto
from swagger_client.models.work_hours_relative_time_dto import WorkHoursRelativeTimeDto
from swagger_client.models.work_hours_setting import WorkHoursSetting
|
from jinja2_htmltemplate.template import Template
from nose.tools import eq_
def test_jinja_template():
    """A single TMPL_VAR placeholder is substituted into the rendered output."""
    template = Template('<title><TMPL_VAR NAME="foo"></title>')
    rendered = template.render(foo="Hello World!")
    eq_(rendered, "<title>Hello World!</title>")
def test_jinja_loop():
    """TMPL_LOOP renders its body once per dict in the ``loop`` sequence,
    substituting each dict's values into the TMPL_VAR placeholders."""
    t = Template('''
<TMPL_LOOP NAME="loop">
Item: <TMPL_VAR NAME="item">
Price: <TMPL_VAR NAME="price">
---
</TMPL_LOOP>
''')
    # Expected output: the loop body emitted twice, once per item dict.
    out = '''
Item: item 1
Price: 1000
---
Item: item 2
Price: 2000
---
'''
    eq_(t.render(
        loop=[
            {"item": "item 1", "price": 1000},
            {"item": 'item 2', "price": 2000}
        ]),
        out,
        msg='loop')
|
# Copyright (c) 2017 Ruud de Jong
# This file is part of the SlipLib project which is released under the MIT license.
# See https://github.com/rhjdjong/SlipLib for details.

# Single-source package version string.
__version__ = '0.3.0'
|
from typing import List
import os
import json
import pandas as pd
def get_config() -> dict:
    """Load and return the project configuration.

    Reads ``config.json`` from the directory one level above this module.

    Returns:
        dict: the parsed configuration.
    """
    config_path = os.path.join(os.path.split(os.path.dirname(__file__))[0], "config.json")
    # Use a context manager so the file handle is closed deterministically;
    # the original json.load(open(...)) relied on garbage collection.
    with open(config_path, 'r') as f:
        return json.load(f)
def get_root_path() -> str:
    """Return the root directory recorded under the ``ROOT`` key of config.json."""
    config = get_config()
    return config["ROOT"]
def get_path(path_type) -> str:
    """Resolve a well-known project sub-path under the configured root.

    Args:
        path_type: one of "data", "tasks", "setups", "loggers",
            "experiments", "mice", "prot" or "users.txt".

    Returns:
        str: ``path_type`` joined onto the root path from config.json.

    Raises:
        ValueError: if ``path_type`` is not a known entry. (The original
            used ``assert``, which is silently stripped under ``python -O``.)
    """
    path_list = ["data", "tasks", "setups", "loggers", "experiments", "mice", "prot", "users.txt"]
    if path_type not in path_list:
        raise ValueError("PATH must be one of {}".format(path_list))
    return os.path.join(get_root_path(), path_type)
def get_paths() -> List[str]:
    """Return the resolved locations of every project sub-directory."""
    names = ["data", "tasks", "setups", "loggers", "experiments", "mice", "prot"]
    return [get_path(name) for name in names]
def create_user_file() -> None:
    """Create ``users.txt`` in the root directory with the system email credentials.

    Does nothing if the file already exists. Values are copied from
    config.json (``System_email`` / ``System_password``).
    """
    root = get_root_path()
    user_path = os.path.join(root, "users.txt")
    if not os.path.isfile(user_path):
        config = get_config()
        with open(user_path, 'w') as f:
            # One "key: value" entry per line. The original omitted the
            # newlines, fusing both entries onto a single unparseable line.
            f.write('system_email: "{}"\n'.format(config["System_email"]))
            f.write('password: "{}"\n'.format(config["System_password"]))
def create_paths_and_empty_csvs(all_paths) -> None:
    """Create every directory in ``all_paths`` that is missing.

    The first entry is the data directory and gets no CSV; every other
    newly created directory is seeded with its empty bookkeeping CSV.
    """
    data_dir, other_dirs = all_paths[0], all_paths[1:]
    if not os.path.isdir(data_dir):
        os.mkdir(data_dir)
    for directory in other_dirs:
        if os.path.isdir(directory):
            continue
        os.mkdir(directory)
        create_empty_csv(directory)
# Experiment defines the overall experiment that is being run with these mice
# Protocol defines the current protocol, within a given experiment, that is being used
# User defines what user is currently using this setup
def create_empty_csv(pth: str) -> None:
    """Seed directory ``pth`` with its empty bookkeeping CSV, if one is defined.

    The directory kind is recognised by substring match on the path, in
    priority order ("task" before "experiment" before "setup" before
    "mice"), mirroring the original if/elif chain. Unrecognised paths are
    ignored and an existing CSV is never overwritten.

    Args:
        pth: directory in which the CSV should be created.
    """
    # (keyword, filename, columns) in match-priority order; first hit wins.
    specs = [
        ("task", "tasks.csv", ['Name', 'User_added']),
        ("experiment", "experiments.csv",
         ['Name', 'Setups', 'Subjects', 'n_subjects', 'User', 'Protocol', 'Active', 'Persistent_variables']),
        ("setup", "setups.csv",
         ['Setup_ID', 'COM', 'COM_AC', 'in_use', 'connected', 'User',
          'Experiment', 'Protocol', 'Mouse_training', 'AC_state', 'Door_Mag', 'Door_Sensor', 'n_mice',
          'mice_in_setup', 'logger_path']),
        ("mice", "mice.csv",
         ['Mouse_ID', 'RFID', 'Sex', 'Age', 'Experiment',
          'Protocol', 'Stage', 'Task', 'User', 'Start_date', 'Current_weight',
          'Start_weight', 'is_training', 'is_assigned',
          'training_log', 'Setup_ID', 'in_system', 'summary_variables', 'persistent_variables',
          'set_variables']),
    ]
    for keyword, filename, columns in specs:
        if keyword in pth:
            fp = os.path.join(pth, filename)
            if not os.path.isfile(fp):
                pd.DataFrame(columns=columns).to_csv(fp, index=False)
            return
|
from django.urls import include, path
from .views import classroom, students, professors, coordinators, planners, sutdadmin
# URL routing for the classroom app. Each role (students, professors,
# coordinators, planners, sutdadmin) gets its own namespaced sub-tree via
# include((patterns, app_namespace), namespace=instance_namespace).
urlpatterns = [
    path('', classroom.home, name='home'),
    path('403', classroom.ForbiddenView.as_view(), name='403'),
    path('icsconvert', classroom.ICSConverterView.as_view(), name="icsconvert"),
    # Student-facing pages.
    path('students/', include(([
        path('', students.StudentMainView.as_view(), name='student_main'),
    ], 'classroom'), namespace='students')),
    # Professor pages: course-detail CRUD.
    path('professors/', include(([
        path('', professors.ProfessorMainView.as_view(), name='professor_main'),
        path('submitdetails', professors.SubmitCourseDetailsView.as_view(),
             name='submitdetails'),
        path('details', professors.DetailsListView.as_view(), name='details'),
        path('details/edit/<int:pk>',
             professors.DetailsEditView.as_view(), name='editdetails'),
        path('details/delete/<int:pk>',
             professors.DetailsDeleteView.as_view(), name='deletedetails'),
    ], 'classroom'), namespace='professors')),
    # Coordinator pages: account list, schedule suggestion/approval, conflicts.
    path('coordinators/', include(([
        path('', coordinators.CoordinatorMainView.as_view(),
             name='coordinator_main'),
        path('accounts', coordinators.CoordinatorAccountsListView.as_view(),
             name='accountlist'),
        path('suggest/<int:pk>', coordinators.ScheduleEditView.as_view(),
             name='suggestedits'),
        path('approve/<int:pk>', coordinators.ScheduleApproveView.as_view(),
             name='approvesuggestion'),
        path('conflicts', coordinators.ScheduleConflictView.as_view(), name="conflicts")
    ], 'classroom'), namespace='coordinators')),
    # Planner pages: CSV import/export, phase control, suggestion acceptance,
    # finalisation of the calendar.
    path('planners/', include(([
        path('', planners.PlannerMainView.as_view(), name='planner_main'),
        path('export', planners.PreferencesCSVExportView.as_view(), name="exportcsv"),
        path('upload', planners.csv_upload, name="uploaddata"),
        path('phase', planners.CurrentPhase.as_view(), name='currentphase'),
        path('nextphase', planners.NextPhase.as_view(), name="nextphase"),
        path('prevphase', planners.PreviousPhase.as_view(), name="prevphase"),
        path('downloadsample', planners.SampleDownloadView.as_view(),
             name="downloadsample"),
        path('revert', planners.RevertToPhase1.as_view(), name="revert"),
        path('acceptlist', planners.AcceptSuggestionsListView.as_view(),
             name="acceptlist"),
        path('accept/<int:pk>', planners.AcceptSuggestion.as_view(), name="accept"),
        path('finalise', planners.FinaliseView.as_view(), name="finalise"),
        path('finalcalendar', planners.FinalisedCalendarView.as_view(),
             name="finalcalendar"),
    ], 'classroom'), namespace='planners')),
    # Admin pages: booking CRUD and calendar view.
    path('sutdadmin/', include(([
        path('', sutdadmin.SutdAdminMainView.as_view(), name='sutdadmin_main'),
        path('makebooking', sutdadmin.MakeBookingView.as_view(), name='makebooking'),
        path('bookings', sutdadmin.BookingList.as_view(), name='bookings'),
        path('bookings/edit/<int:pk>',
             sutdadmin.EditBookingView.as_view(), name='editbooking'),
        path('bookings/delete/<int:pk>',
             sutdadmin.DeleteBookingView.as_view(), name='deletebooking'),
        path('bookings/viewcalendar',
             sutdadmin.AdminCalendarView.as_view(), name='viewcalendar'),
    ], 'classroom'), namespace='sutdadmin')),
]
|
from flask import request
from flask_templates import app
from functools import wraps
def support_jsonp(f):
    """Decorator that wraps a JSON view's output in a JSONP callback.

    When the request carries a ``callback`` query parameter, the wrapped
    view's response is embedded in ``callback(...)`` and served as
    JavaScript; otherwise the view's response is returned untouched.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        callback = request.args.get('callback', False)
        if not callback:
            return f(*args, **kwargs)
        payload = str(callback) + '(' + str(f(*args, **kwargs)) + ')'
        return app.response_class(payload, mimetype='application/javascript')
    return decorated_function
|
import pdb
import os
import pandas as pd
import numpy as np
from pymatbridge import Matlab
from utilities import prepare_markov_data, introduce_inhibs, score_network, score_predictions
def network_hill(panel, prior_graph=[], lambdas=[], max_indegree=3, reg_mode='full', stdise=1, silent=0, maxtime=120):
    '''
    run_hill(panel)
    input: dataframe
        should be a T x N dataframe with T time points and N samples.
    output: tuple (edge_prob, edge_sign) of N x N DataFrames built from
        key 'e' (edge probabilities) and key 'i' (edge signs) produced by
        Hill's DBN MATLAB code.

    NOTE(review): prior_graph and lambdas are mutable default arguments;
    they appear to be only serialized here, never mutated — confirm before
    relying on that.
    '''
    from scipy.io import savemat
    from scipy.io import loadmat
    # start matlab
    mlab = Matlab(maxtime=maxtime)
    mlab.start()
    # .mat shuttle files used to pass data to/from the MATLAB side
    # add path check
    inPath = os.path.join('..', 'cache', 'dbn_wrapper_in.mat')
    outPath = os.path.join('..', 'cache', 'dbn_wrapper_out.mat')
    # Transpose to N x T, then add a trailing singleton axis (single replicate).
    D = np.transpose(panel.values)
    num_rows = np.shape(D)[0]
    num_cols = np.shape(D)[1]
    D = np.reshape(D, (num_rows, num_cols, 1))
    #D = np.transpose(panel, (2,1,0))
    # save the matlab object that the DBN wrapper will load
    # contains all the required parameters for the DBN code
    savemat(inPath, {"D" : D,
                     "max_indegree" : max_indegree,
                     "prior_graph" : prior_graph,
                     "lambdas" : lambdas,
                     "reg_mode" : reg_mode,
                     "stdise" : stdise,
                     "silent" : silent})
    # DBN wrapper just needs an input and output path
    args = {"inPath" : inPath, "outPath" : outPath}
    # call DBN code; the return value is ignored — results are read back
    # from the outPath .mat file instead
    res = mlab.run_func('dbn_wrapper.m', args, maxtime=maxtime)
    mlab.stop()
    out = loadmat(outPath)
    # Re-label both result matrices with the panel's node names.
    edge_prob = pd.DataFrame(out['e'], index=panel.columns, columns=panel.columns)
    edge_sign = pd.DataFrame(out['i'], index=panel.columns, columns=panel.columns)
    #edge_prob = out['e']
    #edge_sign = out['i']
    return (edge_prob, edge_sign)
def do_gbr(X, Y, n_estimators=100, learning_rate=0.1, max_depth=5, ignore_self_loops=False, loss='ls', verbose=False):
'''does gbr on design matrix.
returns dict regGBR, one GBR for each column in the target (Y) matrix
do this and then do do_gbr_build_adj_matrix, which will give you the A-matrix from the feature importances
'''
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.preprocessing import Imputer
regGBR = {}
for target in Y.columns:
if verbose:
print target
if ignore_self_loops:
X.ix[:, target] = 0
# get target values
y = Y[target].values
regGBR[target] = GradientBoostingRegressor(n_estimators=n_estimators,
learning_rate=learning_rate,
max_depth=max_depth,
loss=loss)
regGBR[target].fit(X, y)
if verbose:
if target is 'PKA':
print X.columns
print y
print regGBR[target].feature_importances_
return regGBR
def do_gbr_build_adj_matrix(regGBR, full_node_list):
    '''Assemble an adjacency matrix from per-target GBR feature importances.

    Entry (i, j) of the returned DataFrame is the importance of node i as a
    feature in the regressor fitted for target node j.
    '''
    target_nodes = list(regGBR)
    n = len(target_nodes)
    # Map each node name to its position in the regressors' feature vector.
    position = {node: tn for tn, node in enumerate(full_node_list)}
    adj = np.zeros((n, n), dtype='f')
    for col, target in enumerate(target_nodes):
        importances = regGBR[target].feature_importances_
        for row, source in enumerate(target_nodes):
            adj[row, col] = importances[position[source]]
    return pd.DataFrame(adj, index=target_nodes, columns=target_nodes)
def timeseries_gbr(regGBR, scaler, scaler_cols, data, node_list, stims, inhibs, cov_columns, times, test_inhib_targets, dataset='experimental'):
'''takes a regGBR object (output of do_GBR)
and predicts timeseries for inhibited nodes
you supply the normalization scalar, original data, list of stimulii,
list of inhibitors present in data, and prediction timepoints
inhibitors should be an empty list if you chose not to model inhibition
returns pred_dict, after which you would generally call write_midas, looping
over the node_list
'''
if dataset is 'experimental':
control_inhib = 'DMSO'
elif dataset is 'insilico':
control_inhib = 'None'
num_inhibs = len(inhibs)
if num_inhibs > 0:
has_inhibs = True
else:
has_inhibs = False
num_cov = len(cov_columns)
pred_dict = {}
for test_inhib in test_inhib_targets:
print test_inhib
pred_dict[test_inhib] = {}
for stim in stims:
# set up new df to use, and fill t=0 values
pred_df = pd.DataFrame(np.zeros((len(times), num_cov)), index=times, columns=cov_columns)
pred_df.ix[0, scaler_cols] = data.groupby(['Inhibitor', 'Stimulus', 'Timepoint']).mean().ix[control_inhib, stim, 0]
pred_df.ix[0, test_inhib_targets[test_inhib]] = 0
# loop over times
for tidx in range(1,len(times)):
time = times[tidx]
# get covariates for this time step and scale
# covariates_df = scaler.transform(pred_df.ix[times[tidx-1], :])
covariates_df = ((pred_df.ix[times[tidx-1], :]) - scaler.mean_) / scaler.std_
# zero out covariate we are inhibiting
try:
covariates_df.ix[test_inhib_targets[test_inhib]] = 0
except:
pass
covariates = np.zeros((num_cov,))
covariates[:] = covariates_df.values
# loop over proteins to get values for current time step
for p in node_list:
pred_df.ix[time, p] = regGBR[p].predict(covariates)
# zero out covariate we are inhibiting, again
pred_df.ix[time, test_inhib_targets[test_inhib]] = 0
# add the pred_df to the dict, keeping only the appropriate columns
pred_dict[test_inhib][stim] = pred_df.ix[:, scaler_cols]
return pred_dict
def do_gbr_cv(X, Y, verbose=False, n_estimators=100, learning_rate=0.1, loss='ls', ignore_self_loops=False, max_depth=5):
    '''this is just dumped in here. does gbr with cv

    Fits a GradientBoostingRegressor per target column of Y under 5-fold
    cross-validation and records, for every boosting stage, the model's own
    loss and the score_predictions error on the held-out fold.

    Returns (regGBR, test_score, mse), each a dict keyed by target name
    holding one entry per fold.

    NOTE(review): sklearn.cross_validation is a legacy module (removed in
    modern scikit-learn); this function pins an old sklearn version.
    '''
    from sklearn import cross_validation
    from sklearn.ensemble import GradientBoostingRegressor
    n_folds = 5
    # Materialize the fold index pairs so they can be indexed by fold number.
    kf = list(cross_validation.KFold(X.shape[0], n_folds=n_folds, shuffle=True))
    regGBR = {}
    test_score = {}
    mse = {}
    for target in Y.columns:
        if verbose:
            print target
        # get target values
        y = Y[target].values
        regGBR[target] = []
        test_score[target] = []
        mse[target] = []
        if ignore_self_loops:
            # NOTE(review): zeroes the target column of the *shared* X in
            # place, so zeroed columns accumulate across targets — confirm
            # whether a per-target copy was intended.
            X.ix[:, target] = 0
        for fold in range(n_folds):
            if verbose:
                print 'cv fold ', fold
            # Split rows into train/test by the precomputed fold indices.
            X_train, y_train = X.ix[kf[fold][0],:], y[kf[fold][0]]
            X_test, y_test = X.ix[kf[fold][1],:], y[kf[fold][1]]
            regGBR[target].append(GradientBoostingRegressor(n_estimators=n_estimators,
                                                            learning_rate=learning_rate,
                                                            max_depth=max_depth,
                                                            loss=loss))
            regGBR[target][fold].fit(X_train, y_train)
            # Per-stage score buffers: one slot per boosting iteration.
            test_score[target].append(np.zeros((n_estimators,), dtype=np.float64))
            mse[target].append(np.zeros((n_estimators,), dtype=np.float64))
            # Evaluate the staged predictions to trace how error evolves
            # with the number of boosting stages.
            for i, y_pred in enumerate(regGBR[target][fold].staged_decision_function(X_test)):
                test_score[target][fold][i] = regGBR[target][fold].loss_(y_test, y_pred)
                mse[target][fold][i] = score_predictions(y_test, y_pred)
    return regGBR, test_score, mse
def network_lasso(data, response_type='level', ground_truth=None, inhib_targets=None, perfect=True, group_stimuli=False):
    '''
    do lasso. automatically do CV to find best alpha.
    input:
        data
        response_type : (level, rate)
        ground_truth : adjacency matrix
        group_stimuli : binary

    Infers one weighted adjacency matrix per training key: for each target
    column a LassoCV model is fit and its |coefficients|, normalized by the
    largest one, become the incoming edge weights of that target node.
    Returns A (dict of square DataFrames); if ground_truth is supplied,
    returns (A, auc) where auc maps each key to score_network's result.
    '''
    from sklearn import preprocessing, linear_model, cross_validation, metrics
    # model interventions if supplied an inhib_targets dict
    if inhib_targets:
        training_dict = prepare_markov_data(introduce_inhibs(data, inhib_targets=inhib_targets, perfect=perfect), response_type, group_stimuli)
    else:
        training_dict = prepare_markov_data(data, response_type, group_stimuli)
    antibodies = [col for col in data.columns if col not in ['Cell Line', 'Inhibitor', 'Stimulus', 'Timepoint']]
    stims = set(data['Stimulus'])
    # fit lasso for each (X,Y) pair
    A = {}
    for key in training_dict:
        X = training_dict[key][0]
        Y = training_dict[key][1]
        # NOTE(review): the scaled array returned here is discarded, so X is
        # NOT actually standardized before the fit — likely a bug; confirm
        # whether the intent was X = StandardScaler().fit_transform(X).
        preprocessing.StandardScaler().fit_transform(X)
        A[key] = pd.DataFrame(np.zeros((X.shape[1], X.shape[1])), columns=X.columns, index=X.columns)
        for col in Y.columns:
            #print col
            # check if col is not all the identical
            if len(set(Y[col])) > 1:
                rgn = linear_model.LassoCV(verbose=False).fit(X, Y[col])
                if np.max(rgn.coef_) != 0:
                    # Normalize |coefficients| to [0, 1] edge weights.
                    A[key].ix[:,col] = np.abs(rgn.coef_) / np.abs(rgn.coef_).max()
                else:
                    A[key].ix[:,col] = np.zeros((X.shape[1],))
    if ground_truth:
        auc = {}
        for key in training_dict:
            auc[key] = score_network(A[key], ground_truth)
        return A, auc
    else:
        return A
|
#!flask/bin/python
from flask import Flask
import flask
from flask import Flask, jsonify, abort, request, make_response, url_for
import json_unpacker
import matching_model
from user import User
from team import Team
import user
import json
import clustering as clst
def extract_users(req):
    """Parse the request payload into rank vectors and User objects.

    req: decoded JSON body with a 'users' list; each entry has 'ranks',
    'pid', and optionally 'history'.
    Returns (exper_data, users): the list of float rank vectors and the
    parallel list of User instances.
    """
    exper_data = []
    users = []
    for entry in req['users']:
        ranks = [float(value) for value in entry['ranks']]
        exper_data.append(ranks)
        if "history" in entry:
            users.append(User(ranks, entry['pid'], entry['history']))
        else:
            users.append(User(ranks, entry['pid']))
    return exper_data, users
def send_teams_as_json(teams):  # this method currently uses the classes defined for bidding
    """Serialize teams (as lists of member pids) plus the original users payload."""
    member_ids = [[member.pid for member in team.members] for team in teams]
    payload = {"teams": member_ids, "users": flask.request.json['users']}
    return flask.Response(json.dumps(payload), mimetype='application/json')
def extract_task_data(req):
    # Pass-through for now: the decoded JSON body is returned unchanged.
    # Conversion into richer Python objects (e.g. the User class) can be
    # added here later without touching the route handlers.
    return req
def send_assigned_tasks_as_json(tasks):
    # Wrap the plain task assignments in a JSON response under the "info" key.
    payload = json.dumps({"info": tasks})
    return flask.Response(payload)
# Single WSGI application instance; the route decorators below attach to it.
app = Flask(__name__)
@app.route('/merge_teams', methods=['POST'])
def clstbuild():
    """POST /merge_teams: cluster the posted users into teams via k-means.

    Rejects (400) payloads missing 'users'/'max_team_size' or containing a
    user without 'ranks' or 'pid'.
    """
    payload = flask.request.json
    malformed = (
        'users' not in payload
        or 'max_team_size' not in payload
        or any('ranks' not in u or 'pid' not in u for u in payload['users'])
    )
    if malformed:
        flask.abort(400)
    data, users = extract_users(payload)
    teams, users = clst.kmeans_assignment(data, users, payload['max_team_size'])
    return send_teams_as_json(teams)
@app.route("/match", methods=['POST'])
def matching():
    """POST /match: run the bidding matching model and return its result as JSON.

    Aborts with 400 when the request carries no JSON body.
    """
    if not request.json:
        abort(400)
    # Unpack the bidding payload into ids and preference maps.
    bidding_data = json_unpacker.JsonUnpacker(request.json)
    model = matching_model.MatchingModel(
        bidding_data.student_ids,
        bidding_data.topic_ids,
        bidding_data.student_preferences_map,
        bidding_data.topic_preferences_map,
        bidding_data.q_S,
    )
    return jsonify(model.get_matching())
# Start the Flask development server only when run as a script (not on import).
if __name__ == "__main__":
    app.run(debug=True)
|
#
# @lc app=leetcode id=140 lang=python3
#
# [140] Word Break II
#
from typing import List
# @lc code=start
class Solution:
    """LeetCode 140 (Word Break II): enumerate all sentence decompositions."""

    def __init__(self):
        # Memo: substring -> list of all sentences that decompose it.
        self.hashMap = {}

    def wordBreak(self, s: str, wordDict: List[str]) -> List[str]:
        """Return every sentence formed by splitting ``s`` into dictionary words.

        Memoized top-down recursion; an empty string yields [""] so callers
        can detect "the whole prefix matched".
        """
        if s in self.hashMap:
            return self.hashMap[s]
        if not s:
            return [""]
        res = []
        for word in wordDict:
            n = len(word)
            if s[:n] == word:
                # Removed stray debug print that ran on every recursion.
                for rem in self.wordBreak(s[n:], wordDict):
                    res.append(word + ' ' + rem if rem else word)
        self.hashMap[s] = res
        return res

    def wordBreakDfs(self, s: str, wordDict: List[str]) -> List[str]:
        """Un-memoized DFS variant; same contract as wordBreak."""
        res = []
        self.helperDfs(s, [], res, wordDict)
        return res

    def helperDfs(self, s, cur, res, wordDict):
        """Accumulate chosen words in ``cur``; emit a sentence when s is consumed."""
        if len(s) == 0:
            # BUG FIX: join the words into one sentence string; the old code
            # appended the raw list, so wordBreakDfs returned List[List[str]]
            # instead of the List[str] its signature and wordBreak promise.
            res.append(' '.join(cur))
            return
        for i in range(1, len(s) + 1):
            prefix = s[:i]
            if prefix in wordDict:
                self.helperDfs(s[i:], cur + [prefix], res, wordDict)
# @lc code=end
# Quick manual check of the memoized solution on a sample string.
s = "pineapplepenapple"
wordDict = ["apple", "pen", "applepen", "pine", "pineapple"]
res = Solution().wordBreak(s, wordDict)
print(res)
#Problem 355. Design Twitter
'''
Design a simplified version of Twitter where users can post tweets, follow/unfollow another user and is able to see the 10 most recent tweets in the user's news feed. Your design should support the following methods:
postTweet(userId, tweetId): Compose a new tweet.
getNewsFeed(userId): Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.
follow(followerId, followeeId): Follower follows a followee.
unfollow(followerId, followeeId): Follower unfollows a followee.
Example:
Twitter twitter = new Twitter();
// User 1 posts a new tweet (id = 5).
twitter.postTweet(1, 5);
// User 1's news feed should return a list with 1 tweet id -> [5].
twitter.getNewsFeed(1);
// User 1 follows user 2.
twitter.follow(1, 2);
// User 2 posts a new tweet (id = 6).
twitter.postTweet(2, 6);
// User 1's news feed should return a list with 2 tweet ids -> [6, 5].
// Tweet id 6 should precede tweet id 5 because it is posted after tweet id 5.
twitter.getNewsFeed(1);
// User 1 unfollows user 2.
twitter.unfollow(1, 2);
// User 1's news feed should return a list with 1 tweet id -> [5],
// since user 1 is no longer following user 2.
twitter.getNewsFeed(1);
Observation:
- A followee cannot see the follower's tweets in their news feed unless they follow them back
- Follower can see their own tweets and the tweets of people they followed.
- Follower cannot see the tweets of people that their followee followed.
- Every tweet id is unique.
Requirements:
- We need a data structure that can:
+ Keep track of the tweets that each user can see, i.e, their own tweets and followee's tweets
+ Make sure that the tweets of people that they did not follow cannot be seen by them
+ Keep track of the tweets owner, example: user 1 created and posted tweet 2; user 2 created and posted tweet 3
'''
class Twitter(object):
    """Simplified Twitter: post tweets, follow/unfollow, read a 10-item feed."""

    def __init__(self):
        # userId -> set of (tweetId, priority) pairs posted by that user.
        self.userWithTweets = {}
        # followerId -> set of followee ids.
        self.followersWithFollowee = {}
        # Monotonic counter: larger priority == more recent tweet.
        self.assign_priority = 0

    def postTweet(self, userId, tweetId):
        """Compose a new tweet.
        :type userId: int
        :type tweetId: int
        """
        self.assign_priority += 1
        # Append to the user's tweet set, creating it on first post.
        if userId in self.userWithTweets:
            self.userWithTweets[userId].add((tweetId, self.assign_priority))
        else:
            self.userWithTweets[userId] = set([(tweetId, self.assign_priority)])
        return self.userWithTweets

    def getNewsFeed(self, userId):
        """Retrieve the 10 most recent tweet ids posted by the user or by
        anyone they follow, ordered most-recent first.
        :type userId: int
        :rtype: List[int]
        """
        feed_sources = [userId]
        if userId in self.followersWithFollowee:
            feed_sources.extend(self.followersWithFollowee[userId])
        # Collect every candidate tweet from the user and their followees.
        tweets = []
        for user in feed_sources:
            tweets.extend(self.userWithTweets.get(user, ()))
        # BUG FIX: sort by recency BEFORE truncating to 10. The old code
        # popped arbitrary set-ordered tweets until only 10 remained and
        # sorted afterwards, so recent tweets could be silently dropped.
        tweets.sort(key=lambda post: post[1], reverse=True)
        return [tweetId for tweetId, _priority in tweets[:10]]

    def follow(self, followerId, followeeId):
        """Follower follows a followee. Invalid operations are a no-op.
        :type followerId: int
        :type followeeId: int
        """
        if followerId in self.followersWithFollowee:
            self.followersWithFollowee[followerId].add(followeeId)
        else:
            self.followersWithFollowee[followerId] = set([followeeId])
        return self.followersWithFollowee

    def unfollow(self, followerId, followeeId):
        """Follower unfollows a followee. Invalid operations are a no-op.
        :type followerId: int
        :type followeeId: int
        """
        if followerId in self.followersWithFollowee:
            if followeeId in self.followersWithFollowee[followerId]:
                self.followersWithFollowee[followerId].remove(followeeId)
        return self.followersWithFollowee
#Main function to run the test cases:
def main():
    """Smoke-test the Twitter class: posting, reading feeds, and following."""
    print("Testing DESIGN TWITTER...")
    # #Testing component:
    obj = Twitter()
    obj.postTweet(1,5)
    obj.postTweet(1,2)
    obj.postTweet(3, 10)
    # Feed for user 1: only their own tweets at this point.
    param_2 = obj.getNewsFeed(1)
    print(param_2)
    obj.follow(1,3)
    # After following user 3, tweet 10 should join user 1's feed.
    param_2 = obj.getNewsFeed(1)
    print(param_2)
    #print(obj.follow(3, 6))
    #print(obj.unfollow(1, 3))
    print("END OF TESTING...")
main()
|
def calc(x, y):
    """Return the tuple (x + y, x - y, x / y, x * y)."""
    total = x + y
    difference = x - y
    quotient = x / y
    product = x * y
    return total, difference, quotient, product
# s = calc(4,5)
# print(s[0], s[1])
# Unpack the four returned values (sum, difference, quotient, product).
a,b,c,d = calc(4,5)
print(a,b,c,d)
import sys
_module = sys.modules[__name__]
del sys
config = _module
dataset = _module
preprocess_images = _module
train = _module
train_blend = _module
utils = _module
config = _module
dataset = _module
train = _module
utils = _module
config = _module
dataset = _module
extract_images_from_csv = _module
train = _module
utils = _module
dataset = _module
train = _module
utils = _module
sort_w_attention = _module
utils = _module
googLeNet = _module
import_all_networks = _module
lenet = _module
resnet = _module
vgg = _module
train = _module
import_utils = _module
mnist_data = _module
utils = _module
build_vocabulary = _module
create_freq_vectors = _module
naivebayes = _module
generating_names = _module
main = _module
classification = _module
detection = _module
full_pytorch_example = _module
segmentation = _module
custom_dataset = _module
loader_customtext = _module
pytorch_bidirectional_lstm = _module
pytorch_init_weights = _module
pytorch_loadsave = _module
pytorch_lr_ratescheduler = _module
pytorch_mixed_precision_example = _module
pytorch_pretrain_finetune = _module
pytorch_progress_bar = _module
pytorch_rnn_gru_lstm = _module
pytorch_simple_CNN = _module
pytorch_simple_fullynet = _module
pytorch_std_mean = _module
pytorch_tensorbasics = _module
pytorch_tensorboard_ = _module
pytorch_transforms = _module
pytorch_set_seeds = _module
lenet5_pytorch = _module
pytorch_efficientnet = _module
pytorch_inceptionet = _module
pytorch_resnet = _module
pytorch_vgg_implementation = _module
fc_gan = _module
model = _module
train = _module
model = _module
train = _module
model = _module
train = _module
utils = _module
config = _module
dataset = _module
discriminator_model = _module
generator_model = _module
train = _module
utils = _module
config = _module
dataset = _module
loss = _module
model = _module
train = _module
utils = _module
config = _module
dataset = _module
discriminator_model = _module
generator_model = _module
train = _module
utils = _module
config = _module
model = _module
train = _module
utils = _module
config = _module
dataset = _module
loss = _module
model = _module
train = _module
utils = _module
config = _module
make_resized_data = _module
model = _module
prepare_data = _module
train = _module
utils = _module
dataset = _module
model = _module
train = _module
utils = _module
seq2seq = _module
utils = _module
seq2seq_attention = _module
utils = _module
model = _module
train = _module
get_loader = _module
model = _module
train = _module
utils = _module
nst = _module
seq2seq_transformer = _module
utils = _module
torchtext_tutorial1 = _module
torchtext_tutorial2 = _module
torchtext_tutorial3 = _module
transformer_from_scratch = _module
generate_csv = _module
dataset = _module
loss = _module
model = _module
train = _module
utils = _module
config = _module
dataset = _module
loss = _module
model = _module
train = _module
utils = _module
iou = _module
mean_avg_precision = _module
nms = _module
augmentations = _module
config = _module
dataset = _module
model = _module
train = _module
utils = _module
process_data = _module
train_isic = _module
tutorial8_keras_subclassing = _module
alexnet = _module
test = _module
block = _module
googlenet = _module
lenet5 = _module
vggnet = _module
decision_tree = _module
kmeansclustering = _module
knn = _module
linear_regression_gradient_descent = _module
linear_regression_normal_equation = _module
logistic_regression = _module
NN = _module
random_forest = _module
svm = _module
metrics = _module
LinearRegression_GD = _module
LinearRegression_normal = _module
iou_test = _module
map_test = _module
nms_test = _module
from _paritybench_helpers import _mock_config, patch_functional
from unittest.mock import mock_open, MagicMock
from torch.autograd import Function
from torch.nn import Module
import abc, collections, copy, enum, functools, inspect, itertools, logging, math, matplotlib, numbers, numpy, pandas, queue, random, re, scipy, sklearn, string, tensorflow, time, torch, torchaudio, torchtext, torchvision, types, typing, uuid, warnings
import numpy as np
from torch import Tensor
patch_functional()
open = mock_open()
yaml = logging = sys = argparse = MagicMock()
ArgumentParser = argparse.ArgumentParser
_global_config = args = argv = cfg = config = params = _mock_config()
argparse.ArgumentParser.return_value.parse_args.return_value = _global_config
yaml.load.return_value = _global_config
sys.argv = _global_config
__version__ = '1.0.0'
xrange = range
wraps = functools.wraps
import torch
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torch import nn
from torch import optim
from sklearn.metrics import cohen_kappa_score
from torchvision.utils import save_image
import warnings
import torch.nn.functional as F
import re
from sklearn.metrics import log_loss
import matplotlib.pyplot as plt
from torch.utils.data import TensorDataset
from torch.utils.data.dataset import random_split
from math import ceil
from sklearn import metrics
import torch.nn as nn
import torch.optim as optim
import random
from torch.utils.tensorboard import SummaryWriter
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.backends.cudnn as cudnn
from torch.utils.data import SubsetRandomSampler
import string
from torch.utils.data import WeightedRandomSampler
import torchvision
from torch.nn.utils.rnn import pad_sequence
import copy
import time
import torch.nn
from torchvision.models import vgg19
from math import log2
from scipy.stats import truncnorm
import torchvision.transforms.functional as TF
from torchtext.datasets import Multi30k
from torchtext.data.metrics import bleu_score
from torchvision import transforms
import torchvision.models as models
import torchvision.transforms.functional as FT
import matplotlib.patches as patches
from collections import Counter
class MyModel(nn.Module):
    """MLP binary classifier over a (1536 + 1) * 2 = 3074-dim feature vector."""

    def __init__(self):
        super().__init__()
        in_features = (1536 + 1) * 2
        # BatchNorm -> (Linear, BN, ReLU, Dropout) x2 -> final 2-way head.
        stack = [
            nn.BatchNorm1d(in_features),
            nn.Linear(in_features, 500),
            nn.BatchNorm1d(500),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(500, 100),
            nn.BatchNorm1d(100),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(100, 2),
        ]
        self.model = nn.Sequential(*stack)

    def forward(self, x):
        # Delegate directly to the sequential stack.
        return self.model(x)
class NN(nn.Module):
    """Two-layer fully connected classifier (input -> 50 -> num_classes)."""

    def __init__(self, input_size, num_classes):
        """Build the two linear layers.

        Parameters:
            input_size: flattened input dimension (784 for 28x28 MNIST).
            num_classes: output dimension (10 for digit classification).
        """
        super(NN, self).__init__()
        hidden = 50
        self.fc1 = nn.Linear(input_size, hidden)
        self.fc2 = nn.Linear(hidden, num_classes)

    def forward(self, x):
        """Run x through fc1 -> ReLU -> fc2 and return the raw logits."""
        hidden_act = F.relu(self.fc1(x))
        return self.fc2(hidden_act)
class SelfAttention(nn.Module):
    """Multi-head scaled dot-product attention (Vaswani et al., 2017 style)."""

    def __init__(self, embed_size, heads):
        super(SelfAttention, self).__init__()
        self.embed_size = embed_size
        self.heads = heads
        # Per-head feature width; embed_size must divide evenly across heads.
        self.head_dim = embed_size // heads
        assert self.head_dim * heads == embed_size, 'Embedding size needs to be divisible by heads'
        self.values = nn.Linear(embed_size, embed_size)
        self.keys = nn.Linear(embed_size, embed_size)
        self.queries = nn.Linear(embed_size, embed_size)
        self.fc_out = nn.Linear(embed_size, embed_size)

    def forward(self, values, keys, query, mask):
        # values/keys/query: (N, len, embed_size); mask (or None) must
        # broadcast against the (N, heads, query_len, key_len) score tensor.
        N = query.shape[0]
        value_len, key_len, query_len = values.shape[1], keys.shape[1], query.shape[1]
        values = self.values(values)
        keys = self.keys(keys)
        queries = self.queries(query)
        # Split the embedding dimension into `heads` independent sub-spaces.
        values = values.reshape(N, value_len, self.heads, self.head_dim)
        keys = keys.reshape(N, key_len, self.heads, self.head_dim)
        queries = queries.reshape(N, query_len, self.heads, self.head_dim)
        # energy[n, h, q, k] = <query q, key k> for head h.
        energy = torch.einsum('nqhd,nkhd->nhqk', [queries, keys])
        if mask is not None:
            # Masked (==0) positions get ~-inf so softmax gives them ~0 weight.
            energy = energy.masked_fill(mask == 0, float('-1e20'))
        # NOTE(review): scales by sqrt(embed_size) rather than the per-head
        # sqrt(head_dim) of the original paper — confirm this is intentional.
        attention = torch.softmax(energy / self.embed_size ** (1 / 2), dim=3)
        # Weighted sum over key positions, then re-merge the heads.
        out = torch.einsum('nhql,nlhd->nqhd', [attention, values]).reshape(N, query_len, self.heads * self.head_dim)
        out = self.fc_out(out)
        return out
class TransformerBlock(nn.Module):
    """Post-norm transformer layer: self-attention + position-wise MLP,
    each wrapped in a residual connection, LayerNorm, and dropout."""

    def __init__(self, embed_size, heads, dropout, forward_expansion):
        super(TransformerBlock, self).__init__()
        self.attention = SelfAttention(embed_size, heads)
        self.norm1 = nn.LayerNorm(embed_size)
        self.norm2 = nn.LayerNorm(embed_size)
        hidden = forward_expansion * embed_size
        self.feed_forward = nn.Sequential(
            nn.Linear(embed_size, hidden),
            nn.ReLU(),
            nn.Linear(hidden, embed_size),
        )
        self.dropout = nn.Dropout(dropout)

    def forward(self, value, key, query, mask):
        # Attention sublayer; the residual is taken around the query.
        attended = self.attention(value, key, query, mask)
        x = self.dropout(self.norm1(attended + query))
        # Feed-forward sublayer with its own residual connection.
        return self.dropout(self.norm2(self.feed_forward(x) + x))
class Encoder(nn.Module):
    """Transformer encoder: word + learned positional embeddings feeding a
    stack of TransformerBlocks."""

    def __init__(self, src_vocab_size, embed_size, num_layers, heads, device, forward_expansion, dropout, max_length):
        super(Encoder, self).__init__()
        self.embed_size = embed_size
        # Stored so tensors created inside forward() land on the right device.
        self.device = device
        self.word_embedding = nn.Embedding(src_vocab_size, embed_size)
        self.position_embedding = nn.Embedding(max_length, embed_size)
        self.layers = nn.ModuleList(
            [TransformerBlock(embed_size, heads, dropout=dropout, forward_expansion=forward_expansion)
             for _ in range(num_layers)]
        )
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask):
        # x: (N, seq_length) token ids.
        N, seq_length = x.shape
        # BUG FIX: the position indices must live on the model's device —
        # the old CPU-allocated tensor raised a device mismatch under CUDA.
        positions = torch.arange(0, seq_length).expand(N, seq_length).to(self.device)
        out = self.dropout(self.word_embedding(x) + self.position_embedding(positions))
        # Self-attention: each layer uses its own output as value/key/query.
        for layer in self.layers:
            out = layer(out, out, out, mask)
        return out
class DecoderBlock(nn.Module):
    """Decoder layer: masked self-attention, then a TransformerBlock doing
    cross-attention over the encoder output."""

    def __init__(self, embed_size, heads, forward_expansion, dropout, device):
        super(DecoderBlock, self).__init__()
        self.norm = nn.LayerNorm(embed_size)
        self.attention = SelfAttention(embed_size, heads=heads)
        self.transformer_block = TransformerBlock(embed_size, heads, dropout, forward_expansion)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, value, key, src_mask, trg_mask):
        # Masked self-attention with a residual around the target sequence.
        self_attended = self.attention(x, x, x, trg_mask)
        query = self.dropout(self.norm(self_attended + x))
        # Cross-attention: keys/values come from the encoder output.
        return self.transformer_block(value, key, query, src_mask)
class Decoder(nn.Module):
    """Transformer decoder: embedding + DecoderBlock stack + vocab projection."""

    def __init__(self, trg_vocab_size, embed_size, num_layers, heads, forward_expansion, dropout, device, max_length):
        super(Decoder, self).__init__()
        # Stored so tensors created inside forward() land on the right device.
        self.device = device
        self.word_embedding = nn.Embedding(trg_vocab_size, embed_size)
        self.position_embedding = nn.Embedding(max_length, embed_size)
        self.layers = nn.ModuleList([DecoderBlock(embed_size, heads, forward_expansion, dropout, device) for _ in range(num_layers)])
        self.fc_out = nn.Linear(embed_size, trg_vocab_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, enc_out, src_mask, trg_mask):
        # x: (N, seq_length) target token ids.
        N, seq_length = x.shape
        # BUG FIX: create the position indices on the model's device; the old
        # CPU tensor raised a device mismatch when running on CUDA.
        positions = torch.arange(0, seq_length).expand(N, seq_length).to(self.device)
        x = self.dropout(self.word_embedding(x) + self.position_embedding(positions))
        for layer in self.layers:
            x = layer(x, enc_out, enc_out, src_mask, trg_mask)
        out = self.fc_out(x)
        return out
# Default compute device: CUDA when available, otherwise CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def tokenize_eng(text):
    # Tokenize English text with spaCy. NOTE(review): `spacy_eng` must be a
    # loaded spaCy model defined elsewhere in this module — not visible here.
    return [tok.text for tok in spacy_eng.tokenizer(text)]
class Seq2Seq(nn.Module):
    """Encoder/decoder wrapper with scheduled teacher forcing.

    NOTE(review): depends on the module-level global `english` (target-side
    vocabulary) and `random`; both must exist where this class is used.
    """

    def __init__(self, encoder, decoder):
        super(Seq2Seq, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, source, target, teacher_force_ratio=0.5):
        # source/target are sequence-first: (seq_len, batch).
        batch_size = source.shape[1]
        target_len = target.shape[0]
        target_vocab_size = len(english.vocab)
        # outputs[t] holds the decoder logits for step t; index 0 stays zero.
        outputs = torch.zeros(target_len, batch_size, target_vocab_size)
        encoder_states, hidden, cell = self.encoder(source)
        # First decoder input is the <sos> row of the target.
        x = target[0]
        for t in range(1, target_len):
            output, hidden, cell = self.decoder(x, encoder_states, hidden, cell)
            outputs[t] = output
            best_guess = output.argmax(1)
            # With prob. teacher_force_ratio feed the ground truth token,
            # otherwise feed the model's own prediction.
            x = target[t] if random.random() < teacher_force_ratio else best_guess
        return outputs
class BasicConv2d(nn.Module):
    """Conv2d (bias-free) -> BatchNorm2d -> in-place ReLU building block."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super().__init__()
        # Extra conv options (kernel_size, stride, padding, ...) pass through.
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)

    def forward(self, x):
        return F.relu(self.bn(self.conv(x)), inplace=True)
class Inception(nn.Module):
    """Inception module: concatenate four parallel branches along channels.

    NOTE(review): branch_3 stacks two 3x3 convs (an Inception-v2 style
    factorization of the 5x5 path) despite the `out5x5*` parameter names.
    """

    def __init__(self, in_channels, out1x1, out3x3reduced, out3x3, out5x5reduced, out5x5, outpool):
        super().__init__()
        self.branch_1 = BasicConv2d(in_channels, out1x1, kernel_size=1, stride=1)
        self.branch_2 = nn.Sequential(
            BasicConv2d(in_channels, out3x3reduced, kernel_size=1),
            BasicConv2d(out3x3reduced, out3x3, kernel_size=3, padding=1),
        )
        self.branch_3 = nn.Sequential(
            BasicConv2d(in_channels, out5x5reduced, kernel_size=1),
            BasicConv2d(out5x5reduced, out5x5, kernel_size=3, padding=1),
            BasicConv2d(out5x5, out5x5, kernel_size=3, padding=1),
        )
        self.branch_4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            BasicConv2d(in_channels, outpool, kernel_size=1),
        )

    def forward(self, x):
        branches = (self.branch_1, self.branch_2, self.branch_3, self.branch_4)
        return torch.cat([branch(x) for branch in branches], 1)
class conv_block(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU; extra kwargs forwarded to the conv."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super(conv_block, self).__init__()
        self.relu = nn.ReLU()
        self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)
        self.batchnorm = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        out = self.conv(x)
        out = self.batchnorm(out)
        return self.relu(out)
class InceptionAux(nn.Module):
    """Auxiliary classifier head attached to intermediate GoogLeNet stages."""

    def __init__(self, in_channels, num_classes):
        super(InceptionAux, self).__init__()
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=0.7)
        self.pool = nn.AvgPool2d(kernel_size=5, stride=3)
        self.conv = conv_block(in_channels, 128, kernel_size=1)
        # fc1 expects a 14x14 input map: pooled to 4x4, 128*4*4 == 2048.
        self.fc1 = nn.Linear(2048, 1024)
        self.fc2 = nn.Linear(1024, num_classes)

    def forward(self, x):
        pooled = self.conv(self.pool(x))
        flat = pooled.reshape(pooled.shape[0], -1)
        hidden = self.dropout(self.relu(self.fc1(flat)))
        return self.fc2(hidden)
class Inception_block(nn.Module):
    """Classic inception module built from conv_block units
    (1x1, reduced 3x3, reduced 5x5, and pooled 1x1 branches)."""

    def __init__(self, in_channels, out_1x1, red_3x3, out_3x3, red_5x5, out_5x5, out_1x1pool):
        super(Inception_block, self).__init__()
        self.branch1 = conv_block(in_channels, out_1x1, kernel_size=1)
        self.branch2 = nn.Sequential(
            conv_block(in_channels, red_3x3, kernel_size=1),
            conv_block(red_3x3, out_3x3, kernel_size=(3, 3), padding=1),
        )
        self.branch3 = nn.Sequential(
            conv_block(in_channels, red_5x5, kernel_size=1),
            conv_block(red_5x5, out_5x5, kernel_size=5, padding=2),
        )
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            conv_block(in_channels, out_1x1pool, kernel_size=1),
        )

    def forward(self, x):
        # Concatenate all four branch outputs along the channel axis.
        outputs = [self.branch1(x), self.branch2(x), self.branch3(x), self.branch4(x)]
        return torch.cat(outputs, 1)
class GoogLeNet(nn.Module):
    """GoogLeNet / Inception-v1 assembled from conv_block and Inception_block.

    With aux_logits enabled, forward() returns (aux1, aux2, logits) while the
    module is in training mode and just the logits otherwise. The fixed 7x7
    average pool assumes a 3x224x224 input (7x7 final feature map).
    """

    def __init__(self, aux_logits=True, num_classes=1000):
        super(GoogLeNet, self).__init__()
        assert aux_logits == True or aux_logits == False
        self.aux_logits = aux_logits
        # Stem: conv/pool layers before the inception stages.
        self.conv1 = conv_block(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=3)
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.conv2 = conv_block(64, 192, kernel_size=3, stride=1, padding=1)
        self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Inception stages 3-5; filter counts follow the GoogLeNet paper.
        self.inception3a = Inception_block(192, 64, 96, 128, 16, 32, 32)
        self.inception3b = Inception_block(256, 128, 128, 192, 32, 96, 64)
        self.maxpool3 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.inception4a = Inception_block(480, 192, 96, 208, 16, 48, 64)
        self.inception4b = Inception_block(512, 160, 112, 224, 24, 64, 64)
        self.inception4c = Inception_block(512, 128, 128, 256, 24, 64, 64)
        self.inception4d = Inception_block(512, 112, 144, 288, 32, 64, 64)
        self.inception4e = Inception_block(528, 256, 160, 320, 32, 128, 128)
        self.maxpool4 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.inception5a = Inception_block(832, 256, 160, 320, 32, 128, 128)
        self.inception5b = Inception_block(832, 384, 192, 384, 48, 128, 128)
        self.avgpool = nn.AvgPool2d(kernel_size=7, stride=1)
        self.dropout = nn.Dropout(p=0.4)
        self.fc1 = nn.Linear(1024, num_classes)
        if self.aux_logits:
            # Auxiliary classifiers branch off after 4a and 4d (training only).
            self.aux1 = InceptionAux(512, num_classes)
            self.aux2 = InceptionAux(528, num_classes)
        else:
            self.aux1 = self.aux2 = None

    def forward(self, x):
        x = self.conv1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.maxpool2(x)
        x = self.inception3a(x)
        x = self.inception3b(x)
        x = self.maxpool3(x)
        x = self.inception4a(x)
        # First auxiliary head, computed only while training.
        if self.aux_logits and self.training:
            aux1 = self.aux1(x)
        x = self.inception4b(x)
        x = self.inception4c(x)
        x = self.inception4d(x)
        # Second auxiliary head, computed only while training.
        if self.aux_logits and self.training:
            aux2 = self.aux2(x)
        x = self.inception4e(x)
        x = self.maxpool4(x)
        x = self.inception5a(x)
        x = self.inception5b(x)
        x = self.avgpool(x)
        x = x.reshape(x.shape[0], -1)
        x = self.dropout(x)
        x = self.fc1(x)
        if self.aux_logits and self.training:
            return aux1, aux2, x
        else:
            return x
class LeNet(nn.Module):
    """LeNet-5 for 1x32x32 inputs: three conv stages, two linear layers."""

    def __init__(self):
        super(LeNet, self).__init__()
        self.relu = nn.ReLU()
        self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, stride=1, padding=0)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1, padding=0)
        self.conv3 = nn.Conv2d(in_channels=16, out_channels=120, kernel_size=5, stride=1, padding=0)
        self.linear1 = nn.Linear(120, 84)
        self.linear2 = nn.Linear(84, 10)

    def forward(self, x):
        # Feature extractor: 32x32 -> 14x14 -> 5x5 -> 1x1 (120 channels).
        for conv in (self.conv1, self.conv2):
            x = self.pool(self.relu(conv(x)))
        x = self.relu(self.conv3(x))
        # Flatten and classify with the two linear layers.
        x = x.reshape(x.shape[0], -1)
        return self.linear2(self.relu(self.linear1(x)))
class residual_template(nn.Module):
    """Bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand (x4)."""

    # The final 1x1 conv widens channels by this factor relative to out_channels.
    expansion = 4

    def __init__(self, in_channels, out_channels, stride=1, identity_downsample=None):
        # NOTE(review): (stride, identity_downsample) order differs from
        # torchvision's Bottleneck — callers should pass both by keyword to
        # avoid accidentally swapping them.
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        # Spatial downsampling (when stride > 1) happens in the 3x3 conv.
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.conv3 = nn.Conv2d(out_channels, out_channels * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        # Optional projection applied to the skip path when shapes differ.
        self.identity_downsample = identity_downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.identity_downsample is not None:
            # Project the identity so its channel/spatial dims match `out`.
            residual = self.identity_downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """Generic bottleneck ResNet.

    block: residual block class with class attribute ``expansion == 4`` and
        signature ``block(in_ch, out_ch, stride=..., identity_downsample=...)``.
    layers: residual blocks per stage, e.g. [3, 4, 6, 3] for ResNet-50.
    image_channels / num_classes: input channels and output logits.
    """

    def __init__(self, block, layers, image_channels, num_classes):
        super(ResNet, self).__init__()
        self.in_channels = 64
        # Stem: 7x7 conv + max pool.
        self.conv1 = nn.Conv2d(image_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, layers[0], intermediate_channels=64, stride=1)
        self.layer2 = self._make_layer(block, layers[1], intermediate_channels=128, stride=2)
        self.layer3 = self._make_layer(block, layers[2], intermediate_channels=256, stride=2)
        self.layer4 = self._make_layer(block, layers[3], intermediate_channels=512, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * 4, num_classes)

    def forward(self, x):
        """Return (N, num_classes) logits for an (N, C, H, W) batch."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.reshape(x.shape[0], -1)
        x = self.fc(x)
        return x

    def _make_layer(self, block, num_residual_blocks, intermediate_channels, stride):
        """Build one residual stage; only its first block may downsample/project."""
        identity_downsample = None
        layers = []
        if stride != 1 or self.in_channels != intermediate_channels * 4:
            # 1x1 projection so the skip connection matches the block output.
            identity_downsample = nn.Sequential(nn.Conv2d(self.in_channels, intermediate_channels * 4, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(intermediate_channels * 4))
        # BUG FIX: pass stride/identity_downsample by keyword. The block's
        # __init__ is (in, out, stride=1, identity_downsample=None); the old
        # positional call block(in, out, identity_downsample, stride) sent the
        # downsample module as `stride`, crashing inside nn.Conv2d.
        layers.append(block(self.in_channels, intermediate_channels,
                            stride=stride, identity_downsample=identity_downsample))
        self.in_channels = intermediate_channels * 4
        for i in range(num_residual_blocks - 1):
            layers.append(block(self.in_channels, intermediate_channels))
        return nn.Sequential(*layers)
class VGG(nn.Module):
    """Pretrained VGG-19 feature extractor (neural style transfer style).

    forward() returns the activations at conv layers '0','5','10','19','28'.
    NOTE: instantiating this downloads the torchvision pretrained weights.
    """

    def __init__(self):
        super(VGG, self).__init__()
        # Layer indices within vgg19.features whose outputs are collected.
        self.chosen_features = ['0', '5', '10', '19', '28']
        # Keep only the convolutional trunk up to and including layer 28.
        self.model = models.vgg19(pretrained=True).features[:29]

    def forward(self, x):
        features = []
        for layer_num, layer in enumerate(self.model):
            x = layer(x)
            if str(layer_num) in self.chosen_features:
                features.append(x)
        return features
# Timesteps per sample: 28x28 MNIST images fed row-by-row to the RNNs below.
sequence_length = 28
class RNN(nn.Module):
    """Many-to-one RNN classifier over fixed-length sequences.

    Flattens all timestep outputs into one vector before the final linear
    layer, so the sequence length is fixed per model instance.
    """

    def __init__(self, input_size, hidden_size, num_layers, num_classes, seq_length=28):
        """seq_length: timesteps per sample. Generalized from the module-level
        ``sequence_length`` global; the default of 28 matches its value
        (row-wise MNIST), so existing callers are unaffected."""
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.rnn = nn.RNN(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size * seq_length, num_classes)

    def forward(self, x):
        # BUG FIX: allocate the initial hidden state on the input's device —
        # the old CPU-only zeros crashed when x lived on CUDA.
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)
        out, _ = self.rnn(x, h0)
        # Concatenate every timestep's hidden state, then classify.
        out = out.reshape(out.shape[0], -1)
        return self.fc(out)
class BRNN(nn.Module):
    """Bidirectional LSTM classifier using the last time step's concatenated fwd/bwd output."""

    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(BRNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bidirectional=True)
        # *2 because forward and backward hidden states are concatenated.
        self.fc = nn.Linear(hidden_size * 2, num_classes)

    def forward(self, x):
        """x: (batch, seq_len, input_size) -> logits (batch, num_classes)."""
        # Fix: h0/c0 were created but never passed to the LSTM (dead code), and were
        # allocated on the CPU regardless of input device. Pass them explicitly on x.device.
        # Behavior is unchanged for CPU inputs since nn.LSTM defaults to zero states.
        h0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size, device=x.device)
        c0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size, device=x.device)
        out, _ = self.lstm(x, (h0, c0))
        return self.fc(out[:, -1, :])
class CNN(nn.Module):
    """Two-conv classifier for 32x32 inputs; each conv is followed by ReLU and a 2x max-pool."""

    def __init__(self, in_channels, num_classes):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=8, kernel_size=3, stride=1, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3, stride=1, padding=1)
        # 16 channels at 8x8 after two 2x pools of a 32x32 input.
        self.fc1 = nn.Linear(16 * 8 * 8, num_classes)

    def forward(self, x):
        """x: (batch, in_channels, 32, 32) -> logits (batch, num_classes)."""
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        return self.fc1(out.reshape(out.shape[0], -1))
class RNN_GRU(nn.Module):
    """Many-to-one GRU classifier: all time-step outputs flattened into one linear head."""

    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(RNN_GRU, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.gru = nn.GRU(input_size, hidden_size, num_layers, batch_first=True)
        # The head consumes every time step, so the module-level sequence_length is baked in.
        self.fc = nn.Linear(hidden_size * sequence_length, num_classes)

    def forward(self, x):
        """x: (batch, sequence_length, input_size) -> logits (batch, num_classes)."""
        # Fix: allocate the initial hidden state on the input's device; the previous
        # CPU-only allocation crashed with a device mismatch for CUDA inputs.
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)
        out, _ = self.gru(x, h0)
        out = out.reshape(out.shape[0], -1)
        return self.fc(out)
class RNN_LSTM(nn.Module):
    """Token-sequence LSTM regressor: embed, run a seq-first LSTM, score the last time step."""

    def __init__(self, input_size, embed_size, hidden_size, num_layers):
        super(RNN_LSTM, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.embedding = nn.Embedding(input_size, embed_size)
        # seq-first layout (no batch_first): x is (seq_len, batch).
        self.rnn = nn.LSTM(embed_size, hidden_size, num_layers)
        self.fc_out = nn.Linear(hidden_size, 1)

    def forward(self, x):
        """x: (seq_len, batch) of token ids -> predictions (batch, 1)."""
        # Fix: allocate the initial states on the input's device; the previous
        # CPU-only allocation crashed with a device mismatch for CUDA inputs.
        h0 = torch.zeros(self.num_layers, x.size(1), self.hidden_size, device=x.device)
        c0 = torch.zeros(self.num_layers, x.size(1), self.hidden_size, device=x.device)
        embedded = self.embedding(x)
        outputs, _ = self.rnn(embedded, (h0, c0))
        # Only the final time step feeds the regression head.
        return self.fc_out(outputs[-1, :, :])
class CNNBlock(nn.Module):
    """Conv2d optionally followed by BatchNorm + LeakyReLU(0.1); conv bias only when bn/act is off."""

    def __init__(self, in_channels, out_channels, bn_act=True, **kwargs):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, bias=not bn_act, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels)
        self.leaky = nn.LeakyReLU(0.1)
        self.use_bn_act = bn_act

    def forward(self, x):
        out = self.conv(x)
        if not self.use_bn_act:
            # Raw conv output (used e.g. for detection heads).
            return out
        return self.leaky(self.bn(out))
class SqueezeExcitation(nn.Module):
    """Channel attention: global average pool -> 1x1 bottleneck MLP -> sigmoid gate."""

    def __init__(self, in_channels, reduced_dim):
        super(SqueezeExcitation, self).__init__()
        self.se = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, reduced_dim, 1),
            nn.SiLU(),
            nn.Conv2d(reduced_dim, in_channels, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        # Rescale every channel of x by its learned attention weight in (0, 1).
        gate = self.se(x)
        return x * gate
class InvertedResidualBlock(nn.Module):
    """EfficientNet MBConv block: optional expansion, depthwise conv, squeeze-excitation,
    pointwise projection, with stochastic depth on the residual path."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, expand_ratio, reduction=4, survival_prob=0.8):
        super(InvertedResidualBlock, self).__init__()
        # Fix: honor the survival_prob argument; it was previously hard-coded to 0.8,
        # silently ignoring the caller's value.
        self.survival_prob = survival_prob
        # Residual connection only when shape is preserved.
        self.use_residual = in_channels == out_channels and stride == 1
        hidden_dim = in_channels * expand_ratio
        self.expand = in_channels != hidden_dim
        reduced_dim = int(in_channels / reduction)
        if self.expand:
            self.expand_conv = CNNBlock(in_channels, hidden_dim, kernel_size=3, stride=1, padding=1)
        # Depthwise conv (groups=hidden_dim) + SE + linear 1x1 projection (no activation).
        self.conv = nn.Sequential(CNNBlock(hidden_dim, hidden_dim, kernel_size, stride, padding, groups=hidden_dim), SqueezeExcitation(hidden_dim, reduced_dim), nn.Conv2d(hidden_dim, out_channels, 1, bias=False), nn.BatchNorm2d(out_channels))

    def stochastic_depth(self, x):
        """Randomly drop the whole residual branch per sample during training (drop-path)."""
        if not self.training:
            return x
        binary_tensor = torch.rand(x.shape[0], 1, 1, 1, device=x.device) < self.survival_prob
        # Division keeps the expected activation magnitude constant.
        return torch.div(x, self.survival_prob) * binary_tensor

    def forward(self, inputs):
        x = self.expand_conv(inputs) if self.expand else inputs
        if self.use_residual:
            return self.stochastic_depth(self.conv(x)) + inputs
        else:
            return self.conv(x)
# EfficientNet-B0 stage spec: [expand_ratio, channels, repeats, stride, kernel_size] per stage.
base_model = [[1, 16, 1, 1, 3], [6, 24, 2, 2, 3], [6, 40, 2, 2, 5], [6, 80, 3, 2, 3], [6, 112, 3, 1, 5], [6, 192, 4, 2, 5], [6, 320, 1, 1, 3]]
# Compound-scaling table: version -> (phi, input resolution, dropout rate).
phi_values = {'b0': (0, 224, 0.2), 'b1': (0.5, 240, 0.2), 'b2': (1, 260, 0.3), 'b3': (2, 300, 0.3), 'b4': (3, 380, 0.4), 'b5': (4, 456, 0.4), 'b6': (5, 528, 0.5), 'b7': (6, 600, 0.5)}
class EfficientNet(nn.Module):
    """EfficientNet backbone built from MBConv stages with compound width/depth scaling."""

    def __init__(self, version, num_classes):
        super(EfficientNet, self).__init__()
        width_factor, depth_factor, dropout_rate = self.calculate_factors(version)
        # Final feature width scales with the width factor (1280 for B0).
        last_channels = ceil(1280 * width_factor)
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.features = self.create_features(width_factor, depth_factor, last_channels)
        self.classifier = nn.Sequential(nn.Dropout(dropout_rate), nn.Linear(last_channels, num_classes))

    def calculate_factors(self, version, alpha=1.2, beta=1.1):
        """Map a version key ('b0'..'b7') to (width_factor, depth_factor, dropout_rate).

        Depth scales as alpha**phi and width as beta**phi; the resolution entry of
        phi_values is read but unused here.
        """
        phi, res, drop_rate = phi_values[version]
        depth_factor = alpha ** phi
        width_factor = beta ** phi
        return width_factor, depth_factor, drop_rate

    def create_features(self, width_factor, depth_factor, last_channels):
        """Build stem conv + scaled MBConv stages (from base_model) + 1x1 head conv."""
        channels = int(32 * width_factor)
        features = [CNNBlock(3, channels, 3, stride=2, padding=1)]
        in_channels = channels
        for expand_ratio, channels, repeats, stride, kernel_size in base_model:
            # Channel counts are rounded up to multiples of 4.
            out_channels = 4 * ceil(int(channels * width_factor) / 4)
            layers_repeats = ceil(repeats * depth_factor)
            for layer in range(layers_repeats):
                # Only the first block of each stage downsamples.
                features.append(InvertedResidualBlock(in_channels, out_channels, expand_ratio=expand_ratio, stride=stride if layer == 0 else 1, kernel_size=kernel_size, padding=kernel_size // 2))
                in_channels = out_channels
        features.append(CNNBlock(in_channels, last_channels, kernel_size=1, stride=1, padding=0))
        return nn.Sequential(*features)

    def forward(self, x):
        x = self.pool(self.features(x))
        return self.classifier(x.view(x.shape[0], -1))
class block(nn.Module):
    """ResNet bottleneck: 1x1 reduce -> 3x3 (carries the stride) -> 1x1 expand (x4), plus skip."""

    def __init__(self, in_channels, intermediate_channels, identity_downsample=None, stride=1):
        super().__init__()
        # Output channels are always 4x the intermediate width.
        self.expansion = 4
        self.conv1 = nn.Conv2d(in_channels, intermediate_channels, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(intermediate_channels)
        self.conv2 = nn.Conv2d(intermediate_channels, intermediate_channels, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(intermediate_channels)
        self.conv3 = nn.Conv2d(intermediate_channels, intermediate_channels * self.expansion, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = nn.BatchNorm2d(intermediate_channels * self.expansion)
        self.relu = nn.ReLU()
        self.identity_downsample = identity_downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x.clone()
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # Project the shortcut when the main path changed resolution or width.
        if self.identity_downsample is not None:
            shortcut = self.identity_downsample(shortcut)
        return self.relu(out + shortcut)
VGG_types = {'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M']}
class VGG_net(nn.Module):
    """VGG with batch-norm conv layers built from the VGG_types spec (VGG16 hard-wired here)."""

    def __init__(self, in_channels=3, num_classes=1000):
        super(VGG_net, self).__init__()
        self.in_channels = in_channels
        # NOTE(review): always builds VGG16 regardless of the other entries in VGG_types.
        self.conv_layers = self.create_conv_layers(VGG_types['VGG16'])
        # Classifier head expects 7x7 feature maps, i.e. 224x224 input.
        self.fcs = nn.Sequential(nn.Linear(512 * 7 * 7, 4096), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(4096, num_classes))

    def forward(self, x):
        x = self.conv_layers(x)
        x = x.reshape(x.shape[0], -1)
        x = self.fcs(x)
        return x

    def create_conv_layers(self, architecture):
        """Translate the spec (ints = conv out-channels, 'M' = max-pool) into a Sequential."""
        layers = []
        in_channels = self.in_channels
        for x in architecture:
            if type(x) == int:
                out_channels = x
                layers += [nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), nn.BatchNorm2d(x), nn.ReLU()]
                in_channels = x
            elif x == 'M':
                layers += [nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))]
        return nn.Sequential(*layers)
class WSConv2d(nn.Module):
    """Equalized-learning-rate conv (ProGAN): N(0,1)-initialized weights scaled at runtime,
    with the bias kept outside the scaling."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, gain=2):
        super(WSConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)
        # He-style constant: sqrt(gain / fan_in).
        self.scale = (gain / (in_channels * kernel_size ** 2)) ** 0.5
        # Detach the bias from the conv so it is NOT multiplied by the runtime scale.
        self.bias = self.conv.bias
        self.conv.bias = None
        nn.init.normal_(self.conv.weight)
        nn.init.zeros_(self.bias)

    def forward(self, x):
        scaled = self.conv(x * self.scale)
        return scaled + self.bias.view(1, self.bias.shape[0], 1, 1)
class ConvBlock(nn.Module):
    """Two weight-scaled 3x3 convs, each followed by LeakyReLU(0.2) (ProGAN block)."""

    def __init__(self, in_channels, out_channels):
        super(ConvBlock, self).__init__()
        self.conv1 = WSConv2d(in_channels, out_channels)
        self.conv2 = WSConv2d(out_channels, out_channels)
        self.leaky = nn.LeakyReLU(0.2)

    def forward(self, x):
        for conv in (self.conv1, self.conv2):
            x = self.leaky(conv(x))
        return x
factors = [1, 1, 1, 1, 1 / 2, 1 / 4, 1 / 8, 1 / 16, 1 / 32]
class Discriminator(nn.Module):
    """ProGAN discriminator: progressively-grown conv blocks with fade-in and minibatch-std."""

    def __init__(self, in_channels, img_channels=3):
        super(Discriminator, self).__init__()
        self.prog_blocks, self.rgb_layers = nn.ModuleList([]), nn.ModuleList([])
        self.leaky = nn.LeakyReLU(0.2)
        # Mirror of the generator: blocks are registered from highest resolution down to 4x4.
        for i in range(len(factors) - 1, 0, -1):
            conv_in = int(in_channels * factors[i])
            conv_out = int(in_channels * factors[i - 1])
            self.prog_blocks.append(ConvBlock(conv_in, conv_out))
            # fromRGB layer matching the resolution handled by this prog block.
            self.rgb_layers.append(WSConv2d(img_channels, conv_in, kernel_size=1, stride=1, padding=0))
        # fromRGB for the base 4x4 resolution.
        self.initial_rgb = WSConv2d(img_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.rgb_layers.append(self.initial_rgb)
        self.avg_pool = nn.AvgPool2d(kernel_size=2, stride=2)
        # +1 input channel accounts for the minibatch-std statistic appended below.
        self.final_block = nn.Sequential(WSConv2d(in_channels + 1, in_channels, kernel_size=3, padding=1), nn.LeakyReLU(0.2), WSConv2d(in_channels, in_channels, kernel_size=4, padding=0, stride=1), nn.LeakyReLU(0.2), WSConv2d(in_channels, 1, kernel_size=1, padding=0, stride=1))

    def fade_in(self, alpha, downscaled, out):
        """Used to fade in downscaled using avg pooling and output from CNN"""
        return alpha * out + (1 - alpha) * downscaled

    def minibatch_std(self, x):
        """Append one channel holding the batch-wide mean std-dev (mode-collapse signal)."""
        batch_statistics = torch.std(x, dim=0).mean().repeat(x.shape[0], 1, x.shape[2], x.shape[3])
        return torch.cat([x, batch_statistics], dim=1)

    def forward(self, x, alpha, steps):
        # steps selects the current resolution; cur_step indexes the matching block.
        cur_step = len(self.prog_blocks) - steps
        out = self.leaky(self.rgb_layers[cur_step](x))
        if steps == 0:
            out = self.minibatch_std(out)
            return self.final_block(out).view(out.shape[0], -1)
        # Blend the downsampled fromRGB path with the full conv path during growth.
        downscaled = self.leaky(self.rgb_layers[cur_step + 1](self.avg_pool(x)))
        out = self.avg_pool(self.prog_blocks[cur_step](out))
        out = self.fade_in(alpha, downscaled, out)
        for step in range(cur_step + 1, len(self.prog_blocks)):
            out = self.prog_blocks[step](out)
            out = self.avg_pool(out)
        out = self.minibatch_std(out)
        return self.final_block(out).view(out.shape[0], -1)
class WSLinear(nn.Module):
    """Equalized-learning-rate linear layer: N(0,1) weights scaled at runtime, bias unscaled."""

    def __init__(self, in_features, out_features, gain=2):
        super(WSLinear, self).__init__()
        self.linear = nn.Linear(in_features, out_features)
        # sqrt(gain / fan_in) applied to the input instead of baking it into the weights.
        self.scale = (gain / in_features) ** 0.5
        # Detach the bias so it is not multiplied by the runtime scale.
        self.bias = self.linear.bias
        self.linear.bias = None
        nn.init.normal_(self.linear.weight)
        nn.init.zeros_(self.bias)

    def forward(self, x):
        projected = self.linear(x * self.scale)
        return projected + self.bias
class AdaIN(nn.Module):
    """Adaptive instance norm (StyleGAN): normalize per-channel, then scale/shift from the style w."""

    def __init__(self, channels, w_dim):
        super().__init__()
        self.instance_norm = nn.InstanceNorm2d(channels)
        self.style_scale = WSLinear(w_dim, channels)
        self.style_bias = WSLinear(w_dim, channels)

    def forward(self, x, w):
        normed = self.instance_norm(x)
        # Broadcast the per-channel style parameters over the spatial dims.
        scale = self.style_scale(w).unsqueeze(2).unsqueeze(3)
        shift = self.style_bias(w).unsqueeze(2).unsqueeze(3)
        return scale * normed + shift
class InjectNoise(nn.Module):
    """Adds per-pixel Gaussian noise scaled by a learned per-channel weight (StyleGAN)."""

    def __init__(self, channels):
        super().__init__()
        # Zero-initialized, so the module starts as the identity.
        self.weight = nn.Parameter(torch.zeros(1, channels, 1, 1))

    def forward(self, x):
        batch, _, height, width = x.shape
        noise = torch.randn((batch, 1, height, width), device=x.device)
        return x + self.weight * noise
class GenBlock(nn.Module):
    """StyleGAN generator block: two (conv -> noise -> leaky -> AdaIN) sub-steps."""

    def __init__(self, in_channels, out_channels, w_dim):
        super(GenBlock, self).__init__()
        self.conv1 = WSConv2d(in_channels, out_channels)
        self.conv2 = WSConv2d(out_channels, out_channels)
        self.leaky = nn.LeakyReLU(0.2, inplace=True)
        self.inject_noise1 = InjectNoise(out_channels)
        self.inject_noise2 = InjectNoise(out_channels)
        self.adain1 = AdaIN(out_channels, w_dim)
        self.adain2 = AdaIN(out_channels, w_dim)

    def forward(self, x, w):
        out = self.conv1(x)
        out = self.adain1(self.leaky(self.inject_noise1(out)), w)
        out = self.conv2(out)
        out = self.adain2(self.leaky(self.inject_noise2(out)), w)
        return out
class PixelNorm(nn.Module):
    """Pixel-wise feature normalization (ProGAN): divide by the RMS across channels."""

    def __init__(self):
        super(PixelNorm, self).__init__()
        # Guards against division by zero.
        self.epsilon = 1e-08

    def forward(self, x):
        denom = (x.pow(2).mean(dim=1, keepdim=True) + self.epsilon).sqrt()
        return x / denom
class MappingNetwork(nn.Module):
    """StyleGAN mapping network: pixel-norm then 8 weight-scaled linear layers (ReLU between)."""

    def __init__(self, z_dim, w_dim):
        super().__init__()
        # PixelNorm, first projection z->w, then 7 more (ReLU, WSLinear) pairs —
        # 8 linear layers total, ending without an activation.
        stages = [PixelNorm(), WSLinear(z_dim, w_dim)]
        for _ in range(7):
            stages += [nn.ReLU(), WSLinear(w_dim, w_dim)]
        self.mapping = nn.Sequential(*stages)

    def forward(self, x):
        return self.mapping(x)
class Generator(nn.Module):
    """StyleGAN generator: learned 4x4 constant, mapping network, noise+AdaIN blocks, fade-in."""

    def __init__(self, z_dim, w_dim, in_channels, img_channels=3):
        super(Generator, self).__init__()
        # Learned constant input at 4x4 resolution (replaces a sampled latent image).
        self.starting_constant = nn.Parameter(torch.ones((1, in_channels, 4, 4)))
        self.map = MappingNetwork(z_dim, w_dim)
        self.initial_adain1 = AdaIN(in_channels, w_dim)
        self.initial_adain2 = AdaIN(in_channels, w_dim)
        self.initial_noise1 = InjectNoise(in_channels)
        self.initial_noise2 = InjectNoise(in_channels)
        self.initial_conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
        self.leaky = nn.LeakyReLU(0.2, inplace=True)
        # toRGB layer for the base 4x4 resolution.
        self.initial_rgb = WSConv2d(in_channels, img_channels, kernel_size=1, stride=1, padding=0)
        self.prog_blocks, self.rgb_layers = nn.ModuleList([]), nn.ModuleList([self.initial_rgb])
        # One GenBlock + toRGB pair per doubling of resolution.
        for i in range(len(factors) - 1):
            conv_in_c = int(in_channels * factors[i])
            conv_out_c = int(in_channels * factors[i + 1])
            self.prog_blocks.append(GenBlock(conv_in_c, conv_out_c, w_dim))
            self.rgb_layers.append(WSConv2d(conv_out_c, img_channels, kernel_size=1, stride=1, padding=0))

    def fade_in(self, alpha, upscaled, generated):
        """Blend the upscaled previous-resolution image with the new block's output."""
        return torch.tanh(alpha * generated + (1 - alpha) * upscaled)

    def forward(self, noise, alpha, steps):
        w = self.map(noise)
        x = self.initial_adain1(self.initial_noise1(self.starting_constant), w)
        x = self.initial_conv(x)
        out = self.initial_adain2(self.leaky(self.initial_noise2(x)), w)
        if steps == 0:
            # NOTE(review): returns initial_rgb(x), not initial_rgb(out), so the second
            # noise/AdaIN pair is unused at 4x4 — matches the reference implementation.
            return self.initial_rgb(x)
        for step in range(steps):
            upscaled = F.interpolate(out, scale_factor=2, mode='bilinear')
            out = self.prog_blocks[step](upscaled, w)
        final_upscaled = self.rgb_layers[steps - 1](upscaled)
        final_out = self.rgb_layers[steps](out)
        return self.fade_in(alpha, final_upscaled, final_out)
class Block(nn.Module):
    """pix2pix U-Net block: strided conv (down) or transposed conv (up), BN, activation,
    optional dropout."""

    def __init__(self, in_channels, out_channels, down=True, act='relu', use_dropout=False):
        super(Block, self).__init__()
        if down:
            conv = nn.Conv2d(in_channels, out_channels, 4, 2, 1, bias=False, padding_mode='reflect')
        else:
            conv = nn.ConvTranspose2d(in_channels, out_channels, 4, 2, 1, bias=False)
        activation = nn.ReLU() if act == 'relu' else nn.LeakyReLU(0.2)
        self.conv = nn.Sequential(conv, nn.BatchNorm2d(out_channels), activation)
        self.use_dropout = use_dropout
        self.dropout = nn.Dropout(0.5)
        self.down = down

    def forward(self, x):
        out = self.conv(x)
        if self.use_dropout:
            out = self.dropout(out)
        return out
class ResidualBlock(nn.Module):
    """num_repeats x (1x1 squeeze -> 3x3 expand), each pair optionally wrapped in a skip."""

    def __init__(self, channels, use_residual=True, num_repeats=1):
        super().__init__()
        self.layers = nn.ModuleList()
        for _ in range(num_repeats):
            self.layers.append(nn.Sequential(
                CNNBlock(channels, channels // 2, kernel_size=1),
                CNNBlock(channels // 2, channels, kernel_size=3, padding=1),
            ))
        self.use_residual = use_residual
        self.num_repeats = num_repeats

    def forward(self, x):
        for layer in self.layers:
            out = layer(x)
            x = x + out if self.use_residual else out
        return x
class VGGLoss(nn.Module):
    """Perceptual loss: MSE between frozen VGG-19 features of input and target."""

    def __init__(self):
        super().__init__()
        # NOTE: downloads pretrained weights on first use; frozen and kept in eval mode.
        self.vgg = vgg19(pretrained=True).features[:36].eval()
        self.loss = nn.MSELoss()
        for p in self.vgg.parameters():
            p.requires_grad = False

    def forward(self, input, target):
        return self.loss(self.vgg(input), self.vgg(target))
class UpsampleBlock(nn.Module):
    """Sub-pixel upsampling (SRGAN): conv to scale**2 x channels, pixel-shuffle, PReLU."""

    def __init__(self, in_c, scale_factor):
        super().__init__()
        self.conv = nn.Conv2d(in_c, in_c * scale_factor ** 2, 3, 1, 1)
        self.ps = nn.PixelShuffle(scale_factor)
        self.act = nn.PReLU(num_parameters=in_c)

    def forward(self, x):
        out = self.conv(x)
        out = self.ps(out)
        return self.act(out)
class DenseResidualBlock(nn.Module):
    """ESRGAN dense block: five convs, each seeing all previous feature maps; scaled residual.

    NOTE(review): calls ConvBlock with kernel_size/use_act keyword args, which the
    ConvBlock defined earlier in this file does not accept — this block expects the
    ESRGAN ConvBlock(in, out, use_act=..., **conv_kwargs) variant. Confirm which
    definition is in scope at the original call site.
    """

    def __init__(self, in_channels, channels=32, residual_beta=0.2):
        super().__init__()
        self.residual_beta = residual_beta
        self.blocks = nn.ModuleList()
        for i in range(5):
            # Input width grows by `channels` each step (dense connectivity); the last
            # conv maps back to in_channels and has no activation.
            self.blocks.append(ConvBlock(in_channels + channels * i, channels if i <= 3 else in_channels, kernel_size=3, stride=1, padding=1, use_act=True if i <= 3 else False))

    def forward(self, x):
        new_inputs = x
        for block in self.blocks:
            out = block(new_inputs)
            new_inputs = torch.cat([new_inputs, out], dim=1)
        # Residual scaling stabilizes training.
        return self.residual_beta * out + x
class RRDB(nn.Module):
    """Residual-in-residual dense block (ESRGAN): three dense blocks plus a scaled skip."""

    def __init__(self, in_channels, residual_beta=0.2):
        super().__init__()
        self.residual_beta = residual_beta
        self.rrdb = nn.Sequential(*[DenseResidualBlock(in_channels) for _ in range(3)])

    def forward(self, x):
        return x + self.residual_beta * self.rrdb(x)
class DoubleConv(nn.Module):
    """U-Net building block: two (conv3x3 -> BN -> ReLU) stages, resolution preserved."""

    def __init__(self, in_channels, out_channels):
        super(DoubleConv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.conv(x)
class UNET(nn.Module):
    """U-Net: DoubleConv encoder with max-pools, bottleneck, transposed-conv decoder with skips."""

    def __init__(self, in_channels=3, out_channels=1, features=[64, 128, 256, 512]):
        super(UNET, self).__init__()
        self.ups = nn.ModuleList()
        self.downs = nn.ModuleList()
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        # Encoder path.
        for feature in features:
            self.downs.append(DoubleConv(in_channels, feature))
            in_channels = feature
        # Decoder path: each step is (upsample, DoubleConv on the concatenated skip).
        for feature in reversed(features):
            self.ups.append(nn.ConvTranspose2d(feature * 2, feature, kernel_size=2, stride=2))
            self.ups.append(DoubleConv(feature * 2, feature))
        self.bottleneck = DoubleConv(features[-1], features[-1] * 2)
        self.final_conv = nn.Conv2d(features[0], out_channels, kernel_size=1)

    def forward(self, x):
        skip_connections = []
        for down in self.downs:
            x = down(x)
            # Save pre-pool activations as skip connections.
            skip_connections.append(x)
            x = self.pool(x)
        x = self.bottleneck(x)
        # Deepest skip first, to match the decoder order.
        skip_connections = skip_connections[::-1]
        for idx in range(0, len(self.ups), 2):
            x = self.ups[idx](x)
            skip_connection = skip_connections[idx // 2]
            # Resize when the input size was not divisible by 2**len(features).
            if x.shape != skip_connection.shape:
                x = TF.resize(x, size=skip_connection.shape[2:])
            concat_skip = torch.cat((skip_connection, x), dim=1)
            x = self.ups[idx + 1](concat_skip)
        return self.final_conv(x)
class VariationalAutoEncoder(nn.Module):
    """MLP VAE: input -> hidden -> (mu, sigma); latent z -> hidden -> sigmoid reconstruction."""

    def __init__(self, input_dim, h_dim=200, z_dim=20):
        super().__init__()
        self.img_2hid = nn.Linear(input_dim, h_dim)
        self.hid_2mu = nn.Linear(h_dim, z_dim)
        self.hid_2sigma = nn.Linear(h_dim, z_dim)
        self.z_2hid = nn.Linear(z_dim, h_dim)
        self.hid_2img = nn.Linear(h_dim, input_dim)
        self.relu = nn.ReLU()

    def encode(self, x):
        """Return (mu, sigma) of the approximate posterior q(z|x)."""
        hidden = self.relu(self.img_2hid(x))
        return self.hid_2mu(hidden), self.hid_2sigma(hidden)

    def decode(self, z):
        """Map a latent sample back to pixel space in [0, 1]."""
        hidden = self.relu(self.z_2hid(z))
        return torch.sigmoid(self.hid_2img(hidden))

    def forward(self, x):
        mu, sigma = self.encode(x)
        # Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I).
        z = mu + sigma * torch.randn_like(sigma)
        return self.decode(z), mu, sigma
class EncoderCNN(nn.Module):
    """Image encoder: pretrained Inception-v3 with its fc replaced by an embed_size projection."""

    def __init__(self, embed_size, train_CNN=False):
        super(EncoderCNN, self).__init__()
        self.train_CNN = train_CNN
        # NOTE: downloads pretrained weights on first use; aux classifier disabled.
        self.inception = models.inception_v3(pretrained=True, aux_logits=False)
        self.inception.fc = nn.Linear(self.inception.fc.in_features, embed_size)
        self.relu = nn.ReLU()
        self.times = []
        self.dropout = nn.Dropout(0.5)

    def forward(self, images):
        embedded = self.inception(images)
        embedded = self.relu(embedded)
        return self.dropout(embedded)
class DecoderRNN(nn.Module):
    """Caption decoder: image feature prepended to embedded caption tokens, run through an LSTM."""

    def __init__(self, embed_size, hidden_size, vocab_size, num_layers):
        super(DecoderRNN, self).__init__()
        self.embed = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers)
        self.linear = nn.Linear(hidden_size, vocab_size)
        self.dropout = nn.Dropout(0.5)

    def forward(self, features, captions):
        token_embeds = self.dropout(self.embed(captions))
        # The image feature acts as the first "token" of the sequence (seq-first layout).
        sequence = torch.cat((features.unsqueeze(0), token_embeds), dim=0)
        hiddens, _ = self.lstm(sequence)
        return self.linear(hiddens)
class CNNtoRNN(nn.Module):
    """Image-captioning model: EncoderCNN feature fed as the first token into DecoderRNN."""

    def __init__(self, embed_size, hidden_size, vocab_size, num_layers):
        super(CNNtoRNN, self).__init__()
        self.encoderCNN = EncoderCNN(embed_size)
        self.decoderRNN = DecoderRNN(embed_size, hidden_size, vocab_size, num_layers)

    def forward(self, images, captions):
        features = self.encoderCNN(images)
        outputs = self.decoderRNN(features, captions)
        return outputs

    def caption_image(self, image, vocabulary, max_length=50):
        """Greedy decoding: feed the image feature, then each argmax token, until <EOS> or max_length."""
        result_caption = []
        with torch.no_grad():
            x = self.encoderCNN(image).unsqueeze(0)
            states = None
            for _ in range(max_length):
                hiddens, states = self.decoderRNN.lstm(x, states)
                output = self.decoderRNN.linear(hiddens.squeeze(0))
                predicted = output.argmax(1)
                result_caption.append(predicted.item())
                # Next input is the embedding of the token just predicted.
                x = self.decoderRNN.embed(predicted).unsqueeze(0)
                if vocabulary.itos[predicted.item()] == '<EOS>':
                    break
        return [vocabulary.itos[idx] for idx in result_caption]
class Transformer(nn.Module):
    """Seq2seq Transformer wrapper: builds padding/causal masks and runs encoder + decoder."""

    def __init__(self, src_vocab_size, trg_vocab_size, src_pad_idx, trg_pad_idx, embed_size=512, num_layers=6, forward_expansion=4, heads=8, dropout=0, device='cpu', max_length=100):
        super(Transformer, self).__init__()
        self.encoder = Encoder(src_vocab_size, embed_size, num_layers, heads, device, forward_expansion, dropout, max_length)
        self.decoder = Decoder(trg_vocab_size, embed_size, num_layers, heads, forward_expansion, dropout, device, max_length)
        self.src_pad_idx = src_pad_idx
        self.trg_pad_idx = trg_pad_idx
        self.device = device

    def make_src_mask(self, src):
        """Padding mask of shape (N, 1, 1, src_len): True where the token is not padding."""
        return (src != self.src_pad_idx).unsqueeze(1).unsqueeze(2)

    def make_trg_mask(self, trg):
        """Causal lower-triangular mask of shape (N, 1, trg_len, trg_len)."""
        N, trg_len = trg.shape
        causal = torch.tril(torch.ones((trg_len, trg_len)))
        return causal.expand(N, 1, trg_len, trg_len)

    def forward(self, src, trg):
        src_mask = self.make_src_mask(src)
        trg_mask = self.make_trg_mask(trg)
        memory = self.encoder(src, src_mask)
        return self.decoder(trg, memory, src_mask, trg_mask)
def intersection_over_union(boxes_preds, boxes_labels, box_format='midpoint'):
    """
    Calculates intersection over union
    Parameters:
        boxes_preds (tensor): Predictions of Bounding Boxes (BATCH_SIZE, 4)
        boxes_labels (tensor): Correct Labels of Boxes (BATCH_SIZE, 4)
        box_format (str): midpoint/corners, if boxes (x,y,w,h) or (x1,y1,x2,y2)
    Returns:
        tensor: Intersection over union for all examples
    """

    def _corners(boxes):
        # Convert (cx, cy, w, h) to (x1, y1, x2, y2), keeping the trailing dim.
        return (boxes[..., 0:1] - boxes[..., 2:3] / 2,
                boxes[..., 1:2] - boxes[..., 3:4] / 2,
                boxes[..., 0:1] + boxes[..., 2:3] / 2,
                boxes[..., 1:2] + boxes[..., 3:4] / 2)

    if box_format == 'midpoint':
        box1_x1, box1_y1, box1_x2, box1_y2 = _corners(boxes_preds)
        box2_x1, box2_y1, box2_x2, box2_y2 = _corners(boxes_labels)
    elif box_format == 'corners':
        box1_x1, box1_y1, box1_x2, box1_y2 = (boxes_preds[..., i:i + 1] for i in range(4))
        box2_x1, box2_y1, box2_x2, box2_y2 = (boxes_labels[..., i:i + 1] for i in range(4))
    # Overlap rectangle; clamp(0) zeroes out non-intersecting pairs.
    x1 = torch.max(box1_x1, box2_x1)
    y1 = torch.max(box1_y1, box2_y1)
    x2 = torch.min(box1_x2, box2_x2)
    y2 = torch.min(box1_y2, box2_y2)
    intersection = (x2 - x1).clamp(0) * (y2 - y1).clamp(0)
    area1 = abs((box1_x2 - box1_x1) * (box1_y2 - box1_y1))
    area2 = abs((box2_x2 - box2_x1) * (box2_y2 - box2_y1))
    # Small epsilon guards against division by zero for degenerate boxes.
    return intersection / (area1 + area2 - intersection + 1e-06)
class YoloLoss(nn.Module):
    """YOLOv3 loss: weighted sum of no-object BCE, objectness MSE (vs IoU), box MSE, class CE."""

    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()
        self.bce = nn.BCEWithLogitsLoss()
        self.entropy = nn.CrossEntropyLoss()
        self.sigmoid = nn.Sigmoid()
        # Relative weights of the four loss terms.
        self.lambda_class = 1
        self.lambda_noobj = 10
        self.lambda_obj = 1
        self.lambda_box = 10

    def forward(self, predictions, target, anchors):
        """predictions/target last dim: [obj, x, y, w, h, class...]; anchors per scale.

        NOTE(review): mutates `predictions` and `target` in place (sigmoid on x/y,
        log-transform on target w/h) — callers must not reuse these tensors afterwards.
        """
        # Cell masks: target objectness 1 = object cell, 0 = background cell.
        obj = target[..., 0] == 1
        noobj = target[..., 0] == 0
        no_object_loss = self.bce(predictions[..., 0:1][noobj], target[..., 0:1][noobj])
        # Broadcastable over (batch, 3 anchors, S, S, 2).
        anchors = anchors.reshape(1, 3, 1, 1, 2)
        # Decode predicted boxes so objectness can be regressed toward the true IoU.
        box_preds = torch.cat([self.sigmoid(predictions[..., 1:3]), torch.exp(predictions[..., 3:5]) * anchors], dim=-1)
        ious = intersection_over_union(box_preds[obj], target[..., 1:5][obj]).detach()
        object_loss = self.mse(self.sigmoid(predictions[..., 0:1][obj]), ious * target[..., 0:1][obj])
        predictions[..., 1:3] = self.sigmoid(predictions[..., 1:3])
        # Invert the exp parameterization on the target side; 1e-16 guards log(0).
        target[..., 3:5] = torch.log(1e-16 + target[..., 3:5] / anchors)
        box_loss = self.mse(predictions[..., 1:5][obj], target[..., 1:5][obj])
        class_loss = self.entropy(predictions[..., 5:][obj], target[..., 5][obj].long())
        return self.lambda_box * box_loss + self.lambda_obj * object_loss + self.lambda_noobj * no_object_loss + self.lambda_class * class_loss
architecture_config = [(7, 64, 2, 3), 'M', (3, 192, 1, 1), 'M', (1, 128, 1, 0), (3, 256, 1, 1), (1, 256, 1, 0), (3, 512, 1, 1), 'M', [(1, 256, 1, 0), (3, 512, 1, 1), 4], (1, 512, 1, 0), (3, 1024, 1, 1), 'M', [(1, 512, 1, 0), (3, 1024, 1, 1), 2], (3, 1024, 1, 1), (3, 1024, 2, 1), (3, 1024, 1, 1), (3, 1024, 1, 1)]
class Yolov1(nn.Module):
    """YOLOv1: darknet conv backbone built from architecture_config + fully connected head."""

    def __init__(self, in_channels=3, **kwargs):
        super(Yolov1, self).__init__()
        self.architecture = architecture_config
        self.in_channels = in_channels
        self.darknet = self._create_conv_layers(self.architecture)
        # kwargs: split_size, num_boxes, num_classes (forwarded to _create_fcs).
        self.fcs = self._create_fcs(**kwargs)

    def forward(self, x):
        x = self.darknet(x)
        return self.fcs(torch.flatten(x, start_dim=1))

    def _create_conv_layers(self, architecture):
        """Spec: tuple = (kernel, filters, stride, padding); 'M' = max-pool; list = repeated conv pair."""
        layers = []
        in_channels = self.in_channels
        for x in architecture:
            if type(x) == tuple:
                layers += [CNNBlock(in_channels, x[1], kernel_size=x[0], stride=x[2], padding=x[3])]
                in_channels = x[1]
            elif type(x) == str:
                layers += [nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))]
            elif type(x) == list:
                conv1 = x[0]
                conv2 = x[1]
                num_repeats = x[2]
                for _ in range(num_repeats):
                    layers += [CNNBlock(in_channels, conv1[1], kernel_size=conv1[0], stride=conv1[2], padding=conv1[3])]
                    layers += [CNNBlock(conv1[1], conv2[1], kernel_size=conv2[0], stride=conv2[2], padding=conv2[3])]
                    in_channels = conv2[1]
        return nn.Sequential(*layers)

    def _create_fcs(self, split_size, num_boxes, num_classes):
        """Detection head: per-cell output of C class scores + B boxes x 5 values.

        NOTE(review): 496 hidden units (the paper uses 4096) — kept as-is.
        """
        S, B, C = split_size, num_boxes, num_classes
        return nn.Sequential(nn.Flatten(), nn.Linear(1024 * S * S, 496), nn.Dropout(0.0), nn.LeakyReLU(0.1), nn.Linear(496, S * S * (C + B * 5)))
class ScalePrediction(nn.Module):
    """YOLOv3 detection head for one scale: outputs (N, 3 anchors, H, W, 5 + num_classes)."""

    def __init__(self, in_channels, num_classes):
        super().__init__()
        # Second conv is raw (no BN/act): 3 anchors x (obj + 4 box + classes) channels.
        self.pred = nn.Sequential(CNNBlock(in_channels, 2 * in_channels, kernel_size=3, padding=1), CNNBlock(2 * in_channels, (num_classes + 5) * 3, bn_act=False, kernel_size=1))
        self.num_classes = num_classes

    def forward(self, x):
        raw = self.pred(x)
        n, _, h, w = raw.shape
        raw = raw.reshape(n, 3, self.num_classes + 5, h, w)
        # Move the per-box attribute dim last.
        return raw.permute(0, 1, 3, 4, 2)
class YOLOv3(nn.Module):
    """YOLOv3: darknet-53-style backbone with three ScalePrediction heads and route concats."""

    def __init__(self, in_channels=3, num_classes=80):
        super().__init__()
        self.num_classes = num_classes
        self.in_channels = in_channels
        # NOTE(review): built from a module-level `config` list defined elsewhere in this file.
        self.layers = self._create_conv_layers()

    def forward(self, x):
        outputs = []
        route_connections = []
        for layer in self.layers:
            if isinstance(layer, ScalePrediction):
                # Heads branch off; the main path continues from the pre-head tensor.
                outputs.append(layer(x))
                continue
            x = layer(x)
            # The 8-repeat residual stages feed the later upsample concatenations.
            if isinstance(layer, ResidualBlock) and layer.num_repeats == 8:
                route_connections.append(x)
            elif isinstance(layer, nn.Upsample):
                x = torch.cat([x, route_connections[-1]], dim=1)
                route_connections.pop()
        return outputs

    def _create_conv_layers(self):
        """config entries: tuple = conv, list = residual stage, 'S' = head, 'U' = upsample."""
        layers = nn.ModuleList()
        in_channels = self.in_channels
        for module in config:
            if isinstance(module, tuple):
                out_channels, kernel_size, stride = module
                layers.append(CNNBlock(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=1 if kernel_size == 3 else 0))
                in_channels = out_channels
            elif isinstance(module, list):
                num_repeats = module[1]
                layers.append(ResidualBlock(in_channels, num_repeats=num_repeats))
            elif isinstance(module, str):
                if module == 'S':
                    layers += [ResidualBlock(in_channels, use_residual=False, num_repeats=1), CNNBlock(in_channels, in_channels // 2, kernel_size=1), ScalePrediction(in_channels // 2, num_classes=self.num_classes)]
                    in_channels = in_channels // 2
                elif module == 'U':
                    layers.append(nn.Upsample(scale_factor=2))
                    # After upsampling, the route concatenation triples the channel count.
                    in_channels = in_channels * 3
        return layers
class Net(nn.Module):
    """Classifier wrapping a pretrained EfficientNet backbone with a new linear head.

    NOTE(review): relies on EfficientNet.from_pretrained (the `efficientnet_pytorch`
    package API), not the EfficientNet class defined earlier in this file — confirm
    which name is in scope at the original call site.
    """

    def __init__(self, net_version, num_classes):
        super(Net, self).__init__()
        self.backbone = EfficientNet.from_pretrained('efficientnet-' + net_version)
        # 1280 is the b0 feature width; larger versions are wider — verify for net_version > b0.
        self.backbone._fc = nn.Sequential(nn.Linear(1280, num_classes))

    def forward(self, x):
        return self.backbone(x)
import torch
from torch.nn import MSELoss, ReLU
from _paritybench_helpers import _mock_config, _mock_layer, _paritybench_base, _fails_compile
# Auto-generated parity-test table: one entry per testable module, as
# (nn.Module class, init-kwargs factory, forward-args factory, jit_compiles flag).
TESTCASES = [
    # (nn.Module, init_args, forward_args, jit_compiles)
    (AdaIN,
     lambda: ([], {'channels': 4, 'w_dim': 4}),
     lambda: ([torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])], {}),
     True),
    (BRNN,
     lambda: ([], {'input_size': 4, 'hidden_size': 4, 'num_layers': 1, 'num_classes': 4}),
     lambda: ([torch.rand([4, 4, 4])], {}),
     True),
    (BasicConv2d,
     lambda: ([], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (Block,
     lambda: ([], {'in_channels': 4, 'out_channels': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (CNNBlock,
     lambda: ([], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (ConvBlock,
     lambda: ([], {'in_channels': 4, 'out_channels': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (DecoderBlock,
     lambda: ([], {'embed_size': 4, 'heads': 4, 'forward_expansion': 4, 'dropout': 0.5, 'device': 0}),
     lambda: ([torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])], {}),
     True),
    (DoubleConv,
     lambda: ([], {'in_channels': 4, 'out_channels': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (Inception,
     lambda: ([], {'in_channels': 4, 'out1x1': 4, 'out3x3reduced': 4, 'out3x3': 4, 'out5x5reduced': 4, 'out5x5': 4, 'outpool': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (Inception_block,
     lambda: ([], {'in_channels': 4, 'out_1x1': 4, 'red_3x3': 4, 'out_3x3': 4, 'red_5x5': 4, 'out_5x5': 4, 'out_1x1pool': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (InjectNoise,
     lambda: ([], {'channels': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (MappingNetwork,
     lambda: ([], {'z_dim': 4, 'w_dim': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (NN,
     lambda: ([], {'input_size': 4, 'num_classes': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (PixelNorm,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (ResidualBlock,
     lambda: ([], {'channels': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (ScalePrediction,
     lambda: ([], {'in_channels': 4, 'num_classes': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (SelfAttention,
     lambda: ([], {'embed_size': 4, 'heads': 4}),
     lambda: ([torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])], {}),
     True),
    (SqueezeExcitation,
     lambda: ([], {'in_channels': 4, 'reduced_dim': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (TransformerBlock,
     lambda: ([], {'embed_size': 4, 'heads': 4, 'dropout': 0.5, 'forward_expansion': 4}),
     lambda: ([torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])], {}),
     True),
    (UNET,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 3, 64, 64])], {}),
     False),
    (VGG,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 3, 64, 64])], {}),
     True),
    (VGGLoss,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 3, 64, 64]), torch.rand([4, 3, 64, 64])], {}),
     True),
    (VariationalAutoEncoder,
     lambda: ([], {'input_dim': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (WSConv2d,
     lambda: ([], {'in_channels': 4, 'out_channels': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (WSLinear,
     lambda: ([], {'in_features': 4, 'out_features': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (YOLOv3,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (conv_block,
     lambda: ([], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
]
class Test_aladdinpersson_Machine_Learning_Collection(_paritybench_base):
    """Auto-generated parity harness.

    Each test_NNN method instantiates the module described by TESTCASES[NNN]
    (constructor kwargs, forward-pass inputs, jit flag) and verifies a forward
    pass via _paritybench_base._check.  Do not edit by hand — regenerate.
    """
    def test_000(self):
        self._check(*TESTCASES[0])
    def test_001(self):
        self._check(*TESTCASES[1])
    def test_002(self):
        self._check(*TESTCASES[2])
    def test_003(self):
        self._check(*TESTCASES[3])
    def test_004(self):
        self._check(*TESTCASES[4])
    def test_005(self):
        self._check(*TESTCASES[5])
    def test_006(self):
        self._check(*TESTCASES[6])
    def test_007(self):
        self._check(*TESTCASES[7])
    def test_008(self):
        self._check(*TESTCASES[8])
    def test_009(self):
        self._check(*TESTCASES[9])
    def test_010(self):
        self._check(*TESTCASES[10])
    def test_011(self):
        self._check(*TESTCASES[11])
    def test_012(self):
        self._check(*TESTCASES[12])
    def test_013(self):
        self._check(*TESTCASES[13])
    def test_014(self):
        self._check(*TESTCASES[14])
    def test_015(self):
        self._check(*TESTCASES[15])
    def test_016(self):
        self._check(*TESTCASES[16])
    def test_017(self):
        self._check(*TESTCASES[17])
    def test_018(self):
        self._check(*TESTCASES[18])
    def test_019(self):
        self._check(*TESTCASES[19])
    def test_020(self):
        self._check(*TESTCASES[20])
    def test_021(self):
        self._check(*TESTCASES[21])
    def test_022(self):
        self._check(*TESTCASES[22])
    def test_023(self):
        self._check(*TESTCASES[23])
    def test_024(self):
        self._check(*TESTCASES[24])
    def test_025(self):
        self._check(*TESTCASES[25])
    def test_026(self):
        self._check(*TESTCASES[26])
|
from interaction.chatbots.chat_bot_structure import ChatBotStucture
from interaction.chatbots.identifier import Identifier, IdentifierType
from utils.minuteur import Minuteur
import time
class MinuteurChatBot(ChatBotStucture):
    """Chat bot that starts a kitchen timer ("minuteur") from a spoken request."""

    def __init__(self, services, find_number=False):
        super().__init__(services, find_number)
        self.keyword = "minuteur"
        # Context identifier capturing the spoken time-unit words (French).
        time_unit_identifier = Identifier("time unity", IdentifierType.CONTEXT)
        time_unit_identifier.add_content_from_list(
            ["secondes", "seconde", "minute", "minutes", "heure", "heures"])
        self.identifiers_list = [time_unit_identifier]

    def get_answer(self, recognition_result, language):
        """Parse duration + unit from the recognized speech, start the timer
        and return a confirmation sentence (None when the request is incomplete)."""
        super().get_answer(recognition_result, language)
        print("get_answer minuteur")
        units = self.speech_dict["time unity"]
        amounts = self.speech_dict["numbers"]
        if not units or not amounts:
            # Nothing usable recognized — no answer to give.
            return None
        unit = units[0]
        amount = amounts[0]
        # Map the spoken unit to a number of seconds (defaults to seconds).
        seconds_per_unit = 1
        for fragment, factor in (("second", 1), ("minute", 60), ("heure", 3600)):
            if fragment in unit:
                seconds_per_unit = factor
                break
        print("duration {} * {}".format(amount, seconds_per_unit))
        self.services.minuteur.update_duration(amount * seconds_per_unit)
        self.run_behavior(lambda: self.services.minuteur.start_minuteur())
        return "c'est parti pour {} {}".format(amount, unit)
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
HTTP TI Provider base.
Input can be a single IoC observable or a pandas DataFrame containing
multiple observables. Processing may require a an API key and
processing performance may be limited to a specific number of
requests per minute for the account type that you have.
"""
from functools import lru_cache
from json import JSONDecodeError
from typing import Any, Dict
import pandas as pd
from ..._version import VERSION
from ...common.pkg_config import get_http_timeout
from ...common.utility import export
from ..http_provider import HttpProvider
from ..lookup_result import LookupStatus
from .result_severity import ResultSeverity
from .ti_provider_base import TIProvider
__version__ = VERSION
__author__ = "Ian Hellen"
@export
class HttpTIProvider(TIProvider, HttpProvider):
    """HTTP API Lookup provider base class."""

    @lru_cache(maxsize=256)
    def lookup_ioc(
        self, ioc: str, ioc_type: str = None, query_type: str = None, **kwargs
    ) -> pd.DataFrame:
        """
        Lookup from a value.

        Parameters
        ----------
        ioc : str
            ioc to lookup
        ioc_type : str, optional
            The Type of the ioc to lookup, by default None (type will be inferred)
        query_type : str, optional
            Specify the data subtype to be queried, by default None.
            If not specified the default record type for the ioc
            will be returned.

        Returns
        -------
        pd.DataFrame
            The lookup result:
            result - Positive/Negative,
            details - Lookup Details (or status if failure),
            raw_result - Raw Response
            reference - URL of the item

        Raises
        ------
        NotImplementedError
            If attempting to use an HTTP method or authentication
            protocol that is not supported.

        Notes
        -----
        Note: this method uses memoization (lru_cache) to cache results
        for a particular item to try avoid repeated network calls for
        the same item.  (NOTE(review): lru_cache on an instance method
        keeps every provider instance alive for the cache's lifetime —
        acceptable here since providers are long-lived singletons, but
        worth confirming.)
        """
        result = self._check_ioc_type(ioc, ioc_type, query_subtype=query_type)
        result["Provider"] = kwargs.get("provider_name", self.__class__.__name__)
        req_params: Dict[str, Any] = {}
        try:
            # Build the request (verb + parameters) for this ioc/query type.
            verb, req_params = self._substitute_parms(
                result["SafeIoc"], result["IocType"], query_type
            )
            if verb == "GET":
                response = self._httpx_client.get(
                    **req_params, timeout=get_http_timeout(**kwargs)
                )
            else:
                raise NotImplementedError(f"Unsupported verb {verb}")
            result["Status"] = response.status_code
            result["Reference"] = req_params["url"]
            if result["Status"] == 200:
                try:
                    result["RawResult"] = response.json()
                    result["Result"], severity, result["Details"] = self.parse_results(
                        result
                    )
                except JSONDecodeError:
                    result[
                        "RawResult"
                    ] = f"""There was a problem parsing results from this lookup:
                        {response.text}"""
                    result["Result"] = False
                    severity = ResultSeverity.information
                    result["Details"] = {}
                # FIX: the original conditionally assigned severity.name and then
                # unconditionally overwrote it on the next line (dead store).
                # A single parse() call is what actually took effect.
                result["Severity"] = ResultSeverity.parse(severity).name
                result["Status"] = LookupStatus.OK.value
            else:
                # Non-200: record the raw response and a human-readable status.
                result["RawResult"] = str(response)
                result["Result"] = False
                result["Details"] = self._response_message(result["Status"])
        except (
            LookupError,
            JSONDecodeError,
            NotImplementedError,
            ConnectionError,
        ) as err:
            self._err_to_results(result, err)
            # For transport-level failures, still report the URL we targeted.
            if not isinstance(err, LookupError):
                result["Reference"] = req_params.get("url", None) if req_params else None
        return pd.DataFrame([result])
|
import time
import subprocess
import utils
from utils import assignOrder
from utils import assertEqual
from utils import assertContains
from utils import randomString
import threading
import queue
import random
from collections import OrderedDict
import logging
import pprint
import configparser
import json
import random
import requests
import datetime
import random
# Shared mutable test state: per-suite pass/fail flags keyed by suite name.
# (NOTE: `global` at module level is a no-op; kept for symmetry with the
# `global` declarations inside the test classes.)
global status
status = {}
logger = logging.getLogger("Test Run")
config = configparser.ConfigParser()
config.read('settings.conf')
# Expected maximum response time, read from the settings file.
ResponseTime = config.get('params', 'response_time')
# testdata.conf is layered on top of settings.conf in the same parser.
config.read('testdata.conf')
now = datetime.datetime.now()
user_name = config.get('params', 'username')
# headersUser1 is populated later by AuditLogDownload.__init__.
global headersUser1
x = random.randint(0, 50000)
# Cross-test shared dictionaries (credentials, metadata, download job ids).
global id_cred,metadata,audit_log_download_id
id_cred={}
metadata={}
audit_log_download_id={}
class AuditLogDownload(object):
    """CORE-1915 API tests for the audit-log download endpoints.

    Every CORE1915_* method posts one request-body variant to the download
    endpoint, asserts the expected HTTP status and records the verdict in
    the module-level ``status`` dict under 'CAM-APITest'.
    """

    # Request-body defaults shared by all download tests.  Individual tests
    # override keys or drop them entirely (via ``drop=``) to exercise the
    # endpoint's validation paths.
    _BODY_DEFAULTS = {
        "format": "json",
        "secret": "Test123!",
        "searchType": "advanced",
        "messageType": "[]",
        "component": "[]",
        "subcomponent": "[]",
        "userId": "[]",
        "teamId": "[]",
        "sort": "asc",
        "sortBy": "userId",
    }

    def __init__(self, client):
        global headersUser1
        self.api_client = client
        self.invoice_id = random.randint(100000, 999999)
        # Default headers for this test user (module-level on purpose:
        # other suites in this file read headersUser1 as a global).
        headersUser1 = {
            "Username": user_name,
            "Content-Type": "application/json"
        }

    # ----- shared helpers -------------------------------------------------

    @classmethod
    def _body(cls, drop=(), **overrides):
        """Return a request body: defaults + ``overrides``, minus ``drop`` keys."""
        body = dict(cls._BODY_DEFAULTS)
        body.update(overrides)
        for key in drop:
            body.pop(key, None)
        return body

    def _check_download(self, body, expected_status):
        """POST ``body`` to the download endpoint and record whether the
        response code matches ``expected_status``.  Exceptions propagate."""
        resp, _ = self.api_client.downloadAuditLog(body)
        print(resp)
        logger.info("API response:" + str(resp))
        passed = bool(assertEqual(resp, expected_status))
        status['CAM-APITest'] = passed
        return passed

    def _check_download_guarded(self, body, expected_status):
        """Same as _check_download, but any exception marks the test failed
        (mirrors the original per-test try/except wrappers)."""
        try:
            return self._check_download(body, expected_status)
        except Exception:
            status['CAM-APITest'] = False
            return False

    # NOTE: the disabled CORE-1915 cases that were kept here as commented-out
    # code (orders 205/206 and 223) have been removed; restore them from
    # version control if they are ever reactivated.

    @assignOrder(370)
    def CORE1915_downloadAuditLogJSONAllParam(self):
        """Advanced search with every filter populated must succeed."""
        return self._check_download(self._body(
            startDate="2017-01-01T01:00:00.000Z",
            endDate="2018-06-01T12:00:00.000Z",
            messageType="['Search']",
            component="['comp']",
            subcomponent="['sub']",
            userId="['system']",
            teamId="['default']",
        ), 200)

    @assignOrder(371)
    def CORE1915_downloadAuditLogLocalSystemInValidId(self):
        """Downloading a job id that does not exist must return 500."""
        resp, _ = self.api_client.downloadAuditLogLocal(3434)
        print(resp)
        logger.info("API response:" + str(resp))
        passed = bool(assertEqual(resp, 500))
        status['CAM-APITest'] = passed
        return passed

    @assignOrder(372)
    def CORE1915_downloadAuditLogCSV(self):
        """CSV export with filters must succeed."""
        return self._check_download(self._body(
            format="csv",
            startDate="2018-01-01T01:00:00.000Z",
            endDate="2018-06-01T12:00:00.000Z",
            messageType="['Search']",
            component="['']",
            subcomponent="['']",
            userId="['system']",
            teamId="['default']",
        ), 200)

    @assignOrder(373)
    def CORE1915_downloadAuditLogCSVNoPassword(self):
        """A request missing format and secret must be rejected (400)."""
        return self._check_download(self._body(
            drop=("format", "secret"),
            startDate="2018-01-01T01:00:00.000Z",
            endDate="2018-06-01T12:00:00.000Z",
            messageType="['Search']",
            component="['comp']",
            subcomponent="['sub']",
            userId="['system']",
            teamId="['default']",
        ), 400)

    @assignOrder(374)
    def CORE1915_downloadAuditLogNoformat(self):
        """A request without the 'format' field must be rejected (400)."""
        return self._check_download(self._body(
            drop=("format",),
            startDate="2018-01-01T01:00:00.000Z",
            endDate="2018-06-01T12:00:00.000Z",
            messageType="['Search']",
            component="['comp']",
            subcomponent="['sub']",
            userId="['system']",
            teamId="['default']",
        ), 400)

    @assignOrder(375)
    def CORE1915_downloadAuditLogWrongFormat(self):
        """An unknown export format must be rejected (400)."""
        return self._check_download(self._body(
            format="xyz",
            startDate="2018-01-01T01:00:00.000Z",
            endDate="2018-06-01T12:00:00.000Z",
            messageType="['Search']",
            component="['comp']",
            subcomponent="['sub']",
            userId="['system']",
            teamId="['default']",
        ), 400)

    @assignOrder(376)
    def CORE1915_downloadAuditLogBasicSearch(self):
        """Basic search with a date range must succeed."""
        return self._check_download(self._body(
            searchType="basic",
            startDate="2018-01-01T01:00:00.000Z",
            endDate="2018-06-01T12:00:00.000Z",
        ), 200)

    @assignOrder(377)
    def CORE1915_downloadAuditLogBasicSearchNoDates(self):
        """Basic search without dates must be rejected (400)."""
        return self._check_download(self._body(searchType="basic"), 400)

    @assignOrder(378)
    def CORE1915_downloadAuditLogKeywordSearch(self):
        """Keyword search with a search term must succeed."""
        return self._check_download_guarded(self._body(
            searchType="keyword",
            searchText="cloud",
            startDate="",
            endDate="",
        ), 200)

    @assignOrder(379)
    def CORE1915_downloadAuditLogNoKeyword(self):
        """Keyword search with an empty search term must be rejected (400)."""
        return self._check_download_guarded(self._body(
            searchType="keyword",
            searchText="",
            startDate="",
            endDate="",
        ), 400)

    @assignOrder(380)
    def CORE1915_downloadAuditLogOnlyDates(self):
        """Advanced search filtered only by a date range must succeed."""
        return self._check_download_guarded(self._body(
            startDate="2018-01-01T01:00:00.000Z",
            endDate="2018-06-01T12:00:00.000Z",
        ), 200)

    @assignOrder(381)
    def CORE1915_downloadAuditLogOnlyActionType(self):
        """Advanced search filtered only by message type must succeed."""
        return self._check_download_guarded(self._body(
            startDate="",
            endDate="",
            messageType="['UPDATE ROLE']",
        ), 200)

    @assignOrder(382)
    def CORE1915_downloadAuditLogOnlyComponent(self):
        """Advanced search filtered only by component must succeed."""
        return self._check_download_guarded(self._body(
            startDate="",
            endDate="",
            component="['component']",
        ), 200)

    @assignOrder(383)
    def CORE1915_downloadAuditLogOnlySubComponent(self):
        """Advanced search filtered only by subcomponent must succeed."""
        return self._check_download_guarded(self._body(
            startDate="",
            endDate="",
            subcomponent="['subcomponent']",
        ), 200)

    @assignOrder(384)
    def CORE1915_downloadAuditLogOnlyUser(self):
        """Advanced search filtered only by user id must succeed."""
        return self._check_download_guarded(self._body(
            startDate="",
            endDate="",
            userId="[user_name]",
        ), 200)

    @assignOrder(385)
    def CORE1915_downloadAuditLogOnlyTeam(self):
        """Advanced search filtered only by team id must succeed."""
        return self._check_download(self._body(
            startDate="",
            endDate="",
            teamId="['TEAM1']",
        ), 200)

    @assignOrder(386)
    def CORE1915_downloadAuditLogOnlyAdvanceSearchNoFilter(self):
        """Advanced search with no filters at all must still succeed."""
        return self._check_download(self._body(
            startDate="",
            endDate="",
        ), 200)

    @assignOrder(387)
    def CORE1915_auditLogPaginationCheck(self):
        """total_rows must stay constant across page-size and page changes."""
        try:
            totals = []
            for search in (self.api_client.keyWordSearch,
                           self.api_client.keyWordSearchwithLimit25,
                           self.api_client.keyWordSearchwithLimit25_pagination):
                resp, body = search()
                print(resp)
                logger.info("API response:" + str(resp))
                # Round-trip through JSON to normalize the response body.
                data = json.loads(json.dumps(body))
                print(data['total_rows'])
                totals.append(data['total_rows'])
            # FIX: the original used bitwise '&' between the comparisons,
            # which only produced the right answer by precedence accident.
            passed = totals[0] == totals[1] and totals[1] == totals[2]
            status['CAM-APITest'] = passed
            return passed
        except Exception:
            status['CAM-APITest'] = False
            return False
|
from models.base_model import BaseModel
import peewee as pw
from models.user import User
from models.disease import Disease
class UserDisease(BaseModel):
    """Join table linking a User to a Disease (many-to-many through model).

    Rows are removed automatically when either referenced record is
    deleted (ON DELETE CASCADE on both foreign keys).
    """
    user = pw.ForeignKeyField(User, on_delete="CASCADE")
    disease = pw.ForeignKeyField(Disease, on_delete="CASCADE")
import os, sys, string
import arcpy
from arcpy import env
from arcpy.sa import *
import glob
import string
from sets import Set
import math
import time
print "Setting local parameters and inputs"
#Check out the ArcGIS Spatial Analyst extension license
arcpy.CheckOutExtension("Spatial")
env.overwriteOutput = True
# Wall-clock start for run timing (time.clock is Python 2 only).
beginTime = time.clock()
#Set environment settings
# Source/scratch/output folders for the Conefor connectivity runs.
sourceFolder1="C:/Data/cci_connectivity/scratch/conefor_runs/importances"
sourceFolder2="C:/Data/cci_connectivity/scratch/conefor_runs/eca_runs/interCell/raw_dist"
tempFolder="C:/Data/cci_connectivity/scratch"
outFolder="C:/Data/cci_connectivity/scratch/conefor_runs/eca_runs/interCell"
arcpy.env.workspace=sourceFolder2
print "Making a list of tables"
# All attribute tables in the workspace whose name contains "awp_".
tableList=arcpy.ListTables("*awp_*")
print str(tableList)
# Field holding the originating cell id in the importance feature classes.
cellField="First_cell"
def between(value, a, b):
    """Return the substring of value strictly between the first occurrence
    of a and the last occurrence of b, or "" when the markers are missing
    or do not bracket anything."""
    start = value.find(a)
    end = value.rfind(b)
    # Either marker missing -> nothing to extract.
    if start == -1 or end == -1:
        return ""
    inner_start = start + len(a)
    # Empty or inverted span also yields "".
    return value[inner_start:end] if inner_start < end else ""
# Wildcards bracketing the species name inside each table name, e.g. "AWP_<sp>.txt".
wCard1="AWP_"
wCard2=".txt"
print "Looping through list of feature classes"
i=0
inMemFC="in_memory"+"\\"+"inMemFC"
# NOTE(review): only the first two tables are processed (tableList[0:2]) —
# presumably a debugging limit; confirm before a full production run.
for table in tableList[0:2]:
    i+=1
    print "Feature class {0} of {1}: {2}".format(i,str(len(tableList)),table)
    # Dispersal constant and probability at that distance (exp(-1) ~ 0.36788).
    dispConst=100000
    inP=0.36788
    arcpy.env.workspace=sourceFolder1
    # Species name extracted from the table name between "AWP_" and ".txt".
    spName = between(table,wCard1,wCard2)
    print spName
    FCList=arcpy.ListFeatureClasses("*impJoin_dis_sp_{0}*".format(spName))
    for FC in FCList:
        fields=cellField
        cellList=list()
        # Collect the unique cell ids present in the feature class.
        cursor1=arcpy.da.SearchCursor(FC,[fields])
        for row in cursor1:
            cellList.append(row[0])
        cellList = list(set(cellList))
        print str(len(cellList))
        fields="*"
        arcpy.env.workspace=sourceFolder2
        print table
        for cell in cellList:
            print "Cell ID: {0}".format(cell)
            # Accumulators for the area-weighted probability of this cell.
            sumP_area=0
            sumArea=0
            awp=0
            expression = "Field16={0}".format(cell)
            #print expression
            # NOTE(review): the where-clause is built but commented out below,
            # so every row is scanned and filtered in Python instead.
            cursor2=arcpy.da.SearchCursor(table,[fields])#,expression)
            for row in cursor2:
                # Rows whose origin (col 12) is this cell and whose neighbour
                # (col 15) is a different cell.  ('<>' is the Python 2 '!='.)
                if row[12]==cell and row[15]<>cell:
                    inArea = row[7]
                    nearArea= row[9]
                    #print inArea
                    #print nearArea
                    dist= row[5]
                    # Negative-exponential dispersal kernel calibrated so that
                    # probability == inP at distance dispConst.
                    p = math.exp(-(-1*(math.log(inP)/dispConst)) * dist)
                    #print p
                    p_Area = p * nearArea
                    #print p_Area
                    area=nearArea
                    #print area
                    sumP_area=sumP_area + p_Area
                    #print sumP_area
                    sumArea=sumArea+area
                    #print sumArea
            # NOTE(review): raises ZeroDivisionError when no neighbour rows
            # matched (sumArea == 0) — confirm the inputs guarantee a match.
            awp=sumP_area/sumArea
            print "Cell id = {0} and AWP = {1}".format(str(cell),str(awp))
##
##
|
from typing import List
import pandas as pd
from dataframes_extracted import DataFramesExtracted
class Transformer:
    """Joins fund names with profitability records and keeps, for every
    fund, the row holding its highest profitability."""

    @classmethod
    def get_df(cls, dfs: List[pd.DataFrame]) -> pd.DataFrame:
        """Run the full transformation pipeline over the extracted frames."""
        extracted = DataFramesExtracted(dfs)
        merged = cls._merge_dfs(extracted)
        merged = cls._get_df_remove_null_profitability(merged)
        merged = cls._get_df_max_profitability_for_each_fund(merged)
        return cls._get_df_reindex_columns(merged)

    @staticmethod
    def _merge_dfs(dfs: DataFramesExtracted) -> pd.DataFrame:
        # Left-join the profitability rows onto the fund names by id.
        return dfs.name.merge(dfs.profitability, on="id", how="left")

    @staticmethod
    def _get_df_remove_null_profitability(df: pd.DataFrame) -> pd.DataFrame:
        # Drop funds that never reported a profitability figure.
        return df[df.profitability.notnull()]

    @staticmethod
    def _get_df_max_profitability_for_each_fund(df: pd.DataFrame) -> pd.DataFrame:
        # Per-fund maximum, then re-attach the other columns of the
        # winning rows via a join on (id, profitability).
        per_fund_max = (
            df[["id", "profitability"]]
            .groupby(["id"], as_index=False)
            .max()
        )
        return per_fund_max.merge(df, on=["id", "profitability"], how="left")

    @staticmethod
    def _get_df_reindex_columns(df: pd.DataFrame) -> pd.DataFrame:
        # Fix the output column order.
        return df.reindex(columns=["id", "name", "year", "profitability"])
|
#!/usr/bin/python
from pychartdir import *
# The data for the chart
# The data for the chart
data = [50, 55, 47, 34, 42, 49, 63, 62, 73, 59, 56, 50, 64, 60, 67, 67, 58, 59, 73, 77, 84, 82, 80,
    84, 89]
# The error data representing the error band around the data points
errData = [5, 6, 5.1, 6.5, 6.6, 8, 5.4, 5.1, 4.6, 5.0, 5.2, 6.0, 4.9, 5.6, 4.8, 6.2, 7.4, 7.1, 6.5,
    9.6, 12.1, 15.3, 18.5, 20.9, 24.1]
# The timestamps for the data (monthly, Jan 2001 - Jan 2003)
labels = [chartTime(2001, 1, 1), chartTime(2001, 2, 1), chartTime(2001, 3, 1), chartTime(2001, 4, 1
    ), chartTime(2001, 5, 1), chartTime(2001, 6, 1), chartTime(2001, 7, 1), chartTime(2001, 8, 1),
    chartTime(2001, 9, 1), chartTime(2001, 10, 1), chartTime(2001, 11, 1), chartTime(2001, 12, 1),
    chartTime(2002, 1, 1), chartTime(2002, 2, 1), chartTime(2002, 3, 1), chartTime(2002, 4, 1),
    chartTime(2002, 5, 1), chartTime(2002, 6, 1), chartTime(2002, 7, 1), chartTime(2002, 8, 1),
    chartTime(2002, 9, 1), chartTime(2002, 10, 1), chartTime(2002, 11, 1), chartTime(2002, 12, 1),
    chartTime(2003, 1, 1)]
# Create a XYChart object of size 550 x 220 pixels
c = XYChart(550, 220)
# Set the plot area at (50, 10) and of size 480 x 180 pixels. Enabled both vertical and horizontal
# grids by setting their colors to light grey (cccccc)
c.setPlotArea(50, 10, 480, 180).setGridColor(0xcccccc, 0xcccccc)
# Add a legend box (50, 10) (top of plot area) using horizontal layout. Use 8pt Arial font. Disable
# bounding box (set border to transparent).
legendBox = c.addLegend(50, 10, 0, "", 8)
legendBox.setBackground(Transparent)
# Add keys to the legend box to explain the color zones
legendBox.addKey("Historical", 0x9999ff)
legendBox.addKey("Forecast", 0xff9966)
# Add a title to the y axis.
c.yAxis().setTitle("Energy Consumption")
# Set the labels on the x axis
c.xAxis().setLabels2(labels)
# Set multi-style axis label formatting. Use Arial Bold font for yearly labels and display them as
# "yyyy". Use default font for monthly labels and display them as "mmm". Replace some labels with
# minor ticks to ensure the labels are at least 3 units apart.
c.xAxis().setMultiFormat(StartOfYearFilter(), "<*font=arialbd.ttf*>{value|yyyy}",
    StartOfMonthFilter(), "{value|mmm}", 3)
# Add a line layer to the chart
layer = c.addLineLayer2()
# Create the color to draw the data line. The line is blue (0x333399) to the left of x = 18, and
# become a red (0xd04040) dash line to the right of x = 18.
# (Index 18 is the historical/forecast boundary shown in the legend.)
lineColor = layer.xZoneColor(18, 0x333399, c.dashLineColor(0xd04040, DashLine))
# Add the data line
layer.addDataSet(data, lineColor)
# Create the color to draw the err zone. The color is semi-transparent blue (0x809999ff) to the left
# of x = 18, and become semi-transparent red (0x80ff9966) to the right of x = 18.
errColor = layer.xZoneColor(18, 0x809999ff, 0x80ff9966)
# Add the upper border of the err zone
layer.addDataSet(ArrayMath(data).add(errData).result(), errColor)
# Add the lower border of the err zone
layer.addDataSet(ArrayMath(data).sub(errData).result(), errColor)
# Set the default line width to 2 pixels
layer.setLineWidth(2)
# Color the region between the err zone lines
c.addInterLineLayer(layer.getLine(1), layer.getLine(2), errColor)
# Output the chart
c.makeChart("xzonecolor.png")
|
import json
import boto3
from botocore.exceptions import ClientError
from django.conf import settings
from .settings import DJANGO_SLOOP_SETTINGS
from .models import AbstractSNSDevice
class SNSHandler(object):
    """Delivers push notifications to a single device via AWS SNS.

    Builds the platform-specific (APNS/GCM) JSON payloads and publishes
    them to the device's SNS platform endpoint.
    """

    # Class-level default; each instance creates and caches its own client.
    client = None

    def __init__(self, device):
        self.device = device
        self.client = self.get_client()

    def get_client(self):
        """Return the cached boto3 SNS client, creating it on first use."""
        if self.client:
            return self.client
        return boto3.client(
            'sns',
            region_name=DJANGO_SLOOP_SETTINGS.get("AWS_REGION_NAME") or None,
            aws_access_key_id=DJANGO_SLOOP_SETTINGS.get("AWS_ACCESS_KEY_ID") or None,
            aws_secret_access_key=DJANGO_SLOOP_SETTINGS.get("AWS_SECRET_ACCESS_KEY") or None
        )

    @property
    def application_arn(self):
        """SNS platform application ARN matching the device's platform."""
        if self.device.platform == AbstractSNSDevice.PLATFORM_IOS:
            return DJANGO_SLOOP_SETTINGS.get("SNS_IOS_APPLICATION_ARN")
        if self.device.platform == AbstractSNSDevice.PLATFORM_ANDROID:
            return DJANGO_SLOOP_SETTINGS.get("SNS_ANDROID_APPLICATION_ARN")
        # FIX: explicit raise instead of a bare `assert False` (which is
        # stripped under -O and would leave the return value unbound).
        raise AssertionError("Unsupported device platform: %r" % (self.device.platform,))

    # ----- shared payload builders (dedup of the four generators) ---------

    @staticmethod
    def _alert_payload(message, url, badge_count, sound, extra, category, **kwargs):
        """Common visible-alert payload shared by the APNS and GCM generators."""
        # FIX: copy `extra` so the caller's dict is not mutated when url is added.
        extra = dict(extra) if extra else {}
        if url:
            extra["url"] = url
        data = {
            'alert': message,
            'sound': sound,
            'custom': extra,
            'badge': badge_count,
            'category': category
        }
        data.update(kwargs)
        return data

    @staticmethod
    def _silent_payload(extra, badge_count, content_available, **kwargs):
        """Common silent (background) payload shared by both platforms."""
        data = {
            'content-available': content_available,
            'sound': '',
            'badge': badge_count,
            'custom': extra
        }
        data.update(kwargs)
        return data

    @staticmethod
    def _wrap_gcm(data):
        """Wrap a payload in the SNS message structure for GCM."""
        return {'GCM': json.dumps({'data': data}, ensure_ascii=False)}

    @staticmethod
    def _wrap_apns(data):
        """Wrap a payload for APNS, honouring the sandbox setting."""
        apns_string = json.dumps({'aps': data}, ensure_ascii=False)
        if DJANGO_SLOOP_SETTINGS.get("SNS_IOS_SANDBOX_ENABLED"):
            return {'APNS_SANDBOX': apns_string}
        return {'APNS': apns_string}

    # ----- public API ------------------------------------------------------

    def send_push_notification(self, message, url, badge_count, sound, extra, category, **kwargs):
        """Publish a visible notification to this handler's device."""
        if self.device.platform == AbstractSNSDevice.PLATFORM_IOS:
            data = self.generate_apns_push_notification_message(message, url, badge_count, sound, extra, category, **kwargs)
        else:
            data = self.generate_gcm_push_notification_message(message, url, badge_count, sound, extra, category, **kwargs)
        return self._send_payload(data)

    def send_silent_push_notification(self, extra, badge_count, content_available, **kwargs):
        """Publish a silent (data-only) notification to this handler's device."""
        if self.device.platform == AbstractSNSDevice.PLATFORM_IOS:
            data = self.generate_apns_silent_push_notification_message(extra, badge_count, content_available, **kwargs)
        else:
            data = self.generate_gcm_silent_push_notification_message(extra, badge_count, content_available, **kwargs)
        return self._send_payload(data)

    def generate_gcm_push_notification_message(self, message, url, badge_count, sound, extra, category, **kwargs):
        """Build the SNS message dict for a visible GCM notification."""
        return self._wrap_gcm(self._alert_payload(message, url, badge_count, sound, extra, category, **kwargs))

    def generate_gcm_silent_push_notification_message(self, extra, badge_count, content_available, **kwargs):
        """Build the SNS message dict for a silent GCM notification."""
        return self._wrap_gcm(self._silent_payload(extra, badge_count, content_available, **kwargs))

    def generate_apns_push_notification_message(self, message, url, badge_count, sound, extra, category, **kwargs):
        """Build the SNS message dict for a visible APNS notification."""
        return self._wrap_apns(self._alert_payload(message, url, badge_count, sound, extra, category, **kwargs))

    def generate_apns_silent_push_notification_message(self, extra, badge_count, content_available, **kwargs):
        """Build the SNS message dict for a silent APNS notification."""
        return self._wrap_apns(self._silent_payload(extra, badge_count, content_available, **kwargs))

    def get_or_create_platform_endpoint_arn(self):
        """Return the device's SNS endpoint ARN, registering the device's
        push token with SNS (and persisting the ARN) when missing."""
        if self.device.sns_platform_endpoint_arn:
            return self.device.sns_platform_endpoint_arn
        endpoint_response = self.client.create_platform_endpoint(
            PlatformApplicationArn=self.application_arn,
            Token=self.device.push_token,
        )
        endpoint_arn = endpoint_response['EndpointArn']
        self.device.sns_platform_endpoint_arn = endpoint_arn
        self.device.save(update_fields=["sns_platform_endpoint_arn"])
        return endpoint_arn

    def _send_payload(self, data):
        """Publish the JSON payload; invalidates the device on EndpointDisabled.

        Returns (serialized message, publish result or error response).
        """
        endpoint_arn = self.get_or_create_platform_endpoint_arn()
        message = json.dumps(data, ensure_ascii=False)
        if settings.DEBUG:
            print("ARN:" + endpoint_arn)
            print(message)
        try:
            publish_result = self.client.publish(
                TargetArn=endpoint_arn,
                Message=message,
                MessageStructure='json'
            )
        except ClientError as exc:
            if exc.response['Error']["Code"] == "EndpointDisabled":
                # Push token is not valid anymore.
                # App deleted or push notifications are turned off by the user.
                self.device.invalidate()
            else:
                raise
            return message, exc.response
        if settings.DEBUG:
            print(publish_result)
        return message, publish_result
|
import cv2
# Real-time face detection from the default webcam using a Haar cascade.
# Cascade path is machine-specific -- TODO confirm it exists on the target box.
face_detect=cv2.CascadeClassifier('C:\\Users\\OCAC\\Desktop\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_default.xml')
video=cv2.VideoCapture(0)
while True:
    check,frame=video.read()
    # Detection runs on a grayscale copy of the frame.
    gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    face=face_detect.detectMultiScale(gray,scaleFactor=1.05,minNeighbors=5)
    # Draw a blue box around each detected face.
    for (x,y,w,h) in face:
        cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),3)
    cv2.imshow("Kunu",frame)
    key=cv2.waitKey(1)
    # Press 'k' to quit the preview loop.
    if key==ord('k'):
        break
video.release()
cv2.destroyAllWindows()
|
# Created by Leon Hunter at 2:07 PM 11/30/2020
# Circle-area demo: compute the area for a fixed radius and print it via
# str.format with a mix of argument types.
radius = 20.0
area = 3.14159 * radius ** 2
integer = 1
# Template first, then fill it in with format().
output = "The area of the circle with radius {} is {}; Third argument is {}"
formattedOutput = output.format(radius, area, "third argument")
print(formattedOutput)
oneString = "1"
oneInteger = 1
# Generated by Django 3.2.7 on 2021-09-27 18:16
from django.db import migrations
class Migration(migrations.Migration):
    """Renames the Log model to AccountUser (table and references)."""

    dependencies = [
        ('mainApp', '0001_initial'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Log',
            new_name='AccountUser',
        ),
    ]
|
"""
This file holds the anonymizer API service
which exposes a RESTful API for querying the
anonymization tasks of a user.
"""
from flask import Flask, request
from flask_restful import reqparse, abort, Resource, Api
import anonymizer
import sys
import string
import random
import json
import redis
# Flask application plus its RESTful API wrapper.
app = Flask(__name__)
api = Api(app)
# Populated from credentials.json once fetch_credentials() is defined below.
settings = {}
sys.path.append('./')
# fetch credentials from file
def fetch_credentials():
    """Load service credentials from credentials.json in the working directory."""
    with open('credentials.json') as credentials_fh:
        return json.load(credentials_fh)
def abort_if_job_doesnt_exist(job_id):
    """Abort the request with 404 when `job_id` has no entry in redis."""
    stored = r.get(job_id)
    if stored is None:
        abort(404, message="Job {} doesn't exist".format(job_id))
def abort_if_job_arguments_missing(args):
    """Abort with 404 when a required job argument is absent from `args`."""
    for required in ("partner_id", "columns"):
        if required not in args:
            abort(404, message="{} specs are missing".format(required))
# generates (user) IDs
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random ID of `size` characters drawn from `chars`."""
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
# Load credentials once at import time.
settings = fetch_credentials()
# connect to the redis server
r = redis.StrictRedis(
    host=settings["redis"]["host"],
    port=settings["redis"]["port"],
    db=0
)
# Request parser shared by all resources below.
parser = reqparse.RequestParser()
parser.add_argument('columns', type = list)
parser.add_argument('partner_id', type = int)
# Job
# shows a single jobs item and lets you delete a todo item
class Job(Resource):
    """REST resource for a single anonymization job stored in redis."""
    def get(self, job_id):
        # Return the raw stored job payload (bytes from redis).
        abort_if_job_doesnt_exist(job_id)
        return r.get(job_id)
    def delete(self, job_id):
        abort_if_job_doesnt_exist(job_id)
        return r.delete(job_id), 204
    def put(self, job_id):
        """Replace the job's column list and persist the updated task."""
        args = parser.parse_args()
        abort_if_job_doesnt_exist(job_id)
        abort_if_job_arguments_missing(args)
        # since reqparse does not know how to handle JSON properly:
        # https://stackoverflow.com/questions/19384526/how-to-parse-the-post-argument-to-a-rest-service
        requested_columns = request.json['columns']
        # Bug fix: `key` was undefined here (NameError); fetch this job's entry.
        value = r.get(job_id).decode("utf-8")
        task = json.loads(value)
        task['columns'] = requested_columns
        r.set(job_id, json.dumps(task))
        return task, 201
# JobList
# shows a list of all jobs, and lets you POST to add new tasks
class JobList(Resource):
    """REST resource for the collection of anonymization jobs."""
    def get(self):
        # Return every stored job payload (decoded JSON strings).
        result = []
        for key in r.scan_iter():
            value = r.get(key).decode("utf-8")
            result.append(value)
        return result
    def post(self):
        """Create a new job after validating the requested columns."""
        print("recieved request")  # NOTE(review): typo 'recieved' in log output
        args = parser.parse_args()
        abort_if_job_arguments_missing(args)
        # Random suffixes keep ids unique per partner.
        job_id = str(args['partner_id']) +"_"+id_generator()+"_"+id_generator()
        table_name = str(args['partner_id']) +"_"+id_generator()
        # since reqparse does not know how to handle JSON properly:
        # https://stackoverflow.com/questions/19384526/how-to-parse-the-post-argument-to-a-rest-service
        requested_columns = request.json['columns']
        task = {'job_id':job_id, 'columns': requested_columns, 'partner_id':args['partner_id'], 'table_name':table_name, 'status':'open'}
        # Reject any requested column that is not in the meta data schema.
        df = anonymizer.retrieve_meta_data()
        colnames = list(df.columns.values)
        for req_colname in requested_columns:
            if req_colname not in colnames:
                error_message = "{0} is not a valid attribute".format(req_colname)
                task['status'] = 'failed'
                task['cause'] = error_message
                return task, 400
        r.set(job_id, json.dumps(task))
        return task, 201
##
## Actually setup the Api resource routing here
##
# Route the two resources.
api.add_resource(JobList, '/jobs')
api.add_resource(Job, '/jobs/<job_id>')
if __name__ == '__main__':
    # Development server only; use a WSGI server in production.
    app.run(debug=True)
|
# -*- coding:utf-8 -*-
import random,math
import numpy as np
"""
CLASS: Person
PROPERTY:
id:Person ID(unique value).
spouse: The ID of person's spouse(-1 refer to no spouse).
spouse_num: The rank of spouse in love list.
    change_num: The number of times the person has changed spouse.
accepted_threshold: The worst spouse that person can accept in love list.
FUNCTION:
marriage_with(): Establish relation link between two person.
dismarriaged(): Break relation link.
spouse_num_add_1(): Add 1 to spouse_num.
set_spouse_num(): Set set_spouse_num to specified value.
print_all(): Print person information.
"""
class Person(object):
    """Base matching participant: features/weights, spouse state, preference lists.

    Written for Python 2 (print statements).  -1 denotes "no spouse" throughout.
    """
    def __init__(self, person_id,feature_list,weight_list):
        # Type-check the inputs; dump them before raising for easier debugging.
        if not isinstance(person_id,int) or not isinstance(feature_list,list) or not isinstance(weight_list,list):
            print person_id
            print feature_list
            print weight_list
            raise ValueError
        elif not len(feature_list) == len(weight_list):
            raise ValueError
        self.__id = person_id
        self.__feature_num = len(feature_list)
        self.__feature_list = feature_list
        self.__weight_list = weight_list
        self.__spouse = -1           # partner id, -1 while unmarried
        self.__spouse_num = -1       # spouse's rank within this person's love list
        self.__change_num = 0        # how many times the spouse changed
        self.__accepted_threshold = 0  # worst acceptable rank in the love list
        self.__love_list = []        # partner ids ordered by descending preference
        self.__value_list = []       # scores that back the love list ordering
    def marriage_with(self, person_id):
        # Link to a new spouse and count the change.
        self.__spouse = person_id
        self.__change_num = self.__change_num + 1
        return self.__spouse
    def dismarriaged(self):
        # Break the relation link; spouse rank is reset as well.
        self.__spouse = -1
        self.__spouse_num = -1
    def spouse_num_add_1(self):
        self.__spouse_num = self.__spouse_num + 1
    def set_spouse_num(self, num):
        # Reject ranks outside [0, accepted_threshold + 1].
        if num < 0 or num > self.__accepted_threshold + 1:
            return False
        else:
            self.__spouse_num = num
            return True
    def set_accepted_threshold(self,num):
        # Stored as a zero-based index (num - 1).
        if num<0 :
            return False
        else:
            self.__accepted_threshold = num - 1
            return True
    def set_love_list(self,love_list):
        self.__love_list = love_list
    def get_love_list(self):
        return self.__love_list
    def get_id(self):
        return self.__id
    def get_feature_list(self):
        return self.__feature_list
    def get_feature_num(self):
        return self.__feature_num
    def set_value_list(self,value_list):
        self.__value_list = value_list
    def get_value_list(self):
        return self.__value_list
    def get_weight_list(self):
        return self.__weight_list
    def get_spouse(self):
        return self.__spouse
    def get_change_num(self):
        return self.__change_num
    def get_spouse_num(self):
        return self.__spouse_num
    def get_accepted_threshold(self):
        return self.__accepted_threshold
    def print_all(self):
        # Console dump of this person's state (Python 2 prints).
        print 'ID:', self.__id
        print 'Feature_List: ', self.__feature_list
        print 'Weight_List: ',self.__weight_list
        print 'Spouse: ', self.__spouse
        print 'Change_num: ', self.__change_num
        if len(self.__love_list) > 0:
            print 'Love_List: ', self.__love_list
        if len(self.__value_list) > 0:
            print 'Value_List: ', self.__value_list
"""
CLASS: Suitor
PROPERTY: Inherit from CLASS Person
target_iter: Target index in love list.
activity: Suitor state(Stop search when activity is False).
FUNCTION:
go_after(): Try to establish relation link to someone.
__refused():Called when establish relation link failed.
be_thrown():Called when relation link was break by spouse.
get_target(): Return the target of suitor.
next_target(): Move target to next available person.
"""
class Suitor(Person):
    """Active participant: proposes down its love list until accepted or exhausted.

    Also carries the receiver-side helpers (threw_away / refresh_spouse_num),
    since the separate Receiver class is commented out in this module.
    """
    def __init__(self, person_id, feature_list, weight_list):
        Person.__init__(self,person_id,feature_list,weight_list)
        self.__activity = True      # False once every acceptable target refused us
        self.__target_iter = 0      # index of the current target in the love list
    def refresh_love_list(self,target_features):
        # Score each candidate as weight . feature (column-major feature table)
        # and sort ids by descending score.
        # NOTE(review): this also defines self.__target_num, which next_target()
        # depends on -- call this before the matching starts.
        np_list = np.dot(np.array(self.get_weight_list()), np.array(target_features))
        self.set_value_list(np.round(np_list,2).tolist())
        self.__target_num = len(np_list)
        order = {}
        for i in range(len(np_list)):
            order[i] = round(np_list[i],2)
        self.set_love_list([par[0] for par in sorted(order.items(), key=lambda d:d[1], reverse = True)])
    def next_target(self):
        # Advance while still within the acceptance threshold and list bounds.
        if self.__target_iter < self.get_accepted_threshold() and self.__target_iter< self.__target_num - 1:
            self.__target_iter = self.__target_iter + 1
            return True
        else:
            return False
    def go_after(self, receiver, log, info):
        """Propose to `receiver`; return (success, updated info string)."""
        if not isinstance(receiver,Suitor):
            raise ValueError
        husband_id = receiver.get_spouse()
        love_list = receiver.get_love_list()
        person_id = receiver.get_id()
        self_id = self.get_id()
        rank = love_list.index(self_id)
        accepted_threshold = receiver.get_accepted_threshold()
        log.write(' Suitor Rank:'+str(rank)+'\n')
        info += ' Suitor Rank:'+str(rank)+'\n'
        change_husband = True
        # Accept only when ranked better than the current husband, or (if
        # single) within the receiver's acceptance threshold.
        if husband_id != -1:
            husband_rank = love_list.index(husband_id)
            log.write(' Husband Rank: '+str(husband_rank)+'\n')
            info += ' Husband Rank: '+str(husband_rank)+'\n'
            if rank > husband_rank:
                change_husband = False
        elif rank > accepted_threshold:
            change_husband = False
        if change_husband:
            log.write(' Succeed: ')
            info += ' Succeed: '
            receiver.marriage_with(self_id)
            receiver.refresh_spouse_num(rank+1)
            self.marriage_with(person_id)
            self.set_spouse_num(self.__target_iter+1)
            log.write(str(self_id) + ' married with '+str(person_id)+'\n')
            info += str(self_id) + ' married with '+str(person_id)+'\n'
            return True, info
        else:
            log.write(' Failed\n')
            info += ' Failed\n'
            self.__refused()
            return False, info
    def __refused(self):
        # Move on; deactivate when there is no acceptable target left.
        res = self.next_target()
        if not res:
            self.__activity = False
        return res
    def be_thrown(self):
        # Called when the spouse breaks the link: single again, keep searching.
        self.dismarriaged()
        return self.__refused()
    def get_target(self):
        love_list = self.get_love_list()
        return love_list[self.__target_iter]
    def is_activity(self):
        return self.__activity
    def threw_away(self,suitor):
        # Receiver-side helper: notify the discarded suitor.
        suitor.be_thrown()
    def refresh_spouse_num(self,num):
        self.set_spouse_num(num)
"""
CLASS: Receiver
FUNCTION:
threw_away(): Threw spouse.
refresh_spouse_num():Refresh spouse num after threw.
"""
#class Receiver(Person):
"""
CLASS: Matching
PROPERTY:
suitors: A set of instance of CLASS Suitors.
receivers: A set of instance of CLASS Receivers.
suitor_avg_rank: Save sutior average rank in receivers.
receivers_avg_rank:Save receiver average rank in suitors.
FUNCTION:
    compute_avg_rank(): Calculate receivers_avg_rank and suitor_avg_rank.
start(): Start match experiment.
"""
class Matching(object):
def __init__(self, suitors, receivers):
self.__log = open('log.txt','w')
self.__match_done = False
self.__pre_change = True
self.__now_change = True
self.__times = 0
self.__index = 0
self.__suitors = suitors
self.__receivers = receivers
self.__suitor_features = []
self.__receiver_features = []
for i in range(self.__suitors[0].get_feature_num()):
self.__suitor_features.append([])
self.__receiver_features.append([])
self.__suitor_avg_rank = []
self.__receiver_avg_rank = []
for i in range(len(self.__suitors)):
self.__suitor_avg_rank.append(0.0)
features = self.__suitors[i].get_feature_list()
for j in range(len(features)):
self.__suitor_features[j].append(features[j])
for i in range(len(self.__receivers)):
self.__receiver_avg_rank.append(0.0)
features = self.__receivers[i].get_feature_list()
for j in range(len(features)):
self.__receiver_features[j].append(features[j])
for i in range(len(self.__suitors)):
self.__suitors[i].refresh_love_list(self.__receiver_features)
for i in range(len(self.__receivers)):
self.__receivers[i].refresh_love_list(self.__suitor_features)
def __del__(self):
self.__log.close()
self.__suitor_features = []
self.__receiver_features = []
self.__suitor_avg_rank = []
self.__receiver_avg_rank = []
def compute_avg_rank(self):
self.__suitor_ranks = []
self.__receiver_ranks = []
for i in range(len(self.__suitors)):
self.__suitor_ranks.append([])
for i in range(len(self.__receivers)):
self.__receiver_ranks.append([])
for i in range(len(self.__suitors)):
love_list = self.__suitors[i].get_love_list()
for j in range(len(love_list)):
index = love_list[j]
self.__receiver_avg_rank[index] = self.__receiver_avg_rank[index] + j + 1
self.__receiver_ranks[index].append(j+1)
for i in range(len(self.__receiver_avg_rank)):
self.__receiver_avg_rank[i] = self.__receiver_avg_rank[i] / float(len(self.__suitors))
for i in range(len(self.__receivers)):
love_list = self.__receivers[i].get_love_list()
for j in range(len(love_list)):
index = love_list[j]
self.__suitor_avg_rank[index] = self.__suitor_avg_rank[index] + j + 1
self.__suitor_ranks[index].append(j+1)
for i in range(len(self.__suitor_avg_rank)):
self.__suitor_avg_rank[i] = self.__suitor_avg_rank[i] / float(len(self.__receivers))
self.__suitor_std_rank = [round(np.std(l),2) for l in self.__suitor_ranks]
self.__receiver_std_rank = [round(np.std(l),2) for l in self.__receiver_ranks]
def __add_index(self):
self.__index += 1
if self.__index == len(self.__suitors):
self.__times += 1
self.__index = 0
if not self.__pre_change and not self.__now_change:
self.__match_done = True
self.__log.write('DONE')
else:
self.__pre_change = self.__now_change
self.__now_change = False
return True
def step(self):
show_info = 'EPOCH:'+str(self.__times)+'\n' + ' STEP:'+str(self.__index) + '\n'
self.__log.write(' STEP '+str(self.__index) + '\n')
if self.__match_done:
show_info += ' DONE\n'
return show_info
suitor = self.__suitors[self.__index]
spouse = suitor.get_spouse()
if spouse == -1 and suitor.is_activity():
self.__now_change = True
target = suitor.get_target()
show_info += ' '+str(self.__index)+' target '+str(target)+'\n'
self.__log.write(' '+str(self.__index)+' target '+str(target)+'\n')
if target == -1:
self.__add_index()
return show_info
else:
husband = self.__receivers[target].get_spouse()
flag, show_info = suitor.go_after(self.__receivers[target],self.__log, show_info)
if flag:
if husband >= 0:
self.__receivers[target].threw_away(self.__suitors[husband])
show_info += ' '+str(target)+' threw away '+str(husband)+'\n'
self.__log.write(' '+str(target)+' threw away '+str(husband)+'\n')
elif not suitor.is_activity():
show_info += ' ' + str(self.__index) + ' is not acitivity \n'
elif spouse != -1:
show_info += ' ' + str(self.__index) + ' is married (' + str(spouse) + ')\n'
self.__add_index()
return show_info
def epoch(self):
show_info = ''
self.__log.write('EPOCH '+str(self.__times)+'\n')
for i in range(self.__index, len(self.__suitors)):
self.__index = i
show_info += self.step()
return show_info
def exe_to_end(self):
show_info = ''
while True:
show_info += self.epoch()
if self.__match_done:
break
return show_info
def is_done(self):
return self.__match_done
def print_suitors(self):
print 'id spouse change_num spouse_rank avg_rank std_rank'
for i in range(len(self.__suitors)):
print self.__suitors[i].get_id(), ' ', \
self.__suitors[i].get_spouse(), ' ', \
self.__suitors[i].get_change_num(), ' ', \
self.__suitors[i].get_spouse_num(), ' ', \
self.__suitor_avg_rank[i], ' ', \
self.__suitor_std_rank[i]
def print_receivers(self):
print 'id spouse change_num spouse_rank avg_rank std_rank'
for i in range(len(self.__receivers)):
print self.__receivers[i].get_id(), ' ',\
self.__receivers[i].get_spouse(), ' ',\
self.__receivers[i].get_change_num(), ' ',\
self.__receivers[i].get_spouse_num(), ' ',\
self.__receiver_avg_rank[i], ' ',\
self.__receiver_std_rank[i]
def save_init_information(self,save):
save.write('SUI_LIST\n')
for i in range(len(self.__suitors)):
line = str(self.__suitors[i].get_id()) \
+ ' L: ' + str(self.__suitors[i].get_love_list()) \
+ ' F: ' + str(self.__suitors[i].get_feature_list()) \
+ ' W: ' + str(self.__suitors[i].get_weight_list()) \
+ ' V: ' +str(self.__suitors[i].get_value_list()) + '\n'
save.write(line)
save.write('\nREC_LIST\n')
for i in range(len(self.__receivers)):
line = str(self.__receivers[i].get_id()) \
+ ' L: ' + str(self.__receivers[i].get_love_list()) \
+ ' F: ' + str(self.__receivers[i].get_feature_list()) \
+ ' W: ' + str(self.__receivers[i].get_weight_list()) \
+ ' V: ' +str(self.__receivers[i].get_value_list()) + '\n'
save.write(line)
def save_suitors(self,save):
save.write('id spouse change_num spouse_rank avg_rank std_rank\n')
for i in range(len(self.__suitors)):
line = str(self.__suitors[i].get_id()) + ' ' \
+ str(self.__suitors[i].get_spouse()) + ' '\
+ str(self.__suitors[i].get_change_num()) + ' '\
+ str(self.__suitors[i].get_spouse_num()) + ' ' \
+ str(self.__suitor_avg_rank[i]) + ' ' \
+ str(self.__suitor_std_rank[i]) + '\n'
save.write(line)
def save_receivers(self,save):
save.write('id spouse change_num spouse_rank avg_rank std_rank\n')
for i in range(len(self.__receivers)):
line = str(self.__receivers[i].get_id()) + ' ' \
+ str(self.__receivers[i].get_spouse()) + ' '\
+ str(self.__receivers[i].get_change_num()) + ' '\
+ str(self.__receivers[i].get_spouse_num()) + ' ' \
+ str(self.__receiver_avg_rank[i]) + ' ' \
+ str(self.__receiver_std_rank[i]) + '\n'
save.write(line)
def save_couple_rank():
save.write('suitor_id receiver_id suitor_rank receiver_rank avg_rank rank_diff')
for i in range(len(self.__suitors)):
suitor = self.__suitors[i]
if suitor.get_spouse() != -1:
suitor_rank = self.__suitors_avg_rank[i]
receiver_rank = self.__receivers_avg_rank[i]
line = str(suitors.get_id()) + ' '\
+ str(suitors.get_spouse()) + ' '\
+ str(suitor_rank) + ' '\
+ str(receiver_rank) + ' '\
+ str((suitor_rank+receiver_rank)/2) + ' '\
+ str(abs(suitor_rank-receiver_rank) + '\n')
save.write(line)
def get_avg_rank(self):
return self.__suitor_avg_rank, self.__receiver_avg_rank
def get_std_rank(self):
return self.__suitor_std_rank, self.__receiver_std_rank
def get_spouse_rank(self, num = 0):
if num == 0:
return [s.get_spouse_num() for s in self.__suitors], [r.get_spouse_num() for r in self.__receivers]
elif num > 0:
return self.__suitors[num -1].get_spouse_num()
elif num < 0:
return self.__receivers[abs(num)-1].get_spouse_num()
"""
CLASS: Feature_randomer
PROPERTY:
num: Num of features.
pick_list: List of features.
FUNCTION:
create_feature: Create random features.
"""
class Feature_randomer(object):
    """Draws random per-person feature vectors, optionally squashed or normalized."""
    def __init__(self, feature_num, person_num):
        self.__feature_num = feature_num
        self.__person_num = person_num
        self.__feature_list = []
    def __clear(self):
        # Drop any previously generated features.
        self.__feature_list = []
    def __sigmoid(self, value):
        # Logistic squashing into (0, 1).
        return 1.0 / (1.0 + math.exp(-value))
    def get_feature(self):
        return self.__feature_list
    def create_feaure(self):
        # (sic: method name kept for compatibility) raw N(5, 2) features,
        # rounded to two decimals.
        self.__clear()
        for _ in range(self.__person_num):
            row = np.round(np.random.normal(5, 2, self.__feature_num), 2)
            self.__feature_list.append(row.tolist())
        return self.__feature_list
    def create_feature_sigmoid(self):
        # N(0, 4) features pushed through the sigmoid, rounded to two decimals.
        self.__clear()
        for _ in range(self.__person_num):
            row = np.round(np.random.normal(0, 4, self.__feature_num), 2)
            self.__feature_list.append(row.tolist())
        for person in self.__feature_list:
            for j in range(self.__feature_num):
                person[j] = round(self.__sigmoid(person[j]), 2)
        return self.__feature_list
    def create_feature_normalisze(self):
        # (sic) N(5, 2) features min-max normalized per feature column.
        self.__clear()
        for _ in range(self.__person_num):
            row = np.round(np.random.normal(5, 2, self.__feature_num), 2)
            self.__feature_list.append(row.tolist())
        max_feature = [-100] * self.__feature_num
        min_feature = [100] * self.__feature_num
        for row in self.__feature_list:
            for j in range(self.__feature_num):
                if row[j] > max_feature[j]:
                    max_feature[j] = row[j]
                if row[j] < min_feature[j]:
                    min_feature[j] = row[j]
        length = [max_feature[j] - min_feature[j] for j in range(self.__feature_num)]
        for row in self.__feature_list:
            for j in range(self.__feature_num):
                row[j] = round((row[j] - min_feature[j]) / length[j], 2)
        return self.__feature_list
class Weight_randomer(object):
    """Generates per-person weight vectors that (approximately) sum to 1.0.

    Weights are drawn from N(5, 2), normalized by their sum, rounded to two
    decimals, and the first weight absorbs any rounding error so the row
    sums to 1.0.
    """
    def __init__(self,weight_num,person_num):
        self.__weight_num = weight_num
        self.__person_num = person_num
        self.__weight_list = []
    def __clear(self):
        # Bug fix: previously reset the wrong attribute (__value_list), so
        # repeated create_weight_list() calls accumulated rows.
        self.__weight_list = []
    def get_weight_list(self):
        # Bug fix: previously returned the never-populated __value_list.
        return self.__weight_list
    def create_weight_list(self):
        """Draw a fresh weight matrix: person_num rows of weight_num weights."""
        self.__clear()
        for i in range(self.__person_num):
            x = np.round(np.random.normal(5,2,self.__weight_num),2)
            sum_value = 0.0
            for feature in x:
                sum_value = sum_value + feature
            for j in range(len(x)):
                x[j] = np.round(x[j] / sum_value,2)
            # Rounding can leave the row slightly off 1.0; fold the residue
            # into the first weight.
            res_sum = 0.0
            for j in range(len(x)):
                res_sum += x[j]
            if res_sum != 1.0:
                x[0] -= (res_sum - 1.0)
                x[0] = np.round(x[0],2)
            self.__weight_list.append(x.tolist())
        return self.__weight_list
"""
CLASS: List_randomer
PROPERTY:
bottom,top: The range of random.
pick_list: List of random numbers.
FUNCTION:
create_list: Shuffle random numbers.
"""
class List_randomer(object):
    """Holds the integers [bottom, top) and hands them back in shuffled order."""
    def __init__(self, bottom, top):
        self.__bottom = bottom
        self.__top = top
        self.__pick_list = list(range(bottom, top))
    def get_pick_list(self):
        return self.__pick_list
    def create_list(self):
        """Shuffle the held list in place and return it."""
        random.shuffle(self.__pick_list)
        return self.__pick_list
#Create Suitors/Receivers by Randomer
def create_Person(person_num, feature_num, accepted_threshold = 0):
    """Build `person_num` Suitor instances with random features and weights."""
    feature_source = Feature_randomer(feature_num, person_num)
    weight_source = Weight_randomer(feature_num, person_num)
    # create_feature_sigmoid() is an alternative feature generator.
    feature_rows = feature_source.create_feature_normalisze()
    weight_rows = weight_source.create_weight_list()
    persons = []
    for idx in range(person_num):
        candidate = Suitor(idx, feature_rows[idx], weight_rows[idx])
        if accepted_threshold:
            candidate.set_accepted_threshold(accepted_threshold)
        persons.append(candidate)
    return persons
#Load Suitors/Receivers from Record File
def load_Suitors(path,accepted_threshold = 0):
    """Load Suitor instances from a record file (one love list per line).

    Bug fixes vs. the original: `for i in len(lines)` (TypeError),
    `line[i]` and `love_lists` (NameErrors), and `rec.` referring to a
    variable from load_Receivers.  A record file carries only love lists,
    so placeholder feature/weight lists are used and the love list is set
    directly -- TODO confirm against the intended record format.
    """
    suis = []
    with open(path, 'r') as f:
        lines = f.readlines()
        for i in range(len(lines)):
            love_list = []
            data = lines[i].strip().split()
            for d in data:
                love_list.append(int(d))
            sui = Suitor(i, [0.0] * len(love_list), [0.0] * len(love_list))
            sui.set_love_list(love_list)
            if accepted_threshold:
                sui.set_accepted_threshold(accepted_threshold)
            suis.append(sui)
    return suis
def load_Receivers(path,accepted_threshold = 0):
    """Load receiver-side participants from a record file.

    Bug fixes mirror load_Suitors.  The Receiver class is commented out in
    this module, so Suitor stands in for the receiver role (as it does in
    create_Person).
    """
    recs = []
    with open(path, 'r') as f:
        lines = f.readlines()
        for i in range(len(lines)):
            love_list = []
            data = lines[i].strip().split()
            for d in data:
                love_list.append(int(d))
            rec = Suitor(i, [0.0] * len(love_list), [0.0] * len(love_list))
            rec.set_love_list(love_list)
            if accepted_threshold:
                rec.set_accepted_threshold(accepted_threshold)
            recs.append(rec)
    return recs
|
class Solution:
    def processQueries(self, queries, m: int) :
        """For each query value, report its current 0-based position in the
        permutation of [1..m], then move that value to the front (LeetCode 1409)."""
        permutation = [value for value in range(1, m + 1)]
        positions = []
        for q in queries:
            pos = permutation.index(q)
            positions.append(pos)
            permutation.insert(0, permutation.pop(pos))
        return positions
# Quick smoke test; expected output: [2, 1, 2, 1]
sol = Solution()
queries = [3,1,2,1]
m = 5
print(sol.processQueries(queries, m))
# H.H. Oct 2017
# Augmenting and creating LMDB for NYU-V2
import os
import glob
import random
import numpy as np
import sys
caffe_root = '/home/carrot/caffe/'
sys.path.insert(0, caffe_root + 'python')
import cv2
import caffe
from caffe.proto import caffe_pb2
import lmdb
import skimage.io as io
import h5py
# data path
path_to_depth = './nyu_depth_v2_labeled.mat'
# read mat file (NYU Depth V2 labeled set ships as an HDF5-compatible .mat)
f = h5py.File(path_to_depth)
# read all images original format is [3 x 640 x 480], uint8
i=0
#Size of images (network input)
IMAGE_WIDTH = 304
IMAGE_HEIGHT = 228
#Size of depths (network target)
DEPTH_WIDTH = 160
DEPTH_HEIGHT = 128
# creating multiple crops
def create_crops(img,type='image'):
    """Return a list of augmented crops of `img` at the network size.

    For each of two scales the image is resized preserving aspect ratio,
    then four corner crops, a center crop, and a full resize are produced
    (12 outputs per input).  NOTE(review): the scaled-edge and center-crop
    arithmetic relies on Python 2 integer division (e.g. s*height/width).
    """
    if type == 'image':
        finalH=IMAGE_HEIGHT
        finalW=IMAGE_WIDTH
        sizes=[480,360]
    else:
        finalH=DEPTH_HEIGHT
        finalW=DEPTH_WIDTH
        sizes=[240,180]
    height, width = img.shape[:2]
    flag=0
    # scaled widths
    if height>width:
        flag = 1
    reses =[]
    first_crops =[]
    second_crops = []
    # Resize along the short edge, keeping the aspect ratio.
    if flag:
        for s in sizes:
            reses.append( cv2.resize(img,(s, s*height/width), interpolation = cv2.INTER_CUBIC) )
    else:
        for s in sizes:
            reses.append( cv2.resize(img,(s*width/height,s), interpolation = cv2.INTER_CUBIC) )
    '''
    for res in reses:
        h, w = res.shape[:2]
        if flag:
            l=w
        else:
            l=h
        first_crops.append( res[0:l, 0:l] )
        first_crops.append( res[(h-l):h, (w-l):w] )
        first_crops.append( res[(h/2-l/2):(h/2+l/2), (w/2-l/2):(w/2+l/2)] )
    '''
    # Four corners, center, and a plain resize for each scaled copy.
    for crop in reses:
        h, w = crop.shape[:2] # first one with second height
        second_crops.append( crop[0:finalH, 0:finalW] )
        second_crops.append( crop[(h-finalH):h, 0:finalW] )
        second_crops.append( crop[0:finalH, (w-finalW):w] )
        second_crops.append( crop[(h-finalH):h, (w-finalW):w] )
        second_crops.append( crop[(h/2-(finalH/2)):(h/2+(finalH/2)), (w/2-(finalW/2)):(w/2+(finalW/2))] )
        second_crops.append( cv2.resize(crop,(finalW,finalH), interpolation = cv2.INTER_CUBIC) )
    return second_crops
#def Augment_img():
def transform_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT):
    """Equalize each BGR channel's histogram in place, then resize to target size."""
    for channel in range(3):
        img[:, :, channel] = cv2.equalizeHist(img[:, :, channel])
    return cv2.resize(img, (img_width, img_height), interpolation = cv2.INTER_CUBIC)
def make_datum(img, label,form='image'):
    """Wrap an already-serialized image/depth buffer in a caffe Datum proto.

    NOTE(review): on an unknown `form` this prints a warning and implicitly
    returns None (Python 2 print statement).
    """
    if form == 'image':
        #image is numpy.ndarray format. BGR instead of RGB
        return caffe_pb2.Datum(
            channels=3,
            width=IMAGE_WIDTH,
            height=IMAGE_HEIGHT,
            label=label,
            data= img)#np.rollaxis(img, 2).tostring())
    elif form == 'depth':
        #depth is numpy.ndarray WxH
        return caffe_pb2.Datum(
            channels=1,
            width=DEPTH_WIDTH,
            height=DEPTH_HEIGHT,
            label=label,
            data= img)#np.rollaxis(img, 2).tostring())
    else:
        print "WRONG INPUT!"
# LMDB output locations: one DB each for train/validation images and depths.
train_lmdb_images = '/home/carrot/NYU/train_lmdb_images'
train_lmdb_depths = '/home/carrot/NYU/train_lmdb_depths'
validation_lmdb_images = '/home/carrot/NYU/validation_lmdb_images'
validation_lmdb_depths = '/home/carrot/NYU/validation_lmdb_depths'
# Start from empty databases on every run.
os.system('rm -rf ' + train_lmdb_images)
os.system('rm -rf ' + validation_lmdb_images)
os.system('rm -rf ' + train_lmdb_depths)
os.system('rm -rf ' + validation_lmdb_depths)
#Shuffle train_data
#random.shuffle(train_data)
# Every 6th sample (j % 6 == 0) is held out as validation in the loops below.
print 'Creating train_lmdb_image'
i=0
j=0
in_db = lmdb.open(train_lmdb_images, map_size=int(1e12))
with in_db.begin(write=True) as in_txn:
    for img in f['images']:
        #print i
        if j%6 ==0:
            j=j+1
            continue
        # reshape from [3 x 640 x 480] (HDF5 layout) to [480 x 640 x 3]
        img_ = np.empty([480, 640, 3])
        img_[:,:,0] = img[0,:,:].T
        img_[:,:,1] = img[1,:,:].T
        img_[:,:,2] = img[2,:,:].T
        img_ = img_.astype(np.uint8)
        #img_ = transform_img(img_, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT)
        img_list =create_crops(img_,type='image')
        # One datum per augmented crop, keyed by a zero-padded counter.
        for img_ in img_list:
            # making datum
            img_=np.rollaxis(img_, 2).tostring()
            datum = make_datum(img_, 0)
            in_txn.put('{:0>5d}'.format(i), datum.SerializeToString())
            i=i+1
        print ('Finished processing image {}'.format(i))
        j=j+1
in_db.close()
print 'Creating train_lmdb_depth'
i=0
j=0
in_db = lmdb.open(train_lmdb_depths, map_size=int(1e12))
with in_db.begin(write=True) as in_txn:
    # read corresponding depth (aligned to the image, in-painted) of size [640 x 480], float64
    for depth in f['depths']:
        #print i
        if j%6 ==0:
            j=j+1
            continue
        # reshape
        depth_ = np.empty([480, 640, 1])
        depth_[:,:,0] = depth[:,:].T
        depth_list =create_crops(depth_,type='image')
        for depth_ in depth_list:
            print depth_.shape[:2]
            # resize to the depth target resolution
            depth_ = cv2.resize(depth_, (160, 128), interpolation = cv2.INTER_CUBIC)
            depth_=depth_.tostring()
            # make datum
            datum = make_datum(depth_, 0,form='depth')
            in_txn.put('{:0>5d}'.format(i), datum.SerializeToString())
            i=i+1
        print ('Finished processing image {}'.format(i))
        j=j+1
in_db.close()
print 'Creating validation_lmdb_images'
i=0
j=0
in_db = lmdb.open(validation_lmdb_images, map_size=int(1e12))
with in_db.begin(write=True) as in_txn:
    for img in f['images']:
        #print i
        # Keep only the held-out samples for validation.
        if j%6 !=0:
            j=j+1
            continue
        # reshape
        img_ = np.empty([480, 640, 3])
        img_[:,:,0] = img[0,:,:].T
        img_[:,:,1] = img[1,:,:].T
        img_[:,:,2] = img[2,:,:].T
        img_ = img_.astype(np.uint8)
        img_list =create_crops(img_,type='image')
        for img_ in img_list:
            # making datum
            img_=np.rollaxis(img_, 2).tostring()
            datum = make_datum(img_, 0)
            in_txn.put('{:0>5d}'.format(i), datum.SerializeToString())
            i=i+1
        print ('Finished processing image {}'.format(i))
        j=j+1
in_db.close()
print 'Creating validation_lmdb_depth'
i=0
j=0
in_db = lmdb.open(validation_lmdb_depths, map_size=int(1e12))
with in_db.begin(write=True) as in_txn:
    # read corresponding depth (aligned to the image, in-painted) of size [640 x 480], float64
    for depth in f['depths']:
        #print i
        if j%6 !=0:
            j=j+1
            continue
        # reshape
        depth_ = np.empty([480, 640, 1])
        depth_[:,:,0] = depth[:,:].T
        depth_list =create_crops(depth_,type='image')
        for depth_ in depth_list:
            # resize
            depth_ = cv2.resize(depth_, (160, 128), interpolation = cv2.INTER_CUBIC)
            depth_=depth_.tostring()
            # make datum
            datum = make_datum(depth_, 0,form='depth')
            in_txn.put('{:0>5d}'.format(i), datum.SerializeToString())
            i=i+1
        print ('Finished processing image {}'.format(i))
        j=j+1
in_db.close()
print '\nFinished processing all images'
|
# !usr/bin/python3.4
# -*- coding:utf-8 -*-
import json
# import grequests
import requests
import re
import time
from tool.jfile.file import *
def exception_handler(request, exception):
    """Callback for failed (g)requests downloads: just log the failure."""
    print('连接错误...')
# def geturl(urls):
# header = {'User-Agent':
# 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0',
# 'Referer': 'http://cn.bing.com',
# 'Host': 'cn.bing.com'}
#
# # 保持连接畅通
# sn = requests.Session()
# rs = [grequests.get(url, headers=header, session=sn) for url in urls]
#
# return grequests.map(rs, exception_handler=exception_handler, gtimeout=10)
def geturlnot(urls):
    """Fetch each URL sequentially (non-concurrent variant) and return responses."""
    header = {'User-Agent':
                  'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0',
              'Referer': 'http://cn.bing.com',
              'Host': 'cn.bing.com'}
    responses = []
    for url in urls:
        responses.append(requests.get(url, headers=header))
    return responses
def get(url):
    """GET a single URL with browser-like headers and return the response object."""
    header = {'User-Agent':
                  'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0',
              'Referer': 'http://cn.bing.com',
              'Host': 'cn.bing.com'}
    # Fetch the page and hand the raw response back to the caller.
    return requests.get(url, headers=header)
def prints(timesleep):
    """Announce a pause, then sleep for `timesleep` seconds."""
    notice = '暂停' + str(timesleep) + '秒后开始批量下载图片,请保持网络畅通...'
    print(notice)
    time.sleep(timesleep)
if __name__ == '__main__':
    # Daily Bing wallpaper downloader: collect image URLs in batches of
    # `every`, then download each batch to a per-day directory.
    dirpath = '../../data/bing/' + todaystring()
    createjia(dirpath)
    i = 0
    # batch size for downloads
    every = 5
    # pause (seconds) between batches
    timesleep = 1
    img = []
    imgname = []
    # consecutive-error counters; give up after errormax failures
    errortimes = 0
    errormax = 3
    while True:
        url = 'http://cn.bing.com/HPImageArchive.aspx?format=js&idx=' + str(i) + '&n=1'
        contents = get(url)
        data = contents.content.decode('utf-8', 'ignore')
        data = json.loads(data)
        try:
            onefile = data['images']
            for item in onefile:
                img.append(item['url'])
                imgname.append(item['copyright'].replace(' ', ''))
            print(img[i])
            i = i + 1
        except Exception as err:
            print(err)
            errortimes = errortimes + 1
            if errortimes == errormax:
                break
            else:
                pass
        # Download once a full batch of URLs is collected.
        # NOTE(review): `img` is never cleared, so each batch re-downloads
        # all previously collected URLs; and if geturlnot() raises without
        # reaching errormax, `pics` below is undefined (NameError).
        if i % every == 0:
            print('已经搜集好网址...')
            prints(timesleep)
            print('正在下载...')
            try:
                pics = geturlnot(img)
            except Exception as err:
                print(err)
                errortimes = errortimes + 1
                if errortimes == errormax:
                    break
                else:
                    pass
            j = 0
            for pic in pics:
                filenamep = dirpath + "/" + validateTitle(imgname[j] + '.jpg')
                filess = open(filenamep, 'wb')
                filess.write(pic.content)
                filess.close()
                print('已经写入第' + str(j + 1) + '张图片')
                j = j + 1
            prints(timesleep)
|
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
from django_plotly_dash import DjangoDash
from sympy import latex, sympify, integrate, Symbol
import math
from numpy import linspace
import dash_defer_js_import as dji
external_stylesheets = ['https://codepen.io/chriddyp/pen/dZVMbK.css']
# Dash app embedded in Django; MathJax renders the LaTeX integral title.
app = DjangoDash('SimpleExample', external_stylesheets=external_stylesheets, external_scripts=[
    'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.4/MathJax.js?config=TeX-MML-AM_CHTML',
])
mathjax_script = dji.Import(src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.7/latest.js?config=TeX-AMS-MML_SVG")
refresh_plots = dji.Import("https://codepen.io/chrisvoncsefalvay/pen/ExPJjWP.js")
# Layout: two function inputs, the integration limits, the computed area,
# and the comparison graph.
app.layout = html.Div([
    html.Div(["First function: ",
              dcc.Input(id='fn1', value='x**2', type='text')]),
    html.Br(),
    html.Div(["Second function: ",
              dcc.Input(id='fn2', value='x', type='text')]),
    html.Br(),
    html.Div(["Limits: ",
              dcc.Input(id='lower_limit', value='-1', type='text'),
              dcc.Input(id='upper_limit', value='1', type='text')]),
    html.Br(),
    html.H3("Area between curves:"),
    html.H3(id='my-output'),
    html.Div([dcc.Graph(id='graph')]),
    refresh_plots,
    mathjax_script
])
@app.callback(
    [Output(component_id='my-output', component_property='children'),
     Output('graph', 'figure')],
    [Input(component_id='fn1', component_property='value'),
     Input(component_id='fn2', component_property='value'),
     Input(component_id='lower_limit', component_property='value'),
     Input(component_id='upper_limit', component_property='value')]
)
def update_output_div(fn1, fn2, lower_limit, upper_limit):
    """Recompute the area between fn1 and fn2 over [lower_limit, upper_limit].

    Returns (area_text, plotly_figure).  Unparsable input raises and is
    surfaced by Dash as a callback error.
    """
    def to_float(s):
        # Named limits map to exact math-module constants.
        # Bug fix: "e" was 2.71928 (e is 2.71828...); use math.e / math.pi.
        constants = {"pi": math.pi, "e": math.e, "-pi": -math.pi, "-e": -math.e,
                     "inf": math.inf, "-inf": -math.inf}
        if s in constants:
            return constants[s]
        else:
            return float(s)
    # SECURITY: eval() on user-supplied text is only acceptable for a trusted
    # demo; replace with sympy parsing before exposing to untrusted users.
    def f(x):
        return eval(fn1)
    def g(x):
        return eval(fn2)
    x = Symbol('x')
    l = to_float(lower_limit)
    u = to_float(upper_limit)
    output = integrate(f(x) - g(x), (x, l, u))
    # Sample both curves for the plot (reuses the name x for the grid).
    x = linspace(l, u, 30)
    y1 = [f(x) for x in x]
    y2 = [g(x) for x in x]
    # Bug fix: the title previously began with a literal "r'$", producing
    # invalid LaTeX in the rendered figure title.
    t = "$\\int_{" + lower_limit + "}^{" + upper_limit + "}" + latex(sympify(fn1)) + " - " + latex(
        sympify(fn2)) + "$"
    figure = px.line(x=x, y=[y1, y2], title=t)
    out = '{}'.format(output)
    return out, figure
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class BcBusinessUserInfo(object):
def __init__(self):
self._logo = None
self._name = None
self._open_id = None
self._uid = None
@property
def logo(self):
return self._logo
@logo.setter
def logo(self, value):
self._logo = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def open_id(self):
return self._open_id
@open_id.setter
def open_id(self, value):
self._open_id = value
@property
def uid(self):
return self._uid
@uid.setter
def uid(self, value):
self._uid = value
def to_alipay_dict(self):
params = dict()
if self.logo:
if hasattr(self.logo, 'to_alipay_dict'):
params['logo'] = self.logo.to_alipay_dict()
else:
params['logo'] = self.logo
if self.name:
if hasattr(self.name, 'to_alipay_dict'):
params['name'] = self.name.to_alipay_dict()
else:
params['name'] = self.name
if self.open_id:
if hasattr(self.open_id, 'to_alipay_dict'):
params['open_id'] = self.open_id.to_alipay_dict()
else:
params['open_id'] = self.open_id
if self.uid:
if hasattr(self.uid, 'to_alipay_dict'):
params['uid'] = self.uid.to_alipay_dict()
else:
params['uid'] = self.uid
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = BcBusinessUserInfo()
if 'logo' in d:
o.logo = d['logo']
if 'name' in d:
o.name = d['name']
if 'open_id' in d:
o.open_id = d['open_id']
if 'uid' in d:
o.uid = d['uid']
return o
|
# Discord Bot
import logging
import os
import sys
import discord
import logbook
import yaml
from discord.ext import commands
from discord.ext.commands import Bot
from logbook import Logger
from logbook import StreamHandler
from logbook.compat import redirect_logging
# Cogs (extensions) loaded when the bot first becomes ready.
extensions = ["casca.cogs.mathematics"]

# Guild IDs the bot is allowed to respond in; all other traffic is dropped.
Whitelisted_Servers = ["221708975698083841",  # GERMAN LEARNING AND DISCUSSION
                       "245333247796576257",  # STEAMBOAT
                       "293111771428945920",  # BOT-TESTING
                       "206935992022728704"]  # HYPNOSIS

# Channel IDs the bot is allowed to respond in (subset of the servers above).
Whitelisted_Channels = ["221708975698083841",  # GERMAN LEARNING AND DISCUSSION: general
                        "221709483284496394",  # GERMAN LEARNING AND DISCUSSION: learning
                        "222013061886640128",  # GERMAN LEARNING AND DISCUSSION: deutsch-only
                        "259006631185088516",  # GERMAN LEARNING AND DISCUSSION: introductions
                        "251115764680097794",  # GERMAN LEARNING AND DISCUSSION: announcements
                        "252121415912914946",  # GERMAN LEARNING AND DISCUSSION: writing
                        "248530603165614080",  # GERMAN LEARNING AND DISCUSSION: botchannel
                        "260865272292835329",  # GERMAN LEARNING AND DISCUSSION: 0x1-bot
                        "245333247796576257",  # STEAMBOAT: general
                        "293111771428945920",  # BOT-TESTING: general
                        "206935992022728704"]  # HYPNOSIS: main
class Casca(Bot):
    """Whitelisted Discord bot: loads cogs on ready and logs allowed traffic."""

    def __init__(self, *args, **kwargs):
        """Load config.yaml, configure logbook logging, and init the Bot base."""
        config_file = os.path.join(os.getcwd(), "config.yaml")
        with open(config_file) as f:
            # BUGFIX: yaml.load() without an explicit Loader can construct
            # arbitrary Python objects and is an error on PyYAML >= 6;
            # safe_load is the correct call for a plain config file.
            self.config = yaml.safe_load(f)
        super().__init__(*args, **kwargs)
        # Define the logging set up.
        redirect_logging()
        StreamHandler(sys.stderr).push_application()
        self.logger = Logger("Casca_Best_Bot")
        self.logger.level = getattr(logbook, self.config.get("log_level", "INFO"), logbook.INFO)
        # Set the root logger level, too.
        logging.root.setLevel(self.logger.level)
        # Guards against re-loading cogs when on_ready fires after a reconnect.
        self._loaded = False

    async def on_ready(self):
        """Load every extension exactly once, logging failures individually."""
        if self._loaded:
            return
        self.logger.info(
            "LOADED Casca | LOGGED IN AS: {0.user.name}#{0.user.discriminator}.\n----------------------------------------------------------------------------------------------------".format(
                self))
        for cog in extensions:
            try:
                self.load_extension(cog)
            except Exception as e:
                self.logger.critical("Could not load extension `{}` -> `{}`".format(cog, e))
                self.logger.exception()
            else:
                self.logger.info("Loaded extension {}.".format(cog))
        self._loaded = True

    async def on_message(self, message):
        """Drop DMs and non-whitelisted traffic; log and dispatch the rest."""
        if not message.server:
            return
        if message.server.id not in Whitelisted_Servers:
            return
        if message.channel.id not in Whitelisted_Channels:
            return
        self.logger.info(
            "MESSAGE: {message.content}".format(message=message, bot=" [BOT]" if message.author.bot else ""))
        self.logger.info("FROM: {message.author.name}".format(message=message))
        if message.server is not None:
            self.logger.info("CHANNEL: {message.channel.name}".format(message=message))
            self.logger.info(
                "SERVER: {0.server.name}\n----------------------------------------------------------------------------------------------------".format(
                    message))
        await super().on_message(message)

    async def on_command_error(self, e, ctx):
        """Report argument errors back to the channel; ignore other errors here."""
        if isinstance(e, (commands.errors.BadArgument, commands.errors.MissingRequiredArgument)):
            await self.send_message(ctx.message.channel, "```ERROR: {}```".format(' '.join(e.args)))
            return

    async def on_command(self, command, ctx):
        """Delete the invoking message to keep channels tidy."""
        await self.delete_message(ctx.message)

    def run(self):
        """Start the bot; exit with status 2 on a bad token."""
        try:
            super().run(self.config["bot"]["token"], bot=True)
        except discord.errors.LoginFailure as e:
            self.logger.error("LOGIN FAILURE: {}".format(e.args[0]))
            sys.exit(2)
|
def readFile(path="./data/day#data.txt"):
    """Read one integer per line from *path* and return them as a list.

    :param path: data file location (defaults to the original hard-coded path,
        so existing zero-argument callers keep working)
    :return: list of ints, in file order
    """
    datalist = []
    # BUGFIX: the original appended to an undefined name `transactions`,
    # which raised NameError; it also never closed the file on error —
    # the `with` block guarantees closure.
    with open(path, "r") as datafile:
        for aline in datafile:
            datalist.append(int(aline))
    return datalist
def part1(data=None):
    """Solve part 1 of the puzzle (placeholder).

    :param data: parsed puzzle input, currently unused.
        BUGFIX: the original took no parameters, but every call site
        (test() and __main__) passes the input list, raising TypeError.
    :return: placeholder answer string
    """
    return ('part1')
def part2(data=None):
    """Solve part 2 of the puzzle (placeholder).

    :param data: parsed puzzle input, currently unused.
        BUGFIX: the original took no parameters, but every call site
        passes the input list, raising TypeError.
    :return: placeholder answer string
    """
    return ("part2")
def test():
    """Smoke-test the placeholder part1/part2 answers on an empty input."""
    sample = []
    assert part1(sample) == 'part1'
    assert part2(sample) == 'part2'
# Script entry point: run the self-check first, then both parts on real input.
if __name__ == "__main__":
    test()
    vals = readFile()
    # NOTE(review): part1/part2 are defined above without parameters but are
    # called with `vals` here — confirm the intended signatures.
    print(f"Part 1: {part1(vals)}")
    print(f"Part 2: {part2(vals)}")
# Read three integers and print, one per line, how many times each decimal
# digit 0-9 appears in their product.
A = int(input())
B = int(input())
C = int(input())

mul = A * B * C
digits = str(mul)  # e.g. "1860867" — str.count works directly on this

# One count per digit, in order 0..9. This replaces ten copy-pasted
# print(mul_list.count(...)) lines (the original trailing comment asked
# for exactly this loop) while producing byte-identical output.
for d in "0123456789":
    print(digits.count(d))
from django.test import SimpleTestCase, Client
from django.conf import settings
import asyncio
import uvloop
import requests
import json
from .repository.get_api_data import ApiHandler
from .data_processor.data_processor import Processor
from .views import HomeView
# Shared fixtures, built once at module import time.
# NOTE(review): these perform live network requests (OpenWeatherMap) and test
# client calls at import — consider moving them into setUpClass/setUp to avoid
# flaky, network-dependent imports.
home = HomeView()
processor = Processor()
api_handler = ApiHandler()

# Reference URLs built the same way the handler is expected to build them.
urlForecast = (
    "https://api.openweathermap.org/data/2.5/forecast?id=6322515&appid="
    + settings.WEATHER_API_KEY
)
urlWeather = (
    "https://api.openweathermap.org/data/2.5/weather?id=6322515&appid="
    + settings.WEATHER_API_KEY
)

# Forecast payload: once via the handler under test, once via requests directly.
response_forecast0 = api_handler.get_forecast()
data_forecast0 = json.loads(response_forecast0)["list"]
response_forecast1 = requests.get(urlForecast)
data_forecast1 = response_forecast1.json()["list"]

# Current-weather payload, same two paths.
response_weather0 = api_handler.get_weather_now()
data_weather0 = json.loads(response_weather0)["main"]
response_weather1 = requests.get(urlWeather)
data_weather1 = response_weather1.json()["main"]

# Test-client responses for the home view.
c = Client()
response_get = c.get("/")
response_post = c.post("/")
class TestApiHandler(SimpleTestCase):
    """Checks that ApiHandler targets the expected endpoints and returns live data."""

    def test_urls(self):
        """Handler URLs must match the ones built from settings above."""
        self.assertEqual(api_handler.urlForecast, urlForecast)
        self.assertEqual(api_handler.urlWeatherNow, urlWeather)
        print("Tested Api url to be called", flush=True)

    def test_retrieve(self):
        """Handler payloads must agree with direct requests.get calls."""
        self.assertEqual(data_forecast0, data_forecast1)
        self.assertEqual(data_weather0, data_weather1)
        print("Tested api data retrieve", flush=True)
class HomeViewTestCase(SimpleTestCase):
    """Integration checks for the home page view (uses module-level responses)."""

    def test_response_200(self):
        """GET / succeeds."""
        self.assertEqual(response_get.status_code, 200)
        print("Tested response 200", flush=True)

    def test_response_405(self):
        """POST / is rejected as method-not-allowed."""
        self.assertEqual(response_post.status_code, 405)
        # BUGFIX: the message said "400" although the assertion checks 405.
        print("Tested response 405", flush=True)

    def test_template(self):
        """GET / renders the home.html template."""
        self.assertTemplateUsed(response_get, "home.html")
        print("Tested Template usage", flush=True)
|
from django import forms
from clubkit.clubs.models import ClubInfo, Team, Pitch, ClubPosts, ClubMemberships, ClubPackages
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
# Form to create or update club information.
class ClubInfoForm(forms.ModelForm):
    """ModelForm for ClubInfo with optional extra address lines and a
    uniqueness check on the club name."""

    club_address2 = forms.CharField(required=False)
    club_address3 = forms.CharField(required=False)

    class Meta:
        model = ClubInfo
        fields = ('club_name', 'club_logo', 'description', 'club_address1', 'club_address2',
                  'club_address3', 'club_town', 'club_county', 'club_country', 'paypal_id')
        labels = {
            'paypal_id': 'Paypal Email - required to receive payments'
        }

    def clean_club_name(self):
        """Reject a club name already used by a *different* club."""
        club_name = self.cleaned_data['club_name']
        qs = ClubInfo.objects.filter(club_name=club_name)
        # BUGFIX: exclude the record being edited — otherwise re-saving an
        # existing club without renaming it always failed validation.
        if self.instance.pk is not None:
            qs = qs.exclude(pk=self.instance.pk)
        if qs.exists():
            raise ValidationError(_("Club already exists"))
        return club_name
# Form to obtain club team information.
class TeamForm(forms.ModelForm):
    """ModelForm for Team; club_id is fixed by the view, so its widget is hidden."""

    class Meta:
        model = Team
        fields = ('club_id', 'team_name', 'manager_name', 'photo')

    def __init__(self, *args, **kwargs):
        super(TeamForm, self).__init__(*args, **kwargs)
        self.fields['club_id'].widget = forms.HiddenInput()

    def clean_team_name(self):
        """Reject a team name already used by a *different* team."""
        team_name = self.cleaned_data['team_name']
        qs = Team.objects.filter(team_name=team_name)
        # BUGFIX: exclude the record being edited — otherwise re-saving an
        # existing team without renaming it always failed validation.
        if self.instance.pk is not None:
            qs = qs.exclude(pk=self.instance.pk)
        if qs.exists():
            raise ValidationError(_("Team already exists"))
        return team_name
# Form to obtain club pitch information.
class PitchForm(forms.ModelForm):
    """ModelForm for Pitch; club_id is supplied by the view and hidden from users."""

    class Meta:
        model = Pitch
        fields = ('club_id', 'pitch_name', 'photo', 'pitch_size', 'pitch_type', 'open_time',
                  'close_time', 'rental', 'rental_price', 'max_people')

    def __init__(self, *args, **kwargs):
        super(PitchForm, self).__init__(*args, **kwargs)
        # Hide the pre-filled foreign key from the rendered form.
        for hidden_field in ('club_id',):
            self.fields[hidden_field].widget = forms.HiddenInput()
# Form to obtain club post information.
class ClubPostForm(forms.ModelForm):
    """ModelForm for ClubPosts; timestamp and owning club are set by the view."""

    class Meta:
        model = ClubPosts
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        super(ClubPostForm, self).__init__(*args, **kwargs)
        # Hide fields the user must not edit directly.
        for hidden_field in ('created_date', 'club_id'):
            self.fields[hidden_field].widget = forms.HiddenInput()
# Form to obtain club membership information.
class MembershipsForm(forms.ModelForm):
    """ModelForm for ClubMemberships; the owning club is fixed and hidden."""

    class Meta:
        model = ClubMemberships
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        super(MembershipsForm, self).__init__(*args, **kwargs)
        # Hide the pre-filled foreign key from the rendered form.
        for hidden_field in ('club_id',):
            self.fields[hidden_field].widget = forms.HiddenInput()
# Form to handle club access to packages.
class ClubPackagesForm(forms.ModelForm):
    """ModelForm toggling which feature packages a club has enabled."""

    class Meta:
        model = ClubPackages
        fields = ('club_id', 'player_register_package', 'roster_package', 'rent_a_pitch_package', 'shop_package')

    def __init__(self, *args, **kwargs):
        # NOTE(review): unlike the sibling forms, club_id is not hidden here —
        # confirm whether that is intentional.
        super(ClubPackagesForm, self).__init__(*args, **kwargs)
|
import numpy as np
import pandas as pd
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.model_selection import train_test_split
import joblib #jbolib模块
# Toggle: retrain and overwrite the cached model (True) or load it (False).
overwrite = False

# Top-100 selected features; pipeline exported from a 100-sample AutoML run.
# NOTE: Make sure that the outcome column is labeled 'target' in the data file
medical = pd.read_csv('train_fenlie1.csv')
medical = medical.fillna(-999)  # -999 sentinel for missing values
medical_new = medical.drop(['ID', 'Label'], axis=1)  # drop id and target columns
# pd.isnull(medical_new).any()
medical_new = np.array(medical_new.values, dtype=float)
medical_new[np.isnan(medical_new)] = -999  # catch NaNs surviving the float cast
features = medical_new

# 75/25 split (sklearn default test_size) with a fixed seed.
training_features, testing_features, training_target, testing_target = \
    train_test_split(features, medical['Label'].values, random_state=42)

# Average CV score on the training set was: -15.981117264229795
if overwrite:
    exported_pipeline = ExtraTreesRegressor(bootstrap=True, max_features=0.5, min_samples_leaf=1, min_samples_split=2, n_estimators=100)
    # Fix random state in exported estimator
    if hasattr(exported_pipeline, 'random_state'):
        setattr(exported_pipeline, 'random_state', 42)
    exported_pipeline.fit(training_features, training_target)
    joblib.dump(exported_pipeline, 'Baseline_train_fenlie1_model.pkl')
else:
    exported_pipeline = joblib.load('Baseline_train_fenlie1_model.pkl')

results = exported_pipeline.predict(testing_features)
print(results)
print(testing_target)
import dlib
from PyQt5.QtCore import QThread
from PyQt5.QtWidgets import QMainWindow
from identity.pass_login import Ui_MainWindow
import logging.config
import logging.config
import winsound
import time
from gui import *
from datetime import datetime
# Raised when the trained face-data file is missing.
class TrainingDataNotFoundError(FileNotFoundError):
    """The trained face-recognition data file (trainingData.yml) is missing."""
# Raised when the SQLite database file is missing.
class DatabaseNotFoundError(FileNotFoundError):
    """The face database file (FaceBase.db) is missing."""
class CoreUI(QMainWindow):
    """Main face-authentication window.

    Captures webcam frames on a worker QThread, displays them via a QTimer,
    forwards log lines through a queue/signal pair, and raises an audible
    alarm when unrecognized faces persist. Once recognition stays clean for
    `timeThreshold` seconds, the button to the password-verification window
    is enabled.
    """

    # Class-level shared resources (also read by FaceProcessingThread).
    database = './identity/FaceBase.db'
    trainingData = './identity/recognizer/trainingData.yml'
    cap = cv2.VideoCapture()
    captureQueue = queue.Queue()  # frame queue (CV thread -> UI timer)
    alarmQueue = queue.LifoQueue()  # alarm queue, last-in-first-out
    logQueue = multiprocessing.Queue()  # log queue (workers -> UI)
    receiveLogSignal = pyqtSignal(str)  # LOG signal (thread-safe UI update)

    def __init__(self):
        """Load the .ui file, wire queues/threads/timers, and start the webcam."""
        super(CoreUI, self).__init__()
        loadUi('./identity/ui/Core.ui', self)
        self.setWindowIcon(QIcon('./identity/icons/icon.png'))
        self.setWindowTitle('pc端个人隐私防护系统 - 身份认证')
        self.alarm_flag = 0  # 1 once authentication passed (stops alarm loop)
        self.log_flag = 0  # 1 stops the log-forwarding loop
        self.pushButton.clicked.connect(self.goto_password_verify)
        self.pushButton.setEnabled(False)  # enabled only after recognition succeeds
        # Image-capture worker thread
        self.faceProcessingThread = FaceProcessingThread()
        # Database
        self.initDb()
        self.timer = QTimer(self)  # initialize a timer for frame refresh
        self.timer.timeout.connect(self.updateFrame)
        # Alarm system
        self.alarmSignalThreshold = 3
        self.panalarmThread = threading.Thread(target=self.recieveAlarm, daemon=True)
        self.isBellEnabled = True
        # Logging system
        self.receiveLogSignal.connect(lambda log: self.logOutput(log))
        self.logOutputThread = threading.Thread(target=self.receiveLog, daemon=True)
        self.logOutputThread.start()
        self.startWebcam()
        self.isBellEnabled = True
        self.timeThreshold = 4  # seconds of clean recognition before access is granted

    def goto_password_verify(self):
        """Show the password-verification window and close this one."""
        ui.show()
        try:
            window.close()
        except Exception as err:
            print(err)

    # Check database status
    def initDb(self):
        """Verify the database and trained data exist and count registered users."""
        try:
            if not os.path.isfile(self.database):
                raise DatabaseNotFoundError
            if not os.path.isfile(self.trainingData):
                raise TrainingDataNotFoundError
            conn = sqlite3.connect(self.database)
            cursor = conn.cursor()
            cursor.execute('SELECT Count(*) FROM users')
            result = cursor.fetchone()
            dbUserCount = result[0]
        except DatabaseNotFoundError:
            logging.error('系统找不到数据库文件{}'.format(self.database))
            self.initDbButton.setIcon(QIcon('identity/icons/error.png'))
            self.logQueue.put('Error:未发现数据库文件,你可能未进行人脸采集')
        except TrainingDataNotFoundError:
            logging.error('系统找不到已训练的人脸数据{}'.format(self.trainingData))
            self.initDbButton.setIcon(QIcon('identity/icons/error.png'))
            self.logQueue.put('Error:未发现已训练的人脸数据文件,请完成训练后继续')
        except Exception as e:
            logging.error('读取数据库异常,无法完成数据库初始化')
            self.initDbButton.setIcon(QIcon('identity/icons/error.png'))
            self.logQueue.put('Error:读取数据库异常,初始化数据库失败')
        else:
            cursor.close()
            conn.close()
            if not dbUserCount > 0:
                logging.warning('数据库为空')
                self.logQueue.put('warning:数据库为空,人脸识别功能不可用')
                self.initDbButton.setIcon(QIcon('identity/icons/warning.png'))
            else:
                self.logQueue.put('Success:数据库状态正常,发现用户数:{}'.format(dbUserCount))

    # Open/close the webcam
    def startWebcam(self):
        """Open camera 0 and start the CV thread, refresh timer and alarm thread;
        if already open, offer to shut the camera down (restart required)."""
        if not self.cap.isOpened():
            camID = 0
            self.cap.open(camID)
            self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
            self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
            ret, frame = self.cap.read()
            if not ret:
                logging.error('无法调用电脑摄像头{}'.format(camID))
                self.logQueue.put('Error:初始化摄像头失败')
                self.cap.release()
                self.startWebcamButton.setIcon(QIcon('identity/icons/error.png'))
            else:
                self.faceProcessingThread.start()  # start the OpenCV processing thread
                self.timer.start(5)  # start the refresh timer (5 ms)
                self.panalarmThread.start()  # start the alarm-system thread
        else:
            text = '如果关闭摄像头,须重启程序才能再次打开。'
            informativeText = '<b>是否继续?</b>'
            ret = CoreUI.callDialog(QMessageBox.Warning, text, informativeText, QMessageBox.Yes | QMessageBox.No,
                                    QMessageBox.No)
            if ret == QMessageBox.Yes:
                self.faceProcessingThread.stop()
                if self.cap.isOpened():
                    if self.timer.isActive():
                        self.timer.stop()
                    self.cap.release()
                self.realTimeCaptureLabel.clear()
                self.realTimeCaptureLabel.setText('<font color=red>摄像头未开启</font>')
                self.startWebcamButton.setText('摄像头已关闭')
                self.startWebcamButton.setEnabled(False)
                self.startWebcamButton.setIcon(QIcon())

    # Timer slot: refresh the displayed frame
    def updateFrame(self):
        """Pop the latest processed frame off the queue and show it."""
        if self.cap.isOpened():
            if not self.captureQueue.empty():
                captureData = self.captureQueue.get()
                realTimeFrame = captureData.get('realTimeFrame')
                self.displayImage(realTimeFrame, self.realTimeCaptureLabel)

    # Display an image on a QLabel
    def displayImage(self, img, qlabel):
        """Convert an OpenCV BGR frame to QImage and render it in qlabel."""
        # BGR -> RGB
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # default: the image is stored using 8-bit indexes into a colormap,
        # for example: a gray image
        qformat = QImage.Format_Indexed8
        if len(img.shape) == 3:  # rows[0], cols[1], channels[2]
            if img.shape[2] == 4:
                # The image is stored using a 32-bit byte-ordered RGBA format (8-8-8-8).
                # A: alpha channel (opacity); an alpha of 0% is fully transparent.
                qformat = QImage.Format_RGBA8888
            else:
                qformat = QImage.Format_RGB888
        outImage = QImage(img, img.shape[1], img.shape[0], img.strides[0], qformat)
        qlabel.setPixmap(QPixmap.fromImage(outImage))
        qlabel.setScaledContents(True)  # scale the image to fit the label

    # Bell process for the device alarm
    @staticmethod
    def bellProcess(queue):
        """Play the alarm sound in a separate process, logging as it rings."""
        logQueue = queue
        logQueue.put('Info:设备正在响铃...')
        winsound.PlaySound('./identity/alarm.wav', winsound.SND_FILENAME)

    # Resident alarm service: receives and handles alarm signals
    def recieveAlarm(self):
        """Loop: grant access after a quiet period, or ring and archive
        snapshots when alarm signals exceed the threshold."""
        self.count_time = 0
        while True:
            if (self.alarm_flag == 1):
                break
            jobs = []
            time.sleep(1)
            self.count_time += 1
            # Quiet long enough and few alarms -> authentication passes.
            if self.count_time > self.timeThreshold and self.alarmQueue.qsize() <= self.alarmSignalThreshold:
                self.pushButton.setEnabled(True)
                self.cap.release()
                self.alarm_flag = 1
                self.log_flag = 1
                self.timer.stop()
                self.logQueue.put('人脸认证通过,请按进入系统按钮')
                self.faceProcessingThread.stop()
            if self.alarmQueue.qsize() > self.alarmSignalThreshold:  # too many alarm signals: ring
                if not os.path.isdir('./identity/unknown'):
                    os.makedirs('./identity/unknown')
                lastAlarmSignal = self.alarmQueue.get()
                timestamp = lastAlarmSignal.get('timestamp')
                img = lastAlarmSignal.get('img')
                # Possible stranger's face: save a snapshot for the record
                cv2.imwrite('./identity/unknown/{}.jpg'.format(timestamp), img)
                logging.info('报警信号触发超出预设计数,自动报警系统已被激活')
                self.logQueue.put('Info:报警信号触发超出预设计数,自动报警系统已被激活')
                # Ring the bell if enabled
                if self.isBellEnabled:
                    p1 = multiprocessing.Process(target=CoreUI.bellProcess, args=(self.logQueue,))
                    p1.start()
                    jobs.append(p1)
                # Wait for this round of alarms to finish
                for p in jobs:
                    p.join()
                # Reset the alarm signals
                with self.alarmQueue.mutex:
                    self.alarmQueue.queue.clear()
            else:
                continue

    # Resident log service: receives and forwards system log lines
    def receiveLog(self):
        """Forward queued log lines to the UI via the Qt signal."""
        while True:
            if (self.log_flag == 1):
                break
            data = self.logQueue.get()
            if data:
                self.receiveLogSignal.emit(data)
            else:
                continue

    # LOG output
    def logOutput(self, log):
        """Append a timestamped log line to the log text box."""
        # Current system time
        time = datetime.now().strftime('[%Y/%m/%d %H:%M:%S]')
        log = time + ' ' + log + '\n'
        self.logTextEdit.moveCursor(QTextCursor.End)
        self.logTextEdit.insertPlainText(log)
        self.logTextEdit.ensureCursorVisible()  # auto-scroll

    # System dialog helper
    @staticmethod
    def callDialog(icon, text, informativeText, standardButtons, defaultButton=None):
        """Show a modal QMessageBox and return the clicked button code."""
        msg = QMessageBox()
        msg.setWindowIcon(QIcon('identity/icons/icon.png'))
        msg.setWindowTitle('pc端个人隐私防护系统 - 身份认证')
        msg.setIcon(icon)
        msg.setText(text)
        msg.setInformativeText(informativeText)
        msg.setStandardButtons(standardButtons)
        if defaultButton:
            msg.setDefaultButton(defaultButton)
        return msg.exec()

    # Window close event: stop the OpenCV thread, the timer and the camera
    def closeEvent(self, event):
        """Release all capture resources before letting the window close."""
        if self.faceProcessingThread.isRunning:
            self.faceProcessingThread.stop()
        if self.timer.isActive():
            self.timer.stop()
        if self.cap.isOpened():
            self.cap.release()
        event.accept()
# OpenCV线程
# OpenCV worker thread
class FaceProcessingThread(QThread):
    """Worker thread: detects, recognizes and tracks faces on webcam frames.

    Consumes frames from CoreUI.cap, runs Haar-cascade detection plus LBPH
    recognition, tracks unknown faces with dlib correlation trackers, pushes
    alarm signals for low-confidence (stranger) faces, and publishes annotated
    frames onto CoreUI.captureQueue for display.
    """

    def __init__(self):
        super(FaceProcessingThread, self).__init__()
        self.isRunning = True
        self.isFaceTrackerEnabled = True
        self.isFaceRecognizerEnabled = False
        self.isPanalarmEnabled = True
        self.isDebugMode = False
        self.confidenceThreshold = 50  # LBPH score below this counts as a reliable match
        self.autoAlarmThreshold = 65  # LBPH score above this triggers an alarm signal
        # NOTE(review): the three flags below re-assign values set just above
        # (recognizer flips False -> True) — confirm which defaults are intended.
        self.isFaceTrackerEnabled = True
        self.isFaceRecognizerEnabled = True
        self.isPanalarmEnabled = True

    def run(self):
        """Main capture/detect/recognize/track loop; exits when stop() is called."""
        faceCascade = cv2.CascadeClassifier('./identity/haarcascades/haarcascade_frontalface_default.xml')
        # Frame counter and face-ID initialization
        frameCounter = 0
        currentFaceID = 0
        # Face-tracker dictionary: tracker id -> dlib correlation tracker
        faceTrackers = {}
        isTrainingDataLoaded = False
        isDbConnected = False
        while self.isRunning:
            if CoreUI.cap.isOpened():
                ret, frame = CoreUI.cap.read()
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                faces = faceCascade.detectMultiScale(gray, 1.3, 5, minSize=(90, 90))
                # Lazily load the trained recognizer and the database once available
                if not isTrainingDataLoaded and os.path.isfile(CoreUI.trainingData):
                    recognizer = cv2.face.LBPHFaceRecognizer_create()
                    recognizer.read(CoreUI.trainingData)
                    isTrainingDataLoaded = True
                if not isDbConnected and os.path.isfile(CoreUI.database):
                    conn = sqlite3.connect(CoreUI.database)
                    cursor = conn.cursor()
                    isDbConnected = True
                captureData = {}
                realTimeFrame = frame.copy()
                alarmSignal = {}
                # Face tracking
                if self.isFaceTrackerEnabled:
                    # Trackers to delete this frame
                    fidsToDelete = []
                    for fid in faceTrackers.keys():
                        # Update each tracker against the current frame
                        trackingQuality = faceTrackers[fid].update(realTimeFrame)
                        # Drop trackers whose quality fell too low
                        if trackingQuality < 7:
                            fidsToDelete.append(fid)
                    # Remove the low-quality trackers
                    for fid in fidsToDelete:
                        faceTrackers.pop(fid, None)
                for (_x, _y, _w, _h) in faces:
                    isKnown = False
                    if self.isFaceRecognizerEnabled:
                        cv2.rectangle(realTimeFrame, (_x, _y), (_x + _w, _y + _h), (232, 138, 30), 2)
                        face_id, confidence = recognizer.predict(gray[_y:_y + _h, _x:_x + _w])
                        logging.debug('face_id:{},confidence:{}'.format(face_id, confidence))
                        if self.isDebugMode:
                            CoreUI.logQueue.put('Debug -> face_id:{},confidence:{}'.format(face_id, confidence))
                        # Look up the recognized face's identity in the database
                        try:
                            cursor.execute("SELECT * FROM users WHERE face_id=?", (face_id,))
                            result = cursor.fetchall()
                            if result:
                                en_name = result[0][3]
                            else:
                                raise Exception
                        except Exception as e:
                            logging.error('读取数据库异常,系统无法获取Face ID为{}的身份信息'.format(face_id))
                            CoreUI.logQueue.put('Error:读取数据库异常,系统无法获取Face ID为{}的身份信息'.format(face_id))
                            en_name = ''
                        # LBPH: a score BELOW the threshold is a reliable match
                        if confidence < self.confidenceThreshold:
                            isKnown = True
                            cv2.putText(realTimeFrame, en_name, (_x - 5, _y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        (0, 97, 255), 2)
                        else:
                            # Above the threshold: probably a stranger
                            cv2.putText(realTimeFrame, 'unknown', (_x - 5, _y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        (0, 0, 255), 2)
                            # Above the auto-alarm threshold: emit an alarm signal
                            if confidence > self.autoAlarmThreshold:
                                # Only when the alarm system is enabled
                                if self.isPanalarmEnabled:
                                    alarmSignal['timestamp'] = datetime.now().strftime('%Y%m%d%H%M%S')
                                    alarmSignal['img'] = realTimeFrame
                                    CoreUI.alarmQueue.put(alarmSignal)
                                    logging.info('系统发出了报警信号')
                    # Frame counter increment
                    frameCounter += 1
                    # Every 10 frames, check whether tracked faces are still in frame
                    if frameCounter % 10 == 0:
                        # Must convert to int: OpenCV detection returns numpy.int32,
                        # while the dlib tracker expects plain int
                        x = int(_x)
                        y = int(_y)
                        w = int(_w)
                        h = int(_h)
                        # Center of the detected face
                        x_bar = x + 0.5 * w
                        y_bar = y + 0.5 * h
                        # matchedFid: whether this detection is already tracked
                        matchedFid = None
                        for fid in faceTrackers.keys():
                            # Tracker position: dlib.drectangle (float rectangle)
                            tracked_position = faceTrackers[fid].get_position()
                            # Round the floats to ints
                            t_x = int(tracked_position.left())
                            t_y = int(tracked_position.top())
                            t_w = int(tracked_position.width())
                            t_h = int(tracked_position.height())
                            # Center of the tracker rectangle
                            t_x_bar = t_x + 0.5 * t_w
                            t_y_bar = t_y + 0.5 * t_h
                            # If each center falls inside the other rectangle,
                            # this detection is already being tracked
                            if ((t_x <= x_bar <= (t_x + t_w)) and (t_y <= y_bar <= (t_y + t_h)) and
                                    (x <= t_x_bar <= (x + w)) and (y <= t_y_bar <= (y + h))):
                                matchedFid = fid
                        # Unknown face that is not yet tracked: start tracking it
                        if not isKnown and matchedFid is None:
                            # Create a new correlation tracker
                            tracker = dlib.correlation_tracker()
                            # Lock onto the (slightly padded) face region
                            tracker.start_track(realTimeFrame, dlib.rectangle(x - 5, y - 10, x + w + 5, y + h + 10))
                            # Register the tracker for this face
                            faceTrackers[currentFaceID] = tracker
                            # Face-ID increment
                            currentFaceID += 1
                # Draw every active tracker's rectangle on the output frame
                for fid in faceTrackers.keys():
                    tracked_position = faceTrackers[fid].get_position()
                    t_x = int(tracked_position.left())
                    t_y = int(tracked_position.top())
                    t_w = int(tracked_position.width())
                    t_h = int(tracked_position.height())
                    # Box the tracked face
                    cv2.rectangle(realTimeFrame, (t_x, t_y), (t_x + t_w, t_y + t_h), (0, 0, 255), 2)
                    cv2.putText(realTimeFrame, 'tracking...', (15, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255),
                                2)
                captureData['originFrame'] = frame
                captureData['realTimeFrame'] = realTimeFrame
                CoreUI.captureQueue.put(captureData)
            else:
                continue

    # Stop the OpenCV thread
    def stop(self):
        """Ask the loop to exit, then quit and join the QThread."""
        self.isRunning = False
        self.quit()
        self.wait()
# Application entry point: configure logging, then show the face-auth window.
if __name__ == '__main__':
    logging.config.fileConfig('./identity/config/logging.cfg')
    app = QApplication(sys.argv)
    window = CoreUI()
    ui = Ui_MainWindow()  # password-verification window, shown after face auth
    window.show()
    sys.exit(app.exec())
|
class PowerSupply:
    """Weighted undirected graph of junction connections.

    Each edge is a list [u, v, weight]; edges are kept sorted by weight
    ascending so Kruskal's algorithm can scan them in order.
    """

    def __init__(self, data):
        """
        :param data: list of [u, v, weight] edges (sorted in place by weight)
        """
        self.vertexes = set()
        self.edges = data
        self.edges.sort(key=lambda x: x[2])  # cheapest edges first, for Kruskal
        self.make_vertexes()

    def make_vertexes(self):
        """Collect every junction id that appears as an edge endpoint."""
        for edge in self.edges:
            # BUGFIX: the original added edge[2] (the weight) as a vertex;
            # the endpoints are edge[0] and edge[1].
            self.vertexes.add(edge[0])
            self.vertexes.add(edge[1])

    def remove_paralel_loops(self):
        """Drop self-loops and keep only the cheapest of any parallel edges.

        BUGFIX: the original body was a dangling `for` — a SyntaxError.
        Relies on self.edges being weight-sorted, so the first edge seen
        for an endpoint pair is the cheapest.
        """
        seen = set()
        kept = []
        for u, v, w in self.edges:
            if u == v:
                continue  # self-loop
            key = (u, v) if u <= v else (v, u)  # undirected: normalize the pair
            if key in seen:
                continue  # a cheaper parallel edge was already kept
            seen.add(key)
            kept.append([u, v, w])
        self.edges = kept

    def Kruskal(self):
        """Compute a minimum spanning tree with Kruskal's algorithm.

        :return: list of [u, v, weight] edges of the MST (the body was an
            unimplemented `pass` in the original).
        """
        parent = {v: v for v in self.vertexes}

        def find(v):
            # Union-find root lookup with path halving.
            while parent[v] != v:
                parent[v] = parent[parent[v]]
                v = parent[v]
            return v

        mst = []
        for u, v, w in self.edges:  # already sorted by weight
            root_u, root_v = find(u), find(v)
            if root_u != root_v:  # edge joins two components: keep it
                parent[root_u] = root_v
                mst.append([u, v, w])
        return mst
def main():
    """Build the sample power-supply graph and print its weight-sorted edges."""
    junction_connections = 12  # number of connections in the sample (unused)
    sample_edges = [
        [1, 2, 1100],
        [1, 3, 1400],
        [1, 4, 2000],
        [2, 4, 2000],
        [2, 5, 1300],
        [1, 6, 2600],
        [3, 5, 780],
        [5, 4, 1000],
        [3, 4, 900],
        [3, 6, 1300],
        [6, 7, 200],
        [4, 7, 800],
    ]
    supply = PowerSupply(sample_edges)
    print(supply.edges)


if __name__ == '__main__':
    main()
|
"""
- Merge all the different data-sets into one: WGA + NGA + WikiArt
- Also save all the model data -> Split into: Training, Validation, & Testing sets
"""
import pandas as pd
import unicodedata
from sklearn.model_selection import train_test_split
from PIL import Image
import os
import numpy as np
# Directory of this script and the root of the sculpture data tree.
FILE_PATH = os.path.dirname(os.path.realpath(__file__))
MODEL_DIR = os.path.join(FILE_PATH, "..", "..", "sculpture_data")

# Duplicate sculptures to be deleted from master.
# This was done informally by me...I'm pretty sure I caught a vast majority
# of it though. NOTE: See 'notes.txt' for more info.
DUP_SCULPTURES = ["wikiart_0551.jpg", "wikiart_0411.jpg", "wga_3788.jpg", "wga_1084.jpg", "wga_1092.jpg",
                  "wikiart_0320.jpg",
                  "wikiart_0319.jpg", "nga_0062.jpg", "nga_0063.jpg", "nga_0099.jpg", "nga_0064.jpg", "nga_0066.jpg",
                  "nga_0067.jpg", "nga_0069.jpg", "nga_0070.jpg", "nga_0076.jpg", "nga_0071.jpg", "nga_0073.jpg",
                  "nga_0075.jpg", "nga_0077.jpg", "nga_0078.jpg", "nga_0081.jpg", "wikiart_0277.jpg", "nga_0082.jpg",
                  "nga_0083.jpg", "nga_0085.jpg", "nga_0084.jpg", "nga_0092.jpg", "nga_0086.jpg", "nga_0087.jpg",
                  "nga_0088.jpg", "nga_0089.jpg", "nga_0090.jpg", "nga_0091.jpg", "nga_0094.jpg", "nga_0096.jpg",
                  "wga_0656.jpg", "wga_0657.jpg", "wga_1246.jpg", "wga_1328.jpg", "wga_1192.jpg", "wikiart_0020.jpg",
                  "wga_1175.jpg", "wikiart_0035.jpg", "wga_1322.jpg", "wga_0342.jpg", "wikiart_0110.jpg",
                  "wikiart_0124.jpg",
                  "wikiart_0130.jpg", "wga_0388.jpg", "wga_0360.jpg", "wga_0379.jpg", "wikiart_0117.jpg",
                  "wikiart_0137.jpg",
                  "wga_0363.jpg", "wga_0423.jpg", "wga_0425.jpg", "wikiart_0135.jpg", "wga_0419.jpg", "wga_1751.jpg",
                  "wikiart_0083.jpg", "wga_2838.jpg", "wikiart_0093.jpg", "wga_2840.jpg", "wga_2862.jpg",
                  "wikiart_0085.jpg",
                  "wikiart_0103.jpg", "wga_2932.jpg", "wikiart_0107.jpg", "wikiart_0072.jpg", "wikiart_0067.jpg",
                  "wikiart_0068.jpg", "wikiart_0060.jpg", "wikiart_0061.jpg", "wikiart_0092.jpg", "wikiart_0078.jpg",
                  "wikiart_0076.jpg", "wikiart_0090.jpg", "wikiart_0089.jpg", "wikiart_0088.jpg", "wikiart_0086.jpg",
                  "wikiart_0082.jpg"
                  ]
def fix_name_nga(artist):
    """Normalize an NGA artist name.

    NGA names may carry a trailing "sculptor" role annotation; everything
    from that word onward is dropped.

    :param artist: artist name
    :return: cleaned name, stripped of surrounding whitespace
    """
    marker = artist.find("sculptor")
    return artist[:marker].strip() if marker != -1 else artist.strip()
def fix_name_wiki(artist):
    """Normalize a WikiArt artist name.

    Applies known spelling corrections (checked in order); any other
    name passes through unchanged.

    :param artist: artist name
    :return: corrected name
    """
    corrections = (
        ("Alonzo Cano", "Alonso Cano"),
        ("Michelangelo", "Michelangelo Buonarroti"),
    )
    for needle, fixed in corrections:
        if needle in artist:
            return fixed
    return artist
def fix_name_wga(artist):
    """Normalize a WGA artist name.

    Converts the catalogue's "LAST, First" form to "First LAST" (splitting
    at the first comma); names without a comma pass through unchanged.

    :param artist: artist name
    :return: corrected name
    """
    if "," not in artist:
        return artist
    last, _, first = artist.partition(",")
    return first.strip() + " " + last.strip()
def fix_text(text):
    """Normalize a title/artist string for de-duplication.

    Strips accents (NFD-decompose, drop combining marks), removes newlines,
    uppercases, and trims surrounding whitespace.

    :param text: Title or Artist name
    :return: 'Fixed' text
    """
    # BUGFIX: the joined/normalized result was previously discarded (the
    # expression was never assigned), so accents were not actually removed.
    text = ''.join(c for c in unicodedata.normalize('NFD', text)
                   if unicodedata.category(c) != 'Mn')
    text = text.replace('\n', '')
    text = text.upper()
    return text.strip()
def get_data():
    """
    Merge All the datasets into one
    :return: Master DataFrame
    """
    # Load the per-source period CSVs.
    wga_df = pd.read_csv(os.path.join(MODEL_DIR, 'wga/sculptures/wga_sculpture_periods.csv'), index_col=0)
    wikiart_df = pd.read_csv(os.path.join(MODEL_DIR, 'wikiart/sculptures/wikiart_sculpture_periods.csv'), index_col=0)
    nga_df = pd.read_csv(os.path.join(MODEL_DIR, 'nga/sculptures/nga_sculpture_periods.csv'), index_col=0)

    # Normalize artist names per source BEFORE merging.
    wga_df['Author'] = wga_df.apply(lambda x: fix_name_wga(x['Author']), axis=1)
    wikiart_df['Author'] = wikiart_df.apply(lambda x: fix_name_wiki(x['Author']), axis=1)
    nga_df['Author'] = nga_df.apply(lambda x: fix_name_nga(x['Author']), axis=1)

    df = pd.concat([wga_df, wikiart_df, nga_df], ignore_index=True, sort=True)
    # Accent-free, uppercase keys used for de-duplication.
    df['Author_Fixed'] = df.apply(lambda x: fix_text(x['Author']), axis=1)
    df['title_fixed'] = df.apply(lambda x: fix_text(x['title']), axis=1)

    # Only these art periods are kept in the master set.
    periods = ["BAROQUE", "EARLY RENAISSANCE", "MEDIEVAL", "NEOCLASSICISM", "HIGH RENAISSANCE", "MINIMALISM", "REALISM",
               "IMPRESSIONISM", "ROCOCO", "SURREALISM", "MANNERISM", "ROMANTICISM",
               ]
    df['Period'] = df.apply(lambda row: row['Period'].upper(), axis=1)
    # Collapse any period containing "SURREALISM" into plain SURREALISM,
    # then filter to the desired periods.
    df['Period'] = df.apply(lambda x: "SURREALISM" if "SURREALISM" in x['Period'] else x['Period'], axis=1)
    df = df[(df['Period'].isin(periods))]

    # De-duplicate on (author, title), keeping the last occurrence after sorting.
    df = df.sort_values(['Author_Fixed', 'title_fixed'])
    # print("Combined Drop Rows:", df.shape[0] - df.drop_duplicates(subset=['Author_Fixed', 'title_fixed']).shape[0])
    df = df.drop_duplicates(subset=['Author_Fixed', 'title_fixed'], keep='last')
    # Drop manually identified duplicate sculpture images.
    df = df[~df['file'].isin(DUP_SCULPTURES)].reset_index(drop=True)
    # print(df['Period'].value_counts())
    return df
def save_model_data():
    """
    Save all the data used to create the model in the matter I want it
    :return: None
    """
    print("Getting the training, validation, and testing sets...")
    df = get_data()
    # First read in & group images by period.
    image_styles = {key: [] for key in df['Period'].unique()}
    for pic in df.to_dict("records"):
        # The filename prefix (before '_') names the source database directory.
        db = pic['file'][:pic['file'].find("_")]
        img = Image.open(os.path.join(MODEL_DIR, f"{db}/sculpture_images/{pic['file']}"))
        img.load()  # force-read pixel data so the file handle can be released
        image_styles[pic['Period']].append(img)
    # Split each period separately (stratified), not the pooled data.
    for style in image_styles.keys():
        # Split into Train/Test - 75/25
        feats, labels = image_styles[style], [style] * len(image_styles[style])
        feat_train, feat_test, label_train, label_test = train_test_split(feats, labels, test_size=.25, random_state=42)
        # Create output dirs if needed.
        for pic_type in ['train', 'test']:
            if not os.path.exists(os.path.join(MODEL_DIR, f"model_data/gan/{pic_type}/{style}")):
                os.makedirs(os.path.join(MODEL_DIR, f"model_data/gan/{pic_type}/{style}"))
        # Save into train/test folders, skipping files that already exist.
        for style_type_pics in [["train", feat_train], ["test", feat_test]]:
            for pic in range(len(style_type_pics[1])):
                file_name = style + format(pic, '03d') + ".jpg"
                if not os.path.isfile(os.path.join(MODEL_DIR, f"model_data/gan/{style_type_pics[0]}/{style}/{file_name}")):
                    style_type_pics[1][pic].save(os.path.join(MODEL_DIR, f"model_data/gan/{style_type_pics[0]}/{style}/{file_name}"))
        print("Split data for", style)
# Script entry point: build the merged dataset and write the train/test split.
if __name__ == "__main__":
    save_model_data()
|
"""An example program that uses the elsapy module"""
from elsapy.elsclient import ElsClient
from elsapy.elsprofile import ElsAuthor, ElsAffil
from elsapy.elsdoc import FullDoc, AbsDoc
from elsapy.elssearch import ElsSearch
from xml.dom.minidom import parseString
import json, re
import xml.etree.cElementTree as ET
import urllib, os, time
import threading
import numpy
import dicttoxml
import argparse
# Command-line interface.
parser = argparse.ArgumentParser()
parser.add_argument("year", help="the year to search science direct for")
args = parser.parse_args()
# NOTE(review): args.year is parsed but not used in this section — confirm it
# is consumed further down the script.

## Load configuration (API key and institution token).
con_file = open("config.json")
config = json.load(con_file)
con_file.close()

## Initialize client
client = ElsClient(config['apikey'])
client.inst_token = config['insttoken']

## print ("Please enter the search terms")
## s = raw_input('--> ')

## Initialize doc search object and execute search, retrieving all results
# scopus, scidir
max_Results = 400;
search_query = "biodiversity"
# search_query = "bioinformatics"
main_path = "/home/dean/phd/xmlout/"  # output root for the generated XML
xml_out_path = main_path + "out.xml"
loop_count = 0
def cleanText(textToClean):
    """Return *textToClean* reduced to characters that are safe for the NLP step.

    Keeps alphanumerics, whitespace and basic punctuation/math notation
    (brackets, operators, quotes) -- these must stay because references and
    possibly math equations are analysed later.  Every other character is
    dropped, and newlines are collapsed to single spaces.

    Returns an empty string when *textToClean* is None.
    """
    if textToClean is None:
        return ""
    # Whitelist of non-alphanumeric characters worth preserving.
    kept = set(".,!?()[]^&+-*<>/=\"'")
    cleaned = ''.join(
        c for c in textToClean if c.isalnum() or c.isspace() or c in kept
    )
    # Newlines would otherwise split sentences mid-way downstream.
    return re.sub(r"\n", " ", cleaned)
def checkNone(object_to_check):
    """Return True when *object_to_check* is not None, else False."""
    return object_to_check is not None
# This is a thread worker function. The data is split into slices. The number of slices
# is determined by the number of cores (N) the CPU has minus 1. This will allow the
# computer to use a core for background OS purposes.
def worker_function(arraySlice, startCount, idx):
    """Fetch, clean and save one slice of search results as XML files.

    arraySlice: the slice of search-result dicts this thread processes.
    startCount: global offset of this slice (used for numbering and logging).
    idx:        thread index, used only in progress/error messages.
    Relies on module globals: main_path, search_query, client.
    """
    save_path = main_path + search_query + "/"
    save_text_path = main_path + search_query + "/" + str(startCount) + "/"
    log_path = save_text_path + "log.txt"
    # Create the path if it does not exist.
    if not os.path.exists(save_text_path):
        os.makedirs(save_text_path)
    # Local counter deliberately starts at the slice offset (shadows the
    # module-level loop_count).
    loop_count = startCount
    total_count = len(arraySlice)
    for result in arraySlice:
        loop_count = loop_count+1
        try:
            prismdoi = result.get("prism:doi", str(loop_count))
            entityID = result.get("eid")
            save_file = save_path + entityID + ".xml"
            output_block_text_path = save_text_path + "block_text.txt"
            # Skip documents already saved by a previous run.
            if os.path.isfile(save_file) is False:
                article = ET.Element("article")
                ET.SubElement(article, "search_query").text = search_query
                ET.SubElement(article, "search_time").text = time.strftime("%c")
                ET.SubElement(article, "time").text = result["prism:coverDate"][0]["$"]
                ET.SubElement(article, "title").text = cleanText(result.get("dc:title",""))
                # NOTE(review): .encode('utf-8') returns bytes -- concatenating
                # with the str ". \n" only works on Python 2; this line would
                # raise TypeError on Python 3.  Confirm the target interpreter.
                with open(output_block_text_path, "a") as block_out:
                    block_out.write(cleanText(result.get("dc:title","")).encode('utf-8').strip() + ". \n")
                if result.get("authors","") != "":
                    for author in result["authors"].get("author", ""):#result["authors"]["author"]:
                        ET.SubElement(article, "author").text = author.get("surname", "") + ", " + author.get("given-name","")
                        #print("\n Debug authors adding .... ", author["surname"] + ", " + author["given-name"])
                ET.SubElement(article, "number").text = str(loop_count)
                ET.SubElement(article, "text", section="teaser").text = cleanText(result.get("prism:teaser",""))
                # Do a document search to retrieve the abstract information
                abstract = "" # start with an empty variable
                scp_doc = FullDoc(uri = result.get("prism:url",""))
                if scp_doc.read(client):
                    abstract = scp_doc.abstract
                    if abstract is not None:
                        # Strip a leading "Abstract" heading if present.
                        if abstract.find('Abstract') == 0:
                            abstract = abstract[8:]
                    ET.SubElement(article, "text", section="abstract").text = cleanText(abstract)
                    with open(output_block_text_path, "a") as block_out:
                        block_out.write(cleanText(abstract).encode('utf-8').strip() + "\n")
                    #print(scp_doc.coredata)
                tree = ET.ElementTree(article)
                #sem.acquire()
                tree.write(save_file)
                #sem.release()
                #print("Saved file, EID = ", entityID)
                # Progress report every fifth document.
                if(loop_count%5==0):
                    print(str(loop_count) + " / " + str(total_count+startCount) + " parsed. Thread id = " + str(idx))
            else:
                print(save_file, "already exists, skipping to next : # " + str(loop_count) +"\n")
                #skip
        except Exception as e:
            # NOTE(review): e.message exists only on Python 2 exceptions; on
            # Python 3 this handler itself raises AttributeError.
            print("error on #" + str(loop_count) + " doi : " + prismdoi + ". Thread = " + str(idx) + "\n" )
            print(e.__doc__, e.message)
            with open(log_path, "a") as log_out:
                log_out.write(e.message + "\n")
                log_out.write("\n")
            pass
# Run the ScienceDirect search once, then fan the results out over a fixed
# pool of worker threads, each handling one contiguous slice.
search_year = args.year
#print(search_query + "&date=" + search_year )
doc_srch = ElsSearch(search_query + "&date=" + search_year, 'scidir')
doc_srch.execute(client, get_all = True, max_results=max_Results)
n = 6  # number of worker threads / result slices
x = numpy.array_split(doc_srch.results, n)
# range() instead of the Python-2-only xrange keeps this runnable on
# Python 3; for n == 6 the cost difference is irrelevant.
for i in range(0, n):
    # i * len(x[0]) is the global offset of slice i, used for file numbering.
    t = threading.Thread(target=worker_function, args=(x[i], i * len(x[0]), i,))
    t.start()
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import logging
from concurrent import futures
import grpc
from google.protobuf import empty_pb2
from fedlearner.common import common_pb2 as common_pb
from fedlearner.common import data_portal_service_pb2 as dp_pb
from fedlearner.common import data_portal_service_pb2_grpc as dp_grpc
from fedlearner.common.db_client import DBClient
from fedlearner.data_join.data_portal_job_manager import DataPortalJobManager
from fedlearner.data_join.routine_worker import RoutineWorker
class DataPortalMaster(dp_grpc.DataPortalMasterServiceServicer):
    """gRPC servicer that hands out data-portal map/reduce tasks to workers."""

    def __init__(self, portal_name, kvstore, portal_options):
        super(DataPortalMaster, self).__init__()
        self._portal_name = portal_name
        self._kvstore = kvstore
        self._portal_options = portal_options
        self._data_portal_job_manager = DataPortalJobManager(
            self._kvstore, self._portal_name,
            self._portal_options.long_running,
            self._portal_options.check_success_tag,
        )
        self._bg_worker = None

    def GetDataPortalManifest(self, request, context):
        # The job manager owns the manifest; just forward it.
        return self._data_portal_job_manager.get_portal_manifest()

    def RequestNewTask(self, request, context):
        response = dp_pb.NewTaskResponse()
        finished, task = \
            self._data_portal_job_manager.alloc_task(request.rank_id)
        if task is None:
            # Nothing to hand out: either the whole job is done, or the
            # worker should retry later.
            marker = response.finished if finished else response.pending
            marker.MergeFrom(empty_pb2.Empty())
            return response
        if isinstance(task, dp_pb.MapTask):
            response.map_task.MergeFrom(task)
        else:
            assert isinstance(task, dp_pb.ReduceTask)
            response.reduce_task.MergeFrom(task)
        return response

    def FinishTask(self, request, context):
        self._data_portal_job_manager.finish_task(
            request.rank_id, request.partition_id, request.part_state)
        return common_pb.Status()

    def start(self):
        # Kick the job manager's background task every 30 seconds.
        self._bg_worker = RoutineWorker(
            'portal_master_bg_worker',
            self._data_portal_job_manager.backgroup_task,
            lambda: True, 30)
        self._bg_worker.start_routine()

    def stop(self):
        worker, self._bg_worker = self._bg_worker, None
        if worker is not None:
            worker.stop_routine()
class DataPortalMasterService(object):
    """Hosts a DataPortalMaster servicer on an insecure gRPC port."""

    def __init__(self, listen_port, portal_name,
                 kvstore_type, portal_options):
        self._portal_name = portal_name
        self._listen_port = listen_port
        self._server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
        kvstore = DBClient(kvstore_type, portal_options.use_mock_etcd)
        self._data_portal_master = DataPortalMaster(portal_name, kvstore,
                                                    portal_options)
        dp_grpc.add_DataPortalMasterServiceServicer_to_server(
            self._data_portal_master, self._server
        )
        self._server.add_insecure_port('[::]:%d' % listen_port)
        self._server_started = False

    def start(self):
        """Start the gRPC server and the master's background worker (idempotent)."""
        if not self._server_started:
            self._server.start()
            self._data_portal_master.start()
            self._server_started = True
            logging.warning("DataPortalMasterService name as %s start "
                            "on port[%d]:",
                            self._portal_name, self._listen_port)

    def stop(self):
        """Stop the background worker and the gRPC server (idempotent)."""
        if self._server_started:
            self._data_portal_master.stop()
            # grace=None cancels in-flight RPCs immediately.
            self._server.stop(None)
            self._server_started = False
            # Fixed log message: the fragments previously concatenated to
            # "...name as %sstopped" (missing space).
            logging.warning("DataPortalMasterService name as %s "
                            "stopped ", self._portal_name)

    def run(self):
        """Start, block until the server terminates, then clean up."""
        self.start()
        self._server.wait_for_termination()
        self.stop()
|
import digitalio, board, busio, adafruit_rfm9x
import time
# Listen on a LoRa RFM9x radio for ten minutes and count received packets.
RADIO_FREQ_MHZ = 868.0  # 868 MHz ISM band; must match the transmitter
CS = digitalio.DigitalInOut(board.CE1)
RESET = digitalio.DigitalInOut(board.D25)
spi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)
radio = adafruit_rfm9x.RFM9x(spi, CS, RESET, RADIO_FREQ_MHZ)
counter = 0
t0 = time.perf_counter()
# Poll for packets for 600 seconds.
while time.perf_counter() - t0 < 600:
    packet = radio.receive()
    #rssi = radio.last_rssi
    #print("Signal strength: " + str(rssi) + " dB")
    # `is not None` (identity), not `!= None`: an empty payload still counts.
    if packet is not None:
        counter += 1
        print(str(counter) + "\t[" + str(packet) + "]")
print("Received " + str(counter) + " packages")
|
# Implicit join of the surveillance atlas with 2015 ACS county demographics,
# matched on the concatenated State+County string, selecting the demographic
# columns used as ML features.
# NOTE(review): acs2015_county_data.Professional appears twice in the select
# list -- confirm whether the duplicate column is intentional.
ML_DATA_QUERY = '''
SELECT
(Atlas_of_surveillance_20201007.State || \' \' || Atlas_of_Surveillance_20201007.County),
acs2015_county_data.Black,
acs2015_county_data.TotalPop,
acs2015_county_data.Poverty,
acs2015_county_data.Men,
acs2015_county_data.Women,
acs2015_county_data.White,
acs2015_county_data.Native,
acs2015_county_data.Hispanic,
acs2015_county_data.Asian,
acs2015_county_data.Pacific,
acs2015_county_data.Income,
acs2015_county_data.Drive,
acs2015_county_data.Walk,
acs2015_county_data.Transit,
acs2015_county_data.Professional,
acs2015_county_data.WorkAtHome,
acs2015_county_data.Unemployment,
acs2015_county_data.SelfEmployed,
acs2015_county_data.Professional,
acs2015_county_data.Employed
FROM Atlas_of_Surveillance_20201007, acs2015_county_data
WHERE (acs2015_county_data.State || acs2015_county_data.County) = (Atlas_of_Surveillance_20201007.State || Atlas_of_Surveillance_20201007.County);
'''
# Same implicit State+County join, filtered to one surveillance technology.
# The technology value is bound at execution time via the `?` placeholder,
# which keeps this safe against SQL injection.
JOIN_QUERY = '''SELECT
acs2015_county_data.White,
acs2015_county_data.TotalPop,
(Atlas_of_surveillance_20201007.State || \' \' || Atlas_of_Surveillance_20201007.County),
acs2015_county_data.Poverty
FROM
Atlas_of_Surveillance_20201007,
acs2015_county_data
WHERE
(acs2015_county_data.State || acs2015_county_data.County) = (Atlas_of_Surveillance_20201007.State || Atlas_of_Surveillance_20201007.County)
AND
Atlas_of_Surveillance_20201007.Technology = ?;
'''
# All distinct surveillance technology categories in the atlas.
DISTINCT_TECH = 'SELECT DISTINCT Technology FROM Atlas_of_Surveillance_20201007;'
# NOTE(review): the name suggests a count, but this selects full joined rows.
COUNT_QUERY = 'SELECT * FROM acs2015_county_data INNER JOIN Atlas_of_Surveillance_20201007 ON acs2015_county_data.County = Atlas_of_Surveillance_20201007.County'
# Schema-introspection helpers.
LIST_TABLES_CMD = "SELECT name FROM sqlite_master WHERE type='table';"
LIST_COLUMNS_CMD_ATLAS = "PRAGMA table_info('Atlas_of_Surveillance_20201007');"
LIST_COLUMNS_CMD_2015 = "PRAGMA table_info('acs2015_county_data');"
LIST_COLUMNS_CMD_2017 = "PRAGMA table_info('acs2017_county_data');"
# Single-column extracts from the 2015 ACS table.
SELECT_TOTAL_POP_2015 = "SELECT TotalPop from acs2015_county_data"
SELECT_BLACK_2015 = "SELECT Black from acs2015_county_data"
SELECT_STATE_2015 = "SELECT State from acs2015_county_data"
CLEAN_STATES = "UPDATE Atlas_of_Surveillance_20201007 SET State = \'%s\' WHERE State = \'%s\';"
UPDATE_COUNTIES_2015 = "UPDATE acs2015_county_data SET County = County + ' County' WHERE NOT County LIKE '%County%';"
UPDATE_COUNTIES_2017 = "UPDATE acs2017_county_data SET County = County + ' County' WHERE NOT County LIKE '%County%';"
|
tempo = int(input("Tempo: "))
veloc = int(input("Velocidade: "))
dist = veloc * tempo
litro = dist / 12
print(f"Litros = {litro}") |
"""
CP1404/CP5632 Practical
Practice and Extension Work
Converting parallel lists to a dictionary
Sample list input:
> Date: (8, 4, 2019)
> Names: ["Jack", "Jill", "Harry", "John", "Garry"]
> DOB: [(12, 4, 1999), (1, 1, 2000), (27, 3, 1982), (1, 2, 1979), (20, 11, 1992)]
"""
# Get inputs
current_year = input("What is the date today (In the format '(dd, mm, yyyy)'):")
names = input("Enter list of names:")
dates_of_birth = input("Enter list of dob in same format as date, for each person:")
# current_year = (8, 4, 2019)
# names = ["Jack", "Jill", "Harry", "John", "Garry"]
# dates_of_birth = [(12, 4, 1999), (1, 1, 2000), (27, 3, 1982), (1, 2, 1979), (20, 11, 1992)]
# Create dictionary
names_and_dob = {}
# Store names and dob in dictionary
for name in range(len(names)):
names_and_dob['{}'.format(names[name])] = dates_of_birth[name]
# Calculate age of each person and print output with name
for names, dob in names_and_dob.items():
year_difference = current_year[2] - dob[2]
month_difference = current_year[1] - dob[1]
date_difference = current_year[0] - dob[0]
print("{} is {} years old".format(names, year_difference))
|
# Copyright 2018 The Batfish Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
import pytest
from pybatfish.datamodel.primitives import VariableType
from pybatfish.question import question
from tests.conftest import COMPLETION_TYPES
# ---------------------------------------------------------------------------
# Tests for question._isSubRange
# ---------------------------------------------------------------------------
# These two tests will fail with original code due to typo in the code
def testInvalidSubRange():
    subRange = "100, 200"
    actualResult = question._isSubRange(subRange)
    expectMessage = "Invalid subRange: {}".format(subRange)
    assert not actualResult[0]
    assert expectMessage == actualResult[1]


def testInvalidStartSubRange():
    subRange = "s100-200"
    actualResult = question._isSubRange(subRange)
    expectMessage = "Invalid subRange start: s100"
    assert not actualResult[0]
    assert expectMessage == actualResult[1]


def testInvalidEndSubRange():
    subRange = "100-s200"
    actualResult = question._isSubRange(subRange)
    expectMessage = "Invalid subRange end: s200"
    assert not actualResult[0]
    assert expectMessage == actualResult[1]


def testValidSubRange():
    subRange = "100-200"
    actualResult = question._isSubRange(subRange)
    assert actualResult[0]
    assert actualResult[1] is None


# ---------------------------------------------------------------------------
# Tests for question._isIp
# ---------------------------------------------------------------------------
def testInvalidIp():
    ip = "192.168.11"
    actualResult = question._isIp(ip)
    expectMessage = "Invalid ip string: '{}'".format(ip)
    assert not actualResult[0]
    assert expectMessage == actualResult[1]


def testInvalidIpAddressWithIndicator():
    ip = "INVALID_IP(100)"
    actualResult = question._isIp(ip)
    expectMessage = "Invalid ip string: '{}'".format(ip)
    assert not actualResult[0]
    assert expectMessage == actualResult[1]


# NOTE(review): the trailing 'l' makes this indicator form pass validation
# while "INVALID_IP(100)" above fails -- confirm this asymmetry is the
# intended contract of _isIp.
def testValidIpAddressWithIndicator():
    ip = "INVALID_IP(100l)"
    actualResult = question._isIp(ip)
    assert actualResult[0]
    assert actualResult[1] is None


def testInvalidSegmentsIpAddress():
    ipAddress = "192.168.11.s"
    actualResult = question._isIp(ipAddress)
    expectMessage = "Ip segment is not a number: 's' in ip string: '192.168.11.s'"
    assert not actualResult[0]
    assert expectMessage == actualResult[1]


def testInvalidSegmentRangeIpAddress():
    ipAddress = "192.168.11.256"
    actualResult = question._isIp(ipAddress)
    expectMessage = (
        "Ip segment is out of range 0-255: '256' in ip string: '192.168.11.256'"
    )
    assert not actualResult[0]
    assert expectMessage == actualResult[1]


def testInvalidSegmentRangeIpAddress2():
    ipAddress = "192.168.11.-1"
    actualResult = question._isIp(ipAddress)
    expectMessage = (
        "Ip segment is out of range 0-255: '-1' in ip string: '192.168.11.-1'"
    )
    assert not actualResult[0]
    assert expectMessage == actualResult[1]


def testValidIpAddress():
    ipAddress = "192.168.1.1"
    actualResult = question._isIp(ipAddress)
    assert actualResult[0]
    assert actualResult[1] is None


# ---------------------------------------------------------------------------
# Tests for question._isPrefix
# ---------------------------------------------------------------------------
def testInvalidIpInPrefix():
    prefix = "192.168.1.s/100"
    actualResult = question._isPrefix(prefix)
    expectMessage = "Ip segment is not a number: 's' in ip string: '192.168.1.s'"
    assert not actualResult[0]
    assert expectMessage == actualResult[1]


def testInvalidLengthInPrefix():
    prefix = "192.168.1.1/s"
    actualResult = question._isPrefix(prefix)
    expectMessage = "Prefix length must be an integer"
    assert not actualResult[0]
    assert expectMessage == actualResult[1]


def testValidPrefix():
    prefix = "192.168.1.1/100"
    actualResult = question._isPrefix(prefix)
    assert actualResult[0]
    assert actualResult[1] is None


# ---------------------------------------------------------------------------
# Tests for question._isPrefixRange
# ---------------------------------------------------------------------------
def testInvalidPrefixRangeInput():
    prefixRange = "192.168.1.s/100:100:100"
    actualResult = question._isPrefixRange(prefixRange)
    expectMessage = "Invalid PrefixRange string: '{}'".format(prefixRange)
    assert not actualResult[0]
    assert expectMessage == actualResult[1]


def testInvalidPrefixInput():
    prefixRange = "192.168.1.s/100:100"
    actualResult = question._isPrefixRange(prefixRange)
    expectMessage = (
        "Invalid prefix string: '192.168.1.s/100' in prefix range string: '{}'".format(
            prefixRange
        )
    )
    assert not actualResult[0]
    assert expectMessage == actualResult[1]


def testInvalidRangeInput():
    prefixRange = "192.168.1.1/100:100-s110"
    actualResult = question._isPrefixRange(prefixRange)
    expectMessage = "Invalid subRange end: s110"
    assert not actualResult[0]
    assert expectMessage == actualResult[1]


def testValidPrefixRange():
    prefixRange = "192.168.1.1/100:100-110"
    actualResult = question._isPrefixRange(prefixRange)
    assert actualResult[0]
    assert actualResult[1] is None


# ---------------------------------------------------------------------------
# Tests for question._isIpWildcard (colon form, slash form, bare address)
# ---------------------------------------------------------------------------
def testInvalidIpWildcardWithColon():
    ipWildcard = "192.168.1.s:192.168.10.10:192"
    actualResult = question._isIpWildcard(ipWildcard)
    expectMessage = "Invalid IpWildcard string: '{}'".format(ipWildcard)
    assert not actualResult[0]
    assert expectMessage == actualResult[1]


def testInvalidStartIpWildcardWithColon():
    ipWildcard = "192.168.1.s:192.168.1.1"
    actualResult = question._isIpWildcard(ipWildcard)
    expectMessage = "Invalid ip string: '192.168.1.s'"
    assert not actualResult[0]
    assert expectMessage == actualResult[1]


def testInvalidEndIpWildcardWithColon():
    ipWildcard = "192.168.1.1:192.168.10.s"
    actualResult = question._isIpWildcard(ipWildcard)
    expectMessage = "Ip segment is not a number: 's' in ip string: '192.168.10.s'"
    assert not actualResult[0]
    assert expectMessage == actualResult[1]


def testValidIpWildcardWithColon():
    ipWildcard = "192.168.1.1:192.168.10.10"
    actualResult = question._isIpWildcard(ipWildcard)
    assert actualResult[0]
    assert actualResult[1] is None


def testInvalidIpWildcardWithSlash():
    ipWildcard = "192.168.1.s/192.168.10.10/192"
    actualResult = question._isIpWildcard(ipWildcard)
    expectMessage = "Invalid IpWildcard string: '{}'".format(ipWildcard)
    assert not actualResult[0]
    assert expectMessage == actualResult[1]


def testInvalidStartIpWildcardWithSlash():
    ipWildcard = "192.168.1.s/s"
    actualResult = question._isIpWildcard(ipWildcard)
    expectMessage = "Invalid ip string: '192.168.1.s'"
    assert not actualResult[0]
    assert expectMessage == actualResult[1]


def testInvalidEndIpWildcardWithSlash():
    ipWildcard = "192.168.1.1/s"
    actualResult = question._isIpWildcard(ipWildcard)
    expectMessage = "Invalid prefix length: 's' in IpWildcard string: '{}'".format(
        ipWildcard
    )
    assert not actualResult[0]
    assert expectMessage == actualResult[1]


def testValidIpWildcardWithSlash():
    ipWildcard = "192.168.1.1/100"
    actualResult = question._isIpWildcard(ipWildcard)
    assert actualResult[0]
    assert actualResult[1] is None


def testInvalidIpAddressIpWildcard():
    ipWildcard = "192.168.11.s"
    actualResult = question._isIpWildcard(ipWildcard)
    expectMessage = "Ip segment is not a number: 's' in ip string: '192.168.11.s'"
    assert not actualResult[0]
    assert expectMessage == actualResult[1]


def testValidIpAddressIpWildcard():
    ipWildcard = "192.168.11.1"
    actualResult = question._isIpWildcard(ipWildcard)
    assert actualResult[0]
    assert actualResult[1] is None
# ---------------------------------------------------------------------------
# Tests for question._validateType -- one invalid/valid pair per declared
# variable type (boolean, integer, comparator, float, double, long,
# javaRegex, jsonPath, subrange, protocol, ipProtocol, completion types).
# ---------------------------------------------------------------------------
def testInvalidBooleanValidateType():
    result = question._validateType(1.5, "boolean")
    assert not result[0]


def testValidBooleanValidateType():
    result = question._validateType(True, "boolean")
    assert result[0]


def testInvalidIntegerValidateType():
    result = question._validateType(1.5, "integer")
    assert not result[0]


def testValidIntegerValidateType():
    result = question._validateType(10, "integer")
    assert result[0]


def testInvalidComparatorValidateType():
    result = question._validateType("<==", "comparator")
    expectMessage = (
        "'<==' is not a known comparator. Valid options are: '<, <=, ==, >=, >, !='"
    )
    assert not result[0]
    assert expectMessage == result[1]


def testValidComparatorValidateType():
    result = question._validateType("<=", "comparator")
    assert result[0]


def testInvalidFloatValidateType():
    result = question._validateType(10, "float")
    assert not result[0]


def testValidFloatValidateType():
    result = question._validateType(10.0, "float")
    assert result[0]


def testInvalidDoubleValidateType():
    result = question._validateType(10, "double")
    assert not result[0]


def testValidDoubleValidateType():
    result = question._validateType(10.0, "double")
    assert result[0]


def testInvalidLongValidateType():
    # Both a non-integer and an integer beyond 64-bit range must be rejected.
    result = question._validateType(5.3, "long")
    assert not result[0]
    result = question._validateType(2**64, "long")
    assert not result[0]


def testValidLongValidateType():
    result = question._validateType(10, "long")
    assert result[0]
    result = question._validateType(2**40, "long")
    assert result[0]


def testInvalidJavaRegexValidateType():
    result = question._validateType(10, "javaRegex")
    expectMessage = "A Batfish javaRegex must be a string"
    assert not result[0]
    assert expectMessage == result[1]


def testInvalidNonDictionaryJsonPathValidateType():
    result = question._validateType(10, "jsonPath")
    expectMessage = "Expected a jsonPath dictionary with elements 'path' (string) and optional 'suffix' (boolean)"
    assert not result[0]
    assert expectMessage == result[1]


def testInvalidDictionaryJsonPathValidateType():
    result = question._validateType({"value": 10}, "jsonPath")
    expectMessage = "Missing 'path' element of jsonPath"
    assert not result[0]
    assert expectMessage == result[1]


def testPathNonStringJsonPathValidateType():
    result = question._validateType({"path": 10}, "jsonPath")
    expectMessage = "'path' element of jsonPath dictionary should be a string"
    assert not result[0]
    assert expectMessage == result[1]


def testSuffixNonBooleanJsonPathValidateType():
    result = question._validateType({"path": "I am path", "suffix": "hi"}, "jsonPath")
    expectMessage = "'suffix' element of jsonPath dictionary should be a boolean"
    assert not result[0]
    assert expectMessage == result[1]


def testValidJsonPathValidateType():
    result = question._validateType({"path": "I am path", "suffix": True}, "jsonPath")
    assert result[0]
    assert result[1] is None


def testInvalidTypeSubRangeValidateType():
    result = question._validateType(10.0, "subrange")
    expectMessage = "A Batfish subrange must either be a string or an integer"
    assert not result[0]
    assert expectMessage == result[1]


def testValidIntegerSubRangeValidateType():
    result = question._validateType(10, "subrange")
    assert result[0]
    assert result[1] is None


def testNonStringProtocolValidateType():
    result = question._validateType(10.0, "protocol")
    expectMessage = "A Batfish protocol must be a string"
    assert not result[0]
    assert expectMessage == result[1]


def testInvalidProtocolValidateType():
    result = question._validateType("TCPP", "protocol")
    expectMessage = (
        "'TCPP' is not a valid protocols. Valid options are: 'dns, ssh, tcp, udp'"
    )
    assert not result[0]
    assert expectMessage == result[1]


def testValidProtocolValidateType():
    # Protocol names are matched case-insensitively.
    result = question._validateType("TCP", "protocol")
    assert result[0]
    assert result[1] is None


def testNonStringIpProtocolValidateType():
    result = question._validateType(10.0, "ipProtocol")
    expectMessage = "A Batfish ipProtocol must be a string"
    assert not result[0]
    assert expectMessage == result[1]


def testInvalidIntegerIpProtocolValidateType():
    result = question._validateType("1000", "ipProtocol")
    expectMessage = "'1000' is not in valid ipProtocol range: 0-255"
    assert not result[0]
    assert expectMessage == result[1]


def testValidIntegerIpProtocolValidateType():
    result = question._validateType("10", "ipProtocol")
    assert result[0]
    assert result[1] is None


def testInvalidCompletionTypes():
    # Every completion type rejects non-string values with the same message.
    # TODO: simplify to COMPLETION_TYPES after VariableType.BGP_ROUTE_STATUS_SPEC is moved
    for completion_type in set(COMPLETION_TYPES + [VariableType.BGP_ROUTE_STATUS_SPEC]):
        result = question._validateType(5, completion_type)
        expectMessage = "A Batfish " + completion_type + " must be a string"
        assert not result[0]
        assert result[1] == expectMessage


def testValidCompletionTypes():
    # Types with stricter syntax get a concrete valid value; everything else
    # accepts an arbitrary regex-ish string (".*").
    values = {
        VariableType.IP: "1.2.3.4",
        VariableType.PREFIX: "1.2.3.4/24",
        VariableType.PROTOCOL: "ssh",
    }
    # TODO: simplify to COMPLETION_TYPES after VariableType.BGP_ROUTE_STATUS_SPEC is moved
    for completion_type in set(COMPLETION_TYPES + [VariableType.BGP_ROUTE_STATUS_SPEC]):
        result = question._validateType(
            values.get(completion_type, ".*"), completion_type
        )
        assert result[0]
        assert result[1] is None


if __name__ == "__main__":
    pytest.main()
|
# 54. Spiral Matrix
'''
Given a matrix of m x n elements (m rows, n columns), return all elements of the matrix in spiral order.
For example,
Given the following matrix:
[
[ 1, 2, 3 ],
[ 4, 5, 6 ],
[ 7, 8, 9 ]
]
You should return [1,2,3,6,9,8,7,4,5].
'''
#Array
class Solution(object):
    def spiralOrder(self, matrix):
        """Return all elements of *matrix* in clockwise spiral order.

        Walks the current outer ring (top row left-to-right, right column
        top-to-bottom, bottom row right-to-left, left column bottom-to-top),
        then shrinks the four boundaries inward and repeats.

        :type matrix: List[List[int]]
        :rtype: List[int]
        """
        # Guard both [] and [[]]: the original crashed with an IndexError on
        # a matrix with zero columns.
        if not matrix or not matrix[0]:
            return []
        res = []
        left, right = 0, len(matrix[0]) - 1
        up, down = 0, len(matrix) - 1
        while left <= right and up <= down:
            # Top row, left to right.
            for col in range(left, right + 1):
                res.append(matrix[up][col])
            up += 1
            # Right column, top to bottom (empty range if the row was the last).
            for row in range(up, down + 1):
                res.append(matrix[row][right])
            right -= 1
            # Bottom row, right to left -- only if an unvisited row remains,
            # otherwise a single remaining row would be emitted twice.
            if up <= down:
                for col in range(right, left - 1, -1):
                    res.append(matrix[down][col])
                down -= 1
            # Left column, bottom to top -- same single-column guard.
            if left <= right:
                for row in range(down, up - 1, -1):
                    res.append(matrix[row][left])
                left += 1
        return res
|
from pyjak.convert import (
BinaryError, BinarySizeMismatch,
parse_int8, parse_uint8, parse_int16, parse_uint16, parse_int32,
parse_uint32, parse_int64, parse_uint64, parse_float32, parse_float64,
parse_bool, dump_int8, dump_uint8, dump_int16, dump_uint16, dump_int32,
dump_uint32, dump_int64, dump_uint64, dump_float32, dump_float64,
dump_bool)
from pyjak.order import ByteOrder
|
from django.conf.urls import patterns, url
from accounts import views
from django.conf import settings
from django.conf.urls.static import static
from django.http import HttpResponseRedirect
# URL routes for the accounts app.
# NOTE(review): django.conf.urls.patterns() and string view references were
# removed in Django 1.10 -- this module assumes an older Django release.
urlpatterns = patterns('',
    # Bare /a/ redirects to the account dashboard.
    url(r'^$', lambda x: HttpResponseRedirect('/a/my_account')),
    url(r'^my_account/$', views.my_account, name='my_account'),
    url(r'^users/(?P<u_id>[^/]+)/$', views.view_user, name='view_user'),
    url(r'^create_profile/$', views.create_profile, name='create_profile'),
    url(r'^edit_profile/$', views.edit_profile, name='edit_profile'),
    url(r'^login/$', views.user_login, name='login'),
    url(r'^logout/$', views.user_logout, name='logout'),
    url(r'^register/$', views.user_register, name='register'),
    url(r'^delete_user/$', views.deactivate_user, name='deactivate_user'),
    url(r'^locations/$', views.location, name='locations'),
) #+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Serve media/static files directly only in development (DEBUG); production
# should let the web server handle these.
if settings.DEBUG:
    urlpatterns += patterns('',
        url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
            'document_root': settings.MEDIA_ROOT,
        }),
        url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {
            'document_root': settings.STATIC_ROOT,
        }),
    )
|
import conducto as co
def main() -> co.Serial:
    """Build a one-node Conducto pipeline whose single step runs `ls`."""
    # copy_repo=True bakes this repository into the image so the command
    # sees the project files.
    with co.Serial(image=co.Image(copy_repo=True)) as node:
        node["hello"] = co.Exec("ls")
    return node


co.main(default=main)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.