text stringlengths 38 1.54M |
|---|
from django.shortcuts import render
from django.http import HttpResponse
import json
import telepot
from telepot.loop import MessageLoop
from telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton
from django.http import HttpResponseForbidden, HttpResponseBadRequest, JsonResponse
from django.views.generic import View
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from .models import Greeting
# Create your views here.
def index(request):
    """Render the landing page template."""
    # NOTE: a second `index` defined further down this module shadows this one.
    return render(request, "index.html")
def db(request):
    """Persist one new Greeting row, then render the full greeting list."""
    Greeting().save()
    all_greetings = Greeting.objects.all()
    return render(request, "db.html", {"greetings": all_greetings})
# SECURITY(review): this bot token is hardcoded and has been committed to
# source control; it should be rotated and read from an environment variable.
TOKEN = "777893642:AAG3I_qD-yLxWHKdJzzOaua_CwFh5k6G4ME"
TelegramBot = telepot.Bot(TOKEN)
# BUGFIX: the original called .format(bot_token=TOKEN) on a URL that contains
# no '{bot_token}' placeholder -- a silent no-op. The literal URL is what gets
# registered, so register it directly.
TelegramBot.setWebhook('https://mehanat-django.herokuapp.com/bot/')
# Create your views here.
def index(request):
    """Health-check endpoint: always answers plain-text 'ok'."""
    body = 'ok'
    return HttpResponse(body)
def on_callback_query(msg):
    """Handle a Telegram callback_query update: log it and acknowledge it.

    Answering the query clears the client's loading spinner.
    NOTE(review): this handler is never wired into a MessageLoop here --
    confirm whether it is dead code superseded by CommandReceiveView.
    """
    query_id, from_id, query_data = telepot.glance(msg, flavor='callback_query')
    print('Callback Query:', query_id, from_id, query_data)
    TelegramBot.answerCallbackQuery(query_id, text='Got it')
class CommandReceiveView(View):
    """Webhook endpoint receiving Telegram updates as POSTed JSON.

    A callback_query update (inline-keyboard press) is echoed back; any other
    message is answered with a two-button inline keyboard.
    """

    def post(self, request):
        print(request.body)
        payloadStr = request.body.decode('utf-8')
        payload = json.loads(payloadStr)
        # BUGFIX: the original tested the raw JSON *string* for the substring
        # 'callback_query', which could false-positive on message text that
        # merely contains those words. Test the parsed update for the key.
        if 'callback_query' in payload:
            chat_id = payload['callback_query']['message']['chat']['id']
            message = payload['callback_query']['data']
            TelegramBot.sendMessage(chat_id, f"Вы нажали кнопку `{message}`")
        else:
            chat_id = payload['message']['chat']['id']
            message = payload['message'].get('text')
            print(message)
            keyboard = InlineKeyboardMarkup(inline_keyboard=[
                [InlineKeyboardButton(text='Button1', callback_data='press1')],
                [InlineKeyboardButton(text='Button2', callback_data='press2')],
            ])
            TelegramBot.sendMessage(chat_id, 'Нажми кнопку', reply_markup=keyboard)
        return JsonResponse({}, status=200)

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        # CSRF is disabled because Telegram cannot supply a CSRF token.
        return super(CommandReceiveView, self).dispatch(request, *args, **kwargs)
import os
import pickle
import argparse
from itertools import product, cycle
from collections import defaultdict
import numpy as np
import tikzplotlib
import graphviz as gv
import matplotlib.pyplot as plt
import polytope
def policy_evaluation(P, R, gamma, policy):
""" Policy Evaluation Solver
We denote by 'A' the number of actions, 'S' for the number of
states.
Args:
P (numpy.ndarray): Transition function as (A x S x S) tensor
R (numpy.ndarray): Reward function as a (S x A) tensor
gamma (float): Scalar discount factor
policies (numpy.ndarray): tensor of shape (S x A)
Returns:
tuple (vf, qf) where the first element is vector of length S and the second element contains
the Q functions as matrix of shape (S x A).
"""
nstates = P.shape[-1]
expanded = False
if policy.ndim < 3:
policy = np.expand_dims(policy, axis=0)
expanded = True
R = np.expand_dims(R, axis=0)
ppi = np.einsum('ast,nsa->nst', P, policy)
rpi = np.einsum('nsa,nsa->ns', R, policy)
vf = np.linalg.solve(np.eye(nstates) - gamma*ppi, rpi)
qf = R + gamma*np.einsum('ast,nt->nsa', P, vf)
if expanded is True:
vf = np.squeeze(vf)
qf = np.squeeze(qf)
return vf, qf
def make_polytope_figure(P, R, discount, rng, npolicies=100, box=None, delta=0.01, ):
    """Scatter the value functions of random and deterministic policies.

    Plots value functions as 2-D points, so it assumes a 2-state MDP; the
    random-policy sampling additionally assumes 2 actions -- TODO confirm.

    Args:
        P: transition tensor of shape (A, S, S).
        R: reward tensor of shape (S, A).
        discount: scalar discount factor.
        rng: numpy RandomState used to sample the random policies.
        npolicies: number of random stochastic policies to scatter.
        box: optional contour-plot range; a scalar for a symmetric range or a
            (2, 2) array of per-state (lo, hi) bounds.
        delta: grid step used when box is given.

    Returns:
        tuple: (fig, ax) for the assembled matplotlib figure.
    """
    nstates, nactions = P.shape[-1], P.shape[0]
    # Sample action-0 probabilities; action 1 gets the complement mass
    # (only meaningful for nactions == 2).
    random_policies = np.zeros((npolicies, nstates, nactions))
    random_policies[:, :, 0] = rng.uniform(size=(npolicies, nstates))
    random_policies[:, :, 1] = 1 - random_policies[:, :, 0]
    fig, ax = plt.subplots()
    vfs, _ = policy_evaluation(P, R, discount, random_policies)
    ax.scatter(vfs[:, 0], vfs[:, 1], s=12, alpha=1., zorder=0)
    # One deterministic policy per assignment of an action to each state.
    state_action_cartesian_product = np.array(list(product(range(nactions), repeat=nstates)))
    if box is not None:
        def constraint(v0, v1, s=0, a=0):
            # Bellman-consistency residual for state s under action a.
            val = discount*P[a, s, 0]*v0 + discount*P[a, s, 1]*v1
            if s == 0:
                return v0 - val
            return v1 - val
        box = np.asarray(box)
        if box.ndim == 0:
            # Scalar box: symmetric [-box, box] range for both states.
            s0_valrange = np.arange(-box, box, delta)
            s1_valrange = s0_valrange
        else:
            s0_valrange = np.arange(box[0, 0], box[0, 1], delta)
            s1_valrange = np.arange(box[1, 0], box[1, 1], delta)
        vstate0, vstate1 = np.meshgrid(s0_valrange, s1_valrange)
        # Draw the constraint contour at level r(s, a) for every pair.
        for state, action in state_action_cartesian_product:
            cp = ax.contour(vstate0, vstate1, constraint(vstate0, vstate1, state, action),
                            levels=[R[state, action]], zorder=5)
            ax.clabel(cp, fmt=f"$r(s_{state}, a_{action})$", inline=1)  # , fontsize=8)
    # Overlay the value functions of the deterministic policies in red.
    deterministic_policies = np.eye(nactions)[state_action_cartesian_product]
    dvfs, _ = policy_evaluation(P, R, discount, deterministic_policies)
    ax.scatter(dvfs[:, 0], dvfs[:, 1], c='r', zorder=10)
    return fig, ax
def mdp_to_dot(P, R, discount, bend_delta=20, draw_initial=False):
    """Render an MDP's transition graph as Graphviz dot source for dot2tex.

    Args:
        P: transition tensor of shape (A, S, S).
        R: reward tensor of shape (S, A).
        discount: unused; accepted for signature symmetry with the plotters.
        bend_delta: degrees of extra bend per additional parallel edge
            between the same pair of states.
        draw_initial: when True, style every state as a TikZ "initial" state.

    Returns:
        str: the dot source, annotated with TikZ automata styles.
    """
    del discount
    graph = gv.Digraph(
        body=['d2tdocpreamble = "\\usetikzlibrary{automata}"',
              'd2tfigpreamble = "\\tikzstyle{every state}= [draw=blue!50,semithick,fill=blue!20]"'],
        node_attr={'style': 'state'},
        edge_attr={'lblstyle': 'auto'})
    graph.graph_attr['rankdir'] = 'LR'
    nstates, nactions = P.shape[-1], P.shape[0]
    if draw_initial is True:
        for i in range(nstates):
            graph.node(str(i), style="state, initial")
    # Parallel edges between one state pair get increasing bends; self-loops
    # alternate above/below placement so their labels do not overlap.
    edge_bends = defaultdict(lambda: bend_delta)
    edge_loops = defaultdict(lambda: cycle(['above', 'below']))
    for a, i, j in product(range(nactions), range(nstates), range(nstates)):
        if P[a, i, j] > 1e-5:  # skip (near-)zero-probability transitions
            edge_spec = {'tail_name': str(i), 'head_name': str(
                j), 'label': f"({a}, {R[i,a]:.3f}, {P[a,i,j]:.3g})"}
            if i == j:
                edge_spec['topath'] = "loop {}".format(next(edge_loops[i]))
            else:
                key = (min(i, j), max(i, j))
                edge_spec['topath'] = "bend left={:d}".format(edge_bends[key])
                edge_bends[key] += bend_delta
            graph.edge(**edge_spec)
    return graph.source
def dot_to_tex(dotfilename):
    """Convert <dotfilename>.dot into <dotfilename>.tex via the dot2tex CLI."""
    command = f"dot2tex -ftikz --tikzedgelabel -c {dotfilename}.dot > {dotfilename}.tex"
    os.system(command)
def tex_to_pdf(texfilename):
    """Compile <texfilename>.tex to PDF with pdflatex (in the working dir)."""
    command = f"pdflatex {texfilename}.tex"
    os.system(command)
def export_graph(prefix, dot_code):
    """Write dot_code to <prefix>.dot, then render it to .tex and .pdf."""
    dot_path = f"{prefix}.dot"
    with open(dot_path, 'w') as fp:
        fp.write(dot_code)
    dot_to_tex(prefix)
    tex_to_pdf(prefix)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Process some integers.')
    # Pickle file holding {'solution': (transition_tensor, reward_tensor)}.
    parser.add_argument('solution', type=str)
    parser.add_argument('--bend', type=int, default=10)
    arguments = parser.parse_args()
    with open(arguments.solution, 'rb') as fp:
        data = pickle.load(fp)
    synthetic_transition, synthetic_reward = data['solution']
    mdp = polytope.DadashiFig2d()
    rng = np.random.RandomState(0)
    # Figure 1: polytope of the learned (synthetic) MDP.
    make_polytope_figure(synthetic_transition, synthetic_reward, mdp.discount,
                         rng, npolicies=500)  # box=[[-0.25, 0.25], [-0.1, 0.1]])
    plt.savefig('synthetic_polytope.pdf')
    tikzplotlib.save('synthetic_polytope.tex')
    # Figure 2: polytope of the true MDP. NOTE(review): relies on mdp.mdp
    # unpacking to (P, R, discount) so rng lands in the fourth positional
    # slot -- confirm against the polytope module.
    make_polytope_figure(*mdp.mdp, rng, npolicies=500)  # box=[[-2.25, 1], [-2.25, 2.25]])
    plt.savefig('true_polytope.pdf')
    tikzplotlib.save('true_polytope.tex')
    # Export both transition graphs as .dot/.tex/.pdf.
    source = mdp_to_dot(synthetic_transition, synthetic_reward,
                        mdp.discount, bend_delta=arguments.bend)
    export_graph('synthetic', source)
    export_graph('true', mdp_to_dot(*mdp.mdp, bend_delta=arguments.bend))
|
from django.contrib import admin
from .models import java
admin.site.register(java)  # Expose the `java` model in the Django admin.
# Register your models here.
|
#!/usr/bin/env python
from utils_new import *
def genDivs():
datesuffixes = [
('2016-01-01','1'),
('2016-01-02','1'),
('2016-01-03','1'),
('2016-01-04','0'),
('2016-01-05','0'),
('2016-01-06','0'),
('2016-01-07','0'),
('2016-01-08','0'),
('2016-01-09','1'),
('2016-01-10','1'),
('2016-01-11','0'),
('2016-01-12','0'),
('2016-01-13','0'),
('2016-01-14','0'),
('2016-01-15','0'),
('2016-01-16','1'),
('2016-01-17','1'),
('2016-01-18','0'),
('2016-01-19','0'),
('2016-01-20','0'),
('2016-01-21','0'),
]
prefix = './trainFiles_new/trainfile_'
for datesuffix in datesuffixes:
file = prefix + datesuffix[0]
f = open(file)
length = len(f.readlines())
for i in range(length):
print length
def run():
    """Build one training file per date from the raw season-1 data dumps (Python 2).

    Uses the project helpers from utils_new: areaSliceTransDict* for the area
    lookup tables and genSingleDataFile for the per-day file generation.
    """
    areadict = areaSliceTransDict()
    destdict = areaSliceTransDict2(areadict)
    # (date, festival-flag) pairs; '1' marks festival days.
    datesuffixes = [
        ('2016-01-01','1'),
        ('2016-01-02','1'),
        ('2016-01-03','1'),
        ('2016-01-04','0'),
        ('2016-01-05','0'),
        ('2016-01-06','0'),
        ('2016-01-07','0'),
        ('2016-01-08','0'),
        ('2016-01-09','1'),
        ('2016-01-10','1'),
        ('2016-01-11','0'),
        ('2016-01-12','0'),
        ('2016-01-13','0'),
        ('2016-01-14','0'),
        ('2016-01-15','0'),
        ('2016-01-16','1'),
        ('2016-01-17','1'),
        ('2016-01-18','0'),
        ('2016-01-19','0'),
        ('2016-01-20','0'),
        ('2016-01-21','0'),
    ]
    for datesuffix in datesuffixes:
        print "Generating training file " + datesuffix[0] + "......................"
        # Input paths are hardcoded to one machine -- TODO parameterize.
        orderDataFile = "/home/work/xusiwei/ditech/season_1/training_data/order_data/order_data_" + datesuffix[0]
        trafficDataFile = "/home/work/xusiwei/ditech/season_1/training_data/traffic_data/traffic_data_" + datesuffix[0]
        weatherDataFile = "/home/work/xusiwei/ditech/season_1/training_data/weather_data/weather_data_" + datesuffix[0]
        poiDataFile = '/home/work/xusiwei/ditech/season_1/training_data/poi_data/poi_data'
        saveFile = './trainFiles_new/trainfile_' + datesuffix[0]
        festival = datesuffix[1]
        genSingleDataFile(areadict, destdict, orderDataFile, trafficDataFile, weatherDataFile, poiDataFile, saveFile, festival=festival)
        print "Finished generating training file "
if __name__ == "__main__":
    # run() regenerates the full training files; currently only the
    # line-count report runs.
    #run()
    genDivs()
|
#!/usr/bin/python
import json
import socket
import sys
import time
import os
import array
import math
import sqlite3
import xdriplib
import mongo
# Data handling toward the Mongo database: blood-glucose (BG) values.
openapsDBName='openaps.sqlite'  # local sqlite cache file
DBVersion=1  # expected schema version, checked by exists()
tableNameDBVersion='DBVersion'
tableNameWixeldata='WixelData'
tableNameSensordata='SensorData'
tableNameCalibrationdata='CalibrationData'
tableNameBGReadingsdata='BGReadingsData'
# Template records mirroring the xDrip-style schemas; all values are placeholders.
old_calibration={ "_id":0 ,"timestamp":0, "sensor_age_at_time_of_estimation":0 , "sensorid":0, "bg":0, "raw_value":0, "filtered_value":0, "age_adjusted_raw_value":0, "sensor_confidence":0, "slope_confidence":0, "raw_timestamp":0, "slope":0, "intercept":0, "distance_from_estimate":0, "estimate_raw_at_time_of_calibration":0, "estimate_bg_at_time_of_calibration":0, "uuid":'uid', "sensor_uuid":'SensorUID', "possible_bad":False, "check_in":False, "first_decay":0, "second_decay":0, "first_slope":0, "second_slope":0, "first_intercept":0, "second_intercept":0, "first_scale":0, "second_scale":0}
WixelData = {"_id":0,"TransmitterId":"00000","CaptureDateTime":0,"RelativeTime":0,"ReceivedSignalStrength":0,"RawValue":0,"TransmissionId":0,"BatteryLife":0,"UploadAttempts":0,"Uploaded":0,"UploaderBatteryLife":0,"FilteredValue":0 }
# Placeholder ("mussnoch" is German for "still needs to [be defined]").
BGReadings={"mussnoch":0}
def exists():
conn = sqlite3.connect(openapsDBName)
ret=-3
try:
cur = conn.cursor()
sql='SELECT * FROM ' + tableNameDBVersion
#print "(BGReadings)(exists) SQL->" + sql
cur.execute(sql)
data = cur.fetchone()
#print "(BGReadings)(exists) Version ->" + str(data[0])
if data[0]==DBVersion:
ret=2
else:
print "(BGReadings)(exists) DBVersion vorgegeben -> " + DBVersion + ' ungleich DBVersion in DB ->' + str(data[0])
ret=-2
except sqlite3.Error, e:
print "(BGReadings)(exists) Error %s:" % e.args[0]
ret=-1
finally:
conn.close()
return ret
def initDB():
    """Drop and recreate the sqlite schema for the cached BG data (Python 2).

    Only runs the rebuild when exists() reports a missing or outdated schema
    (negative return value). The SensorData and BGReadingsData drop
    statements are built but their execute calls are commented out, so those
    tables survive a rebuild -- presumably intentional; confirm.
    """
    test=exists()
    if test<0:
        print "Weiter ->" + str(test)
        conn = sqlite3.connect(openapsDBName)
        # --- schema-version table: a single row holding DBVersion ---
        sql = 'drop table if exists ' + tableNameDBVersion
        print "(BGReadings) initDB SQL->" + sql
        conn.execute(sql)
        sql = 'create table if not exists ' + tableNameDBVersion
        sql +='(version Long) '
        print "(BGReadings) initDB SQL->" + sql
        conn.execute(sql)
        sql = 'Insert into ' + tableNameDBVersion + ' (version) VALUES (' + str(DBVersion) + ')'
        print "(BGReadings) initDB SQL->" + sql
        conn.execute(sql)
        # --- raw Wixel receiver packets ---
        sql = 'drop table if exists ' + tableNameWixeldata
        print "(BGReadings)(initDB) SQL->" + sql
        conn.execute(sql)
        sql = 'create table if not exists ' + tableNameWixeldata
        sql +='(_id INTEGER PRIMARY KEY AUTOINCREMENT, '
        sql +='TransmitterId String , '
        sql +='CaptureDateTime Long , '
        sql +='RelativeTime Long , '
        sql +='ReceivedSignalStrength Long , '
        # NOTE(review): column is 'Rawvalue' but the WixelData template dict
        # above uses the key 'RawValue' -- confirm which casing callers expect.
        sql +='Rawvalue Long , '
        sql +='age_adjusted_raw_value Long , '
        sql +='FilteredValue Long , '
        sql +='TransmissionId Long , '
        sql +='BatteryLife Long , '
        sql +='UploadAttempts Long, '
        sql +='Uploaded Long , '
        sql +='UploaderBatteryLife Long) '
        print "(BGReadings)(initDB) SQL->" + sql
        conn.execute(sql)
        # --- sensor sessions (drop deliberately disabled) ---
        sql = 'drop table if exists ' + tableNameSensordata
        #conn.execute(sql)
        sql = 'create table if not exists ' + tableNameSensordata
        sql +='(_id INTEGER PRIMARY KEY AUTOINCREMENT, '
        sql +='started_at Long , '
        sql +='stopped_at Long , '
        sql +='latest_battery_level Long , '
        sql +='uuid String , '
        sql +='sensor_location String ) '
        print "(BGReadings)(initDB) SQL->" + sql
        conn.execute(sql)
        sql = "CREATE INDEX IF NOT EXISTS " +tableNameSensordata+ "_idx ON " +tableNameSensordata+ "( started_at,stopped_at )"
        conn.execute(sql)
        # --- calibration records ---
        sql = 'drop table if exists ' + tableNameCalibrationdata
        conn.execute(sql)
        sql = 'create table if not exists ' + tableNameCalibrationdata
        sql +='(_id INTEGER PRIMARY KEY AUTOINCREMENT, '
        sql +=' timestamp Long, '
        sql +='sensor_age_at_time_of_estimation Long ,'
        sql +='sensorid Long, '
        sql +='bg Long, '
        sql +='raw_value Long, '
        sql +='filtered_value Long, '
        sql +='age_adjusted_raw_value Long, '
        sql +='sensor_confidence Long, '
        sql +='slope_confidence Long, '
        sql +='raw_timestamp Long, '
        sql +='slope Long , '
        sql +='intercept Long, '
        sql +='distance_from_estimate Long , '
        sql +='estimate_raw_at_time_of_calibration Long, '
        sql +='estimate_bg_at_time_of_calibration Long, '
        sql +='uuid String , '
        sql +='sensor_uuid String, '
        sql +='possible_bad Boolean, '
        sql +='check_in Boolean, '
        sql +='first_decay Long, '
        sql +='second_decay Long, '
        sql +='first_slope Long, '
        sql +='second_slope Long, '
        sql +='first_intercept Long, '
        sql +='second_intercept Long, '
        sql +='first_scale Long,'
        sql +='second_scale Long, '
        sql +='uploaded Long ) '
        print "(BGReadings)(initDB) SQL->" + sql
        conn.execute(sql)
        sql = "CREATE INDEX IF NOT EXISTS " +tableNameCalibrationdata+ "_idx ON " +tableNameCalibrationdata+ "( timestamp,raw_timestamp )"
        conn.execute(sql)
        # --- computed BG readings (drop deliberately disabled) ---
        sql = 'drop table if exists ' + tableNameBGReadingsdata
        #conn.execute(sql)
        sql = 'create table if not exists ' + tableNameBGReadingsdata
        sql +='(_id INTEGER PRIMARY KEY AUTOINCREMENT, '
        sql +='timestamp Long, '
        sql +='DateTime String, '
        sql +='bg Long, '
        sql +='raw_value Long, '
        sql +='raw_timestamp Long, '
        sql +='age_adjusted_raw_value Long, '
        sql +='filtered_value Long, '
        sql +='sensor_age_at_time_of_estimation Long ,'
        sql +='possible_bad Boolean, '
        sql +='slope Long , '
        sql +='intercept Long, '
        sql +='sensor_confidence Long, '
        sql +='uploaded Long, '
        sql +='a Long, '
        sql +='b Long, '
        sql +='c Long, '
        sql +='ra Long, '
        sql +='rb Long, '
        sql +='rc Long) '
        conn.execute(sql)
        sql = "CREATE INDEX IF NOT EXISTS " +tableNameBGReadingsdata+ "_idx ON " +tableNameBGReadingsdata+ "( timestamp,raw_timestamp )"
        conn.execute(sql)
        conn.commit()
        conn.close()
|
class Solution:
    def constructMaximumBinaryTree(self, nums):
        """
        :type nums: List[int]
        :rtype: TreeNode
        """
        # An empty slice produces no subtree.
        if not nums:
            return None
        # Index of the (first) maximum element; it becomes the subtree root.
        peak = max(range(len(nums)), key=nums.__getitem__)
        node = TreeNode(x=nums[peak])
        # Everything left of the maximum forms the left subtree, the rest
        # forms the right subtree.
        node.left = self.constructMaximumBinaryTree(nums[:peak])
        node.right = self.constructMaximumBinaryTree(nums[peak + 1:])
        return node
|
import argparse
from sudoku.grid import Grid
def display_grid():
    """Entry point: load the sudoku grid named on the command line and print it."""
    cli_args = parse_args()
    Grid(cli_args.filepath).print()
def parse_args():
    """Parse the command line: one positional argument, the grid file path."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('filepath', help='Path to file containing sudoku grid')
    return arg_parser.parse_args()
if __name__=='__main__':
    # Script entry point: render the grid named on the command line.
    display_grid()
import msvcrt, os
from MasterOfSudoku.visual import visual
# Small module for reading the relevant key presses.
# Maps msvcrt scancodes to labels; digit keys map to the ints 0-9.
keys = {75: "left", 77: "right", 72: "up", 80: "down", 8: "del", 48: 0,
        49: 1, 50: 2, 51: 3, 52: 4, 53: 5, 54: 6, 55: 7, 56: 8, 57: 9, 27: "escape", 13: "enter"}
# Difficulty identifiers, in the order shown by the difficulty menu.
diffs = ["easy", "medium", "hard", "expert", "fiendish", "diabolic", "nightmare", "platinum blonde"]
menustrings = [" Los een sudoku op ",
               " Vul zelf een sudoku in ",
               " Stop het programma "]
diffstrings = [" Easy ",
               " Medium ",
               " Hard ",
               " Expert ",
               " Fiendish ",
               " Diabolic ",
               " Nightmare ",
               " Platinum Blonde ",
               " Terug naar menu "]
# Fixed-width chrome for the in-game screen.
bar = "-------------------------------------------------------------"
footers = [" || Gebruik pijltjes om te navigeren, escape voor 't menu || ",
           " || 1-9 om een getal in te vullen en 0 om leeg te maken || "]
menu = ["Menu", "Solve", "Check", "Terug"]
def key():
    """Block until a key is pressed; return its mapped meaning.

    Returns a label from `keys` (e.g. "left", "enter"), an int 0-9 for digit
    keys, or the string "wrong key <code>" for unmapped scancodes.
    Windows-only (msvcrt).
    """
    while 1:
        if msvcrt.kbhit():
            a = ord(msvcrt.getch())
            if a:
                try:
                    a = keys[a]
                except:
                    # BUGFIX: `a` is an int here; concatenating it to a str
                    # raised TypeError for every unmapped key.
                    a = "wrong key" + " " + str(a)
                finally:
                    return a
def interface(mode="menu"):
    """Render a full-screen text menu and return the user's choice.

    mode == "menu":       returns "quit", "insert", or "select".
    mode == "difficulty": returns "menu" or a difficulty name from `diffs`.
    any other mode:       returns "break".
    """
    if mode == "menu":
        sel = 0
        while 1:
            os.system('cls')  # Windows-only screen clear.
            print("------------------------------------------")
            print("|| Welkom bij mijn sudoku programma ||")
            print("|| Los een sudoku op, of maak er zelf 1 ||")
            print("|| Geniet, groeten Leroy ||")
            print("------------------------------------------")
            # '>' marks the currently selected menu entry.
            for i in range(0, 3):
                if sel == i:
                    print(">", menustrings[i])
                else:
                    print(" ", menustrings[i])
            k = key()
            if k == "escape":
                return "quit"
            elif k == "up":
                sel = (sel - 1) % 3
            elif k == "down":
                sel = (sel + 1) % 3
            elif k == "enter":
                break
            else:
                print("Invalid key:", k)
        # Enter pressed: translate the highlighted entry into an action.
        if sel == 2:
            return "quit"
        if sel == 1:
            return "insert"
        if sel == 0:
            return "select"
    if mode == "difficulty":
        sel = 0
        while 1:
            os.system('cls')
            print("------------------------------------------")
            print("|| Selecteer een moeilijkheidsgraad: ||")
            print("------------------------------------------")
            for i in range(0, 9):
                if sel == i:
                    print(">", diffstrings[i])
                else:
                    print(" ", diffstrings[i])
            k = key()
            if k == "escape":
                return "menu"
            elif k == "up":
                sel = (sel - 1) % 9
            elif k == "down":
                sel = (sel + 1) % 9
            elif k == "enter":
                break
            else:
                print("Invalid key:", k)
        # Last entry is "back to menu"; the rest map onto `diffs`.
        if sel == 8:
            return "menu"
        else:
            return diffs[sel]
    else:
        return "break"
def play(sudoku, header, message="", sel=(0, 0)):
    """Interactive sudoku screen.

    Returns [digit, sel] when the player enters a number at the selected
    cell, or one of the in-game menu verbs "break"/"solve"/"check".
    """
    esc = False  # False: navigating the grid; True: the menu bar is shown.
    opt = 0
    while 1:
        os.system('cls')
        print(bar)
        print(header)
        visual(sudoku, sel)
        if not esc:
            # Navigating inside the sudoku grid.
            for foot in footers:
                print(foot)
            print(message)
            print(bar)
            k = key()
            if k == "escape":
                esc = True
                continue
            elif k == "up":
                sel = (sel[0], (sel[1] - 1) % 9)
            elif k == "down":
                sel = (sel[0], (sel[1] + 1) % 9)
            elif k == "left":
                sel = ((sel[0] - 1) % 9, sel[1])
            elif k == "right":
                sel = ((sel[0] + 1) % 9, sel[1])
            elif k == "enter":
                continue
            elif k in {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}:
                return [k, sel]
            else:
                print("Invalid key:", k)
        if esc:
            # The in-game menu bar is shown.
            print(" || ", end="")
            for i in range(0, 4):
                if opt == i:
                    print(" >", menu[i], " ", end="")
                else:
                    print(" ", menu[i], " ", end="")
            print(" ||")
            k = key()
            if k == "left":
                opt = (opt - 1) % 4
            elif k == "right":
                opt = (opt + 1) % 4
            elif k == "enter":
                # Act on the selected option.
                if opt == 0:
                    return "break"
                if opt == 1:
                    return "solve"
                if opt == 2:
                    return "check"
                if opt == 3:
                    esc = False
                    continue
            # NOTE(review): this is a separate `if`, not `elif`, so after a
            # handled "left"/"right" the `else` below still prints
            # "Invalid key" -- likely meant to be `elif`; confirm.
            if k == "escape":
                esc = False
                continue
            else:
                print("Invalid key:", k)
|
import ROOT
import sys
from limittools import addPseudoData
# Input ROOT file with the analysis histograms, given on the command line.
infname=sys.argv[1]
# Background samples (without ttH) used to build the pseudo-data.
#samplesWOttH=['ttbarOther','ttbarPlusCCbar','ttbarPlusBBbar','ttbarPlusB','ttbarPlus2B','singlet','wjets','zjets','ttbarZ','ttbarW','diboson']
samplesWOttH=['ttbarOther','ttbarPlusCCbar','ttbarPlusBBbar','ttbarPlusB','ttbarPlus2B']
# Jet/b-tag multiplicity categories, e.g. "j4_t2" = 4 jets, 2 b-tags.
categories=["j4_t2","j5_t2","j4_t3","j4_t4","j5_t3","j5_tge4","jge6_t2","jge6_t3","jge6_tge4"]
disc="BDT_ljets"  # discriminant histogram name
# Systematic-variation suffixes (b-tag CSV, jet energy scale/resolution).
sysnames=["_CMS_ttH_CSVLFUp","_CMS_ttH_CSVLFDown","_CMS_ttH_CSVHFUp","_CMS_ttH_CSVHFDown",
          "_CMS_ttH_CSVHFStats1Up","_CMS_ttH_CSVHFStats1Down","_CMS_ttH_CSVLFStats1Up","_CMS_ttH_CSVLFStats1Down",
          "_CMS_ttH_CSVHFStats2Up","_CMS_ttH_CSVHFStats2Down","_CMS_ttH_CSVLFStats2Up","_CMS_ttH_CSVLFStats2Down",
          "_CMS_ttH_CSVCErr1Up","_CMS_ttH_CSVCErr1Down","_CMS_ttH_CSVCErr2Up","_CMS_ttH_CSVCErr2Down","_CMS_scale_jUp","_CMS_scale_jDown","_CMS_res_jUp","_CMS_res_jDown"]
addPseudoData(infname,samplesWOttH,categories,sysnames,disc)
|
# two 1 or 0 -> true if they are same
# Reads two whitespace-separated bits and prints 1 if equal, else 0.
a, b = input().split()
# BUGFIX(naming): the result was bound to the name `bool`, shadowing the
# builtin of the same name; %d renders True/False as 1/0 either way.
same = int(a) == int(b)
print("%d" % same)
|
# Generated by Django 3.1 on 2020-09-06 09:23
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: creates the Attendance table.

    Auto-generated by Django; do not edit the operations by hand.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Attendance',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('class_id', models.CharField(blank=True, max_length=100, null=True)),
                ('section', models.CharField(blank=True, max_length=100, null=True)),
                ('subject', models.CharField(blank=True, max_length=100, null=True)),
                ('date', models.DateField(default=django.utils.timezone.now)),
                ('student', models.CharField(blank=True, max_length=100, null=True)),
                ('roll', models.CharField(blank=True, max_length=100, null=True)),
                ('attendance', models.CharField(blank=True, max_length=200, null=True)),
                ('created_at', models.DateField(default=django.utils.timezone.now)),
                ('created_by', models.CharField(blank=True, max_length=200, null=True)),
            ],
        ),
    ]
|
import datetime
import pytest
from src import todo, core
@pytest.mark.parametrize(
    "weekday,today,expected,description",
    [
        (
            todo.Weekday.Monday,
            datetime.date(2020, 11, 16),
            datetime.date(2020, 11, 16),
            "On Monday a Monday todo should return today.",
        ),
        (
            todo.Weekday.Tuesday,
            datetime.date(2020, 11, 16),
            datetime.date(2020, 11, 10),
            "On Monday, a Tuesday todo with no advance notice should return the previous Tuesday.",
        ),
        (
            todo.Weekday.Sunday,
            datetime.date(2020, 11, 16),
            datetime.date(2020, 11, 15),
            "On Monday, a Sunday todo with no advance notice should return the previous Sunday.",
        ),
    ],
)
def test_weekday(
    weekday: todo.Weekday,
    today: datetime.date,
    expected: datetime.date,
    description: str,
) -> None:
    """Weekly.current_date returns the most recent occurrence of week_day
    on or before `today` when advance_days is 0."""
    wk = todo.Weekly(
        advance_days=0,
        category=core.TodoCategory.Todo,
        date_added=datetime.date(2010, 1, 1),
        date_completed=None,
        description="test",
        note="",
        start_date=datetime.date(2010, 1, 1),
        todo_id=1,
        user_id=1,
        week_day=weekday,
    )
    actual = wk.current_date(today=today)
    assert actual == expected, description
|
class Solution:
    def isBalanced(self, root: TreeNode) -> bool:
        """Return True iff every node's subtrees differ in height by at most 1."""
        def height_or_fail(node: TreeNode) -> int:
            # Returns the subtree height, or -1 as soon as imbalance is found.
            if not node:
                return 0
            left_height = height_or_fail(node.left)
            if left_height == -1:
                return -1
            right_height = height_or_fail(node.right)
            if right_height == -1:
                return -1
            if abs(left_height - right_height) > 1:
                return -1
            return 1 + max(left_height, right_height)
        return height_or_fail(root) != -1
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from accounts.views import SignUp,CreateProfile,DetailProfile,VerifyProfile
app_name= 'accounts'
urlpatterns = [
    # Auth: Django's stock login/logout views with a custom login template.
    url(r'^login/$',auth_views.LoginView.as_view(template_name= 'accounts/login.html'),name= 'login'),
    url(r'^logout/$',auth_views.LogoutView.as_view(),name = 'logout'),
    url(r'^signup/$',SignUp.as_view(),name = 'signup'),
    # Profile pages keyed on the username slug.
    url(r'^(?P<username>[-\w\d]+)/profile/create', CreateProfile.as_view(), name='createprofile'),
    url(r'^(?P<username>[-\w]+)/profile/(?P<pk>\d+)/', DetailProfile.as_view(), name='profile'),
    # NOTE(review): VerifyProfile is passed without .as_view(), unlike the
    # other imported views -- confirm it is a function-based view.
    url(r'^(?P<username>[-\w]+)/profile/',VerifyProfile , name='verifyprofile'),
]
|
#!/usr/bin/python
#FIRST remome all <row> s
import xml.etree.ElementTree as et
from functools import reduce
def pc(s):
    """Capitalize the first letter of every word in an element's text.

    Takes an object with a `.text` attribute (an ElementTree element), strips
    the text, and upper-cases each word's first character while leaving the
    remainder of the word untouched.
    """
    stripped = s.text.strip()
    # BUGFIX: skip empty tokens so runs of spaces no longer raise IndexError
    # on w[0]; consecutive spaces now collapse to one in the output.
    words = [w[0].upper() + w[1:] for w in stripped.split(' ') if w]
    return ' '.join(words).strip()
#def entry(e):
# return list([pc(e[0]),list(map(lambda li:pc(li[0]),e[1]))])
def entry(e):
    """Map an XML node to (title, [child titles]) using pc() for casing."""
    child_titles = [pc(li[0]) for li in e[1]]
    return (pc(e[0]), child_titles)
def xml_to_dict(xml_file):
    """Parse xml_file and build {section title: [item titles]} from root[0][0][0]."""
    root = et.parse(xml_file).getroot()
    return dict(map(entry, root[0][0][0]))
|
#!/usr/bin/python3
# pybuster
# A dir buster clone that doesn't derp out when a connection fails.
#
# Laurance Yeomans 2018
#
# Why:
# dirb has a sad when it fails and stops.
# This adds a 5 sec time out before trying again.
#
# No license. Do whatever with it.
import requests
import sys
import time
import signal
# Using dirb's default wordlist. Change to preferred default list.
# Also presuming this is being used within a default Kali environment
default_wordlist = '/usr/share/dirb/wordlists/common.txt'


def sigint_handler(signal, frame):
    """SIGINT handler: announce the interrupt and exit with status 1."""
    print("\n\nCtrl + C caught. Terminating.")
    sys.exit(1)


def show_usage():
    """Print the one-line CLI usage summary."""
    print("Usage: pybuster -u target_url [-w wordlist] [-o outfile]")


def process_args():
    """Parse -u/-w/-o arguments out of sys.argv.

    Returns a dict with keys 'url' (always '/'-terminated when set),
    'wordlist', 'outfile', and 'show_usage' (True when no arguments at all
    were supplied).
    """
    ret_args = dict()
    ret_args['url'] = ''
    ret_args['wordlist'] = default_wordlist
    ret_args['outfile'] = ''
    # BUGFIX: len(sys.argv) is always >= 1 (argv[0] is the program name), so
    # the original `< 1` test could never trigger the usage message.
    if len(sys.argv) < 2:
        ret_args['show_usage'] = True
    else:
        ret_args['show_usage'] = False
        # BUGFIX: stop one short of the end -- each flag needs a following
        # value, and indexing sys.argv[i+1] for a trailing flag raised
        # IndexError before.
        for i in range(0, len(sys.argv) - 1):
            if sys.argv[i] == '-o':
                ret_args['outfile'] = sys.argv[i+1]
            elif sys.argv[i] == '-w':
                ret_args['wordlist'] = sys.argv[i+1]
            elif sys.argv[i] == '-u':
                url_test = sys.argv[i+1]
                # Ensure the base URL ends with a slash for path joining.
                if url_test[len(url_test)-1] != '/':
                    ret_args['url'] = url_test + '/'
                else:
                    ret_args['url'] = url_test
    return ret_args
def main():
    """Drive the directory brute-force: parse args, count words, probe URLs.

    Any request failure is retried forever with a 5-second backoff (the
    original motivation for this tool over dirb).
    """
    signal.signal(signal.SIGINT,sigint_handler) # Catch Ctrl + C
    args = process_args()
    if args['show_usage']:
        show_usage()
        sys.exit(0)
    if args['url'] == '':
        print("No target URL specified. Please specify with -u parameter.\n")
        show_usage()
        sys.exit(0)
    # First pass over the wordlist just to count the lines for the banner.
    try:
        with open(args['wordlist'],'r') as f_wordlist:
            # pylint: disable=unused-argument
            for i,l in enumerate(f_wordlist):
                pass
            word_count = i + 1
    except IOError:
        print("Error: Unable to open {0}.".format(args['wordlist']))
        sys.exit(1)
    print(" |------------|")
    print(" | pybuster |")
    print(" |------------|\n")
    print("Starting tests with the following:")
    print(" URL: {0}\n Wordlist: {1}\n Word count: {2}\n".format(args['url'],args['wordlist'],word_count))
    with open(args['wordlist'],'r') as f_wordlist:
        # hack job to clear buffer text ...
        hack_job_string = ' ' * 25
        # lines = [line.rstrip('\n') for line in f_wordlist]
        if args['outfile'] != '':
            f_outfile = open(args['outfile'],'w')
            f_outfile_opened = True
        else:
            f_outfile_opened = False
        for raw_line in f_wordlist:
            # NOTE(review): this '#'-comment skip is broken -- `done = True`
            # is immediately overwritten by `done = False` below, so comment
            # lines are requested like any other. Confirm intent before fixing.
            if raw_line[0] == '#':
                done = True
            line = raw_line.rstrip('\n')
            done = False
            retry_count = 1
            while not done:
                try:
                    sys.stdout.write("\r--> Testing: {0}{1}".format(line,hack_job_string))
                    sys.stdout.flush()
                    r = requests.get(args['url'] + line)
                    if r.status_code == 404:
                        done = True
                        break
                    else:
                        # Anything other than 404 counts as a hit.
                        print("\n * [Found: {0}] [CODE: {1}]".format(args['url']+line,r.status_code))
                        if f_outfile_opened:
                            f_outfile.write("[Found: {0}] [CODE: {1}]\n".format(args['url']+line,r.status_code))
                        done = True
                except:
                    # NOTE(review): bare except also swallows KeyboardInterrupt
                    # and the like; SIGINT is separately handled above.
                    if retry_count == 1:
                        print("\n")
                    err_msg = "\r ! ConnectionError testing {0}: Trying again in 5 sec. (Retry count: {1})".format(line,retry_count)
                    sys.stdout.write(err_msg)
                    sys.stdout.flush()
                    time.sleep(5)
                    sys.stdout.write('\b' * (len(err_msg)-1)) # Clear the err msg before testing again.
                    retry_count += 1
    print("\n\nDone!")
    if f_outfile_opened:
        f_outfile.close()


if __name__ == "__main__":
    main()
|
"""
Write a program that contains a function called drawRegularPolygon where you
give it a Turtle, the number of sides of the polygon, and the side length and it
draws the polygon for you
"""
import turtle
def draw_regular_polygon(t, n_sides, side_length):
    """Draw a regular polygon with turtle t: n_sides edges of side_length each."""
    exterior_angle = 360 / n_sides
    for _ in range(n_sides):
        t.forward(side_length)
        t.left(exterior_angle)
def main():
    """Draw a triangle, pentagon, and octagon, then wait for a click to exit."""
    shapes = [(3, 30), (5, 60), (8, 40)]
    turtles = [turtle.Turtle() for _ in shapes]
    screen = turtles[0].getscreen()
    for pen, (n_sides, side_length) in zip(turtles, shapes):
        draw_regular_polygon(pen, n_sides, side_length)
    screen.exitonclick()
if __name__ == "__main__":
    # Script entry point: open the turtle window and draw the three polygons.
    main()
# ICPC-style eligibility check (Python 2): for each of n people, decide by
# start year, birth year, then completed-course count.
n = int(raw_input())
for _ in xrange(n):
    name, started, dob, courses = raw_input().split()
    # Started studies in 2010 or later -> eligible.
    if int(started[:4]) >= 2010:
        print "%s eligible" % name
    # Born in 1991 or later -> eligible.
    elif int(dob[:4]) >= 1991:
        print "%s eligible" % name
    # More than 40 completed courses -> ineligible.
    elif int(courses) > 40:
        print "%s ineligible" % name
    # Otherwise the coach must petition.
    else:
        print "%s coach petitions" % name
|
import numpy as np
import pandas as pd
def l2_normalization(vectors):
    """Scale each row of `vectors` to unit Euclidean (L2) length."""
    row_norms = np.linalg.norm(vectors, ord=2, axis=1)
    return vectors / row_norms.reshape(vectors.shape[0], 1)
def combine_content2vec_and_skill2vec_one_taxonomy(content2vec_path, skill2vec_path, output_path):
    """Outer-join the two embedding CSVs on index, L2-normalize rows, save CSV.

    Both inputs must cover exactly the same index; otherwise the assert fires.
    """
    content_df = pd.read_csv(content2vec_path, index_col=0)
    skill_df = pd.read_csv(skill2vec_path, index_col=0)
    merged = pd.merge(content_df, skill_df, left_index=True, right_index=True, how="outer")
    # With an outer join, an unchanged row count implies the indices coincide.
    assert merged.shape[0] == content_df.shape[0] == skill_df.shape[0]
    # Re-number the columns 0..N-1 as strings.
    merged.columns = [str(col) for col in range(merged.shape[1])]
    merged = l2_normalization(merged)
    merged.to_csv(output_path)
def combine_content2vec_and_skill2vec_two_taxonomies(
        src_content2vec_path, src_skill2vec_path, src_output_path,
        dst_content2vec_path, dst_skill2vec_path, dst_output_path):
    """Run the single-taxonomy combine for both the source and destination."""
    jobs = (
        (src_content2vec_path, src_skill2vec_path, src_output_path),
        (dst_content2vec_path, dst_skill2vec_path, dst_output_path),
    )
    for content_path, skill_path, out_path in jobs:
        combine_content2vec_and_skill2vec_one_taxonomy(content_path, skill_path, out_path)
|
import threading
from ftplib import FTP
import sys
def save_ftp( filename , my_ftp_url ,my_ftp_username ,my_ftp_password , my_ftp_remote_path , my_local_path):
    """Upload one local file via FTP, then record it as completed.

    NOTE(review): appends to the `file_completed` global created by the
    __main__ block below, so this only works when run from this script.
    """
    ftp = FTP(my_ftp_url )
    ftp.login(my_ftp_username, my_ftp_password)
    ftp.cwd(my_ftp_remote_path)
    # BUGFIX: the handle was bound to the name `file` (shadowing the builtin)
    # and leaked when storbinary raised; a context manager closes it always.
    with open(my_local_path + filename, 'rb') as source:
        # 3 MB block size for the binary transfer.
        ftp.storbinary('STOR ' + filename, source, 3000000)
    ftp.quit()
    file_completed.append(filename)
if __name__ == "__main__":
    # argv layout: 1 = comma-separated file list, 2 = FTP host, 3 = user,
    # 4 = password, 5 = remote path, 6 = local path. The literal "+-*" is an
    # escape for spaces inside the two path arguments.
    my_ftp_url= sys.argv[2]
    my_ftp_username= sys.argv[3]
    my_ftp_password = sys.argv[4]
    my_ftp_path = sys.argv[5]
    my_ftp_path = my_ftp_path.replace("+-*", " ")
    my_local_path = sys.argv[6]
    my_local_path = my_local_path.replace("+-*", " ")
    file_csv = sys.argv[1]
    file_array = file_csv.split(',')
    file_completed = []
    threads = []
    # Re-spawn uploads for any file not yet completed until all succeed.
    # NOTE(review): worker threads append to file_completed concurrently;
    # CPython's list.append happens to be atomic, but this is
    # implementation-specific -- confirm or add a lock.
    rem_files = list(set(file_array) - set(file_completed) )
    while(len(rem_files) > 0):
        for file_name in rem_files:
            thread_task = threading.Thread(target=save_ftp, args=(file_name ,my_ftp_url , my_ftp_username ,my_ftp_password , my_ftp_path , my_local_path))
            thread_task.start()
            threads.append(thread_task)
        for i in range(len(threads)):
            threads[i].join()
        rem_files = list(set(file_array) - set(file_completed) )
    # Report the successfully uploaded files back to the caller on stdout.
    completed_csv = ','.join(file_completed)
    print(completed_csv)
|
# `lst` is defined but never used below.
lst = ["Vienna", "London", "Paris", "Berlin", "Zurich", "Hamburg"]
# NOTE(review): despite the name, this is a *set* literal, so iteration order
# is not guaranteed; "manago" looks like a typo for "mango" -- confirm.
fruitList = {"Apple","manago"}
for f in fruitList:
    print(f)
"""
Defines how the style of the input text is to be parsed into a complete book.
This depends upon the formatting of the input, and needs to be customized
based on the style that it was originally written.
"""
class StyleSheet(object):
def __init__(self):
object.__init__(self)
def update_section(self, section):
"""Updates the style and text in the section pulled from the book. Returns it as a complete
section, or """
raise NotImplementedError()
def start_parser(self):
"""Signal to start the section parsing."""
raise NotImplementedError()
def end_parser(self):
"""End parsing, and return the final elements (list of them)."""
raise NotImplementedError()
|
from django.contrib import admin
from .models import FishDist, FishAsso, PrvType, Prvs
# Make the fishery models manageable through the Django admin.
for model in (FishDist, FishAsso, PrvType, Prvs):
    admin.site.register(model)
from __future__ import absolute_import
import uuid
from datetime import datetime
from enum import Enum
from sqlalchemy import Column, DateTime, ForeignKey, String, Text, Integer
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.orm import relationship
from sqlalchemy.schema import Index, UniqueConstraint
from sqlalchemy.sql import func, select
from changes.config import db
from changes.constants import SelectiveTestingPolicy, Status, Result, Cause
from changes.db.types.enum import Enum as EnumType
from changes.db.types.guid import GUID
from changes.db.types.json import JSONEncodedDict
from changes.db.utils import model_repr
class BuildPriority(Enum):
    # Scheduling priority of a Build; higher values are served first.
    default = 0
    high = 100
    low = -100
class Build(db.Model):
    """
    Represents the work we do (e.g. running tests) for one diff or commit (an
    entry in the source table) in one particular project

    Each Build contains many Jobs (usually linked to a JobPlan).
    """
    __tablename__ = 'build'
    __table_args__ = (
        Index('idx_buildfamily_project_id', 'project_id'),
        Index('idx_buildfamily_author_id', 'author_id'),
        Index('idx_buildfamily_source_id', 'source_id'),
        Index('idx_build_latest', 'project_id', 'status', 'date_created'),
        UniqueConstraint('project_id', 'number', name='unq_build_number'),
    )

    id = Column(GUID, primary_key=True, default=uuid.uuid4)
    # Per-project sequential build number (unique with project_id, see
    # unq_build_number above); assigned in __init__ via a DB function.
    number = Column(Integer)
    project_id = Column(GUID, ForeignKey('project.id', ondelete="CASCADE"), nullable=False)
    # A unique identifier for a group of related Builds, such as all Builds created by a particular
    # action. Used primarily for aggregation in result reporting.
    # Note that this may be None for Builds that aren't grouped, and all such Builds should NOT
    # be treated as a collection.
    collection_id = Column(GUID)
    source_id = Column(GUID, ForeignKey('source.id', ondelete="CASCADE"))
    author_id = Column(GUID, ForeignKey('author.id', ondelete="CASCADE"))
    # What triggered the build; defaults to Cause.unknown.
    cause = Column(EnumType(Cause), nullable=False, default=Cause.unknown)
    # label is a short description, typically from the title of the change that triggered the build.
    label = Column(String(128), nullable=False)
    # short indicator of what is being built, typically the sha or the Phabricator revision ID like 'D90885'.
    target = Column(String(128))
    tags = Column(ARRAY(String(16)), nullable=True)
    status = Column(EnumType(Status), nullable=False, default=Status.unknown)
    result = Column(EnumType(Result), nullable=False, default=Result.unknown)
    selective_testing_policy = Column(EnumType(SelectiveTestingPolicy), default=SelectiveTestingPolicy.disabled)
    message = Column(Text)
    # Wall-clock duration in milliseconds (computed in __init__ from
    # date_started/date_finished when both are set).
    duration = Column(Integer)
    priority = Column(EnumType(BuildPriority), nullable=False,
                      default=BuildPriority.default, server_default='0')
    date_started = Column(DateTime)
    date_finished = Column(DateTime)
    date_decided = Column(DateTime)  # date when final determination of build result is made
    date_created = Column(DateTime, default=datetime.utcnow)
    date_modified = Column(DateTime, default=datetime.utcnow)
    data = Column(JSONEncodedDict)

    project = relationship('Project', innerjoin=True)
    source = relationship('Source', innerjoin=True)
    author = relationship('Author')
    # ItemStat rows keyed by this build's id (no FK column; joined explicitly).
    stats = relationship('ItemStat',
                         primaryjoin='Build.id == ItemStat.item_id',
                         foreign_keys=[id],
                         uselist=True)

    __repr__ = model_repr('label', 'target')

    def __init__(self, **kwargs):
        """Apply column defaults eagerly so a fresh Build is usable before flush."""
        super(Build, self).__init__(**kwargs)
        if self.id is None:
            self.id = uuid.uuid4()
        if self.result is None:
            self.result = Result.unknown
        if self.status is None:
            self.status = Status.unknown
        if self.selective_testing_policy is None:
            self.selective_testing_policy = SelectiveTestingPolicy.disabled
        if self.date_created is None:
            self.date_created = datetime.utcnow()
        if self.date_modified is None:
            self.date_modified = self.date_created
        if self.date_started and self.date_finished and not self.duration:
            # Duration is stored in milliseconds.
            self.duration = (self.date_finished - self.date_started).total_seconds() * 1000
        if self.number is None and self.project:
            # The next per-project build number comes from a database function.
            self.number = select([func.next_item_value(self.project.id.hex)])
        if self.tags is None:
            self.tags = []
|
#Penn State Abington
#IST 440W
#Fall 2016
#Team Pump Your Brakes
#Members: Abu Sakif, David Austin, Qili Jian, Abu Chowdhury, Gary Martorana, Chakman Fung
import os
import RPi.GPIO as GPIO

# Configure board-numbered pin 11 as a 50 Hz PWM output.
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
GPIO.setup(11, GPIO.OUT)
p = GPIO.PWM(11, 50)  # PWM 'Pulse-width Modulation' puts pin 11 to 50Hz
p.start(9)  # start at 9% duty cycle
try:
    while True:
        p.ChangeDutyCycle(5)  # engage brake
except KeyboardInterrupt:
    pass  # Ctrl-C is the normal way to stop this script
finally:
    # Stop the PWM output and release the pins so the hardware is left in a
    # clean state (the original exited without GPIO.cleanup(), leaving pin 11
    # configured).
    p.stop()
    GPIO.cleanup()
|
#!/usr/bin/env python
# encoding: utf-8
# author: AlisaAlbert
# 2019/5/19 12:26
import pandas as pd
import numpy as np
import pickle
import time,os
from multiprocessing import Pool
import pymysql
import warnings
warnings.filterwarnings('ignore')
# Widen pandas' console display limits for debugging output.
for _opt, _val in (('display.max_rows', 100),
                   ('display.max_columns', 100),
                   ('display.width', 1000),
                   ('display.max_colwidth', 100)):
    pd.set_option(_opt, _val)
# Database connection helper.
def connect_db():
    """Open and return a new pymysql connection using the configured credentials."""
    conn = pymysql.connect(host='***',
                           user='***',
                           password='***',
                           database='***',
                           port=3306,
                           charset='utf8')
    return conn
# Read a single pickled file from the folder (runs in one worker process).
def read_data(path):
    """Load and return the pickled object stored at *path*, printing elapsed time."""
    start = time.time()
    with open(path, 'rb') as fh:
        payload = pickle.load(fh)
    end = time.time()
    print('Task runs %0.2f seconds.' % ((end - start)))
    return payload
# Insert rows into the database.
def insert_data(db_connect, result, table):
    """Truncate *table* and bulk-insert *result* (a 2-D array-like, 13 columns).

    Commits on success and rolls back on failure.  Note: in pymysql,
    commit/rollback are methods of the *connection* -- the original called
    them on the cursor, which raises AttributeError at runtime.
    """
    cursor = db_connect.cursor()
    # Convert the rows into the list-of-tuples shape executemany() expects.
    static_result_df1 = np.array(result).tolist()
    static_result_df2 = list(map(tuple, static_result_df1))
    sql_truncate = "truncate {};".format(table)
    sql_insert = '''
    insert into {}
    (columns_name
    ) values
    (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
    '''.format(table)
    try:
        cursor.execute(sql_truncate)
        cursor.executemany(sql_insert, static_result_df2)
        db_connect.commit()  # commit belongs to the connection, not the cursor
        print("Done Task!")
    except Exception:
        # Roll back on any database error so the truncate alone is not committed.
        db_connect.rollback()
    finally:
        # Always release the cursor (the original only closed it on failure).
        cursor.close()
if __name__=='__main__':
    # Fan the file reads out over a process pool, merge the results, and
    # bulk-insert them into the database.
    db_conn = connect_db()  # renamed: the original rebound the connect_db function itself
    filepath = r'D:\filename'
    table = 'table_name'
    t1 = time.time()
    pro_num = 10  # number of worker processes (keep in line with logical cores)
    pool = Pool(processes=pro_num)
    job_result = []
    # Walk the folder and read every file in a worker process.
    for file in os.listdir(filepath):
        res = pool.apply_async(read_data, (os.path.join(filepath, file),))
        job_result.append(res)
    pool.close()  # no more tasks will be submitted
    pool.join()
    # Merge all loaded files into one DataFrame.
    # (DataFrame.append was removed in pandas 2.0; pd.concat is the supported API.)
    if job_result:
        get_result = pd.concat([tmp.get() for tmp in job_result])
    else:
        get_result = pd.DataFrame()
    t2 = time.time()
    insert_data(db_conn, get_result, table)
    print('It took a total of %0.2f seconds.' % (t2 - t1))
#!/usr/bin/env python
import sys, math
def changeBaseFromTen(num, base):
    """Return non-negative base-10 integer *num* written in *base* as a string.

    Digits >= 10 are rendered as their decimal value (e.g. 255 in base 16 is
    "1515"), matching the original behavior.  Fixes: returns "0" for num == 0
    (the original returned ""), and uses floor division so the function works
    identically on Python 2 and 3 (``/=`` yields floats on Python 3).
    """
    if num == 0:
        return "0"
    digits = []
    while num > 0:
        digits.insert(0, str(num % base))
        num //= base
    return "".join(digits)
def isHappy(num, base):
    """Return True if *num* is happy in *base*: iterating "sum of squared
    digits" (computed on the base-*base* representation) reaches 1.  Return
    False as soon as a previously seen representation recurs (a cycle)."""
    current = changeBaseFromTen(num, base)
    seen = [current]
    while current != "1":
        total = sum(int(digit) ** 2 for digit in current)
        current = changeBaseFromTen(total, base)
        if current in seen:
            return False
        seen.append(current)
    return True
# Read the number of test cases, then one line of dice... er, bases per case.
# (Python 2 script: uses the print statement.)
n = int(sys.stdin.readline().strip())
for i in range(n):
    bases = map(int,sys.stdin.readline().strip().split(" "))
    res = 0
    # Brute-force: find the smallest j >= 2 that is happy in every base.
    for j in range(2,10000000):
        found = True
        for base in bases:
            if not isHappy(j,base):
                found = False
                break
        if found:
            res = j
            break
    print "Case #%d: %d" % (i+1, res)
|
import urllib.request,re
import random
def ip():
    """Scrape a proxy-list page, pick one proxy at random, and install it as
    the process-wide urllib opener.

    NOTE(review): depends on the exact HTML layout of seofangfa.com/proxy --
    the regex extracts "ip</td><td>port" fragments and rewrites them to
    "ip:port".
    """
    thisurl = "https://seofangfa.com/proxy/"
    date = urllib.request.urlopen(thisurl).read().decode("utf-8", "ignore")
    # Capture an IP address followed (possibly across markup) by a port number.
    pat = '((\\d+\\.\\d+\\.\\d+\\.\\d+).*?(\\d+))'
    res1 = re.compile(pat, re.S).findall(date)
    thisip = []
    for i in res1:
        # Replace the table-cell separator so each entry reads "ip:port".
        res = re.sub(r'</td><td>', ':', i[0])
        thisip.append(res)
    #print(thisip)
    thisip = random.choice(thisip)  # pick one proxy at random
    print(thisip)
    proxy = urllib.request.ProxyHandler({"http": thisip})  # convert to handler format
    opener = urllib.request.build_opener(proxy, urllib.request.HTTPHandler)  # attach the proxy ip
    urllib.request.install_opener(opener)  # install globally for urllib.request
# Fetch the target page 20 times, each time through a freshly-installed
# random proxy, saving each successful response to disk.
for i in range(0, 20):
    try:
        ip()
        url = "http://www.taizhou.com.cn/index.htm"
        data1 = urllib.request.urlopen(url).read()
        data = data1.decode("utf-8", "ignore")
        print(len(data))
        # with-statement guarantees the handle is closed even if write() fails
        # (the original leaked the handle on a write error).
        with open("C:\\Users\\Administrator\\Desktop\\1\\新建文件夹\\1.html", "w", encoding='utf-8') as fh:
            fh.write(data)
    except Exception as err:
        print(err)
|
import turtle
from turtle import *
import random

# Set up the drawing window.
win = turtle.Screen()
win.bgcolor("black")
win.setup(800, 600)

# Create the drawing turtle.
painter = turtle.Turtle()
painter.color("yellow")
painter.shapesize(2, 2)
painter.speed(0)

# Palette of 20 colors to pick from at random (previously only 6 were used).
palette = ["yellow", "gold", "orange", "red", "maroon", "violet", "magenta", "purple", "navy", "blue", "skyblue", "cyan", "turquoise", "lightgreen", "green", "darkgreen", "chocolate", "brown", "gray", "white"]

# Length of the next segment; grows by 10 each turn, producing a square spiral.
step = 10
# (Replacing the for-loop with "while True" would draw the spiral forever.)
for _ in range(50):
    random.shuffle(palette)
    painter.color(palette[0])
    painter.lt(90)
    painter.fd(step)
    step += 10

turtle.exitonclick()
win.mainloop()
|
#
# This is a primitive script to parse the output of cdfIntegrate, and
# to generate a set of files (one for each track) that contains some of
# the integration results.
#
# It is intended as an example from which more useful scripts can be
# generated.
#
import sys, re, regex, regsub, math
from math import sqrt, atan2
def usage():
    """Print command-line usage for this script (Python 2 print statements)."""
    print "Usage: analyze <filename>"
    print "<filename> must be the name of an output file created by cdfIntegrate"
if __name__ == "__main__":
    # Get the name of the input file.
    numArgs = len(sys.argv)
    if (numArgs != 2):
        usage()
        sys.exit(0)
    # Read the input file
    filename = sys.argv[1]
    try:
        f = open(filename, "r")
    except:
        print "Could not open the file: %s, exiting" % (filename)
        sys.exit(1)
    # All further prints go to "<input>0" (stdout is redirected below).
    ofilename = filename + "0"
    print "Opening file: %s" % (ofilename)
    sys.stdout = open(ofilename, "w")
    # State machine over the cdfIntegrate output:
    #   0 = looking for the 'CFPL' marker,
    #   1 = waiting for its 'integral so far' value (start of track),
    #   2 = looking for the 'CSPR' marker,
    #   3 = waiting for its 'integral so far' value (end of track),
    #   4 = done with the current track.
    whereAmI = 0
    theta = 0
    r_previous = 0
    intRadLengthsAtStart = 0
    intRadLengthsAtEnd = 0
    for line in f.readlines():
        if(whereAmI==0):
            c = re.search('CFPL',line)
            if(not c==None):
                whereAmI = 1
        if (whereAmI==1):
            c = re.search('integral so far: *(-?[0-9.]+)',line)
            if(not c==None):
                intRadLengthsAtStart = float(c.group(1))
                whereAmI = 2
        if (whereAmI==2):
            c = re.search('CSPR',line)
            if (not c==None):
                whereAmI = 3
        if (whereAmI==3):
            c = re.search('integral so far: *(-?[0-9.]+)',line)
            if(not c==None):
                intRadLengthsAtEnd = float(c.group(1))
                whereAmI = 4
        # End-of-segment coordinates: track the cylindrical radius so a
        # decrease in r marks the start of a new track.
        d = re.search('coordinates of end: *\((-?[0-9.]+),(-?[0-9.]+),(-?[0-9.]+)\)',line)
        if (d!=None):
            x = float(d.group(1))
            y = float(d.group(2))
            z = float(d.group(3))
            r = sqrt(x**2+y**2)
            if(r<r_previous):
                # New track begins: emit theta (degrees) and the integrated
                # radiation lengths for the track just finished.
                print "%g %g" % (theta,intRadLengthsAtEnd-intRadLengthsAtStart)
                whereAmI = 0
            r_previous = r
            if(r>0):
                theta = atan2(r,z)*180./3.141592
    # Emit the final track.
    print "%g %g" % (theta,intRadLengthsAtEnd-intRadLengthsAtStart)
|
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import string
from oslo_serialization import jsonutils as json
from tempest.api.keyvalue.rest_base.base import MagnetoDBTestCase
from tempest.test import attr
class MagnetoDBStreamingTest(MagnetoDBTestCase):
    """Tempest tests for MagnetoDB's streaming (bulk) upload API.

    Python 2 test suite (uses ``xrange`` and ``string.lowercase``).  Each
    test creates a fresh table, streams items into it, checks the
    read/processed/failed/unprocessed counters returned by the streaming
    client, and finally reads one item back to verify storage.
    """

    @classmethod
    def setUpClass(cls):
        super(MagnetoDBStreamingTest, cls).setUpClass()

    def random_name(self, length):
        """Return a random lowercase+digit name of the given length."""
        return ''.join(random.choice(string.lowercase + string.digits)
                       for i in range(length))

    @attr(type='BW-1')
    def test_streaming(self):
        """Happy path: 100 items are all read and processed, none fail."""
        table_name = self.random_name(40)
        self._create_test_table(self.smoke_attrs + self.index_attrs,
                                table_name,
                                self.smoke_schema,
                                wait_for_active=True)
        item_count = 100
        items = [
            self.build_smoke_item('forum{}'.format(n),
                                  'subject{}'.format(n),
                                  last_posted_by='Bulk{}'.format(n))
            for n in xrange(item_count)
        ]
        # Key of the last uploaded item; used for the read-back check.
        key = {self.hashkey: items[-1][self.hashkey],
               self.rangekey: items[-1][self.rangekey]}
        upload_status, upload_resp = self.streaming_client.upload_items(
            table_name, items)
        read_ = upload_resp['read']
        processed_ = upload_resp['processed']
        failed_ = upload_resp['failed']
        unprocessed_ = upload_resp['unprocessed']
        self.assertEqual(read_, item_count)
        self.assertEqual(processed_, item_count)
        self.assertEqual(failed_, 0)
        self.assertEqual(unprocessed_, 0)
        attributes_to_get = ['last_posted_by']
        get_resp = self.client.get_item(table_name,
                                        key,
                                        attributes_to_get,
                                        True)
        self.assertEqual('Bulk{}'.format(item_count - 1),
                         get_resp[1]['item']['last_posted_by']['S'])

    @attr(type='BW-2')
    def test_streaming_error_data(self):
        """One item missing a required attribute fails; the rest still load."""
        table_name = self.random_name(40)
        self._create_test_table(self.smoke_attrs + self.index_attrs,
                                table_name,
                                self.smoke_schema,
                                wait_for_active=True)
        item_count = 100
        items = [
            self.build_smoke_item('forum{}'.format(n),
                                  'subject{}'.format(n),
                                  last_posted_by='Bulk{}'.format(n))
            for n in xrange(item_count)
        ]
        # Corrupt the second-to-last item by dropping its range attribute.
        del items[-2]['subject']
        key = {self.hashkey: items[-3][self.hashkey],
               self.rangekey: items[-3][self.rangekey]}
        upload_status, upload_resp = self.streaming_client.upload_items(
            table_name, items)
        read_ = upload_resp['read']
        processed_ = upload_resp['processed']
        failed_ = upload_resp['failed']
        unprocessed_ = upload_resp['unprocessed']
        self.assertEqual(read_, item_count)
        self.assertEqual(processed_ + failed_ + unprocessed_, item_count)
        self.assertEqual(failed_, 1)
        attributes_to_get = ['last_posted_by']
        get_resp = self.client.get_item(table_name,
                                        key,
                                        attributes_to_get,
                                        True)
        self.assertEqual('Bulk{}'.format(item_count - 3),
                         get_resp[1]['item']['last_posted_by']['S'])

    @attr(type='BW-3')
    def test_streaming_nolf(self):
        """A single raw JSON document without a trailing newline still uploads."""
        table_name = self.random_name(40)
        self._create_test_table(self.smoke_attrs + self.index_attrs,
                                table_name,
                                self.smoke_schema,
                                wait_for_active=True)
        item = self.build_smoke_item('forum{}'.format(0),
                                     'subject{}'.format(0),
                                     last_posted_by='Bulk{}'.format(0))
        key = {self.hashkey: item[self.hashkey],
               self.rangekey: item[self.rangekey]}
        stream = json.dumps(item)
        upload_status, upload_resp = self.streaming_client.upload_raw_stream(
            table_name, stream)
        read_ = upload_resp['read']
        processed_ = upload_resp['processed']
        failed_ = upload_resp['failed']
        unprocessed_ = upload_resp['unprocessed']
        self.assertEqual(read_, 1)
        self.assertEqual(processed_, 1)
        self.assertEqual(failed_, 0)
        self.assertEqual(unprocessed_, 0)
        attributes_to_get = ['last_posted_by']
        get_resp = self.client.get_item(table_name,
                                        key,
                                        attributes_to_get,
                                        True)
        self.assertEqual('Bulk{}'.format(0),
                         get_resp[1]['item']['last_posted_by']['S'])

    @attr(type='BW-4')
    def test_streaming_bad_stream(self):
        """A stream truncated mid-document is accounted for without loss:
        read == processed + failed + unprocessed."""
        table_name = self.random_name(40)
        self._create_test_table(self.smoke_attrs + self.index_attrs,
                                table_name,
                                self.smoke_schema,
                                wait_for_active=True)
        item_count = 100
        items = [
            self.build_smoke_item('forum{}'.format(n),
                                  'subject{}'.format(n),
                                  last_posted_by='Bulk{}'.format(n))
            for n in xrange(item_count)
        ]
        key = {self.hashkey: items[0][self.hashkey],
               self.rangekey: items[0][self.rangekey]}
        stream = ''.join([json.dumps(item) + '\n' for item in items])
        # Cut the stream in half (Python 2 integer division).
        stream = stream[:len(stream)/2]
        upload_status, upload_resp = self.streaming_client.upload_raw_stream(
            table_name, stream)
        read_ = upload_resp['read']
        processed_ = upload_resp['processed']
        failed_ = upload_resp['failed']
        unprocessed_ = upload_resp['unprocessed']
        self.assertEqual(processed_ + failed_ + unprocessed_, read_)
        attributes_to_get = ['last_posted_by']
        get_resp = self.client.get_item(table_name,
                                        key,
                                        attributes_to_get,
                                        True)
        self.assertEqual('Bulk{}'.format(0),
                         get_resp[1]['item']['last_posted_by']['S'])
|
from skimage import io,color
import matplotlib.pyplot as plt
def negative_image(img):
    """Return the photographic negative of *img*.

    Handles both integer images (0..255 scale) and float images (0.0..1.0
    scale, e.g. the output of skimage's ``rgb2gray``).  The original always
    used ``255 - img``, which is wrong for float grayscale input.
    """
    import numpy as np
    if np.issubdtype(np.asarray(img).dtype, np.floating):
        return 1.0 - img
    return 255 - img
# Load the sample image, convert it to grayscale, and invert it.
image = io.imread('./images/negative_image.jpg')
gray = color.rgb2gray(image)
inverted = negative_image(gray)

# Show the original and its negative side by side.
plt.figure(1)
plt.subplot(1, 2, 1)
plt.imshow(image)
plt.title('Original Image')
plt.subplot(1, 2, 2)
plt.imshow(inverted, cmap='gray')
plt.title('Negative Image')
plt.show()
|
# encoding: UTF-8
import datetime
def get_modified_date():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def split_dt_list(start_dt, end_dt):
    """Split the span [start_dt, end_dt] (both 'YYYYMMDD' strings) into
    consecutive windows of at most a 30-day delta.

    Returns a list of (start, end) pairs formatted 'YYYY-MM-DD HH:MM:SS';
    each window ends at 23:59 and the next begins at 00:00 of the next day.
    """
    fmt = "%Y-%m-%d %H:%M:%S"
    cursor = datetime.datetime.strptime(start_dt, "%Y%m%d")
    limit = datetime.datetime.strptime(end_dt, "%Y%m%d")
    windows = []
    while True:
        upper = cursor + datetime.timedelta(days=30)
        is_last = upper >= limit
        if is_last:
            upper = limit
        upper = upper.replace(hour=23, minute=59)
        windows.append((cursor.strftime(fmt), upper.strftime(fmt)))
        if is_last:
            return windows
        cursor = upper.replace(hour=0, minute=0) + datetime.timedelta(days=1)
import pygame
from pygame import *
class Player():
    """Keyboard-controlled sprite with a hitbox and a simple walk animation."""
    def __init__(self,x,y,w,h,vel,sprite,s_w,s_h):
        # Position, size, and movement speed.
        self.x=x
        self.y=y
        self.w=w
        self.h=h
        self.vel=vel
        self.sprite=sprite  # sequence of animation frames
        self.s_w=s_w  # screen width  (movement bound)
        self.s_h=s_h  # screen height (movement bound)
        self.walkcount=0  # animation frame counter, advanced by move()
        # Hitbox excludes the top 30px of the sprite.
        self.hitbox=pygame.Rect(self.x,self.y+30,self.w,self.h-30)
    def move(self):
        """Move one step in at most one direction per call (up > down > right
        > left priority), clamped to the screen, and advance the walk counter."""
        keys=pygame.key.get_pressed()
        if keys[K_UP] or keys[K_w]:
            if self.y>0:
                self.y-=self.vel
        elif keys[K_DOWN] or keys[K_s]:
            if self.y<self.s_h-self.h:
                self.y+=self.vel
        elif keys[K_RIGHT] or keys[K_d]:
            if self.x<self.s_w:
                self.x+=self.vel
        elif keys[K_LEFT] or keys[K_a]:
            if self.x>0:
                self.x-=self.vel
        self.walkcount+=1
    def show(self,screen):
        """Draw the current animation frame (scaled to w x h) and refresh the hitbox."""
        # Cycle the counter through 0..5; frame index changes every 3 ticks.
        if self.walkcount +1 >= 6:
            self.walkcount=0
        cur_frame=self.sprite[self.walkcount//3]
        screen.blit(pygame.transform.scale(cur_frame,(self.w,self.h)),(self.x,self.y))
        self.hitbox=pygame.Rect(self.x,self.y+30,self.w,self.h-30)
        #pygame.draw.rect(screen,(255,0,0),self.hitbox,1)
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
# harmonictook.py - Main game file
import math
import random
import utility
import argparse
import unittest
import statistics
class Player(object):
    """A Machi-Koro-style player: coins, owned cards, and landmark flags."""
    def __init__(self, name = "Player"):
        self.name = name
        self.order = 0  # turn order; assigned elsewhere
        self.isrollingdice = False  # True only while this player is the roller
        self.abilities = 0  # NOTE(review): not read anywhere in this file
        self.bank = 3 # Everyone starts with 3 coins
        self.deck = PlayerDeck(self)  # starting deck: Wheat Field + Bakery
        self.hasTrainStation = False
        self.hasShoppingMall = False
        self.hasAmusementPark = False
        self.hasRadioTower = False
    def isWinner(self):
        """True when all four landmark upgrades are owned."""
        if self.hasAmusementPark and self.hasRadioTower and self.hasShoppingMall and self.hasTrainStation:
            return True
        else:
            return False
    def dieroll(self):
        """Roll 1 or 2 dice (per chooseDice) and return (total, isDoubles)."""
        self.isrollingdice = True
        isDoubles = False
        dice = self.chooseDice()
        if dice == 1:
            return random.randint(1,6), False
        elif dice == 2:
            a = random.randint(1,6)
            b = random.randint(1,6)
            if a == b:
                isDoubles = True
            else:
                isDoubles = False
            total = a + b
            return total, isDoubles
        else:
            # Unreachable with the current chooseDice implementations (1 or 2).
            return 7, False
    def chooseDice(self):
        """Base players always roll one die; subclasses override."""
        return 1
    def deposit(self, amount: int):
        """Add *amount* coins to this player's bank."""
        self.bank += amount
    def deduct(self, amount: int): # Deducts coins from player's account without going negative
        if self.bank >= amount:
            deducted = amount
        else:
            deducted = self.bank
        self.bank -= deducted
        return deducted # ...and returns the amount that was deducted, for payment purposes
    def buy(self, name: str, availableCards):
        """Buy the card called *name* from *availableCards* or from this
        player's remaining landmark upgrades.

        Deducts the cost, records ownership, and removes the card from its
        source deck; prints an explanation when the card is unknown or
        unaffordable.
        """
        card = None
        specials = self.checkRemainingUpgrades()
        # Check if the name passed in is on the card list or specials list
        for item in availableCards.deck:
            if item.name.lower() == name.lower():
                card = item
                break
            else:
                pass
        for item in specials:
            if item.name.lower() == name.lower():
                card = item
                break
            else:
                pass
        if isinstance(card, Card):
            if self.bank >= card.cost:
                self.deduct(card.cost)
                self.deck.append(card)
                card.owner = self
                print("{} bought a {} for {} coins, and now has {} coins.".format(self.name, card.name, card.cost, self.bank))
            else:
                print("Sorry: a {} costs {} and {} only has {}.".format(card.name, card.cost, self.name, self.bank))
                return
            # Market cards leave the table; upgrades set their owner flag.
            if isinstance(card,(Red, Green, Blue, TVStation, Stadium, BusinessCenter)):
                availableCards.deck.remove(card)
            elif isinstance(card, UpgradeCard):
                specials.remove(card)
                card.bestowPower() # TODO: write setSpecialFlag()
        else:
            print("Sorry: we don't have anything called '{}'.".format(name))
    def checkRemainingUpgrades(self):
        """Return fresh UpgradeCard instances for each landmark not yet owned."""
        upgrades = []
        #TODO should I just define a second Store() called Player.upgrades() and put these in Player.special.deck?
        if not self.hasTrainStation:
            upgrades.append(UpgradeCard("Train Station"))
        if not self.hasShoppingMall:
            upgrades.append(UpgradeCard("Shopping Mall"))
        if not self.hasAmusementPark:
            upgrades.append(UpgradeCard("Amusement Park"))
        if not self.hasRadioTower:
            upgrades.append(UpgradeCard("Radio Tower"))
        return upgrades
    def swap(self, Card, otherPlayer, otherCard):
        """Exchange this player's *Card* for *otherPlayer*'s *otherCard*.

        NOTE(review): the parameter named ``Card`` shadows the Card class
        inside this method.
        """
        Card.owner = otherPlayer
        otherCard.owner = self
        otherPlayer.deck.remove(otherCard)
        self.deck.append(otherCard)
        self.deck.remove(Card)
        otherPlayer.deck.append(Card)
class Human(Player): # TODO : make this more robust - type checking etc.
    """Interactive player: card and dice choices come from stdin prompts."""
    def chooseCard(self, options=list):
        """Ask the user to pick a card name from *options*; None when empty."""
        if len(options) == 0:
            print("Oh no - no valid purchase options this turn.")
            return None
        else:
            cardname = utility.userChoice(options)
            return cardname
    def chooseDice(self):
        """Ask how many dice to roll: always 1 without a Train Station,
        otherwise loop until the user enters 1 or 2."""
        dice = 1
        if self.hasTrainStation:
            while True:
                raw = input("Roll [1] or [2] dice? ")
                try:
                    # Reject non-numeric input instead of crashing (the
                    # original's bare int(input(...)) raised ValueError).
                    dice = int(raw)
                except ValueError:
                    print("Sorry: You can only enter a 1 or 2. Rolling {} dice is not permitted.".format(raw))
                    continue
                if dice > 0 and dice < 3:
                    break
                else:
                    print("Sorry: You can only enter a 1 or 2. Rolling {} dice is not permitted.".format(dice))
        return dice
class Bot(Player):
    """Non-interactive player: buys at random and always rolls the maximum dice."""
    def chooseCard(self, options=list):
        """Return a random card name from *options*, or None when there are none."""
        if len(options) == 0:
            print("Oh no - no valid purchase options this turn.")
            return None
        return random.choice(options)
    def chooseDice(self):
        """Two dice once a Train Station is owned, otherwise one."""
        return 2 if self.hasTrainStation else 1
class ThoughtfulBot(Bot):
    """Bot that buys according to a fixed priority list instead of purely at random."""
    def chooseCard(self, options=list):
        """Return the highest-priority card name present in *options*.

        Falls back to a random option when nothing on the priority list is
        available.  (Fix: the original's nested loop unconditionally
        overwrote its match with ``random.choice(options)``, so the
        priority list was never honored.)
        """
        if len(options) == 0:
            print("Can't buy anything.")
            return None
        upgrades = ["Radio Tower",
                    "Amusement Park",
                    "Shopping Mall",
                    "Train Station"]
        earlycards = ["TV Station",
                      "Business Center",
                      "Stadium",
                      "Forest",
                      "Convenience Store",
                      "Ranch",
                      "Wheat Field",
                      "Cafe",
                      "Bakery"]
        latecards = ["Mine",
                     "Furniture Factory",
                     "Cheese Factory",
                     "Family Restaurant",
                     "Apple Orchard",
                     "Fruit and Vegetable Market"]
        # With a Train Station, prefer the expensive late-game engine cards.
        if self.hasTrainStation:
            preferences = upgrades + latecards + earlycards
        else:
            preferences = upgrades + earlycards
        for priority in preferences:
            if priority in options:
                return priority
        return random.choice(options)
    def chooseDice(self):
        """One die before a Train Station; afterwards 2 dice 80% of the time."""
        if not self.hasTrainStation:
            return 1
        else:
            return random.choice([1,2,2,2,2])
# === Define Class Card() === #
# Cards must have a name, cost, a payer, a payout amount, and one or more die rolls on which they "hit"
class Card(object):
    """Base card: a name, cost, payer/recipient roles, payout, and hit rolls.

    Ordering and equality are defined by sortvalue(), which sorts by mean
    hit roll, then cost, then (approximately) alphabetically.
    """
    def __init__(self):
        self.name = None # Name should be a string like "Wheat Field"
        self.payer = None # Payer can be 0 (bank), 1 (die roller), 2 (each other player), 3 (owner), or 4 (specific player)
        self.recipient = None # Recipient can be 1 (die roller), 2 (each other player), or 3 (owner)
        self.cost = 0 # Cost should be a non-zero integer
        self.payout = 0 # Payout can be any integer
        self.hitsOn = [0] # "Hits" can be one or more integers achievable on 2d6
        self.owner = None # Cards start with no owner
        self.category = None # Categories from the list below
        self.multiplies = None # Also categories
    def sortvalue(self):
        """Composite sort key: mean hit roll + cost/100 + first-letter tiebreak."""
        # Uses the module-level statistics import (the original re-imported
        # `mean` on every call).
        value = 0.000
        value += statistics.mean(self.hitsOn) # Sort by mean hit value
        value += self.cost/100 # Then by cost
        value += ord(str(self.name)[0])/255 # Then by pseudo-alphabetical
        return value
    # Rich comparisons delegate to sortvalue(); returning the boolean
    # expression directly replaces the original's if/else True/False blocks.
    def __eq__(self, other):
        return self.sortvalue() == other.sortvalue()
    def __ne__(self, other):
        return self.sortvalue() != other.sortvalue()
    def __lt__(self, other):
        return self.sortvalue() < other.sortvalue()
    def __le__(self, other):
        return self.sortvalue() <= other.sortvalue()
    def __gt__(self, other):
        return self.sortvalue() > other.sortvalue()
    def __ge__(self, other):
        return self.sortvalue() >= other.sortvalue()
    def __hash__(self):
        # NOTE(review): hashes on (name, category, cost) while __eq__ uses
        # sortvalue(), so equal cards with different names may hash apart.
        return hash((self.name, self.category, self.cost))
    def __str__(self):
        # TODO: figure out which scope this list belongs in for card display
        categories = {1:"|🌽|", 2:"|🐄|", 3:"|🏪|", 4:"|☕|", 5:"|⚙️| ", 6:"|🏭|", 7:"|🗼|", 8:"|🍎|"}
        # WARNING: In Unicode, the "gear" emoji is decorated with U+FE0F, an invisible zero-space
        # codepoint. Its full name is 'U+2699 U+FE0F'. Calls to format() double-count it when
        # trying to do fixed width. Adding a space for padding and telling format() to display it
        # as single-width seems to work. There probably are other solutions, but this one works.
        catvalue = self.category
        cardstring = "{:7} {:3} : {:16}".format(str(self.hitsOn), categories[catvalue], self.name)
        return cardstring
    # TODO: card.helptext goes here - potentially adding info to __str__
class Green(Card):
    """Green cards: the bank pays the owner, but only on the owner's own roll.

    When ``multiplies`` is set, the payout is multiplied by the number of
    owned cards of that category.
    """
    def __init__(self, name: str, category: int, cost: int, payout: int, hitsOn: list, multiplies=None):
        self.name = name
        self.category = category
        self.cost = cost
        self.payout = payout
        self.multiplies = multiplies
        # (The original assigned self.hitsOn = [] and immediately overwrote it.)
        self.hitsOn = hitsOn
        self.payer = 0 # Green cards always pay out from the bank (0)
        self.recipient = 1 # Green cards always pay to the die roller (1)
    def trigger(self, players: list): # Green cards increment the owner's bank by the payout
        """Pay the owner if they rolled; apply the category multiplier when set."""
        subtotal = 0
        if self.owner.isrollingdice:
            if not self.multiplies: # TODO: check this
                print("This green card doesn't multiply anything.")
                self.owner.deposit(self.payout)
                print("{} pays out {} to {}.".format(self.name, self.payout, self.owner.name))
            else:
                # Count the owner's cards in the multiplied category.
                for card in self.owner.deck.deck:
                    if card.category == self.multiplies:
                        subtotal += 1
                print("{} has {} cards of type {}...".format(self.owner.name, subtotal, self.multiplies))
                amount = self.payout * subtotal
                print("{} pays out {} to {}.".format(self.name, amount, self.owner.name))
                self.owner.deposit(amount)
        else:
            print("{} didn't roll the dice - no payout from {}.".format(self.owner.name, self.name))
class Red(Card):
    """Red cards: the die roller pays the card's owner when its number hits."""
    def __init__(self, name: str, category: int, cost: int, payout: int, hitsOn: list):
        self.name = name
        self.category = category
        self.cost = cost
        self.payout = payout
        self.hitsOn = hitsOn
        self.payer = 1 # Red cards pay out from the die-roller (1)
        self.recipient = 3 # Red cards pay to the card owner (3)
    def trigger(self, players):
        """Transfer up to self.payout coins from the die roller to the owner."""
        dieroller = None
        for person in players:
            if person.isrollingdice:
                dieroller = person
        if dieroller is None:
            # No player is marked as rolling; nothing to collect.  (The
            # original left `dieroller` unbound and raised NameError here.)
            return
        payout = dieroller.deduct(self.payout)
        self.owner.deposit(payout)
class Blue(Card):
    """Blue cards: the bank pays the owner on anyone's roll."""
    def __init__(self, name: str, category: int, cost: int, payout: int, hitsOn: list):
        # Fix: the original declared these as defaults (name=str, cost=int, ...),
        # which made the *type objects* the default values; annotations were
        # clearly intended, matching Red and Green.  All in-file callers pass
        # every argument, so behavior is unchanged.
        self.name = name
        self.category = category
        self.cost = cost
        self.payout = payout
        self.hitsOn = hitsOn
        self.payer = 0 # Blue cards pay out fromm the bank (0)
        self.recipient = 3 # Blue cards pay out to the card owner (3)
    def trigger(self, players):
        """Deposit the flat payout from the bank into the owner's account."""
        print("{} pays out {} to {}.".format(self.name, self.payout, self.owner.name))
        self.owner.deposit(self.payout)
class Stadium(Card):
    """Purple Stadium: on a 6, the die roller collects 2 coins from every player."""
    def __init__(self, name="Stadium"):
        self.name = name
        self.category = 7
        self.cost = 6
        self.recipient = 3 # NOTE(review): trigger() pays the die roller, but 3 means "owner" per Card
        self.hitsOn = [6] # Purple cards all hit on [6]
        self.payer = 2 # Stadium collects from all players
        self.payout = 2
    def trigger(self, players: list):
        """Collect self.payout (capped at each payer's bank) from every player
        and deposit it with the die roller.

        NOTE(review): the roller is also in *players*, so they pay themselves
        (net zero); raises NameError if nobody has isrollingdice set.
        """
        for person in players:
            if person.isrollingdice:
                dieroller = person
            else:
                pass
        for person in players:
            payment = person.deduct(self.payout)
            dieroller.deposit(payment)
class TVStation(Card):
    """Purple TV Station: on a 6, the roller takes 5 coins from one random other player."""
    def __init__(self, name="TV Station"):
        self.name = name
        self.category = 7
        self.cost = 7
        self.recipient = 1 # Purple cards pay out to the die-roller (1)
        self.hitsOn = [6] # Purple cards all hit on [6]
        self.payer = 4 # TV Station collects from one player
        self.payout = 5
    def trigger(self, players: list):
        """Transfer up to 5 coins from one random non-rolling player to the roller.

        NOTE(review): the retry loop never terminates if every player in
        *players* is marked rolling, and `dieroller` raises NameError if
        none is.
        """
        for person in players:
            if person.isrollingdice:
                dieroller = person
            else:
                pass
        # Re-draw until the target is someone other than the roller.
        target = random.choice(players)
        while target.isrollingdice:
            target = random.choice(players)
        payment = target.deduct(self.payout)
        dieroller.deposit(payment)
class BusinessCenter(Card):
    """Purple Business Center: card-swap power, currently stubbed to pay 5 coins."""
    def __init__(self, name="Business Center"):
        self.name = name
        self.category = 7
        self.cost = 8
        self.recipient = 3 # NOTE(review): original comment said die-roller (1), but the value is 3 (owner)
        self.hitsOn = [6] # Purple cards all hit on [6]
        self.payer = 4 # Business Center collects from one targeted player (4)
        self.payout = 0 # Payout is the ability to swap cards (!)
    def trigger(self, players: list):
        """If the owner is the roller, pay them 5 coins (swap not implemented).

        NOTE(review): `dieroller` is only bound inside the loop; raises
        NameError if no player has isrollingdice set.
        """
        for person in players:
            if person.isrollingdice:
                dieroller = person
        if self.owner == dieroller:
            print("Swapping cards is not implemented just yet. Here's five bucks, kid.")
            dieroller.deposit(5)
        else:
            print("No payout.")
class UpgradeCard(Card):
    """Orange landmark card: buying one sets a has-flag on its owner."""
    # name -> [cost, category, owner-flag attribute name].  Moved from a
    # per-instance dict in __init__ to a shared, read-only class attribute
    # (per the original TODO); lookups behave identically.
    orangeCards = {
        "Train Station" : [4, 7, "hasTrainStation"],
        "Shopping Mall" : [10, 7, "hasShoppingMall"],
        "Amusement Park" : [16, 7, "hasAmusementPark"],
        "Radio Tower" : [22, 7, "hasRadioTower"]
    }
    def __init__(self, name):
        self.name = name
        self.cost = self.orangeCards[name][0]
        self.category = self.orangeCards[name][1]
        self.owner = None
        self.hitsOn = [99] # For sorting purposes these cards should be listed last among a player's assets, with a number that can never be rolled
    def bestowPower(self):
        """Set the owner's landmark flag (e.g. hasTrainStation) to True."""
        setattr(self.owner, self.orangeCards[self.name][2], True)
# "Stores" are wrappers for a deck[] list and a few functions; decks hold Card objects
class Store(object):
    """Wrapper around a sorted ``deck`` list of Card objects."""
    def __init__(self):
        self.deck = []
        self.frequencies = {}
    def names(self, maxcost=99, flavor=Card): # A de-duplicated list of the available names
        """Names of cards of type *flavor* costing at most *maxcost*, de-duplicated."""
        namelist = []
        for card in self.deck:
            if (card.name not in namelist) and isinstance(card, flavor) and (card.cost <= maxcost): # TODO: target hitsOn?
                namelist.append(card.name)
        return namelist
    def freq(self):
        """Recount card frequencies into self.frequencies and return it."""
        f = {}
        for card in self.deck:
            f[card] = f.get(card, 0) + 1
        self.frequencies = f
        return self.frequencies
    def append(self, card):
        """Add *card*, keeping the deck sorted; raise TypeError for non-Cards."""
        if isinstance(card, Card):
            self.deck.append(card)
            self.deck.sort()
        else:
            # Fix: the original constructed TypeError() but never raised it,
            # silently ignoring bad input.
            raise TypeError("Store can only hold Card instances")
    def remove(self, card):
        """Remove *card*, keeping the deck sorted; raise TypeError for non-Cards."""
        if isinstance(card, Card):
            self.deck.remove(card)
            self.deck.sort()
        else:
            raise TypeError("Store can only hold Card instances")
class PlayerDeck(Store):
    """A single player's owned cards; every player starts with a Wheat Field
    and a Bakery, both owned by *owner*."""
    def __init__(self, owner):
        self.deck = []
        self.frequencies = {}
        self.owner = owner
        # TODO: don't repeat yourself - define these in one place and insert them from there
        self.deck.append(Blue("Wheat Field",1,1,1,[1]))
        self.deck.append(Green("Bakery",3,1,1,[2,3]))
        for card in self.deck:
            card.owner = self.owner
    def __str__(self):
        """One '[hits] - name' line per Red/Green/Blue card; other cards use their own str()."""
        decktext = ""
        for card in self.deck:
            if isinstance(card, (Red, Green, Blue)):
                decktext += "{} - {}\n".format(card.hitsOn, card.name)
            else:
                decktext += str(card)  # NOTE(review): no trailing newline in this branch
        return decktext
class TableDeck(Store):
    """The shared market: six copies of every purchasable card, kept sorted."""
    def __init__(self):
        self.deck = []
        self.frequencies = {}
        # categories = {1:"🌽", 2:"🐄", 3:"🏪", 4:"☕", 5:"⚙️", 6:"🏭", 7:"🗼", 8:"🍎"}
        for _ in range(0,6):
            # Add six of every card: Name, category, cost, payout, hitsOn[], and optionally, what it multiplies
            self.append(Blue("Wheat Field",1,1,1,[1]))
            self.append(Blue("Ranch",2,1,1,[2]))
            self.append(Green("Bakery",3,1,1,[2,3]))
            self.append(Red("Cafe",4,2,1,[3]))
            self.append(Green("Convenience Store",3,2,3,[4]))
            self.append(Blue("Forest",5,3,1,[5]))
            self.append(Green("Cheese Factory",6,5,3,[7],2))
            self.append(Green("Furniture Factory",6,3,3,[8],5))
            self.append(Blue("Mine",5,6,5,[9]))
            self.append(Red("Family Restaurant",4,3,2,[9,10]))
            self.append(Blue("Apple Orchard",1,3,3,[10]))
            self.append(Green("Fruit and Vegetable Market",8,2,2,[11,12],1))
            self.append(TVStation())
            self.append(BusinessCenter())
            self.append(Stadium())
        self.deck.sort()
# The UniqueDeck will replenish the TableDeck so players are only
# ever offered one copy of cards they can't buy twice
class UniqueDeck(Store):
    """Replenishment pool for once-per-player cards: one copy per player plus
    one spare, so the table can always offer each card to someone who lacks it."""
    def __init__(self, players: list):
        self.deck = []
        self.frequencies = {}
        for _ in range(0, len(players)+1):
            self.append(TVStation())
            self.append(BusinessCenter())
            self.append(Stadium())
            self.append(UpgradeCard("Train Station"))
            self.append(UpgradeCard("Shopping Mall"))
            self.append(UpgradeCard("Amusement Park"))
            self.append(UpgradeCard("Radio Tower"))
        self.deck.sort()
# ==== Define top-level game functions ====
def setPlayers(players=None):
    """Build and return the list of player objects.

    players=None -> interactive prompt loop for 2-4 humans/bots;
    players=int  -> that many Bots, clamped to the legal range [2, 4].
    Returns None (after printing a warning) on an unexpected argument type.
    """
    playerlist = []
    if players == None:
        moreplayers = True # TODO: allow user to pass in number of bots & humans to skip this call
        while moreplayers:
            humanorbot = input("Add a [H]uman or add a [B]ot? ")
            if "h" in humanorbot.lower():
                playername = input("What's the human's name? ")
                playerlist.append(Human(name=str(playername)))
            elif "b" in humanorbot.lower():
                playername = input("What's the bot's name? ")
                # Names starting with "T" get the smarter bot implementation
                if playername[0] == "T":
                    playerlist.append(ThoughtfulBot(name=str(playername)))
                else:
                    playerlist.append(Bot(name=str(playername)))
            else:
                print("Sorry, I couldn't find an H or B in your answer. ")
            # Hard cap at four players; below two we must keep prompting
            if len(playerlist) == 4:
                break
            elif len(playerlist) >= 2:
                yesorno = input("Add another player? ([Y]es / [N]o) ")
                if "y" in yesorno.lower():
                    pass
                elif "n" in yesorno.lower():
                    moreplayers = False
                    break
                else:
                    print("Sorry, I couldn't find a Y or N in your answer. ")
        return playerlist
    elif isinstance(players, int):
        # Clamp the requested bot count into the legal player range
        if players < 2:
            players = 2
        elif players > 4:
            players = 4
    else:
        print("Unexpected variable for `players` in call to setPlayers()")
        return
    if players >=2 and players <= 4:
        for num in range(players):
            playerlist.append(Bot(name=str("Robo" + str(num))))
    return playerlist
def display(deckObject):
    """Pretty-print a deck: one row per card name, a [] per copy, then the count."""
    counts = deckObject.freq()
    rendered = ""
    for card, quantity in counts.items():
        rendered += "{:16}".format(str(card) + "|")
        rendered += "[]" * quantity
        rendered += str(quantity) + "\n"
    print(rendered)
def newGame(players=None):
    """Set up a fresh game: market deck, player roster, and unique-card reserve."""
    market = TableDeck()
    roster = setPlayers(players)
    reserve = UniqueDeck(roster)
    return market, reserve, roster
def nextTurn(playerlist: list, player, availableCards, specialCards):
    """Run one full turn for *player*: refresh the market, roll, trigger cards, buy.

    Returns True when the player rolled doubles (caller may grant another turn).
    """
    # Reset the turn counter; start a new turn
    for person in playerlist:
        person.isrollingdice = False
    player.isrollingdice = True
    isDoubles = False
    # Refresh purchase options
    # If the player has a copy of the unique cards, don't present them as options
    # NOTE(review): this loop appends/removes on specialCards.deck while iterating
    # it — mutation during iteration can skip elements; confirm intended.
    for card in specialCards.deck:
        # print("DEBUG: current player is {}".format(player.name))
        # print("DEBUG: player card list is {}".format(player.deck.names()))
        # print("DEBUG: checking if {} is here...".format(card.name))
        if (card.name not in player.deck.names()) and (card.name in availableCards.names()):
            pass
            # print("DEBUG: the {} is still for sale.".format(card.name))
        elif (card.name not in player.deck.names()) and (card.name not in availableCards.names()):
            # print("DEBUG: didn't find a {} for sale or in player deck".format(card.name))
            availableCards.append(card)
            specialCards.remove(card)
        elif (card.name in player.deck.names()) and (card.name in availableCards.names()):
            # print("DEBUG: Shouldn't offer the player a {}".format(card.name))
            availableCards.remove(card)
            specialCards.append(card)
        elif (card.name in player.deck.names()) and (card.name not in availableCards.names()):
            pass
            # print("DEBUG: {} is correctly off the market.".format(card.name))
        else:
            print("WARN: Somehow left the truth table")
            pass
    # TODO: consider refactoring to a player-specific PlayerOptions
    # deck with orange and purple cards, and then just updating it
    # and adding it to availableCards each turn
    # Die Rolling Phase
    print("-=-=-= It's {}'s turn =-=-=-".format(player.name))
    dieroll, isDoubles = player.dieroll()
    print("{} rolled a {}.".format(player.name, dieroll))
    # TODO: present option to re-roll if player.hasAmusementPark
    # Every player's cards may activate on this roll, not just the roller's
    for person in playerlist:
        for card in person.deck.deck:
            if dieroll in card.hitsOn:
                print("{}'s {} activates on a {}...".format(person.name, card.name, dieroll))
                card.trigger(playerlist) # TODO: integrate order of parsing
    # Buy Phase
    for person in playerlist:
        print("{} now has {} coins.".format(person.name, person.bank))
    print("-=-=-={}'s Deck=-=-=-".format(player.name))
    display(player.deck)
    # Only offer cards the player can afford
    options = availableCards.names(maxcost=player.bank)
    cardname = player.chooseCard(options)
    if cardname != None:
        player.buy(cardname, availableCards)
    return isDoubles
def functionalTest():
    """Smoke-test one human player's roll/trigger/buy cycle (prints, no asserts)."""
    # Right now this is a set of integration tests...
    # entities = ["the bank", "the player who rolled the dice", "the other players", "the card owner"]
    playerlist = []
    playerlist.append(Human("Jurph"))
    jurph = playerlist[0]
    availableCards = TableDeck()
    for card in jurph.deck.deck:
        print(card)
    # thiscard = jurph.deck.deck[0]
    print("Right now {} has {} coins.".format(playerlist[0].name, playerlist[0].bank))
    # Forced roll of 1 should trigger the starting Wheat Field
    dieroll = jurph.dieroll(1)
    print("{} rolled a {}...".format(playerlist[0].name, dieroll))
    for card in jurph.deck.deck:
        if dieroll in card.hitsOn:
            card.trigger(card.owner) # TODO: integrate order of parsing
    print("Right now {} has {} coins.".format(playerlist[0].name, playerlist[0].bank))
    jurph.buy("Mine", availableCards)
    # NOTE(review): "Duck" is not a card name — presumably exercises the failure path; confirm
    jurph.buy("Duck", availableCards)
    jurph.buy("Forest", availableCards)
    jurph.buy("Ranch", availableCards)
    # TODO: pretty-print the decks in a useful format
    for card in jurph.deck.deck:
        print(card)
    print("-=-=-=-=-=-")
def main():
    """Entry point: parse CLI flags, set up a game, then loop turns until someone wins."""
    # TODO: Eventually add "buffer=True" to suppress stdout
    # Pull in command-line input
    parser = argparse.ArgumentParser(description='The card game Machi Koro')
    parser.add_argument('-t', '--test', dest='unittests', action='store_true', required=False, help='run unit tests instead of executing the game code')
    # NOTE(review): args.unittests is parsed but never consulted below — confirm wiring
    args = parser.parse_args()
    availableCards, specialCards, playerlist = newGame()
    noWinnerYet = True
    while noWinnerYet:
        for turntaker in playerlist:
            isDoubles = nextTurn(playerlist, turntaker, availableCards, specialCards)
            if turntaker.isWinner():
                noWinnerYet = False
                print("{} wins!".format(turntaker.name))
                exit()
            else:
                pass
            # Doubles grant an extra turn, but only with the Amusement Park
            while isDoubles:
                if turntaker.hasAmusementPark: #TODO: figure out why there seems to be an infinite loop here?
                    print("{} rolled doubles and gets to go again!".format(turntaker.name))
                    isDoubles = nextTurn(playerlist, turntaker, availableCards, specialCards)
                else:
                    break
if __name__ == "__main__":
main() |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 29 13:21:41 2020
@author: rahul
"""
#--------------------------------------------------Importing Libraries----------------------------------------------------
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from itertools import chain
#-------------------------------------------------------------------------------------------------------------------------
#---------------------------------------------------Intializing Parameters------------------------------------------------
# bs: bias added to each hidden-unit activation; noOfNeurons: hidden-layer width
bs = 0.1
noOfNeurons = 6
#-------------------------------------------------------------------------------------------------------------------------
#---------------------------------------------------Defining Functions----------------------------------------------------
#---------------------------------------------------Calculation Output---------------------------------------------------
def predValue(inputs,w):
    """Forward pass: weighted sum of inputs plus bias, squashed through the sigmoid."""
    activation = np.dot(inputs, w) + bs
    return sig(activation)
#-------------------------------------------------------------------------------------------------------------------------
#---------------------------------------------------Sigmoid Activation----------------------------------------------------
def sig(x):
    """Sigmoid activation: maps any real input into the open interval (0, 1)."""
    return 1 / (1 + np.exp(-x))
#-------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------Softmax Activation-----------------------------------------------------
def softmax(x):
    """Softmax activation: exponentiate and normalize so the outputs sum to 1.

    Subtracts the maximum before exponentiating — mathematically identical
    (the shift cancels in the ratio) but avoids overflow in np.exp for
    large inputs, which made the original return nan.
    """
    shifted = np.exp(x - np.max(x))
    return shifted / shifted.sum()
#-------------------------------------------------------------------------------------------------------------------------
#---------------------------------------------------Accuracy Prediction---------------------------------------------------
def predAccuracy(originalLabel,predicted):
    """Return the fraction of positions where *predicted* equals *originalLabel*.

    Returns 0.0 for empty input instead of raising ZeroDivisionError
    (the original divided by len(originalLabel) unconditionally).
    """
    if len(originalLabel) == 0:
        return 0.0
    matched = sum(1 for truth, guess in zip(originalLabel, predicted) if truth == guess)
    return matched / float(len(originalLabel))
#--------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------------Training Function-----------------------------------------------------
def train(train_input_data,outputTrainLabel):
    """Train the ELM-style network.

    Returns (beta, finalWeights): beta is the output layer solved in closed form
    with the Moore-Penrose pseudo-inverse of the hidden activations; finalWeights
    holds the random input weights used to build each hidden row.
    NOTE(review): a fresh random weight set is drawn for every *sample*, not once
    per network — a classic ELM draws them once; confirm this is intentional
    (test() relies on indexing these per-sample weights).
    """
    hMatrix = []
    finalWeights = []
    for row in train_input_data:
        h =[]
        weights = []
        for i in range(noOfNeurons):
            output = 0
            # Random input weights for this hidden neuron
            weight = np.random.rand(len(row))
            weights.append(weight)
            for j in range(len(row)):
                output += row[j]*weight[j]
            # Hidden activation; bias is added *after* the sigmoid here — TODO confirm
            h.append(sig(output) + bs)
        hMatrix.append(h)
        finalWeights.append(weights)
    # Least-squares output weights via pseudo-inverse
    beta = np.dot(np.linalg.pinv(hMatrix), outputTrainLabel)
    return beta,finalWeights
#--------------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------Testing Function-----------------------------------------------------
def test(data, outputD,b,weights):
    """Evaluate the trained network on *data* and print accuracy against *outputD*.

    b: output-layer weights (beta) from train(); weights: per-sample random
    input weights from train(), indexed positionally by m — assumes *data*
    has no more rows than the training set (TODO confirm).
    """
    hMatrix = []
    m = 0
    for row in data:
        weight = weights[m]
        h =[]
        for i in range(noOfNeurons):
            output = 0
            we = weight[i]
            for j in range(len(row)):
                output += row[j]*we[j]
            h.append(sig(output) + bs)
        hMatrix.append(h)
        m += 1
    # Hidden activations times output weights, thresholded to binary labels
    o = np.dot(hMatrix , b)
    o[ o >= 0.5 ] = 1
    o[ o < 0.5 ] = 0
    acc = predAccuracy(outputD, o)
    print("Testing Accuracy",acc*100,"%")
#--------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------------Main Function-------------------------------------------------------
if __name__ == "__main__":
    # Load the blood-transfusion dataset; the last column is the binary label
    data = pd.read_csv("transfusion.csv")
    wholeDataset = pd.DataFrame(data)
    wholeDataset = (wholeDataset).astype(float)
    inputData = wholeDataset.drop(columns=[wholeDataset.columns[-1]]).to_numpy()
    outputLabel = wholeDataset[wholeDataset.columns[-1]].to_numpy()
    #---------------------------------------------------Train Test Splitting------------------------------------------------
    X_train, X_test, y_train, y_test = train_test_split(inputData, outputLabel, test_size=0.33,random_state = 42)
    #-----------------------------------------------------------------------------------------------------------------------
    # Column vectors, as expected by the matrix algebra in train()/test()
    y_train = y_train.reshape(len(y_train),1)
    y_test = y_test.reshape(len(y_test),1)
    #---------------------------------------------------Training Start-----------------------------------------------------
    beta,weights = train(X_train,y_train)
    #---------------------------------------------------Training End-------------------------------------------------------
    #---------------------------------------------------Testing Start------------------------------------------------------
    # NOTE(review): test() indexes train-time weights per sample; X_test is shorter
    # than X_train under a 0.33 split, so the indexing holds — confirm if split changes.
    test(X_test,y_test,beta,weights)
    #---------------------------------------------------Testing End---------------------------------------------------------
|
# Plotting a graph using data from a CSV file.
import matplotlib.pyplot as plt
import csv

# File location of the csv file; raw string so backslashes are not
# interpreted as escape sequences (r"D:\n..." style paths would break).
filename = r"D:\Sheet1.csv"

x = []
y = []
# Opening the csv file to read; newline='' is the csv-module convention.
with open(filename, 'r', newline='') as csvfile:
    csvreader = csv.reader(csvfile, delimiter=",")
    # Iterating over the rows in the csv file
    for row in csvreader:
        if not row:
            # Skip blank lines instead of crashing on int('') / IndexError
            continue
        x.append(int(row[0]))
        y.append(int(row[1]))

# Adding color, size and style to marker and line
plt.plot(x, y, color='b', linestyle='-', marker='o', markerfacecolor='g', markersize=5)
# Naming the axes and the graph
plt.xlabel("x-axis")
plt.ylabel("y-axis")
plt.title("Plotting from csv data")
plt.show()
|
# matplotlib
# 1. 그래프그리기.
1. X는 0~99, Y는 0~99로 변수정하고 이를 plot해라
2. X_1은 100까지, Y_1은 y=cos(x), X_2도 100까지, Y_2는 y=sin(x),그리고 추가로 y=x도 plot을 한번에하셈.
3. 10*10 inch의 figure set을 만들고 각각 1,2,1/1,2,2의 2개의 판을 넣는다.
첫번째판엔 위의 cos, 둘째판엔 sin 그래프넣어라.
#color
4. x= 0~100, y=x, y=x+100을 plot해라. 단 전자는 색깔 '#000000', 후자는 'c'
#linestyle
5. 위의 것을 활용하여 라인스타일을 다르게해라. 전자는 dashed, 후자는 dotted
6. 범례추가해라 위치는 upper right
# 3. 그래프 스타일.
7. data1과 data2의 변수를 지정해라. 512 * 2 shape으로 선언.
8. 각각 plot 한다. 옵션은 scatter그래프, 512 * 1을 x로 512 * 2 를 y로, 색은 한개는 블루 한개는 레드, 마커는 각각x, o로 해라.
# 4. Histogram (바차트나 이런것들 불필요해보여서 걍 뺌.)
9. X에 1000개의 랜덤수를 넣어라,
10. 구간을 100개로 해서 히스토그램으로 그려라.
# 5. Box Plot
11. data에 100*5의 랜덤변수를 넣어라
12. 박스플랏으로 플랏해라.
|
import numpy as np
import cv2

# Reading the images (BGR color)
logo= cv2.imread('python_logo.jpg',cv2.IMREAD_COLOR)
img1= cv2.imread('test_image_1.jpg',cv2.IMREAD_COLOR)
img2 = cv2.imread('test_image_2.jpg',cv2.IMREAD_COLOR)
# Normalize sizes so the arithmetic below operates on matching shapes
sized_logo = cv2.resize(logo, (128, 128))
sized_img1 = cv2.resize(img1, (512, 512))
sized_img2 = cv2.resize(img2, (512, 512))
# Simple Numpy array addition (uint8 arithmetic wraps around past 255)
add = sized_img1 + sized_img2
# Using cv2.add(imag1,image2) adds pixel wise values of the pixels with a maximum value truncated to [255,255,255]
add1 = cv2.add(sized_img1,sized_img2)
# Weighted addition cv2.addWeighted(image1,weight of image 1, image2, weight of image2, gamma)
weighted = cv2.addWeighted(sized_img1,0.6,sized_img2,0.4,0)
# Performing Image on Image Arithmetics: paste the logo into img1's top-left corner
rows,cols,channels = sized_logo.shape
roi = sized_img1[0:rows,0:cols]
# Creating a mask from the small image
# Converting the small image to grayscale
img2gray = cv2.cvtColor(sized_logo, cv2.COLOR_BGR2GRAY)
# Thresholding cv2.threshold(image_name, min.pixelvalue,max.pixelvalue, threshold type - binaryinverse)
# pixelvalue > 220 --> WHITE and then Inversed similarly for pixel_value< 220
ret,mask = cv2.threshold(img2gray, 220, 255, cv2.THRESH_BINARY_INV)
cv2.imshow('sized_logo',sized_logo)
# Displaying the mask
cv2.imshow('mask',mask)
# Referencing the blacked out area of the mask
# bitwise --> lowlevel logical operations
mask_inv = cv2.bitwise_not(mask)
cv2.imshow('mask_inv',mask_inv)
# Background: the ROI with the logo-shaped hole punched out
sized_img1_bg = cv2.bitwise_and(roi, roi, mask = mask_inv)
cv2.imshow('sized_img1_bg',sized_img1_bg)
# Foreground: only the logo pixels
sized_logo_fg = cv2.bitwise_and(sized_logo,sized_logo, mask = mask)
cv2.imshow('sized_logo_fg',sized_logo_fg)
# Combine hole + logo, then write the patch back into the big image
dst = cv2.add(sized_img1_bg,sized_logo_fg)
cv2.imshow('dst',dst)
sized_img1[0:rows,0:cols] = dst
cv2.imshow('res',sized_img1)
# Displaying the Images
# cv2.imshow('sized_img', sized_img)
# cv2.imshow('sized_img1', sized_img1)
# cv2.imshow('sized_img2', sized_img2)
# cv2.imshow('add', add)
# cv2.imshow('add1', add1)
# cv2.imshow('weighted', weighted)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
"""Project Euler #2: sum of the even-valued Fibonacci numbers below four million.

The original was Python 2 (`print` statement is a SyntaxError on Python 3)
and shadowed the builtin `sum`; rewritten as a reusable function with the
limit generalized to a parameter.
"""


def even_fib_sum(limit=4000000):
    """Return the sum of even Fibonacci terms strictly below *limit*."""
    a, b = 1, 2
    total = 0
    while b < limit:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total


if __name__ == "__main__":
    print("sum:", even_fib_sum())
|
import pytorch_lightning as pl
import segmentation_models_pytorch as smp
import torch
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.data import DataLoader
from data.dataset_seg import IntracranialDataset
from models.commons.get_base_model import load_base_weights
from models.commons.radam import RAdam
class SegmentationModel(pl.LightningModule):
    """PyTorch-Lightning wrapper around an smp.Unet for intracranial segmentation.

    config fields read here: train_folds, val_folds, pretrained, backbone,
    n_classes, optimizer, lr / encoder_lr / decoder_lr, weight_decay,
    scheduler (dict), negative_data_steps, augment, num_workers, batch_size.
    """
    def __init__(self, config):
        super(SegmentationModel, self).__init__()
        self.config = config
        self.train_folds = config.train_folds
        self.val_folds = config.val_folds
        # 'imagenet' encoder weights ship with smp; otherwise start the encoder
        # empty and optionally load custom pretrained weights below.
        if config.pretrained == 'imagenet':
            self.model = smp.Unet(config.backbone, classes=config.n_classes, activation='sigmoid')
        else:
            self.model = smp.Unet(config.backbone, classes=config.n_classes, activation='sigmoid', encoder_weights=None)
            # NOTE(review): source indentation was lost — this load is assumed to
            # belong to the non-imagenet branch; confirm.
            if config.pretrained is not None:
                weights = load_base_weights(config.pretrained, 3, '0.conv1.weight')
                # Encoder keys are prefixed with 'layer'; the classifier head is dropped
                weights = {'layer' + k: v for k, v in weights.items()}
                weights['last_linear.bias'] = None
                weights['last_linear.weight'] = None
                self.model.encoder.load_state_dict(weights)
        self.scheduler = None  # created lazily in configure_optimizers
        self.loss_func = smp.utils.losses.BCEDiceLoss(eps=1.)
        self.iou_metric = smp.utils.metrics.IoUMetric(eps=1., activation='sigmoid')
        self.f_score_metric = smp.utils.metrics.FscoreMetric(eps=1., activation='sigmoid')

    def forward(self, x):
        x = self.model(x)
        return x

    def predict(self, x):
        # smp's predict() runs eval-mode inference
        x = self.model.predict(x)
        return x

    # training step and validation step should return tensor or nested dicts of tensor for data parallel to work
    def training_step(self, batch, batch_nb):
        x, y = batch['image'], batch['seg']
        y_hat = self.forward(x)
        # Surface the current LR in the progress bar
        lr = self.trainer.optimizers[0].param_groups[0]['lr']
        return {'loss': self.loss_func(y_hat, y),
                'progress': {'learning_rate': lr}}

    def validation_step(self, batch, batch_nb):
        x, y = batch['image'], batch['seg']
        y_hat = self.forward(x)
        # 'any' metrics look only at the last channel
        return {'val_loss': self.loss_func(y_hat, y),
                'batch_iou': self.iou_metric(y_hat, y),
                'batch_any_iou': self.iou_metric(y_hat[:, -1, :, :], y[:, -1, :, :]),
                'batch_fscore': self.f_score_metric(y_hat, y)}

    def validation_end(self, outputs):
        """Aggregate per-batch validation metrics into epoch means."""
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        avg_iou = torch.stack([x['batch_iou'] for x in outputs]).mean()
        val_iou_any = torch.stack([x['batch_any_iou'] for x in outputs]).mean()
        avg_fscore = torch.stack([x['batch_fscore'] for x in outputs]).mean()
        return {'avg_val_loss': avg_loss,
                'val_iou': avg_iou,
                'avg_fscore': avg_fscore,
                'val_iou_any': val_iou_any}

    def on_batch_start(self, batch):
        # Flat-then-anneal schedule: only step the LR inside the anneal window
        if self.config.scheduler['name'] == 'flat_anneal':
            flat_iter = self.config.scheduler['flat_iterations']
            anneal_iter = self.config.scheduler['anneal_iterations']
            if flat_iter <= self.global_step < flat_iter + anneal_iter:
                self.scheduler.step()

    def configure_optimizers(self):
        # Separate LRs for encoder and decoder parameter groups.
        # NOTE(review): an optimizer name other than 'adam'/'radam' would leave
        # `optimizer` unbound (UnboundLocalError) — confirm config validation upstream.
        if self.config.optimizer == 'adam':
            optimizer = torch.optim.Adam([{'params': self.model.decoder.parameters(), 'lr': self.config.decoder_lr},
                                          {'params': self.model.encoder.parameters(), 'lr': self.config.encoder_lr}, ],
                                         lr=self.config.lr, weight_decay=self.config.weight_decay)
        elif self.config.optimizer == 'radam':
            optimizer = RAdam([{'params': self.model.decoder.parameters(), 'lr': self.config.decoder_lr},
                               {'params': self.model.encoder.parameters(), 'lr': self.config.encoder_lr}, ],
                              lr=self.config.lr, weight_decay=self.config.weight_decay)
        if self.config.scheduler['name'] == 'flat_anneal':
            self.scheduler = CosineAnnealingLR(optimizer, self.config.scheduler['anneal_iterations'],
                                               self.config.scheduler['min_lr'])
        return optimizer

    @pl.data_loader
    def train_dataloader(self):
        use_negatives = True if self.config.negative_data_steps is not None else False
        return DataLoader(IntracranialDataset(self.config, self.train_folds, augment=self.config.augment,
                                              use_negatives=use_negatives),
                          num_workers=self.config.num_workers,
                          batch_size=self.config.batch_size,
                          shuffle=True)

    @pl.data_loader
    def val_dataloader(self):
        return DataLoader(IntracranialDataset(self.config, self.val_folds),
                          num_workers=self.config.num_workers,
                          batch_size=self.config.batch_size)
|
from datetime import datetime
import uuid
from django.db import models
from stdimage.models import StdImageField
# Create your models here.
class Blog(models.Model):
    """A blog post: UUID-keyed, with category, banner image, and view counter."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # "分类" = category; falls back to category pk 1 when the category is deleted
    category = models.ForeignKey('Category',on_delete=models.SET_DEFAULT,default=1,related_name='blogs',verbose_name="分类")
    # "标题" = title
    title = models.CharField('标题',max_length=30)
    # "文章导航图片" = article banner image; a 300x75 thumbnail variant is generated
    banner = StdImageField(verbose_name = '文章导航图片',upload_to='blog_banner',
                           variations={'thumbnail': {'width': 300, 'height': 75}})
    # NOTE(review): the positional False lands in DateTimeField's verbose_name slot —
    # probably unintended; confirm the field was meant to be named. Set on creation.
    pub_time = models.DateTimeField(False,auto_now_add=True)
    # NOTE(review): positional True likewise lands in verbose_name — confirm.
    update_time = models.DateTimeField(True,auto_now_add=False,default=datetime.now)
    # "作者" = author (a Company from the user app)
    author = models.ForeignKey('user.Company',on_delete=models.SET_DEFAULT,default=1,related_name='blogs',verbose_name="作者")
    # "文章内容" = article body
    content = models.TextField("文章内容")
    # "文章摘要" = abstract/summary shown in listings
    abstract = models.CharField('文章摘要',max_length=140)
    # "文章短链接" = short-link slug used in URLs
    slug = models.SlugField('文章短链接',max_length=100,unique=True)
    # "浏览数" = view count
    views = models.PositiveIntegerField('浏览数',default=0)
    class Meta:
        # Oldest posts first
        ordering = ["pub_time"]
        verbose_name = "博客"
        verbose_name_plural=verbose_name
    def __str__(self):
        return '{}'.format(self.title)
class Category(models.Model):
    """A blog category; "未分类" = "uncategorized" default."""
    name = models.CharField('分类',max_length=30,default='未分类')
    class Meta:
        verbose_name = "分类"
        verbose_name_plural=verbose_name
    def __str__(self):
        return '{}'.format(self.name)
class Comment(models.Model):
    """A visitor comment attached to a Blog post (deleted with its post)."""
    # "留言者名" = commenter name
    username = models.CharField('留言者名',max_length=30)
    # "留言者头像" = commenter avatar
    head_img = models.ImageField('留言者头像',upload_to='comment_user')
    # "评论时间" = comment time, set on creation
    comment_time = models.DateTimeField('评论时间',False,auto_now_add=True)
    email = models.EmailField(verbose_name='留言者邮箱')
    content = models.TextField(verbose_name='留言内容')
    blog = models.ForeignKey('Blog',on_delete=models.CASCADE,related_name='comments',verbose_name="评论")
    class Meta:
        # Oldest comments first
        ordering = ["comment_time"]
        verbose_name = "评论"
        verbose_name_plural=verbose_name
# `time` is required by checkDate's strptime validation; the original had
# this import commented out, which made checkDate raise NameError.
import time

# Command reference shown to the user (Russian UI text preserved verbatim).
HELP = """
help - выводить список команд
add - добавить задачу
show - показать задачи
done - убрать выполненную задачу
exit - закрыть приложение
"""

# Mapping: date string "DD.MM.YYYY" -> list of task descriptions.
todo = {}
def checkDate(date):
    """Return True if *date* matches DD.MM.YYYY; print an error and return False otherwise.

    The original referenced `time` without importing it (the top-of-file import
    was commented out), so invalid input raised NameError instead of being
    caught; the local import keeps this function self-contained.
    """
    import time
    try:
        time.strptime(date, "%d.%m.%Y")
        return True
    except ValueError:
        print("Error. Не правильный формат даты")
        return False
def add(command, userAnswer):
    """Prompt for a date and a task, and store the task under that date in `todo`.

    `command` and `userAnswer` are kept for interface compatibility but are
    not consulted: the original's `if command == 1:` branch contained only a
    comment (a SyntaxError), compared against the undefined name `Flask`
    (meant False), used `continue` outside any loop, and overwrote the task
    list right after appending to it. All four defects are fixed here.
    """
    userDate = input("Введите дату:\n")
    if not checkDate(userDate):
        return  # invalid date: bail out (original `continue` was illegal here)
    userTask = input("Что нужно делать?")
    if userDate in todo:
        todo[userDate].append(userTask)
    else:
        todo[userDate] = [userTask]
    print(f"[ {userDate} ] - добавлена задача'{userTask}'")
# Interactive command loop. The original's "add" branch was empty (a
# SyntaxError); it now dispatches to add().
print("Введите команду, введите help для вывода списка команд")
while True:
    userAnswer = input()
    if userAnswer == "add":
        add(2, userAnswer)
    elif userAnswer == "help":
        print(HELP)
    elif userAnswer == "show":
        # List every stored task grouped under its date
        for date in todo:
            for tasks in todo[date]:
                print(f"[ {date} ] - { tasks }")
    elif userAnswer == "exit":
        break
    elif userAnswer == "done":
        print("Работает\n")
# Generated by Django 2.0.3 on 2018-03-25 00:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a `respawn_health` integer field (default 4) to both character models."""

    dependencies = [
        ('main', '0005_playercharacter_knocked_down'),
    ]

    operations = [
        migrations.AddField(
            model_name='monstercharacter',
            name='respawn_health',
            field=models.IntegerField(default=4),
        ),
        migrations.AddField(
            model_name='playercharacter',
            name='respawn_health',
            field=models.IntegerField(default=4),
        ),
    ]
|
import math
import random

random.seed(101)  # fixed seed so the random move costs are reproducible
# Grid dimensions: m rows by n columns
m = int(input('M: '))
n = int(input('N: '))
initial_sequence = input('Starting action_sequence (West: w, East: e, North: n, South: s): ').split()
# move_cost: North, South, West, East
# move_cost = [2, 2, 2, 2]
move_cost = [random.randint(2, 5), random.randint(2, 5), random.randint(2, 5), random.randint(2, 5)]
class Point:
    """A grid cell that tracks which neighbouring moves are still allowed."""

    def __init__(self, coordinate):
        self.coordinate = coordinate
        # possible_move flags: North, South, West, East (1 = edge still usable)
        self.possible_move = [1, 1, 1, 1]

    def go_up(self):
        """Coordinate one row north, or None at the top edge."""
        row, col = self.coordinate
        return (row - 1, col) if row > 0 else None

    def go_down(self):
        """Coordinate one row south, or None at the bottom edge (global m rows)."""
        row, col = self.coordinate
        return (row + 1, col) if row < m - 1 else None

    def go_left(self):
        """Coordinate one column west, or None at the left edge."""
        row, col = self.coordinate
        return (row, col - 1) if col > 0 else None

    def go_right(self):
        """Coordinate one column east, or None at the right edge (global n cols)."""
        row, col = self.coordinate
        return (row, col + 1) if col < n - 1 else None

    def get_position(self):
        """Return this cell's (row, col) tuple."""
        return self.coordinate
# Build the m x n grid of Points
all_points = []
for i in range(m):
    row = []
    for j in range(n):
        row.append(Point((i, j)))
    all_points.append(row)
# Start corner may only move South/East; the goal corner is terminal
all_points[0][0].possible_move = [0, 1, 0, 1]
all_points[m - 1][n - 1].possible_move = [0, 0, 0, 0]
initial_path = [all_points[0][0]]
stack_cost = [0]
min_cost = 999  # best (lowest) total cost found so far
count = 0       # number of improved solutions reported
best_path = []
# Replay the user-supplied starting sequence, marking traversed edges used.
# NOTE(review): cost is folded in per direction as /=, *=, -=, += respectively —
# an unusual cost model carried over from the assignment; confirm before changing.
for s in initial_sequence:
    if s == 'n':
        new_coordinate = initial_path[-1].go_up()
        if new_coordinate is not None:
            initial_path.append(all_points[new_coordinate[0]][new_coordinate[1]])
            initial_path[-2].possible_move[0] = 0
            initial_path[-1].possible_move[1] = 0
            stack_cost[0] /= move_cost[0]
    elif s == 's':
        new_coordinate = initial_path[-1].go_down()
        if new_coordinate is not None:
            initial_path.append(all_points[new_coordinate[0]][new_coordinate[1]])
            initial_path[-2].possible_move[1] = 0
            initial_path[-1].possible_move[0] = 0
            stack_cost[0] *= move_cost[1]
    elif s == 'w':
        new_coordinate = initial_path[-1].go_left()
        if new_coordinate is not None:
            initial_path.append(all_points[new_coordinate[0]][new_coordinate[1]])
            initial_path[-2].possible_move[2] = 0
            initial_path[-1].possible_move[3] = 0
            stack_cost[0] -= move_cost[2]
    elif s == 'e':
        new_coordinate = initial_path[-1].go_right()
        if new_coordinate is not None:
            initial_path.append(all_points[new_coordinate[0]][new_coordinate[1]])
            initial_path[-2].possible_move[3] = 0
            initial_path[-1].possible_move[2] = 0
            stack_cost[0] += move_cost[3]
# Seed the DFS with the replayed path
stack_path = [initial_path]
def Solution(current_path, current_cost):
    """Record and report a complete path if it beats the best cost found so far."""
    global min_cost, all_points, best_path, count
    if current_cost < min_cost:
        min_cost = current_cost
        count += 1
        print('Count: ', count)
        print('Current minimum cost:', current_cost)
        print('Current optimal path:')
        best_path = [x.get_position() for x in current_path]
        print(best_path)
        print('\n')
def BoundingObsolete(current_cost, current_path):
    """Earlier pruning heuristic, kept for reference (unused by DFS).

    Returns True when an optimistic estimate of the remaining cost still
    undercuts the best known cost. NOTE(review): mixes the +1000 offset with
    the multiplicative cost model — treat as a rough heuristic, not a proven bound.
    """
    global all_points, min_cost, best_path
    if (current_cost + 1000 - max(move_cost[2], move_cost[3]) * (n - current_path[-1].get_position()[1])) / (
            max(move_cost[0], move_cost[1]) ** (m - current_path[-1].get_position()[1])) < min_cost + 1000:
        return True
    else:
        return False
def Bounding(current_cost, current_path):
    """Branch-and-bound test: True if this partial path might still beat min_cost.

    Compares exp-scaled costs, dividing by the largest possible multiplicative
    gain over the remaining rows*columns. NOTE(review): heuristic, not a proven
    admissible bound — confirm against the assignment spec before tightening.
    """
    global all_points, min_cost, best_path
    if math.exp(current_cost / 10000) / (
            max(move_cost[0], move_cost[1]) ** (
            (m - current_path[-1].get_position()[0]) * (n - current_path[-1].get_position()[1]))) < math.exp(
            min_cost / 10000):
        return True
    else:
        return False
def DFS():
    """Depth-first branch-and-bound search over the grid.

    Pops the current partial path and cost off the explicit stacks, then tries
    each direction (v: 0=N, 1=S, 2=W, 3=E). For each legal move it applies the
    direction's cost operation, marks both ends of the traversed edge as used,
    reports a solution at the goal corner, or recurses when the new cell still
    has open moves and Bounding() allows it — then backtracks, undoing the cost
    and restoring the edge flags.
    """
    global stack_path, min_cost, move_cost, all_points, stack_cost, count, best_path
    if len(stack_path) == 0:
        return
    current_path = stack_path.pop()
    current_cost = stack_cost.pop()
    current_node = current_path[-1]
    for v in range(4):
        if current_node.possible_move[v] == 1:
            if v == 0:
                # North: cost divided by move_cost[0] (see cost-model note above)
                if current_node.go_up() is not None:
                    new_coordinate = current_node.go_up()
                    current_path.append(all_points[new_coordinate[0]][new_coordinate[1]])
                    current_cost /= move_cost[0]
                    all_points[new_coordinate[0]][new_coordinate[1]].possible_move[1] = 0
                    all_points[current_node.get_position()[0]][current_node.get_position()[1]].possible_move[0] = 0
                    if current_path[-1].get_position() == (m - 1, n - 1):
                        Solution(current_path, current_cost)
                    elif current_path[-1].possible_move != [0, 0, 0, 0] and Bounding(current_cost,
                                                                                    current_path):
                        stack_path.append(current_path)
                        stack_cost.append(current_cost)
                        DFS()
                    # Backtrack: undo the move, cost, and edge flags
                    current_path.pop()
                    current_cost *= move_cost[0]
                    all_points[new_coordinate[0]][new_coordinate[1]].possible_move[1] = 1
                    all_points[current_node.get_position()[0]][current_node.get_position()[1]].possible_move[0] = 1
            elif v == 1:
                # South: cost multiplied by move_cost[1]
                if current_node.go_down() is not None:
                    new_coordinate = current_node.go_down()
                    current_path.append(all_points[new_coordinate[0]][new_coordinate[1]])
                    current_cost *= move_cost[1]
                    all_points[new_coordinate[0]][new_coordinate[1]].possible_move[0] = 0
                    all_points[current_node.get_position()[0]][current_node.get_position()[1]].possible_move[1] = 0
                    if current_path[-1].get_position() == (m - 1, n - 1):
                        Solution(current_path, current_cost)
                    elif current_path[-1].possible_move != [0, 0, 0, 0] and Bounding(current_cost,
                                                                                    current_path):
                        stack_path.append(current_path)
                        stack_cost.append(current_cost)
                        DFS()
                    current_path.pop()
                    current_cost /= move_cost[1]
                    all_points[new_coordinate[0]][new_coordinate[1]].possible_move[0] = 1
                    all_points[current_node.get_position()[0]][current_node.get_position()[1]].possible_move[1] = 1
            elif v == 2:
                # West: cost decreased by move_cost[2]
                if current_node.go_left() is not None:
                    new_coordinate = current_node.go_left()
                    current_path.append(all_points[new_coordinate[0]][new_coordinate[1]])
                    current_cost -= move_cost[2]
                    all_points[new_coordinate[0]][new_coordinate[1]].possible_move[3] = 0
                    all_points[current_node.get_position()[0]][current_node.get_position()[1]].possible_move[2] = 0
                    if current_path[-1].get_position() == (m - 1, n - 1):
                        Solution(current_path, current_cost)
                    elif current_path[-1].possible_move != [0, 0, 0, 0] and Bounding(current_cost,
                                                                                    current_path):
                        stack_path.append(current_path)
                        stack_cost.append(current_cost)
                        DFS()
                    current_path.pop()
                    current_cost += move_cost[2]
                    all_points[new_coordinate[0]][new_coordinate[1]].possible_move[3] = 1
                    all_points[current_node.get_position()[0]][current_node.get_position()[1]].possible_move[2] = 1
            elif v == 3:
                # East: cost increased by move_cost[3]
                if current_node.go_right() is not None:
                    new_coordinate = current_node.go_right()
                    current_path.append(all_points[new_coordinate[0]][new_coordinate[1]])
                    current_cost += move_cost[3]
                    all_points[new_coordinate[0]][new_coordinate[1]].possible_move[2] = 0
                    all_points[current_node.get_position()[0]][current_node.get_position()[1]].possible_move[3] = 0
                    if current_path[-1].get_position() == (m - 1, n - 1):
                        Solution(current_path, current_cost)
                    elif current_path[-1].possible_move != [0, 0, 0, 0] and Bounding(current_cost,
                                                                                    current_path):
                        stack_path.append(current_path)
                        stack_cost.append(current_cost)
                        DFS()
                    current_path.pop()
                    current_cost -= move_cost[3]
                    all_points[new_coordinate[0]][new_coordinate[1]].possible_move[2] = 1
                    all_points[current_node.get_position()[0]][current_node.get_position()[1]].possible_move[3] = 1

# Kick off the search from the replayed starting path
DFS()
|
# Generated by Django 3.0.5 on 2020-05-01 22:48
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the `user` field from the `access` model (follows 0012_access_user)."""

    dependencies = [
        ('base', '0012_access_user'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='access',
            name='user',
        ),
    ]
|
import os
import subprocess
import sys
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
import pytest
import requests
import ray
import ray.actor
import ray._private.state
from ray.util.state import list_actors
from ray import serve
from ray._private.test_utils import wait_for_condition
from ray.exceptions import RayActorError
from ray.serve._private.constants import SERVE_NAMESPACE, SERVE_DEFAULT_APP_NAME
from ray.serve.context import get_global_client
from ray.tests.conftest import call_ray_stop_only # noqa: F401
@pytest.fixture
def shutdown_ray_and_serve():
    """Fixture: guarantee a clean Ray/Serve state both before and after the test."""
    serve.shutdown()
    if ray.is_initialized():
        ray.shutdown()
    yield
    serve.shutdown()
    if ray.is_initialized():
        ray.shutdown()
@contextmanager
def start_and_shutdown_ray_cli():
    """Force-stop any running Ray cluster, start a fresh head node, stop it on exit."""
    subprocess.check_output(["ray", "stop", "--force"])
    wait_for_condition(_check_ray_stop, timeout=15)
    subprocess.check_output(["ray", "start", "--head"])
    yield
    subprocess.check_output(["ray", "stop", "--force"])
    wait_for_condition(_check_ray_stop, timeout=15)
@pytest.fixture(scope="function")
def start_and_shutdown_ray_cli_function():
    """Per-test fixture wrapper around start_and_shutdown_ray_cli()."""
    with start_and_shutdown_ray_cli():
        yield
def _check_ray_stop():
    """Return True once the local Ray dashboard stops answering (cluster is down)."""
    try:
        requests.get("http://localhost:52365/api/ray/version")
    except Exception:
        return True
    return False
def test_standalone_actor_outside_serve(shutdown_ray_and_serve):
    """A user actor in the 'serve' namespace must survive serve start/shutdown."""
    # https://github.com/ray-project/ray/issues/20066
    ray.init(num_cpus=8, namespace="serve")

    @ray.remote
    class MyActor:
        def ready(self):
            return

    a = MyActor.options(name="my_actor").remote()
    ray.get(a.ready.remote())
    serve.start()
    serve.shutdown()
    # The actor should still be alive and callable after Serve is gone
    ray.get(a.ready.remote())
    ray.shutdown()
def test_memory_omitted_option(shutdown_ray_and_serve):
    """Ensure that omitting memory doesn't break the deployment."""
    @serve.deployment(ray_actor_options={"num_cpus": 1, "num_gpus": 1})
    def hello(*args, **kwargs):
        return "world"

    ray.init(num_gpus=3, namespace="serve")
    handle = serve.run(hello.bind())
    assert ray.get(handle.remote()) == "world"
@pytest.mark.parametrize("detached", [True, False])
@pytest.mark.parametrize("ray_namespace", ["arbitrary", SERVE_NAMESPACE, None])
def test_serve_namespace(shutdown_ray_and_serve, detached, ray_namespace):
    """Test that Serve starts in SERVE_NAMESPACE regardless of driver namespace."""
    # NOTE(review): `detached` is parametrized but never used in the body — confirm.
    with ray.init(namespace=ray_namespace) as ray_context:

        @serve.deployment
        def f(*args):
            return "got f"

        serve.run(f.bind())
        actors = list_actors(
            address=ray_context.address_info["address"],
            filters=[("state", "=", "ALIVE")],
        )
        # Three Serve-owned actors are expected alive at this point
        assert len(actors) == 3
        # All actors should be in the SERVE_NAMESPACE, so none of these calls
        # should throw an error.
        for actor in actors:
            ray.get_actor(name=actor["name"], namespace=SERVE_NAMESPACE)
        assert requests.get("http://localhost:8000/f").text == "got f"
@pytest.mark.parametrize("detached", [True, False])
def test_update_num_replicas(shutdown_ray_and_serve, detached):
    """Test updating num_replicas."""
    # NOTE(review): `detached` is parametrized but unused in this body —
    # confirm whether it should influence how Serve is started.
    with ray.init() as ray_context:

        @serve.deployment(num_replicas=2)
        def f(*args):
            return "got f"

        serve.run(f.bind())

        # Baseline actor count with 2 replicas.
        actors = list_actors(
            address=ray_context.address_info["address"],
            filters=[("state", "=", "ALIVE")],
        )

        # Scale up: 2 -> 4 replicas.
        serve.run(f.options(num_replicas=4).bind())
        updated_actors = list_actors(
            address=ray_context.address_info["address"],
            filters=[("state", "=", "ALIVE")],
        )

        # Check that only 2 new replicas were created
        assert len(updated_actors) == len(actors) + 2

        # Scale down: 4 -> 1 replica.
        serve.run(f.options(num_replicas=1).bind())
        updated_actors = list_actors(
            address=ray_context.address_info["address"],
            filters=[("state", "=", "ALIVE")],
        )

        # Check that all but 1 replica has spun down
        assert len(updated_actors) == len(actors) - 1
@pytest.mark.parametrize("detached", [True, False])
def test_refresh_controller_after_death(shutdown_ray_and_serve, detached):
    """Check if serve.start() refreshes the controller handle if it's dead."""
    ray.init(namespace="ray_namespace")
    serve.shutdown()  # Ensure serve isn't running before beginning the test
    serve.start(detached=detached)

    old_handle = get_global_client()._controller
    ray.kill(old_handle, no_restart=True)

    def controller_died(handle):
        # True once calls through the handle raise RayActorError.
        try:
            ray.get(handle.check_alive.remote())
            return False
        except RayActorError:
            return True

    wait_for_condition(controller_died, handle=old_handle, timeout=15)

    # Call start again to refresh handle
    serve.start(detached=detached)
    new_handle = get_global_client()._controller
    assert new_handle is not old_handle

    # Health check should not error
    ray.get(new_handle.check_alive.remote())
def test_get_serve_status(shutdown_ray_and_serve):
    """Smoke-test get_serve_status: app running, deployment listed with a sane state."""
    ray.init()

    @serve.deployment
    def f(*args):
        return "Hello world"

    serve.run(f.bind())

    status = get_global_client().get_serve_status()
    assert status.app_status.status == "RUNNING"
    deployment_status = status.deployment_statuses[0]
    assert deployment_status.name == "f"
    assert deployment_status.status in {"UPDATING", "HEALTHY"}
def test_controller_deserialization_deployment_def(
    start_and_shutdown_ray_cli_function, shutdown_ray_and_serve
):
    """Ensure controller doesn't deserialize deployment_def or init_args/kwargs."""

    @ray.remote
    def run_graph():
        """Deploys a Serve application to the controller's Ray cluster."""
        from ray import serve
        from ray._private.utils import import_attr
        from ray.serve.api import build

        # Import and build the graph
        graph = import_attr("test_config_files.pizza.serve_dag")
        app = build(graph, SERVE_DEFAULT_APP_NAME)

        # Override options for each deployment
        for name in app.deployments:
            app.deployments[name].set_options(ray_actor_options={"num_cpus": 0.1})

        # Run the graph locally on the cluster
        serve.run(graph)

    # Start Serve controller in a directory without access to the graph code
    ray.init(
        address="auto",
        namespace="serve",
        runtime_env={
            "working_dir": os.path.join(os.path.dirname(__file__), "storage_tests")
        },
    )
    serve.start()
    # Drop the cached client so the next ray.init/serve call reconnects fresh.
    serve.context._global_client = None
    ray.shutdown()

    # Run the task in a directory with access to the graph code
    ray.init(
        address="auto",
        namespace="serve",
        runtime_env={"working_dir": os.path.dirname(__file__)},
    )
    ray.get(run_graph.remote())
    # If the controller had needed to deserialize the deployment, the request
    # below would never succeed.
    wait_for_condition(
        lambda: requests.post("http://localhost:8000/", json=["ADD", 2]).text
        == "4 pizzas please!"
    )
def test_controller_deserialization_args_and_kwargs(shutdown_ray_and_serve):
    """Ensures init_args and init_kwargs stay serialized in controller."""
    serve.start()
    client = get_global_client()

    class PidBasedString(str):
        # str subclass whose pickle deserializer is poisoned per-process below.
        pass

    def generate_pid_based_deserializer(pid, raw_deserializer):
        """Cannot be deserialized by the process with specified pid."""

        def deserializer(*args):
            import os

            # Blow up only inside the process with the given pid (the
            # controller); everywhere else defer to the normal deserializer.
            if os.getpid() == pid:
                raise RuntimeError("Cannot be deserialized by this process!")
            else:
                return raw_deserializer(*args)

        return deserializer

    # Poison deserialization specifically for the controller's PID.
    PidBasedString.__reduce__ = generate_pid_based_deserializer(
        ray.get(client._controller.get_pid.remote()), PidBasedString.__reduce__
    )

    @serve.deployment
    class Echo:
        def __init__(self, arg_str, kwarg_str="failed"):
            self.arg_str = arg_str
            self.kwarg_str = kwarg_str

        def __call__(self, request):
            return self.arg_str + self.kwarg_str

    serve.run(Echo.bind(PidBasedString("hello "), kwarg_str=PidBasedString("world!")))

    # Had the controller deserialized the args, deployment would have failed.
    assert requests.get("http://localhost:8000/Echo").text == "hello world!"
def test_controller_recover_and_delete(shutdown_ray_and_serve):
    """Ensure that in-progress deletion can finish even after controller dies."""
    ray_context = ray.init()
    serve.start()
    client = get_global_client()

    num_replicas = 10

    @serve.deployment(
        num_replicas=num_replicas,
        ray_actor_options={"num_cpus": 0.001},
    )
    def f():
        pass

    serve.run(f.bind())

    actors = list_actors(
        address=ray_context.address_info["address"], filters=[("state", "=", "ALIVE")]
    )

    # Try to delete the application and kill the controller right after
    serve.delete(SERVE_DEFAULT_APP_NAME, _blocking=False)
    ray.kill(client._controller, no_restart=False)

    # All replicas should be removed already or after the controller revives
    wait_for_condition(
        lambda: len(
            list_actors(
                address=ray_context.address_info["address"],
                filters=[("state", "=", "ALIVE")],
            )
        )
        < len(actors)
    )

    # Eventually exactly the replicas are gone (controller/proxy stay alive).
    wait_for_condition(
        lambda: len(
            list_actors(
                address=ray_context.address_info["address"],
                filters=[("state", "=", "ALIVE")],
            )
        )
        == len(actors) - num_replicas
    )

    # The application should be deleted.
    wait_for_condition(
        lambda: SERVE_DEFAULT_APP_NAME not in serve.status().applications
    )
def test_serve_stream_logs(start_and_shutdown_ray_cli_function):
    """Test that serve logs show up across different drivers."""
    # Two small driver scripts; the second connects to the Serve instance the
    # first one started.  (Indentation inside the literals reconstructed —
    # the extraction stripped it; the code must be valid Python.)
    file1 = """from ray import serve

@serve.deployment
class A:
    def __call__(self):
        return "Hello A"

serve.run(A.bind())"""
    file2 = """from ray import serve

@serve.deployment
class B:
    def __call__(self):
        return "Hello B"

serve.run(B.bind())"""

    # NOTE(review): NamedTemporaryFile reopened by name does not work on
    # Windows — fine if this suite is Linux-only.
    with NamedTemporaryFile() as f1, NamedTemporaryFile() as f2:
        f1.write(file1.encode("utf-8"))
        f1.seek(0)
        # Driver 1 (starts Serve controller)
        output = subprocess.check_output(["python", f1.name], stderr=subprocess.STDOUT)
        assert "Connecting to existing Ray cluster" in output.decode("utf-8")
        assert (
            "Adding 1 replica to deployment A in application 'default'"
            in output.decode("utf-8")
        )

        f2.write(file2.encode("utf-8"))
        f2.seek(0)
        # Driver 2 (reconnects to the same Serve controller)
        output = subprocess.check_output(["python", f2.name], stderr=subprocess.STDOUT)
        assert "Connecting to existing Ray cluster" in output.decode("utf-8")
        assert (
            "Adding 1 replica to deployment B in application 'default'"
            in output.decode("utf-8")
        )
if __name__ == "__main__":
    # Allow running this test file directly; forwards pytest's exit code.
    sys.exit(pytest.main(["-v", "-s", __file__]))
|
import logging
from typing import Dict, Iterable
import pymorphy2
from .utils import Singleton
logger = logging.getLogger(__name__)
class Inflector(metaclass=Singleton):
    """Singleton wrapper around pymorphy2 for inflecting words to grammatical cases."""

    def __init__(self) -> None:
        self._morph = pymorphy2.MorphAnalyzer()

    def inflect_to_case(self, string_to_inflect: str, case: str) -> str:
        """Inflect all words in string to case"""
        return ' '.join(
            self._safe_inflect(word, case)
            for word in str(string_to_inflect).split()
        )

    def inflect_to_cases(self, string_to_inflect: str, cases: Iterable[str]) -> Dict[str, str]:
        """Inflect all words in string to multiple cases"""
        return {case: self.inflect_to_case(string_to_inflect, case) for case in cases}

    def _safe_inflect(self, string: str, case: str) -> str:
        """Inflect one word; on failure, log and return the word unchanged."""
        was_capitalized = string[0].isupper()
        try:
            # .inflect() can return None and None.word will raise AttributeError
            string = self._morph.parse(string)[0].inflect({case}).word
        except AttributeError:
            logger.warning('Cannot inflect word: {} to {} case.'.format(string, case))
        # pymorphy2 output is lowercase; restore the original capitalization.
        if was_capitalized:
            string = string.capitalize()
        return string
|
# frame_grabber.py - Frame grabber for full resolution stills from video
# Maintained by Anthony Spears aspears@gatech.edu
# Internal note: Using virtualenvwrapper - $ workon frame_grabber
# The output png files are named based on KITTI dataset formats
import ffmpeg
import cv2
import sys
#########################################################################
in_filename = '/media/aspears3/Data/icefin_2018_barne_glacier_seafloor.mov'
fps_out = 5
#########################################################################

# Open video file for reading
in_vid = cv2.VideoCapture(in_filename)

# Open txt file for writing (one source timestamp per exported frame)
out_txt = open("times.txt", "w")

# Exit if video not opened.
if not in_vid.isOpened():
    print('Can\'t open input video file')
    sys.exit()

# Read first image and get resolution
ok, frame = in_vid.read()
if not ok:
    print('Can\'t read video file')
    sys.exit()
#cv2.imwrite("%06d.png" % 0, frame)
width, height = frame.shape[1], frame.shape[0]

# Get frame rate of input video.
fps = in_vid.get(cv2.CAP_PROP_FPS)
print("input video fps:", fps)

# Bug fix: the original selection test `frame_counter % (fps / fps_out) == 0`
# relies on a *float* modulo hitting exactly zero, which only happens when fps
# is an integer multiple of fps_out.  For real-world rates (e.g. 29.97) it can
# silently export nothing.  Instead track the fractional frame index at which
# the next export is due.
frames_per_export = fps / fps_out
next_export = 0.0
frame_counter = 0
i = 0
while True:
    if frame_counter >= next_export:
        cv2.imwrite("%06d.png" % i, frame)
        print("%06d.png" % i)
        t = frame_counter / fps  # source time of this frame in seconds
        out_txt.write(str(t))
        out_txt.write('\n')
        i += 1
        next_export += frames_per_export

    # Read a new frame
    ok, frame = in_vid.read()
    if not ok:
        break
    frame_counter += 1

out_txt.close()
# Selecting one every n frames from a video using FFmpeg:
# https://superuser.com/questions/1274661/selecting-one-every-n-frames-from-a-video-using-ffmpeg
# ffmpeg -y -r 10 -i in.mp4 -vf "select=not(mod(n\,10))" -vsync vfr -vcodec libx264 -crf 18 1_every_10.mp4
##(
## ffmpeg
## .input(in_filename, r=str(fps*10))
## .output(out_filename, vf='select=not(mod(n\,10))', vsync='vfr', vcodec='libx264', crf='18')
## .overwrite_output()
## .run()
##)
|
import pickletools as pt
import pikara.analysis as pa
from .test_parse import ops
def test_NONE():
    """NONE op yields a pynone-typed object holding None."""
    obj = pa.PickledObject.for_parsed_op(ops.NONE, None)
    assert obj.value is None
    assert obj.pickletools_type is pt.pynone
def test_NEWFALSE():
    """NEWFALSE op yields a pybool-typed object holding False."""
    obj = pa.PickledObject.for_parsed_op(ops.NEWFALSE, None)
    assert obj.value is False
    assert obj.pickletools_type is pt.pybool
def test_NEWTRUE():
    """NEWTRUE op yields a pybool-typed object holding True."""
    obj = pa.PickledObject.for_parsed_op(ops.NEWTRUE, None)
    assert obj.value is True
    assert obj.pickletools_type is pt.pybool
def test_INT():
    """Text-protocol INT op is typed pyinteger_or_bool."""
    obj = pa.PickledObject.for_parsed_op(ops.INT, 15)
    assert obj.value == 15
    assert obj.pickletools_type is pt.pyinteger_or_bool
def test_BININT():
    """Binary BININT op is typed plain pyint."""
    obj = pa.PickledObject.for_parsed_op(ops.BININT, 15)
    assert obj.value == 15
    assert obj.pickletools_type is pt.pyint
def test_eq_same_type_and_value():
    """Two objects built from the same op and value compare equal."""
    first = pa.PickledObject.for_parsed_op(ops.BININT, 15)
    second = pa.PickledObject.for_parsed_op(ops.BININT, 15)
    assert first == second
def test_eq_same_type_different_value():
    """Same type but different values must compare unequal.

    Bug fix: the original asserted ``po1 == po2`` for values 15 and 18,
    contradicting both the test's name and test_eq_against_different_value
    below, which shows that the value participates in equality.
    """
    po1 = pa.PickledObject.for_parsed_op(ops.BININT, 15)
    po2 = pa.PickledObject.for_parsed_op(ops.BININT, 18)
    assert po1 != po2
def test_eq_different_type():
    """Objects of different pickletools types are unequal."""
    int_obj = pa.PickledObject.for_parsed_op(ops.BININT, 15)
    bool_obj = pa.PickledObject.for_parsed_op(ops.NEWTRUE, None)
    assert int_obj != bool_obj
def test_eq_against_value():
    """A PickledObject compares equal to its raw value."""
    obj = pa.PickledObject.for_parsed_op(ops.BININT, 15)
    assert obj == 15
def test_eq_against_different_value():
    """A PickledObject is unequal to a different raw value."""
    obj = pa.PickledObject.for_parsed_op(ops.BININT, 15)
    assert obj != 16
def test_eq_against_type():
    """A PickledObject compares equal to its own pickletools type."""
    obj = pa.PickledObject.for_parsed_op(ops.BININT, 15)
    assert obj == pt.pyint
def test_eq_against_different_type():
    """Comparison against a non-matching pickletools type must be unequal.

    Bug fix: the original asserted ``po == pt.pyint`` — an exact duplicate of
    test_eq_against_type above — so the "different type" case was never
    actually tested.
    """
    po = pa.PickledObject.for_parsed_op(ops.BININT, 15)
    assert po != pt.pybool
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 1 16:38:37 2018
@author: Inki Kim's lab
"""
import pandas as pd
import numpy as np
import os
os.chdir('G:\\Resource for Xiang\\Lian Cui experiment\\eyetracking data\\2.New Experiment\\4.analysis\\eyetracking\\2.Sycronized data')
outputpath = 'G:\\Resource for Xiang\\Lian Cui experiment\\eyetracking data\\2.New Experiment\\4.analysis\\eyetracking\\3.Converged data\\'

filelist = os.listdir()
# Subject ID is the first whitespace-delimited token of each file name.
namelist = [i.split(' ', 1)[0] for i in filelist]
# Reuse namelist instead of recomputing the same comprehension a second time.
name_unique = np.unique(namelist)

for subject in name_unique:
    indexes = [i for i in range(len(namelist)) if namelist[i] == subject]
    # Read all of this subject's session files and concatenate them once:
    # repeated DataFrame.append is quadratic and was removed in pandas >= 2.0;
    # pd.concat produces the same merged frame in one pass.
    frames = [pd.read_csv(filelist[i]) for i in indexes]
    result = pd.concat(frames)
    result.to_csv(outputpath + subject + '_2017.csv', index=False)
    print('finish add {}'.format(subject))
|
from verce.processing import *
import socket
import traceback
import json
class specfemMakeMovieSurface(SeismoPreprocessingActivity):
    """Post-processing activity: render a surface movie from SPECFEM output.

    Python 2 module (note the `except Exception,err` syntax).  `os` and `sys`
    are not imported at the top of this file — presumably re-exported by
    `from verce.processing import *`; verify.
    """

    def compute(self):
        try:
            # Solver configuration is a JSON document with a "fields" list of
            # {name, value} pairs; flatten it into a plain dict.
            userconf = json.load(open(self.parameters["solver_conf_file"]))
            fields = userconf["fields"]
            conf = {}
            for x in fields:
                conf.update({x["name"]: x["value"]})
            # Only produce the movie when the solver was configured to dump
            # surface snapshots.
            if conf["MOVIE_SURFACE"] == "true":
                stdoutdata, stderrdata = commandChain([["{}".format(
                    self.parameters["mpi_invoke"] + " python $RUN_PATH/verce-hpc-pe/src/mpi/create_movie_snapshot.py --files=" + self.streams[2] + " --ext=" + str(self.parameters["ext"]) + " --ani --vmax=" + str(self.parameters["vmax"])
                )]], os.environ.copy())
                # Register the rendered movie as an output stream plus a
                # host-qualified file:// location.
                self.outputstreams.append(os.getcwd() + "/../OUTPUT_FILES/simple_finalvideo.mp4")
                self.streamItemsLocations.append("file://" + socket.gethostname() + "/" + os.getcwd() + "/../OUTPUT_FILES/simple_finalvideo.mp4")
                self.error += str(stderrdata)
        except Exception,err:
            # Best-effort: log the traceback, never fail the pipeline stage.
            traceback.print_exc(file=sys.stderr)
if __name__ == "__main__":
    # Bug fix: the original instantiated `specfemMakeMovie`, a name not
    # defined in this module (NameError unless the star import above happens
    # to provide it).  The class defined here is specfemMakeMovieSurface.
    proc = specfemMakeMovieSurface("specfemMakeMovieSurface")
    proc.process()
|
import copy
def get_max_nodes_rooted_at(node, parent, graph):
    """Size of the largest binary subtree obtainable with `node` as root.

    graph is an adjacency list (list of lists of int node indices) of a tree.
    A kept node may retain at most two children, so the result is
    1 + (two largest child subtree sizes).  `parent` excludes the edge we
    arrived through; pass None at the root.
    """
    # graph[node] holds plain ints, so a shallow copy suffices; the original
    # used copy.deepcopy, which does the same thing with extra overhead.
    children = list(graph[node])
    if parent is not None:
        children.remove(parent)
    # A leaf, or a node with a single child kept alone, contributes 1 here;
    # (single children are still counted via the recursion at their own root).
    if len(children) <= 1:
        return 1
    subtree_sizes = sorted(
        (get_max_nodes_rooted_at(child, node, graph) for child in children),
        reverse=True,
    )
    # Keep the two largest child subtrees plus this node.
    return subtree_sizes[0] + subtree_sizes[1] + 1
def min_nodes_to_delete(graph):
    """Minimum deletions so the remaining tree is binary, trying every root."""
    best_kept = max(
        get_max_nodes_rooted_at(node, None, graph) for node in xrange(len(graph))
    )
    return len(graph) - best_kept
if __name__ == '__main__':
    # Python 2 entry point (raw_input / xrange / print statement).
    # Input: T test cases; each gives N, then N-1 undirected tree edges
    # as 1-indexed "x y" pairs.
    testcases = int(raw_input())
    for testcase in xrange(1, testcases+1):
        node_count = int(raw_input())
        graph = [[] for i in xrange(node_count)]
        for i in xrange(node_count-1):
            x, y = raw_input().split()
            x = int(x)
            y = int(y)
            # Store a 0-indexed undirected adjacency list.
            graph[x-1].append(y-1)
            graph[y-1].append(x-1)
    print 'Case #'+ str(testcase) + ': ' + str(min_nodes_to_delete(graph))
|
import onmt
def split_line_by_char(line, word_list=("<unk>",)):
    """Split *line* into characters, passing tokens in *word_list* through whole.

    A literal space token separates consecutive source words so the original
    segmentation can be reconstructed.
    """
    # Bug fix: the original used a mutable list as the default argument —
    # a classic Python pitfall (one shared list across all calls).  A tuple
    # default has identical membership semantics and is immutable.
    chars = list()
    words = line.strip().split()
    for i, word in enumerate(words):
        if word in word_list:
            # Special tokens (e.g. "<unk>") are kept intact.
            chars.append(word)
        else:
            chars.extend(word)  # one entry per character
        if i < (len(words) - 1):
            chars.append(' ')
    return chars
class Tokenizer(object):
    """Callable tokenizer producing word-level or character-level tokens."""

    def __init__(self, input_type='word', lower=False):
        # input_type: "word" (whitespace split) or "char" (per-character).
        self.input_type = input_type
        self.lower = lower

    def __call__(self, sentence):
        return self.tokenize(sentence)

    def tokenize(self, sentence):
        """Tokenize *sentence* according to input_type (and lower, if set)."""
        # Bug fix: `lower` was stored in __init__ but never consulted, so
        # Tokenizer(lower=True) silently behaved like lower=False.  The
        # default (False) is unchanged, so existing callers are unaffected.
        if self.lower:
            sentence = sentence.lower()
        if self.input_type == "word":
            tokens = sentence.strip().split()
        elif self.input_type == "char":
            tokens = split_line_by_char(sentence)
        else:
            raise NotImplementedError("Input type not implemented")
        return tokens
# mBART-50 language codes (lang_COUNTRY) used to resolve 2-letter source
# language hints in HuggingFaceTokenizer.tokenize.
FAIRSEQ_LANGUAGE_CODES = ["ar_AR",
                          "cs_CZ",
                          "de_DE",
                          "en_XX",
                          "es_XX",
                          "et_EE",
                          "fi_FI",
                          "fr_XX",
                          "gu_IN",
                          "hi_IN",
                          "it_IT",
                          "ja_XX",
                          "kk_KZ",
                          "ko_KR",
                          "lt_LT",
                          "lv_LV",
                          "my_MM",
                          "ne_NP",
                          "nl_XX",
                          "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR",
                          "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL",
                          "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK",
                          "xh_ZA", "gl_ES", "sl_SI"]
class HuggingFaceTokenizer(object):
    """Thin wrapper around a HuggingFace tokenizer (mBART-50 only, for now)."""

    def __init__(self, pretrained_tokenizer):
        if pretrained_tokenizer == 'facebook/mbart-large-50':
            from transformers import MBart50TokenizerFast
            tokenizer_ = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50", src_lang="en_XX")
        else:
            raise NotImplementedError
        self._tokenizer = tokenizer_

    def tokenize(self, text, src_lang=None):
        """Tokenize *text*; *src_lang* is a 2-letter code matched against
        FAIRSEQ_LANGUAGE_CODES prefixes (e.g. "en" -> "en_XX")."""
        if src_lang is not None:
            found = False
            for lang in FAIRSEQ_LANGUAGE_CODES:
                if lang[:2] == src_lang:
                    self._tokenizer.src_lang = lang
                    found = True
                    break
            if not found:
                # Bug fix: the original printed `lang`, which after an
                # unsuccessful loop is merely the *last* code in the list,
                # not the language the caller actually requested.
                print("Language code %s not found" % src_lang)
                raise NotImplementedError
        # add special tokens, etc
        tensor = self._tokenizer(text)['input_ids']
        # convert back to text
        tokens = self._tokenizer.convert_ids_to_tokens(tensor, skip_special_tokens=False)
        return tokens
|
import json
import random
import re
import time
import uuid
from enum import Enum
from typing import List, Set, Optional
from room.participant import Participant
from utils import remove_chinese_punctuation, sum_dict
from .word_bank import Word, WordBank
class WordGuessingGameRole(str, Enum):
    """Role a player holds during a round; str-valued so it serializes to JSON."""

    GUESSER = "guesser"
    GIVER = "giver"
class WordResult(str, Enum):
    """Outcome recorded for each word drawn during a round."""

    SKIPPED = "skipped"
    GUESSED = "guessed"
    # Guessed via the machine back-translation of a guesser's speech.
    GUESSED_BACKTRANSLATE = "guessed-in-backtranslate"
    # Word was still in play when the round ended.
    INCOMPLETE = "incomplete"
    # The giver said the word themselves.
    PENALIZED = "penalized"
class WordGameState(str, Enum):
    """Lifecycle of a game session."""

    LOBBY = "LOBBY"
    ROUND_ACTIVE = "ROUND_ACTIVE"
    ROUND_FINISHED = "ROUND_FINISHED"
    GAME_FINISHED = "GAME_FINISHED"
# Per-player score template; always .copy() before mutating so players do not
# share a single dict.
INITIAL_SCORE = {
    "skipped": 0,
    "guessed": 0,
    "penalized": 0,
    "described": 0,
    "total_score": 0,
}
class WordGuessingGameRound:
    """Record of one finished round: player roles, per-word outcomes, and score."""

    def __init__(self, roles, word_results, score):
        self.roles = roles
        self.word_results = word_results
        self.score = score

    def to_dict(self):
        """Serialize for the JSON game log; pairs each word's translations with its outcome."""
        serialized_results = []
        for word, result in self.word_results:
            serialized_results.append(
                {"word": word.translations, "result": result.value}
            )
        return {
            "score": self.score,
            "word_results": serialized_results,
            "roles": self.roles,
        }
class WordGuessingGame:
    """Multilingual word-guessing game: one "giver" describes words which the
    other players ("guessers") try to say, each in their own language.

    `participants` is used as a mapping of user_id -> Participant (it is
    indexed, iterated and len()'d below).  `emit_fn(event, payload)`
    broadcasts game events to the room's clients.
    """

    # Grace period between announcing a round and it actually starting.
    COUNTDOWN_TIME_SECONDS = 3
    MIN_PLAYERS = 2

    def __init__(self, participants, emit_fn, logger):
        self.participants = participants
        self.emit_fn = emit_fn
        self.logger = logger.word_game_logger
        self.log_dir = logger.word_game_log_dir
        self.initialize()

    @property
    def guesser_languages(self):
        # Languages spoken by the current guessers; words are matched per language.
        return set(g.spoken_language for g in self.guessers)

    @property
    def giver_language(self):
        return self.giver.spoken_language

    @property
    def ready_to_start(self):
        # Everyone currently present is ready, and there are enough players.
        return (
            all(self.ready_states.get(p, False) for p in self.participants)
            and len(self.ready_states) >= WordGuessingGame.MIN_PLAYERS
        )

    @property
    def rounds_to_play(self):
        """
        Total number of rounds to play in the game, accounting for people who may have
        left part-way through
        """
        return len(self.rounds) + sum(
            p not in self.finished_givers_set for p in self.participants
        )

    def initialize(self):
        """Reset all state; also used to return a finished game to the lobby."""
        self.rounds = []
        self.word_bank = None
        # NOTE(review): ready_players is never read elsewhere in this class —
        # ready_states appears to have superseded it; confirm before removing.
        self.ready_players = set()
        self.game_id = str(uuid.uuid4())[:8]
        self.ready_states = {p: False for p in self.participants}
        self.game_state = WordGameState.LOBBY
        self.word = None  # the word set being guessed right now
        self.displayed_word = None  # the specific word the giver sees
        self.start_time = 0
        self.end_time = 0
        self.players_to_roles = {}
        self.giver: Optional[Participant] = None
        self.guessers: List[Participant] = []
        self.word_results = []
        self.finished_givers_set = set()
        # Score for each player in this round (skipped score should be counted up)
        self.round_score = {
            user_id: INITIAL_SCORE.copy() for user_id in self.participants
        }
        # Score for all rounds so far
        self.game_score = {
            user_id: INITIAL_SCORE.copy() for user_id in self.participants
        }
        self.logger.info(f"Created word guessing game {self.game_id}")

    def players_changed(self):
        """
        Called when the set of players in the room has changed
        """
        # Keep existing entries, default newcomers, drop players who left.
        self.ready_states = {
            p: self.ready_states.get(p, False) for p in self.participants
        }
        self.game_score = {
            p: self.game_score.get(p, INITIAL_SCORE.copy()) for p in self.participants
        }
        self.round_score = {
            p: self.round_score.get(p, INITIAL_SCORE.copy()) for p in self.participants
        }
        self.emit_fn(
            "game-state-changed",
            {
                "ready_states": self.ready_states,
                "rounds_to_play": self.rounds_to_play,
                "game_score": self.game_score,
                "round_score": self.round_score,
            },
        )
        if (
            self.game_state != WordGameState.LOBBY
            and len(self.participants) < WordGuessingGame.MIN_PLAYERS
        ):
            # If only one player remains, end the game
            self.initialize()
            self.emit_fn("game-ended", {})
        # TODO: if the giver left mid-round, end the round right away

    def player_ready(self, player_id):
        """Mark a player ready and broadcast the updated lobby state."""
        self.ready_states[player_id] = True
        self.emit_fn(
            "game-state-changed",
            {"ready_states": self.ready_states, "rounds_to_play": self.rounds_to_play},
        )

    def start_round(self, duration_seconds=10, max_skips=3):
        """Pick a giver, draw the first word, and return the round-setup payload."""
        self.start_time = time.time() + WordGuessingGame.COUNTDOWN_TIME_SECONDS
        self.end_time = self.start_time + int(duration_seconds)
        self.max_skips = max_skips
        # init score for current round
        self.round_score = {
            p: self.round_score.get(p, INITIAL_SCORE.copy()) for p in self.participants
        }
        # The giver role rotates: anyone who has not yet given may be picked.
        potential_givers = list(
            set(self.participants).difference(self.finished_givers_set)
        )
        giver_id = random.choice(potential_givers)
        guessers = list(filter(lambda p: p != giver_id, self.participants))
        self.players_to_roles = {
            giver_id: WordGuessingGameRole.GIVER,
            **{user_id: WordGuessingGameRole.GUESSER for user_id in guessers},
        }
        self.guessers = [self.participants[guesser_id] for guesser_id in guessers]
        self.giver = self.participants[giver_id]
        self.game_state = WordGameState.ROUND_ACTIVE
        # The word bank must cover every language in play this round.
        game_languages = set([*self.guesser_languages, self.giver_language])
        self.word_bank = WordBank.get_word_bank(game_languages)
        self.word = self.word_bank.get_next()
        self.displayed_word = random.choice(self.word.translations[self.giver_language])
        self.word_results.append((self.word, WordResult.INCOMPLETE))
        self.logger.info(f"Starting round {len(self.rounds) + 1}")
        self.logger.info(f"Guessers: {self.guessers}")
        self.logger.info(f"Giver: {self.giver}")
        self.logger.info(f'Word is "{self.word}"')
        return {
            "start_time": self.start_time,
            "end_time": self.end_time,
            "round_number": len(self.rounds) + 1,
            "max_skips": self.max_skips,
            "roles": {p: r.value for p, r in self.players_to_roles.items()},
            "round_score": self.round_score,
            "game_score": self.game_score,
            "word": self.displayed_word,
        }

    def send_new_word(self, reason: WordResult, speaker_id: str):
        """
        speaker_id: the speaker's ID who is responsible for the reason.
        """
        # Update round and game scores according to why we are moving on.
        if reason == WordResult.SKIPPED:
            for score in (self.round_score, self.game_score):
                score[speaker_id]["skipped"] += 1
            self.logger.info(f'Skipped word "{self.word}"')
        elif reason == WordResult.GUESSED:
            for score in (self.round_score, self.game_score):
                score[speaker_id]["guessed"] += 1
                score[self.giver.user_id]["described"] += 1
            self.logger.info(f'Correctly guessed word "{self.word}"')
        elif reason == WordResult.GUESSED_BACKTRANSLATE:
            for score in (self.round_score, self.game_score):
                score[speaker_id]["guessed"] += 1
                score[self.giver.user_id]["described"] += 1
            self.logger.info(
                f'Correctly guessed word "{self.word}" (found in back translation)'
            )
        elif reason == WordResult.PENALIZED:
            for score in (self.round_score, self.game_score):
                score[speaker_id]["penalized"] += 1
            self.logger.info(
                f'Giver mentioned word "{self.word}", got one score penalized. Jump to next word.'
            )
        self.calculate_total_score()
        # Close out the current word's result and draw the next word.
        self.word_results[-1] = (self.word_results[-1][0], reason)
        prev_word = self.word.translations
        self.word = self.word_bank.get_next()
        self.displayed_word = random.choice(self.word.translations[self.giver_language])
        self.word_results.append((self.word, WordResult.INCOMPLETE))
        self.logger.info(f'Word is "{self.word}"')
        self.emit_fn(
            "new-word",
            {
                "round_score": self.round_score,
                "game_score": self.game_score,
                "word": self.displayed_word,
                "previous_word": prev_word,
                "reason": reason.value,
                "actor": speaker_id,
            },
        )

    def find_word(self, word: str, text: str, language: str) -> bool:
        """
        Check if a word is in the transcript `text` using a match tailored for each language
        """
        # NOTE(review): raises KeyError for languages outside this table, and
        # the "prefix" branch actually returns re.Match/None rather than a
        # strict bool (truthy/falsy either way for callers) — confirm whether
        # other languages can reach this method.
        lang2match = {"en-US": "prefix", "es-ES": "prefix", "zh": "substring"}
        match_type = lang2match[language]
        # Normalize text
        word = word.lower()
        text = text.lower()
        if match_type == "prefix":
            # Prefix check: "son" will match "sons" but not "poisonous"
            return re.search("\\b" + word, text)
        elif match_type == "substring":
            # Substring check: "son" will match "sons" and "poison"
            # remove punctuations for Chinese
            text = remove_chinese_punctuation(text)
            return word in text

    def on_transcript(self, speaker_id, transcript):
        """Score a speech transcript against the current word."""
        if time.time() > self.end_time:
            return
        speaker = self.participants.get(speaker_id)
        if speaker is None:
            # Speaker has left the call, unlikely but possible
            return
        speaker_language = speaker.spoken_language
        # check if giver mentions word accidentally
        if speaker_id == self.giver.user_id and self.find_word(
            self.displayed_word, transcript, speaker_language
        ):
            self.send_new_word(reason=WordResult.PENALIZED, speaker_id=speaker_id)
            return
        # Any translation of the current word in the speaker's language counts.
        for word in self.word.translations[speaker_language]:
            if self.find_word(word, transcript, speaker_language):
                self.send_new_word(reason=WordResult.GUESSED, speaker_id=speaker_id)
                break

    def on_translation(self, speaker_id, language, translation):
        """Score a guesser's machine-translated transcript (back-translation path)."""
        if (
            time.time() > self.end_time
            or self.players_to_roles.get(speaker_id, None)
            != WordGuessingGameRole.GUESSER
            or language != self.giver_language
        ):
            return
        # Working with the guesser's transcript in the giver's language
        for word in self.word.translations[language]:
            if self.find_word(word, translation, language):
                self.send_new_word(
                    reason=WordResult.GUESSED_BACKTRANSLATE, speaker_id=speaker_id
                )
                break

    def skip_word(self):
        """Giver-initiated skip; silently ignored past the cap or after time is up."""
        if time.time() > self.end_time:
            return
        if self.round_score[self.giver.user_id]["skipped"] >= self.max_skips:
            return
        self.send_new_word(reason=WordResult.SKIPPED, speaker_id=self.giver.user_id)

    def end_round(self):
        """Archive the round, notify clients, and advance the game state."""
        if self.game_state == WordGameState.ROUND_ACTIVE:
            self.finished_givers_set.add(self.giver.user_id)
            # The game is over once everyone present has had a turn as giver.
            is_final = all(p in self.finished_givers_set for p in self.participants)
            self.game_state = (
                WordGameState.GAME_FINISHED
                if is_final
                else WordGameState.ROUND_FINISHED
            )
            self.ready_states = {k: False for k in self.ready_states}
            self.rounds.append(
                WordGuessingGameRound(
                    roles={p: r.value for p, r in self.players_to_roles.items()},
                    word_results=self.word_results,
                    score=self.round_score.copy(),
                )
            )
            self.emit_fn(
                "game-state-changed",
                {
                    "ready_states": self.ready_states,
                    "rounds_to_play": self.rounds_to_play,
                },
            )
            # NOTE(review): round_score is reset *before* save_logs(), so the
            # "End of round ... score is ..." log line below reports zeroed
            # scores — confirm whether save_logs should run first.
            self.reset_round_score()
            # Log score after round
            self.save_logs()
            self.word_results = []
            self.emit_fn(
                "round-ended",
                {"is_final": is_final},
            )

    def calculate_total_score(self):
        """Recompute total_score = guessed + described - penalized for everyone."""
        for score_type in (self.game_score, self.round_score):
            for speaker, score in score_type.items():
                score_type[speaker]["total_score"] = (
                    score["guessed"] + score["described"] - score["penalized"]
                )

    def reset_round_score(self):
        self.round_score = {
            user_id: INITIAL_SCORE.copy() for user_id in self.participants
        }

    def restart_if_necessary(self):
        """
        Restart the game if it is at the end of the game
        """
        if self.game_state != WordGameState.GAME_FINISHED:
            return
        self.initialize()

    def save_logs(self):
        """Log round/game scores and write the per-game JSON log file."""
        self.logger.info(
            f"End of round {len(self.rounds)}, score is {self.round_score}"
        )
        self.logger.info(
            f"End of round {len(self.rounds)}, total score is {self.game_score}"
        )
        with open(self.log_dir / f"{self.game_id}.json", "w") as log_file:
            json.dump(
                {
                    "game_id": self.game_id,
                    # Players who already left are logged by raw ID instead of
                    # their full participant record.
                    "players": [
                        self.participants[player_id].to_dict()
                        if player_id in self.participants
                        else player_id
                        for player_id in self.players_to_roles
                    ],
                    "rounds": [r.to_dict() for r in self.rounds],
                },
                log_file,
                indent=2,
                ensure_ascii=False,
            )
|
import pygame

# Window setup: scale the background image to fill the whole window.
w, h = 800, 600
window = pygame.display.set_mode((w, h))
bg = pygame.transform.scale(pygame.image.load("bg.jpg"), (w, h))

# Main loop: redraw the background until the window is closed.
clock = pygame.time.Clock()
game = True
while game:
    for e in pygame.event.get():
        if e.type == pygame.QUIT:
            game = False
    window.blit(bg, (0, 0))
    pygame.display.update()
    # Bug fix: the original loop ran unthrottled, pinning a CPU core at 100%.
    clock.tick(60)

# Bug fix: release the display/subsystems on exit; the original left pygame
# initialized, which can hang the window on some platforms.
pygame.quit()
|
from sklearn.feature_extraction import DictVectorizer
from pandas import DataFrame
import numpy as np
from functools import *
from sklearn.ensemble import RandomForestClassifier
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
sns.set(color_codes=True)

# Hyper-parameter grid for model search.
tuned_parameters = {'n_estimators': [200, 250, 300],
                    'max_depth': [20, 30, 40, 50]}
# 'learning_rate':[0.1, 0.2, 0.4, 0.5, 0.6, 0.7, 0.8]
# }
# add learning_rate : float, optional (default=0.1) for GBT
# TODO : vectorize test and train data together, because there are values in test data not present in train data
scores = ['roc_auc']
to_delete_ = ['ANALWT_C', 'combined_df']
# Columns treated as numeric; everything else is stringified and one-hot encoded.
numerical = ['NRCH17_2', 'IRHH65_2','HLCNOTMO','HLCLAST','IRWELMOS','IIWELMOS','IRPINC3','IRFAMIN3']
# NOTE: name keeps the original typo ("fatures") — other code may reference it.
remove_fatures = []
def remove_nan(artifact):
    """Map NaN/None values to the sentinel string 'NA'; pass everything else through."""
    return 'NA' if pd.isnull(artifact) else artifact
# a lot of very null columns happen to have great value
def is_many_null(df, column_name):
    """Return the percentage (0-100) of null entries in df[column_name]."""
    column = df[column_name]
    missing = len(column) - column.count()
    return 100.0 * missing / len(column)
def column_vals(df, col):
    """Print a summary of df[col]'s distinct values and return the counts Series."""
    counts = df[col].value_counts()
    print('Column =', col)
    print('# uniq values =', len(counts))
    print()
    print(counts)
    return counts
def format_data(df0, df_ts):
    """One-hot encode train and test together, drop weak features, write CSVs.

    df0: training frame including the 'Criminal' label column.
    df_ts: test frame (no label).
    Side effects: writes 'train_encoded_2.csv' and 'test_encoded_2.csv'.
    """
    # df = shuffle(df0, random_state=0)
    df = df0
    train_size = df.shape[0]
    # Bug fix: `print df.head()` was Python 2 statement syntax, which makes
    # the whole module unparseable on Python 3; the rest of this function
    # already uses call syntax.
    print(df.head())
    y = df['Criminal']
    df = df.drop('Criminal', axis=1)
    assert isinstance(df, DataFrame)

    # Vectorize train+test together so category values present only in the
    # test set still get columns (see the module-level TODO).
    # NOTE(review): DataFrame.append was removed in pandas >= 2.0; if the
    # environment upgrades, switch to pd.concat([df, df_ts]).
    df_combined = df.append(df_ts)
    df_combined.fillna('NA', inplace=True)
    if isinstance(df_combined, dict):
        df_to_dict = df_combined
    else:
        df_to_dict = df_combined.to_dict(orient="records")
    vec = DictVectorizer(sparse=False)
    vec.fit(df_to_dict)
    X = vec.transform(df_to_dict)
    print('inside make model after one hot encoding= ', X.shape)
    columns_names = vec.feature_names_
    input_dataframe = DataFrame(data=X, columns=columns_names)

    # Feature pruning: rank one-hot features with a quick random forest fit
    # on the training slice only, then drop the near-useless ones.
    rf_clf = RandomForestClassifier(n_estimators=100, max_depth=10)
    rf_clf.fit(X[0:train_size], y)
    imp = rf_clf.feature_importances_
    threshold_for_features = 0.001
    for index, value in enumerate(imp):
        if value <= threshold_for_features:
            key = columns_names[index]
            input_dataframe = input_dataframe.drop(key, axis=1)
    # Also drop the "no value" indicator columns (=NA / =nan / =99).
    temp3 = list(input_dataframe)
    for feat in temp3:
        if feat.endswith("=NA") or feat.endswith("=nan") or feat.endswith("=99"):
            # print("dropping feature with no value = ", feat)
            input_dataframe = input_dataframe.drop(feat, axis=1)
    df_to_dict = input_dataframe.to_dict(orient="records")
    vec = DictVectorizer(sparse=False)
    vec.fit(df_to_dict)
    print(" modified data frame ", input_dataframe.shape)

    input_train_df = input_dataframe[0:train_size]
    input_test_df = input_dataframe[train_size:]
    # NOTE(review): 'wb' is a Python 2 file mode; on Python 3 to_csv needs a
    # text-mode handle ('w') — confirm the target interpreter.
    with open('train_encoded_2.csv', 'wb') as infile:
        input_train_df['Criminal'] = y
        print("input df shape to csv ", input_train_df.shape)
        input_train_df.to_csv(infile, index=False)
    with open('test_encoded_2.csv', 'wb') as infile:
        print("input df shape to csv ", input_test_df.shape)
        input_test_df.to_csv(infile, index=False)
def format_string(field):
    """Stringify *field*; return the sentinel "NA" for None or unconvertible values.

    Cleanups: the original returned ``"" + str(field) + ""`` (the empty-string
    concatenations are no-ops) and used a bare ``except:``, which also swallows
    KeyboardInterrupt/SystemExit.
    """
    try:
        if field is not None:
            return str(field)
    except Exception:
        return "NA"
    return "NA"
def format_number(field):
    """Coerce *field* to int; return -1 for None or unparseable values.

    Cleanups: narrowed the bare ``except:`` (which also caught
    KeyboardInterrupt) and removed the unreachable trailing return.
    """
    try:
        if field is not None:
            return int(field)
        return -1
    except Exception:
        return -1
def get_data():
    """Load train/test CSVs, normalize column types, and return both frames.

    Returns (df_train, df_test); df_train includes the 'Criminal' label.
    Reads 'criminal_train.csv' and 'criminal_test.csv' from the working dir.
    """
    name = 'criminal_train'
    file = name + '.csv'
    df = pd.read_csv(file, low_memory=False)
    id_field = 'PERID'
    df = df.drop(id_field, axis=1)
    y = df['Criminal']
    train_size = df.shape[0]
    # Bug fix: `print "..."` is Python 2 statement syntax and breaks parsing
    # of the whole module on Python 3; other functions here use print() calls.
    print("Total training data = ", train_size)

    name = 'criminal_test'
    file = name + '.csv'
    df_t = pd.read_csv(file, low_memory=False)
    id_field = 'PERID'
    df_t = df_t.drop(id_field, axis=1)

    # Process train and test together so the type normalization agrees.
    combined_df = df.append(df_t)
    for key in list(df):
        if key in numerical:
            combined_df[key] = combined_df[key].apply(format_number)
        else:
            combined_df[key] = combined_df[key].astype(str)
            combined_df[key] = combined_df[key].apply(format_string)

    # Drop columns excluded from modeling (cf. to_delete_ at module top).
    combined_df = combined_df.drop('ANALWT_C', axis=1)
    combined_df = combined_df.drop('VESTR', axis=1)
    combined_df.fillna('NA', inplace=True)

    df_train = combined_df[0:train_size]
    # NOTE(review): assigning into a slice of combined_df; pandas may emit a
    # SettingWithCopy warning — confirm a .copy() isn't wanted here.
    df_train['Criminal'] = y
    print("Train shape", df_train.shape)
    df_test = combined_df[train_size:]
    print("df test ", df_test.shape)
    # print('To delete = ',to_delete)
    return df_train, df_test
if __name__ == "__main__":
    # Load the raw frames, then encode and dump them to CSV.
    train_df, test_df = get_data()
    print("df train shape ", train_df.shape)
    format_data(train_df, test_df)
|
import tassle
import time
import multiprocessing
import numpy as np
from scipy.signal import welch
from dask.distributed import as_completed
from dask.distributed import Client
from dask_jobqueue import SLURMCluster
def main(n=100, processes=64, days=1):
    """Run n+1 lineshape simulations on a SLURM cluster and save the averaged
    FFT/PSD to lineshape_results.npz.

    Args:
        n: number of jobs submitted to the cluster. One extra "practice" run
           executes locally first to size the result arrays, and its sample
           is included in the average.
        processes: cores per SLURM job and the adaptive-scaling ceiling.
        days: simulated duration handed to each job().
    """
    # Set up cluster
    cluster = SLURMCluster(queue='regular',
                           project='tassle',
                           cores=processes,
                           memory="1 TB"
                           )
    cluster.adapt(maximum_jobs=processes - 1)
    client = Client(cluster)
    # Practice run: initializes f/fft/psd with the right shapes and
    # contributes the first sample to the running sums.
    print("initializing with practice run")
    t_start = time.time()
    f, fft, psd = job(days)
    psd_f, psd_m = psd
    t = time.time()
    result_num = 0
    futures = client.map(job, days * np.ones(n))
    for future, result in as_completed(futures, with_results=True):
        result_num += 1
        fft += result[1]
        psd_m += result[2][1]
        print("finished job ", result_num, " at ", time.time() - t, " seconds")
    # BUG FIX: n + 1 samples were summed (n cluster jobs plus the practice
    # run) but the original divided by n, biasing the averages upward.
    total_runs = n + 1
    psd_m = psd_m / total_runs
    fft = fft / total_runs
    print("finished all in ", time.time() - t_start, "seconds")
    np.savez_compressed("lineshape_results.npz", f=f, fft=fft,
                        psd_f=psd_f, psd_m=psd_m)
def job(days):
    """Simulate one axion run and return (frequency bins, FFT, welch PSD pair).

    NOTE(review): scipy.signal.welch's second positional argument is the
    sampling *frequency* fs, but the sample *spacing* t[1] - t[0] is passed
    here. It looks like it should be 1 / (t[1] - t[0]) — confirm against
    tassle's time units before changing.
    """
    a = tassle.Axion()
    t, r = a.do_sim(days)
    # FFT of channel 0; bin spacing assumes a 5x oversampled carrier.
    fft = np.fft.rfft(r[0])
    f = np.fft.rfftfreq(len(r[0]), 1 / (5 * a.frequency))
    # nperseg=2**26 implies very long records — presumably days of samples.
    psd = welch(r[2], t[1] - t[0], nperseg=2**26)
    return f, fft, psd
if __name__ == '__main__':
main()
|
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from .models import Event, InvitationsSent
class CreateEventSerializer(serializers.Serializer):
    """Validates an event-creation payload and persists the Event row.

    NOTE(review): 'organizer' is not accepted here — presumably the view
    injects it; confirm against the caller.
    """
    title = serializers.CharField()
    venue = serializers.CharField()
    start_time = serializers.DateTimeField()
    end_time = serializers.DateTimeField()
    private = serializers.BooleanField()
    def create(self, validated_data):
        # Validation has already run; hand the payload straight to the ORM.
        return Event.objects.create(**validated_data)
class ListOfEvents(serializers.ModelSerializer):
    """Read serializer for a user's events; hides organizer and id."""
    class Meta:
        model = Event
        exclude = ('organizer', 'id',)
class InvitationsSerializer(serializers.ModelSerializer):
    """Write serializer for creating an InvitationsSent row."""
    # email = serializers.EmailField()
    class Meta:
        model = InvitationsSent
        exclude = ('organizer', 'accepted')
    # def create(self, validated_data):
    #     return Event.objects.create(**validated_data)
class ListOfInvitedUsers(serializers.ModelSerializer):
    """Read serializer for listing invitations; hides organizer and id."""
    class Meta:
        model = InvitationsSent
        exclude = ('organizer', 'id',)
class PublicEventsListSerializer(serializers.ModelSerializer):
    """Read serializer for the public feed; hides the private flag too."""
    class Meta:
        model = Event
        exclude = ('organizer', 'id', 'private')
class RegisterOrUnregisterForEvent(serializers.Serializer):
    """Payload for toggling a user's attendance on a named event."""
    title = serializers.CharField()
    accepted = serializers.BooleanField()

    def update(self, instance, validated_data):
        """Persist the new 'accepted' flag on *instance* and return it."""
        new_state = validated_data.get('accepted', instance.accepted)
        instance.accepted = new_state
        instance.save()
        return instance
class LimitAttendeesSerializer(serializers.Serializer):
    """Payload for capping attendance on an event identified by id."""
    id = serializers.IntegerField()
    max_attendees = serializers.IntegerField()

    def update(self, instance, validated_data):
        """Write only the attendee cap; the id merely locates the event."""
        cap = validated_data.get('max_attendees', instance.max_attendees)
        instance.max_attendees = cap
        instance.save()
        return instance
class ListOfIndividuallyCreatedEventsSerializer(serializers.ModelSerializer):
    """Full dump of Event rows — every field, including organizer and id."""
    class Meta:
        model = Event
        fields = '__all__'
|
from numpy import*
from numpy.linalg import*
mat = array(eval(input("digite: ")))
vet = zeros(shape(mat)[0],dtype=int)
for i in range(size(vet)):
for j in range(7):
vet[i] += mat[i,j]
print(vet) |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated: make Master.exploration_title a nullable FK to nopims.Well."""
    dependencies = [
        ('nopims', '0008_auto_20140910_0558'),
    ]
    operations = [
        migrations.AlterField(
            model_name='master',
            name='exploration_title',
            field=models.ForeignKey(to='nopims.Well', null=True),
        ),
    ]
|
from mini.Lexer import *
from mini.Parser import *
from mini.Interpreter import *
################################################################################
## RUN
################################################################################
def run(a_file_name, a_command):
    """Tokenise, parse and interpret *a_command*; return (value, error).

    Exactly one element of the pair is non-None: on any stage failure the
    error is returned with a None value, otherwise the computed value.
    """
    ## Tokenise ################################################################
    tokens, lex_error = Lexer(a_file_name, a_command).make_tokens()
    if lex_error:
        return None, lex_error
    ## Generate Abstract-Syntax-Tree ###########################################
    ast = Parser(tokens).parse()
    if ast.error:
        return None, ast.error
    ## Run Program #############################################################
    outcome = Interpreter().visit(ast.node)
    if outcome.error:
        return None, outcome.error
    return outcome.result.value, None
#!/usr/bin/python
#
# Copyright (C) 2010 Google Inc.
""" Builds SQL strings.
Builds SQL strings to pass to FTClient query method.
"""
import re
__author__ = 'kbrisbin@google.com (Kathryn Hurley)'
class SQL:
    """ Helper class for building Fusion Tables SQL query strings.

    Pure string construction — no I/O. Rewritten to run on both Python 2
    and Python 3: the original used dict.keys()[0], dict.iteritems() and
    str.encode('string-escape'), all of which raise on Python 3.
    """

    @staticmethod
    def _escape(value):
        """ Escape backslashes and single quotes for a SQL string literal.

        Replaces the Python-2-only value.encode('string-escape') in update()
        and matches the escaping insert() already performed by hand.
        """
        return value.replace("\\", "\\\\").replace("'", "\\'")

    def showTables(self):
        """ Build a SHOW TABLES sql statement.

        Returns:
          the sql statement
        """
        return 'SHOW TABLES'

    def describeTable(self, table_id):
        """ Build a DESCRIBE <tableid> sql statement.

        Args:
          table_id: the ID of the table to describe
        Returns:
          the sql statement
        """
        return 'DESCRIBE {0}'.format(table_id)

    def createTable(self, table):
        """ Build a CREATE TABLE sql statement.

        Args:
          table: {table_name: {col_name: col_type, ...}}
        Returns:
          the sql statement
        """
        # list(...) so this works on Python 3, where keys() is a view.
        table_name = list(table.keys())[0]
        cols_and_datatypes = ",".join(["'{0}': {1}".format(name, datatype)
                                       for name, datatype in table[table_name].items()])
        return "CREATE TABLE '{0}' ({1})".format(table_name, cols_and_datatypes)

    def select(self, table_id, cols=None, condition=None):
        """ Build a SELECT sql statement.

        Args:
          table_id: the id of the table
          cols: a list of columns to return. If None, return all
          condition: optional WHERE clause body, e.g. "age > 30"
        Returns:
          the sql statement
        """
        stringCols = "*"
        if cols:
            # Quote each column name, but rowid/ROWID must stay unquoted.
            stringCols = ("'{0}'".format("','".join(cols))) \
                .replace("\'rowid\'", "rowid") \
                .replace("\'ROWID\'", "ROWID")
        if condition:
            return 'SELECT {0} FROM {1} WHERE {2}'.format(stringCols, table_id, condition)
        return 'SELECT {0} FROM {1}'.format(stringCols, table_id)

    def update(self, table_id, cols, values=None, row_id=None):
        """ Build an UPDATE sql statement.

        Either pass cols as a {column: value} dict (and leave values None),
        or as a list of columns with a parallel list of values.

        Returns:
          the sql statement, or None when row_id is missing or the
          cols/values lists differ in length
        """
        if row_id is None:
            return None
        if isinstance(cols, dict):
            parts = []
            for col, value in cols.items():  # .iteritems() broke on Python 3
                if isinstance(value, (int, float)):
                    parts.append('{0}={1}'.format(col, value))
                else:
                    parts.append("{0}='{1}'".format(col, self._escape(value)))
            return "UPDATE {0} SET {1} WHERE ROWID = '{2}'".format(
                table_id, ",".join(parts), row_id)
        if len(cols) != len(values):
            return None
        parts = []
        for col, value in zip(cols, values):
            if isinstance(value, (int, float)):
                parts.append("'{0}' = {1}".format(col, value))
            else:
                parts.append("'{0}' = '{1}'".format(col, self._escape(value)))
        return "UPDATE {0} SET {1} WHERE ROWID = '{2}'".format(
            table_id, ",".join(parts), row_id)

    def delete(self, table_id, row_id):
        """ Build DELETE sql statement.

        Args:
          table_id: the id of the table
          row_id: the id of the row to delete
        Returns:
          the sql statement
        """
        return "DELETE FROM {0} WHERE ROWID = '{1}'".format(table_id, row_id)

    def insert(self, table_id, values):
        """ Build an INSERT sql statement.

        Args:
          table_id: the id of the table
          values: {column: value}; strings are escaped, numbers inlined
        Returns:
          the sql statement
        """
        cols = list(values.keys())
        rendered = []
        for value in values.values():
            if isinstance(value, (int, float)):
                rendered.append('{0}'.format(value))
            else:
                rendered.append("'{0}'".format(self._escape(value)))
        return 'INSERT INTO {0} ({1}) VALUES ({2})' \
            .format(table_id,
                    ','.join(["'{0}'".format(col) for col in cols]),
                    ','.join(rendered))

    def dropTable(self, table_id):
        """ Build DROP TABLE sql statement.

        Args:
          table_id: the id of the table
        Returns:
          the sql statement
        """
        return "DROP TABLE {0}".format(table_id)

    def mergeTable(self, base_table_id, second_table_id, base_col, second_col, merge_table_name):
        """ Build MERGE TABLE (CREATE VIEW ... JOIN) sql statement.

        Args:
          base_table_id: the id of the first table
          second_table_id: the id of the second table
          base_col: the name of the merge col in the base table
          second_col: the name of the merge col in the second table
          merge_table_name: new name of the merged table
        Returns:
          the sql statement
        """
        query = []
        query.append("CREATE VIEW '{0}' AS (".format(merge_table_name))
        query.append("SELECT * ")
        # Use the two lines below instead if you want to specify cols to include
        # query.append("SELECT MyBaseTable.{0} AS myFirstColumn, ".format(base_col))
        # query.append("MySecondBaseTable.{0} AS mySecondColumn ".format(second_col))
        query.append("FROM {0} AS MyBaseTable ".format(base_table_id))
        query.append("LEFT OUTER JOIN {0} AS MySecondBaseTable ".format(second_table_id))
        query.append("ON MyBaseTable.{0} = MySecondBaseTable.{1})".format(base_col, second_col))
        return ''.join(query)
# Import-only module: no CLI behaviour is defined.
if __name__ == '__main__':
    pass
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
# Mobile price-range classification: compare KNN vs a decision tree on
# train.csv, then predict the price range of test.csv with the better model.
ds = pd.read_csv('train.csv')
#print(ds.head(10))
#print(ds.shape)
#print(ds.columns)
#print(ds.isnull().sum())
#print(ds.info())
#print(ds['price_range'].describe() , ds['price_range'].unique())
#ds = ds.drop(columns= 'blue')
#ds = ds.drop(columns= 'm_dep') # we have removed m_dep and blue columns because its a redundant feature which dosn't affect of price
#print(ds.columns)
#print( sns.boxplot(x= 'price_range' , y= 'talk_time' , data= ds) )
print('/////////////////////////////////////////////////////////////////////////////////////')
#Using KNN algorithm
# Features are every column except the target 'price_range'.
X = ds.drop('price_range' , axis= 1)
Y = ds['price_range']
# 75/25 split with a fixed seed so the reported accuracies are reproducible.
X_train , X_test , Y_train , Y_test = train_test_split(X , Y , test_size = 0.25 , random_state = 0)
# metric='minkowski' with p=2 is plain Euclidean distance.
KNN = KNeighborsClassifier(n_neighbors= 5, metric='minkowski', p=2)
KNN.fit(X_train , Y_train)
print(KNN.score(X_test , Y_test)) # note that accuracy of KNN is 93.33 %
print('/////////////////////////////////////////////////////////////////////////////////////////')
#Using Decision Tree Algorithm
decisionTree = DecisionTreeClassifier(random_state = 40)
decisionTree.fit(X_train,Y_train)
print(decisionTree.score(X_test , Y_test)) # note that accuracy of DTC is 80.33 % So KNN is better that DTC and note that DTC can used in classifier and regression
#So we will use KNN in prediction of mobile price
test_dataset = pd.read_csv('test.csv') #Getting the test data
test_dataset = test_dataset.drop('id',axis=1) #to match data set to trainig data or to be identical
print(test_dataset)
predicted_price = KNN.predict(test_dataset)
print(predicted_price)
|
import os
# Create the nested test directories; report (rather than crash) when the
# leaf already exists. mode applies to the leaf directory only.
TARGET = '/data/test/01/02'
try:
    os.makedirs(TARGET, mode=0o770)
except FileExistsError as err:
    print(err)
|
from multiprocessing import Process, Queue
import time
def producer(q_production):
    """Push an ever-increasing counter into *q_production* as fast as possible.

    Values are dropped (not blocked on) when the queue is full.
    """
    from queue import Full  # multiprocessing.Queue signals fullness with queue.Full
    num = 1
    while True:
        print("q_production")
        try:
            q_production.put_nowait(num)
        except Full:
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; only a full queue is expected here.
            pass
        num = num + 1
def consumer(q_consume):
    """Pop one value from *q_consume* per second, reporting 0 when empty."""
    from queue import Empty  # multiprocessing.Queue signals emptiness with queue.Empty
    while True:
        try:
            num = q_consume.get_nowait()
        except Empty:
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; only an empty queue is expected.
            num = 0
        print("q_consume: %d" % num)
        time.sleep(1)
if __name__ == "__main__":
q = Queue(10)
test1 = Process(target = producer, args = (q, ))
test1.start()
test2 = Process(target = consumer, args = (q, ))
test2.start() |
import paho.mqtt.client as paho
import socket
import ssl
import json
import base64
from imutils import paths
from pyimagesearch import config
from time import sleep
connflag = False
def on_connect(client, userdata, flags, rc):  # func for making connection
    """paho connect callback: flip the module-level connflag once the broker ACKs."""
    global connflag
    connflag = True
    print("Connected to AWS")
    print("Connection returned result: " + str(rc))
def on_message(client, userdata, msg):  # Func for Sending msg
    """paho message callback: log the topic and raw payload of *msg*."""
    line = msg.topic + " " + str(msg.payload)
    print(line)
# --- MQTT client wired to AWS IoT Core over mutual TLS -----------------------
mqttc = paho.Client() # mqttc object
mqttc.on_connect = on_connect # assign on_connect func
mqttc.on_message = on_message
mqtt_topic = "/firedetection/image"
# NOTE(review): endpoint, thing names and certificate paths are hard-coded;
# consider moving them to config/env for other deployments.
awshost = "aqx8tvt4tgo70-ats.iot.eu-west-2.amazonaws.com" # Endpoint
awsport = 8883 # Port no.
clientId = "ObjectPWA" # Thing_Name
thingName = "ObjectPWA" # Thing_Name
caPath = "./certificates/AmazonRootCA1.pem" # Root_CA_Certificate_Name
certPath = "./certificates/a3199d5ead-certificate.pem.crt" # <Thing_Name>.cert.pem
keyPath = "./certificates/a3199d5ead-private.pem.key" # <Thing_Name>.private.key
# TLS 1.2 with client-certificate auth, as AWS IoT requires.
mqttc.tls_set(caPath, certfile=certPath, keyfile=keyPath, cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_TLSv1_2, ciphers=None) # pass parameters
mqttc.connect(awshost, awsport, keepalive=60) # connect to aws server # connect to aws server
mqttc.loop_start() # Start the loop
# Seconds between publishes, running message counter, and the detected-fire
# images gathered from the configured output directory.
INTERVAL=10
count = 1
imagePaths = list(paths.list_images(config.OUTPUT_IMAGE_PATH))
def getBinaryImage(img):
    """Read the file at path *img* and return its bytes as a mutable bytearray.

    BUG FIX: the original opened the file without ever closing it, leaking a
    file handle per published image; `with` closes it deterministically.
    """
    with open(img, 'rb') as f:
        return bytearray(f.read())
# Publish every image forever, one every INTERVAL seconds. Until on_connect
# fires, images are silently skipped (the sleep still runs).
while True:
    for img in imagePaths:
        payloadmsg = getBinaryImage(img)
        if connflag == True:
            mqttc.publish(mqtt_topic, payloadmsg , qos=1) # topic: temperature # Publishing Temperature values
            print(f"msg {count} sent") # Print sent temperature msg on console
            count += 1
        sleep(INTERVAL)
|
from django.db import models
from django.template.loader import render_to_string
class Room(models.Model):
    """A location chars occupy, linked to other rooms via Exit rows."""

    def __unicode__(self):
        return "%s" % (self.name,)

    name = models.CharField(max_length=80)
    description = models.TextField()

    def send(self, msg):
        """Deliver *msg* to every char currently in this room."""
        for char in self.char_set.all():
            char.send(msg)

    def render(self, tname, ctx=None, skip=None):
        """Render template *tname* to every char in the room not in *skip*.

        BUG FIX: ctx/skip defaulted to shared mutable {} / [] objects.
        """
        c = {'room': self}
        c.update(ctx or {})
        skip = skip or []
        data = render_to_string(tname, c).strip()
        for ch in self.char_set.all():
            if ch not in skip:
                ch._raw_send(data)

    def exit(self, direction):
        """Return the destination Room for *direction*, or None if no such exit."""
        try:
            return self.exits.get(keyword__exact=direction).dst
        except Exit.DoesNotExist:
            return None
class Exit(models.Model):
    """Directed link between two rooms, selected by a keyword ('north', ...)."""
    def __unicode__(self):
        return "%s --%s--> %s" % (self.src, self.keyword, self.dst)
    keyword = models.CharField(max_length=32)
    # src: room this exit leaves from; dst: room it leads to.
    src = models.ForeignKey(Room, related_name='exits')
    dst = models.ForeignKey(Room, related_name='entries')
class Char(models.Model):
    """A player or NPC character standing in a Room."""

    def __unicode__(self):
        return "%s" % (self.nick,)

    modified = models.DateTimeField(auto_now=True)
    nick = models.CharField(max_length=32)
    room = models.ForeignKey(Room)
    reply = models.ForeignKey("self", null=True, blank=True)
    is_npc = models.BooleanField(default=False)
    drunk = models.IntegerField(default=0)
    description = models.TextField()

    def render(self, tname, ctx=None):
        """Render template *tname* for this char only.

        BUG FIX: ctx defaulted to a shared mutable {}.
        """
        c = {'actor': self, 'room': self.room}
        c.update(ctx or {})
        self._raw_send(render_to_string(tname, c).strip())

    def send(self, msg):
        """Send plain text *msg* through the default template."""
        self.render('_default.txt', {'text': msg})

    def send_to_others(self, msg):
        """Send *msg* to everyone else in the room (render once, send many)."""
        data = render_to_string('_default.txt', {'actor': self, 'text': msg}).strip()
        for ch in self.others_in_room():
            ch._raw_send(data)

    def _raw_send(self, raw_msg):
        # Fan pre-rendered text out to every live transport for this char.
        for conn in self.connection_set.all():
            conn.send(raw_msg)

    def render_to_others(self, tname, ctx=None):
        """Render *tname* once and send it to everyone else in the room.

        BUG FIX: ctx defaulted to a shared mutable {}.
        """
        c = {'actor': self, 'room': self.room}
        c.update(ctx or {})
        data = render_to_string(tname, c).strip()
        for ch in self.others_in_room():
            ch._raw_send(data)

    def others_in_room(self):
        """Yield every char sharing this char's room, excluding self."""
        for ch in self.room.char_set.all():
            if ch != self:
                yield ch

    @classmethod
    def online(cls):
        """Chars with at least one connection that has a reply address."""
        return cls.objects.filter(connection__reply_to__isnull=False).distinct()

    @classmethod
    def broadcast(cls, tname, ctx, skip=None):
        """Send rendered *tname* to every online char not listed in *skip*.

        BUG FIX: skip defaulted to a shared mutable [].
        """
        skip = skip or []
        data = render_to_string(tname, ctx).strip()
        for ch in cls.online():
            if ch not in skip:
                ch._raw_send(data)

    def connected(self):
        """NPCs count as always connected; players need a live connection."""
        if self.is_npc:
            return True
        return self.connection_set.count() > 0
class Npc(models.Model):
    """Script-driven character; codepath names the module that controls it."""
    def __unicode__(self):
        return "[%s]" % (self.char,)
    codepath = models.CharField(max_length=128)
    char = models.OneToOneField(Char)
class Connection(models.Model):
    """A transport endpoint (reply_to address) attached to a Char."""

    def __unicode__(self):
        return "%s --> %s" % (self.reply_to, self.char)

    modified = models.DateTimeField(auto_now=True)
    reply_to = models.CharField(max_length=32)
    char = models.ForeignKey(Char, null=True)
    state = models.CharField(max_length=16, default="connected")

    def send(self, msg):
        """Queue *msg* for delivery to this connection's reply address."""
        _outbound(self.reply_to, msg)

    def render(self, tname, ctx=None):
        """Render template *tname* and queue the result to this connection.

        BUG FIX: ctx defaulted to a shared mutable {}.
        """
        c = {'connection': self}
        c.update(ctx or {})
        self.send(render_to_string(tname, c).strip())
OUTBOUND={}
def _outbound(reply_to, message):
OUTBOUND.setdefault(reply_to, []).append(message)
|
#!/usr/bin/env python3
import hashlib
import os
import logging
import json
import uuid
import redis
import requests
import subprocess
# SOS object-store endpoint and HTTP status shorthands.
SOS_URL = 'http://sos:8280'
SOS_PORT = '8280'
STATUS_OK = requests.codes['ok']
STATUS_BAD_REQUEST = requests.codes['bad_request']
STATUS_NOT_FOUND = requests.codes['not_found']
LOG = logging
# Redis host[:port] from the environment; the queue carries thumbnail jobs.
REDIS_QUEUE_LOCATION = os.getenv('REDIS_QUEUE', 'localhost')
QUEUE_NAME = 'queue:thumbnail'
# Random per-process name so log lines from parallel workers are separable.
INSTANCE_NAME = uuid.uuid4().hex
LOG.basicConfig(
    level=LOG.DEBUG,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
def watch_queue(redis_conn, queue_name, callback_func, timeout=30):
    """Consume JSON tasks from *queue_name* until a b'DIE' poison pill arrives.

    Each successfully decoded task dict is handed to *callback_func*;
    undecodable payloads are logged and skipped.
    """
    while True:
        # Blocking (left) pop; returns None on timeout with nothing queued.
        popped = redis_conn.blpop([queue_name], timeout=timeout)
        if not popped:
            continue
        _key, raw_task = popped
        if raw_task == b'DIE':
            break  # poison pill: shut this worker down
        try:
            task = json.loads(raw_task.decode("utf-8"))
        except Exception:
            LOG.exception('json.loads failed')
            continue
        callback_func(task)
# def execute_factor(log, task):
# number = task.get('number')
# if number:
# number = int(number)
# log.info('Factoring %d', number)
# factors = [trial for trial in range(1, number + 1) if number % trial == 0]
# log.info('Done, factors = %s', factors)
# else:
# log.info('No number given.')
def execute_thumbnailing(log, task):
    """Worker callback: download a video from SOS, cut a GIF, upload it back.

    task keys used: 'bucket', 'object' (video name), 'gif_filename', 'start'.
    Failures are logged and swallowed — the queue item is not retried.
    """
    # step 1: Download video from SOS
    log.info("in step 1")
    log.info(task)
    bucket = task.get('bucket')
    obj = task.get('object')
    filepath = "/"+bucket+"/"+obj
    # headers = task.get('headers')
    try:
        headers = {'Range': 'bytes=0-'} # for now TODO change it
        log.info(SOS_URL + filepath)
        resp = requests.get(SOS_URL + filepath, headers=headers, stream=True)
        if resp.status_code == STATUS_OK:
            output = obj
            # Stream the video to disk in 2 KiB chunks.
            with open('./' + output, 'wb') as handle:
                for block in resp.iter_content(2048):
                    handle.write(block)
            # step 2: Generate gif from video
            log.info("in step 2")
            gif_filename = task.get('gif_filename')
            start_time = task.get('start')
            try:
                subprocess.call(["chmod", "+x", "videoToGif.sh"])
                subprocess.call(["./videoToGif.sh", "./"+output, gif_filename, str(start_time)])
            except subprocess.SubprocessError as e:
                log.info(str(e)+": Failed to run gif generator")
                return
                # return execute_thumbnailing(log, task)
            # step 3: Upload gif to SOS (multipart-style: create, put part 1,
            # then complete; on a failed create, delete and re-create).
            log.info("in step 3")
            resp = requests.post(SOS_URL + "/"+bucket+"/"+gif_filename+"?create")
            if resp.status_code != STATUS_OK:
                requests.delete(SOS_URL + "/"+bucket+"/"+gif_filename+"?delete")
                requests.post(SOS_URL + "/" + bucket + "/" + gif_filename + "?create")
            data = open('./'+gif_filename, 'rb').read()
            resp = requests.put(url=SOS_URL + "/"+bucket+"/"+gif_filename+'?partNumber=1', data=data,
                                headers={'Content-Length': str(len(data)), 'Content-MD5': hashlib.md5(data).hexdigest()})
            if resp.status_code == STATUS_OK:
                requests.post(SOS_URL + "/" + bucket + "/" + gif_filename + "?complete")
                # ("Conpleted" is a typo in the log text; left as-is here.)
                log.info("====Conpleted uploading "+gif_filename+"====")
    except (ConnectionError, TimeoutError) as e:
        log.info(str(e)+": Failed to connect to SOS")
def main():
    """Entry point: connect to Redis and consume thumbnail tasks forever."""
    LOG.info('Starting a worker...')
    LOG.info('Unique name: %s', INSTANCE_NAME)
    host, *port_info = REDIS_QUEUE_LOCATION.split(':')
    named_logging = LOG.getLogger(name=INSTANCE_NAME)
    named_logging.info('Trying to connect to %s [%s]', host, REDIS_QUEUE_LOCATION)
    # BUG FIX: the original called redis.Redis(host=host, *port), which
    # raises TypeError ("multiple values for argument 'host'") whenever
    # REDIS_QUEUE contains a port; pass the port as a keyword instead.
    if port_info:
        redis_conn = redis.Redis(host=host, port=int(port_info[0]))
    else:
        redis_conn = redis.Redis(host=host)
    watch_queue(
        redis_conn,
        QUEUE_NAME,
        lambda task_descr: execute_thumbnailing(named_logging, task_descr))


if __name__ == '__main__':
    main()
|
# Shopping simulator: read a budget, then product-name / price pairs until
# "Stop" or the budget is exceeded. Every third product is half price.
budget = float(input())
bought = 0
spent = 0
while True:
    item = input()
    if item == "Stop":
        break
    bought += 1
    cost = float(input())
    if bought % 3 == 0:
        cost /= 2  # every third product is discounted 50%
    spent += cost
    if spent > budget:
        break
if spent <= budget:
    print(f'You bought {bought} products for {spent:.2f} leva.')
else:
    print(f'You don\'t have enough money!\nYou need {abs(budget - spent):.2f} leva!')
|
import pymongo
from pymongo import Connection
#new authenticate in progress below:
def floop(x, u, p):
    """Return True when iterable *x* holds a record whose 'user' equals *u*
    and whose 'pw' equals *p*; otherwise False."""
    for record in x:
        if record['user'] == u and record['pw'] == p:
            return True
    return False
def mfloop(x, u):
    """Return True when iterable *x* holds a record with 'user' == *u*."""
    return any(record['user'] == u for record in x)
def newAuth(uname, pword):
    """Check *uname*/*pword* against the userdb.testbase collection."""
    if len(uname) == 0 or len(pword) == 0:
        return False
    db = Connection()["userdb"]
    return floop(db.testbase.find(), uname, pword)

def authenticate(uname, pword):
    """Public wrapper kept for existing callers; delegates to newAuth."""
    return newAuth(uname, pword)
# method for testing purposes
# NOTE(review): two functions named newUser are defined back to back; this
# first, test-only stub is immediately shadowed by the 2-argument version
# below, so callers always get the database-backed variant.
def newUser(uname):
    """Test stub: a name is available unless it is one of four fixtures."""
    return uname not in ['Mark', 'Sue', 'Sally', 'Sam']

def newUser(uname, pword):
    """Create a user record; False when a field is empty or the name is taken."""
    if len(uname) == 0 or len(pword) == 0:
        return False
    conn = Connection()
    db = conn["userdb"]
    if mfloop(db.testbase.find(), uname):
        return False  # name already taken
    db.testbase.insert({'user': uname, 'pw': pword, 'artist': False,
                        'links': [], 'likes': [], 'space': 0})
    return True
def decrement_space(artist):
    """Decrease *artist*'s remaining space by one, persist it, return the new value.

    BUG FIX: the original mutated the dict yielded by the cursor, which is a
    local copy MongoDB never sees; write back with an update, as add_like does.
    Returns None when the user does not exist.
    """
    conn = Connection()
    db = conn["userdb"]
    record = db.testbase.find_one({'user': artist})
    if record is None:
        return None
    new_space = record['space'] - 1
    db.testbase.update({"user": artist}, {"$set": {"space": new_space}})
    return new_space

def set_space(artist, num):
    """Set *artist*'s space to *num*, persist it, and return *num*.

    BUG FIX: same unpersisted-cursor-mutation bug as decrement_space.
    """
    conn = Connection()
    db = conn["userdb"]
    record = db.testbase.find_one({'user': artist})
    if record is None:
        return None
    db.testbase.update({"user": artist}, {"$set": {"space": num}})
    return num

def get_space(artist):
    """Return *artist*'s space, or None when the user does not exist.

    find_one replaces the original full-collection scan.
    """
    conn = Connection()
    db = conn["userdb"]
    record = db.testbase.find_one({'user': artist})
    if record is not None:
        return record['space']
def add_link(artist, link):
    """Append *link* to *artist*'s links and persist; True on success.

    BUG FIX: the original appended to the cursor's local copy of the record,
    which MongoDB never sees; use $push so the change is stored.
    """
    conn = Connection()
    db = conn["userdb"]
    if db.testbase.find_one({'user': artist}) is None:
        return False
    db.testbase.update({"user": artist}, {"$push": {"links": link}})
    return True

def make_artist(user):
    """Mark *user* as an artist.

    BUG FIX: the original flipped the flag on the cursor's local dict only;
    persist it with an update.
    """
    conn = Connection()
    db = conn["userdb"]
    db.testbase.update({"user": user}, {"$set": {"artist": True}})

def get_artists():
    """Return every record flagged as an artist (query replaces a full scan)."""
    conn = Connection()
    db = conn["userdb"]
    return list(db.testbase.find({'artist': True}))

def is_artist(artist):
    """Return *artist*'s artist flag, or None when the user does not exist."""
    conn = Connection()
    db = conn["userdb"]
    record = db.testbase.find_one({'user': artist})
    if record is not None:
        return record['artist']
def add_like(user, link):
    """Append *link* to *user*'s likes and persist it (no-op for unknown users)."""
    conn = Connection()
    db = conn["userdb"]
    record = db.testbase.find_one({'user': user})
    if record is None:
        return
    # Treat a missing/None likes field as an empty list.
    likes = record['likes'] if record['likes'] is not None else []
    likes.append(link)
    db.testbase.update({"user": user}, {"$set": {"likes": likes}})

def get_likes(user):
    """Return *user*'s likes list, or None when the user does not exist."""
    conn = Connection()
    db = conn["userdb"]
    record = db.testbase.find_one({'user': user})
    if record is not None:
        return record['likes']
    return None

def find_links(artist):
    """Return *artist*'s links list, or None when the user does not exist."""
    conn = Connection()
    db = conn["userdb"]
    record = db.testbase.find_one({'user': artist})
    if record is not None:
        return record['links']
    return None
#print newUser("moo","oinker")
#print authenticate("moo","oinker")
|
import numpy as np
from scipy.io import loadmat
import scipy.optimize as opt
import matplotlib.pyplot as plt
from PIL import Image
#loading DATA and pre-trained weights
# BUG FIX: use raw strings for the Windows paths — in a normal string any
# directory starting with 't', 'n', etc. would be mangled by escape
# processing (these particular paths happened to survive by luck).
data = loadmat(r'D:\Desktop\MACHINE LEARNING\Models\machine-learning-ex3\ex3\ex3data1.mat')
X = data['X']
y = data['y']
weights = loadmat(r'D:\Desktop\MACHINE LEARNING\Models\machine-learning-ex3\ex3\weights_1000its.mat')
Theta1 = weights['Theta1']
Theta2 = weights['Theta2']
#useful function
def sigmoid(z):
    """Elementwise logistic function 1 / (1 + e^-z)."""
    return 1.0 / (1.0 + np.exp(-z))

def predict(Theta1, Theta2, X):
    """Feed-forward a two-layer network and return 1-based class labels."""
    # Prepend a bias column before each weighted layer.
    bias = np.ones((len(X), 1))
    hidden = sigmoid(np.concatenate((bias, X), axis=1).dot(Theta1.T))
    hidden_bias = np.ones((len(hidden), 1))
    output = sigmoid(np.concatenate((hidden_bias, hidden), axis=1).dot(Theta2.T))
    # argmax over classes, shifted to MATLAB-style 1-based labels.
    return np.array(output.argmax(axis=1) + 1)
# Predict labels for the full training set with the pre-trained weights.
prediction=predict(Theta1, Theta2, X)
def acurracy(prediction, y):
    """Print and return the percentage of labels in *y* matched by *prediction*.

    (Name kept as-is — sic — because it is part of the public interface.)
    """
    hits = sum(1 for i in range(len(y)) if y[i][0] == prediction[i])
    acc = (hits / len(y) * 100)
    print("Acurracy of the model: {}%".format(acc))
    return acc
# Report the training-set accuracy of the loaded network.
acurracy(prediction, y)
|
import docker
import os
import shutil
import uuid
import time
from docker.errors import *
# Start up docker client.
client = docker.DockerClient()
# Image uploaded to Docker; contains environment to run code for Java, Python, and C++.
IMAGE_NAME = 'dannyhp/coderpad_env'
# Code file created in temporary build directory.
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
TEMP_BUILD_DIR = '%s/tmp' % CURRENT_DIR
# Timeout for docker: `timeout -s 2 5` sends SIGINT after 5 s, killing
# runaway submissions.
TIMEOUT_SETTING = 'timeout -s 2 5'
# Per-language source names, binary names, and build/run commands, keyed by
# the editor's language identifiers.
SOURCE_FILE_NAMES = {
    "java" : "Solution.java",
    "python" : "solution.py",
    "c_cpp" : "solution.cpp"
}
BINARY_NAMES = {
    "java" : "Solution",
    "python" : "solution.py",
    "c_cpp" : "a.out"
}
BUILD_COMMANDS = {
    "java" : "javac",
    "python" : "python3 -u",
    "c_cpp" : "g++ -o a.out"
}
EXECUTE_COMMANDS = {
    "java" : "java",
    "python" : "python3",
    "c_cpp" : "./"
}
# Run this command separately to load the image if not initially loaded onto system.
def load_image():
    """Ensure IMAGE_NAME exists locally, pulling from Docker Hub if missing.

    NOTE: in the docker SDK ImageNotFound subclasses APIError, so the order
    of these except clauses matters — do not reorder them.
    """
    try:
        client.images.get(IMAGE_NAME)
    except ImageNotFound:
        print('Image not found locally, loading from docker hub...')
        client.images.pull(IMAGE_NAME)
    except APIError:
        # Image missing AND the hub is unreachable: nothing more we can do.
        print('Image not found locally, docker hub is not accessible.')
        return
    print('Image: [%s] loaded.' % IMAGE_NAME)
# Builds docker container and runs the code.
def build_and_execute(code, language):
    """Compile then run *code* for *language* inside the sandbox image.

    Returns {'build': True or stderr, 'run': stdout or stderr, 'error': None}.
    The temporary host directory is always removed — the original duplicated
    the shutil.rmtree cleanup in three places; try/finally does it once.
    """
    result = {'build': None, 'run': None, 'error': None}
    parent = uuid.uuid4()
    host_dir = '%s/%s' % (TEMP_BUILD_DIR, parent)
    guest_dir = '/test/%s' % (parent,)
    make_dir(host_dir)
    with open('%s/%s' % (host_dir, SOURCE_FILE_NAMES[language]), 'w') as source_file:
        source_file.write(code)
    try:
        # --- build step ------------------------------------------------------
        try:
            client.containers.run(
                image=IMAGE_NAME,
                command='%s %s' % (BUILD_COMMANDS[language], SOURCE_FILE_NAMES[language]),
                volumes={host_dir: {'bind': guest_dir, 'mode': 'rw'}},
                working_dir=guest_dir
            )
            print('Source built!')
            result['build'] = True
        except ContainerError as e:
            print('Build failed!')
            result['build'] = e.stderr
            return result
        # --- run step, under `timeout` to kill runaway code ------------------
        run_cmd = '%s %s %s' % (TIMEOUT_SETTING, EXECUTE_COMMANDS[language], BINARY_NAMES[language])
        print(run_cmd)
        if EXECUTE_COMMANDS[language] == './':
            # C/C++ binaries are invoked as ./a.out — no space after ./
            run_cmd = '%s %s%s' % (TIMEOUT_SETTING, EXECUTE_COMMANDS[language], BINARY_NAMES[language])
        try:
            log = client.containers.run(
                image=IMAGE_NAME,
                command=run_cmd,
                volumes={host_dir: {'bind': guest_dir, 'mode': 'rw'}},
                working_dir=guest_dir
            )
            print('Execution succeeded!')
            result['run'] = log
        except ContainerError as e:
            print('Execution failed!')
            result['run'] = e.stderr
        return result
    finally:
        shutil.rmtree(host_dir)
def make_dir(directory):
    """Create *directory*, tolerating only the already-exists case.

    BUG FIX: the original caught all OSError, so a permission error or bad
    path printed a misleading "exists" message and was silently swallowed;
    now only FileExistsError is benign and anything else propagates.
    """
    try:
        os.mkdir(directory)
        print('Temporary build directory [%s] created.' % directory)
    except FileExistsError:
        print('Temporary build directory [%s] exists.' % directory)
|
import torch
from torch import nn, tensor, bool
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.utils.data import DataLoader
from torchvision.transforms import ToTensor
from torch.nn.utils import prune
from torch.nn import Conv2d, Conv1d
class VariationalDropout(object):
def __init__(self, modules, normal_stddev=1., initial_logalpha=-12., logalpha_threshold=3., **kwargs):
"""
We can treat VariationalDropout as hooks container
??: How it will be saved/loaded with torch.save/torch.load
It should be all OK, because function/object - there is no big difference
The other question - how pickle treat the same object <self> while loading.
Will it create multiple VariationalDropout objects? - it's not good at all.
Parameters
==========
modules: list of (module, <dict with config>)
Usage
=====
vd = VariationalDropout([(model.linear, None)]) # all specified modules support vd
"""
self.modules = modules
self.normal_stddev = normal_stddev
self.initial_logalpha = initial_logalpha
self.logalpha_threshold = logalpha_threshold
self._modules_dict = None
self._forward_hooks = list()
self._forward_pre_hooks = list()
self._build()
def _build(self):
"""
Add prehook and hook for all modules
"""
self._modules_dict = dict()
for _m, _cfg in self.modules:
_cfg = _cfg if _cfg is not None else dict()
self._modules_dict[_m] = _cfg
_w_name = _cfg.get("weight", "weight")
_w = getattr(_m, _w_name)
delattr(_m, _w_name)
_m.register_parameter(_w_name + "_orig", _w)
_la = Parameter(torch.full(_w.shape, _cfg.get("init_logalpha", -12.)))
_m.register_parameter(_w_name + "_logalpha", _la)
_m.register_buffer(_w_name + "_mask", torch.zeros(*_w.shape, dtype=torch.bool))
self._forward_pre_hooks.append(_m.register_forward_pre_hook(self.prehook))
self._forward_hooks.append(_m.register_forward_hook(self.hook))
def _base_prehook(self, module, _inputs):
_cfg = self._modules_dict[module]
_w_name = _cfg.get("weight", "weight")
# calculate masked weight
_mask = getattr(module, _w_name + "_mask")
_la = getattr(module, _w_name + "_logalpha")
with torch.no_grad():
_mask[:] = _la < self.logalpha_threshold
_weight = getattr(module, _w_name + "_orig") * _mask
setattr(module, _w_name, _weight)
def _base_hook(self, module, inputs, outputs):
pass
def _prehook_linear(self, module, inputs):
return self._base_prehook(module, inputs)
def _hook_linear(self, module, inputs, outputs):
_inp = inputs[0]
_w = module.weight
_la = module.weight_logalpha
_vd_add = torch.sqrt((_inp*_inp)@(torch.exp(_la)*_w*_w).t() + 1.0e-14)
_rand = torch.normal(0., self.normal_stddev, _vd_add.shape, device=_vd_add.device)
_vd_add = _rand*_vd_add
return outputs + _vd_add
def _prehook_conv2d(self, module, inputs):
return self._base_prehook(module, inputs)
def _hook_conv2d(self, module, inputs, outputs):
_inp = inputs[0]
_w = module.weight
_la = module.weight_logalpha
# convolve _inp*_inp with torch.exp(_la)*_w*_w, replace bias with None
_inp = _inp*_inp
_w = torch.exp(_la)*_w*_w
if module.padding_mode != 'zeros':
_vd_add = F.conv2d(F.pad(_inp, module._padding_repeated_twice, mode=module.padding_mode),
_w, None, module.stride,
torch.utils._pair(0), module.dilation, module.groups)
else:
_vd_add = F.conv2d(_inp, _w, None, module.stride,
module.padding, module.dilation, module.groups)
_vd_add = torch.sqrt(_vd_add + 1.0e-14)
_rand = torch.normal(0., self.normal_stddev, _vd_add.shape, device=_vd_add.device)
_vd_add = _rand * _vd_add
return outputs + _vd_add
def get_dkl(self, vd_lambda):
k1, k2, k3 = 0.63576, 1.8732, 1.48695
C = -k1
_res = 0.
for _m, _cfg in self._modules_dict.items():
_la = getattr(_m, _cfg.get("weight", "weight") + "_logalpha")
mdkl = k1 * torch.sigmoid(k2 + k3 * _la) - 0.5 * torch.log1p(torch.exp(-_la)) + C
_res += -torch.sum(mdkl)
return vd_lambda*_res
def remove(self):
    """Detach every registered forward and pre-forward hook handle."""
    for handle in (*self._forward_pre_hooks, *self._forward_hooks):
        handle.remove()
def get_supported_layers(self):
    """Return layer-type names for which both a pre-hook and a hook
    implementation exist on this object."""
    pre = {name[len("_prehook_"):] for name in dir(self)
           if name.startswith("_prehook_")}
    post = {name[len("_hook_"):] for name in dir(self)
            if name.startswith("_hook_")}
    return list(pre & post)
def prehook(self, module, input):
    """Dispatch to the type-specific pre-hook (e.g. _prehook_linear)."""
    handler = getattr(self, "_prehook_" + type(module).__name__.lower())
    return handler(module, input)

def hook(self, module, input, output):
    """Dispatch to the type-specific forward hook (e.g. _hook_linear)."""
    handler = getattr(self, "_hook_" + type(module).__name__.lower())
    return handler(module, input, output)
class Test(nn.Module):
    """Tiny two-layer MLP used for exercising the hook machinery."""

    def __init__(self):
        super(Test, self).__init__()
        self.dense1 = nn.Linear(2, 4)
        self.dense2 = nn.Linear(4, 2)

    def forward(self, inputs):
        hidden = self.dense1(inputs)
        return self.dense2(hidden)
"""
- A little about torch:
* There are tensors (torch.Tensor)
* There are parameters (torch.nn.parameter.Parameter) - instance of torch.Tensor
- Forward-backward
*
- Parameter names stored aside parameters:
* list(module.parameters())
* list(module.named_parameters())
- Recursion inside module:
* list(module.named_children())
- Hooks for Tensors or Modules
* Module
* backward_hook: (module: nn.Module, grad_input: Tensor, grad_output: Tensor) -> Tensor or None
*
* forward_hook: (module: nn.Module, input: Tensor, output: Tensor) -> Tensor (modif. output) or None
* forward_pre_hook: (module: nn.Module, input: Tensor) -> Tensor (modif. input) or None
* Parameter
* hook: (grad: Tensor) -> Tensor or None
- Manage hooks:
* How to remove hook:
hook = module.register_forward_hook(...)
hook.remove()
- Hooks:
def hook(self, input): # input hook
pass
def hook(self, input, output): # output hook
pass
module.register_forward_hook(hook)
def back_hook(self, grad_input, grad_output):
pass
module.register_backward_hook(back_hook)
- Pruning and hooks:
""" |
class A():
    """Demo of Python private-name mangling (__name -> _ClassName__name)."""
    def __init__(self):
        # Compiled as self._A__private(), so a subclass's __private can
        # never override this call; public() dispatches normally.
        self.__private()
        self.public()
    def __private(self):
        print('__private() method of A')
    def public(self):
        print('public() method of A')

class B(A):
    # B inherits A.__init__: it prints A's __private (mangled name) but
    # B's public (normal dynamic dispatch).
    def __private(self):
        print('__private() method of B')
    def public(self):
        print('public() method of B')

b = B()
print()
print('\n'.join(dir(b)))  # inspect the members of instance b
print()
class C(A):
    # C defines its own __init__, so self.__private() mangles to
    # _C__private and C's version runs.
    def __init__(self):
        self.__private()
        self.public()
    def __private(self):
        print('__private() method of C')
    def public(self):
        print('public() method of C')

c = C()  # prints C's __private and C's public
print()
print('\n'.join(dir(c)))
|
__author__ = 'haywire'
from yowsup.layers.interface import YowInterfaceLayer, ProtocolEntityCallback
from yowsup.layers.protocol_messages.protocolentities import TextMessageProtocolEntity
from yowsup.layers.protocol_receipts.protocolentities import OutgoingReceiptProtocolEntity
from yowsup.layers.protocol_acks.protocolentities import OutgoingAckProtocolEntity
from users import User
# In-memory per-phone-number session store shared by all EchoLayer callbacks.
usersList = {}

class EchoLayer(YowInterfaceLayer):
    """Yowsup interface layer answering incoming WhatsApp messages (Python 2)."""

    @ProtocolEntityCallback("message")
    def onMessage(self, messageProtocolEntity):
        #send receipt otherwise we keep receiving the same message over and over
        phone = (messageProtocolEntity.getFrom().split("@"))[0]  # strip JID domain
        usrMsg = messageProtocolEntity.getBody()
        print "INCOMING: ", phone ,": ", usrMsg
        # get this user's details
        usr = self.getUser(phone)
        # make sure basic questions have been answered.
        response = usr.basicQuestions.askQuestions(usrMsg)
        if response['basicDone'] != True:
            self.respond(messageProtocolEntity, response['botMsg'])
            return
        else:
            print "basic is done."
            self.respond(messageProtocolEntity, "basic is done")
        print "all done."
        #usr.basicQuestions.askPickupQuestions(usrMsg)

    @ProtocolEntityCallback("receipt")
    def onReceipt(self, entity):
        # Ack receipts so the server stops re-delivering them.
        ack = OutgoingAckProtocolEntity(entity.getId(), "receipt", entity.getType(), entity.getFrom())
        self.toLower(ack)

    def getUser(self, phone):
        # Lazily create one User session per phone number.
        if phone in usersList:
            usr = usersList[phone]
        else :
            usr = usersList[phone] = User()
        return usr

    def respond(self, messageProtocolEntity, msg):
        # Mark the inbound message read, then send the reply text.
        receipt = OutgoingReceiptProtocolEntity(messageProtocolEntity.getId(), messageProtocolEntity.getFrom(), 'read', messageProtocolEntity.getParticipant())
        outgoingMessageProtocolEntity = TextMessageProtocolEntity(
            msg,
            to = messageProtocolEntity.getFrom())
        self.toLower(receipt)
        self.toLower(outgoingMessageProtocolEntity)
|
from django.urls import path
from api import views
# URL routes for the API app.  NOTE(review): the 'order-List' route name
# breaks the lowercase convention used elsewhere, but renaming it would
# break existing reverse()/{% url %} callers, so it is left as-is.
urlpatterns = [
    path('login/', views.MyTokenObtainPairView.as_view(), name='login'),
    path('signup/', views.SignUp.as_view(), name='signup'),
    # Product lookup scoped to a store, addressed by barcode.
    path('<str:store_uuid>/products/<str:barcode>/',
         views.ProductView.as_view(), name='product-detail'),
    path('stores/', views.StoreListView.as_view(), name='store-list'),
    path('order/', views.OrderView.as_view(), name='order-create'),
    path('orders/', views.OrderListView.as_view(), name='order-List'),
    # Checkout flow: payment link, gateway callback, thank-you page.
    path('checkout/<str:uuid>/get_payment_url/', views.CheckoutLinkView.as_view(), name='get-link'),
    path('checkout/complete/', views.CheckoutCompleteView.as_view(), name='checkout-complete'),
    path('checkout/thankyou/', views.CheckoutThankyouView.as_view(), name='checkout-thankyou'),
]
|
# Example 1: asynchronous requests with larger thread pool
import asyncio
import concurrent.futures
import requests
import random
import time
import sys
import datetime
from numpy import random
counter = 0             # total completed requests across all iterations
generatedRequests = []  # per-iteration Poisson draw of request counts
# NOTE(review): `from numpy import random` above shadows the stdlib `random`
# imported earlier, so this seeds numpy's legacy global RNG.
random.seed(0)
maxWorkers = int(sys.argv[1])         # thread-pool size
lambdaValue = int(sys.argv[2])        # Poisson rate: requests per iteration
runningDuration = float(sys.argv[3])  # experiment length in seconds
fileName = str(sys.argv[4])           # output log file (opened in append mode)
#numberOfRequests = int(sys.argv[1])
#runningDuration = float(sys.argv[2])
#fileName = str(sys.argv[3])
out = open(fileName, "a")
# One candidate URL path per line.
Links = open("/workspace/mediawiki-experiment/links.out", 'r')
links = Links.readlines()
links = [l.strip() for l in links]
host = 'http://192.168.245.53:8082'
#LINK = 'gw/index.php/Porsche_935'
print('Experiment is launching ...')
print('Experiment start time:', datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), file=out)
async def main(numberOfRequest):
    """Fire `numberOfRequest` GET requests in parallel through a thread
    pool and log each response's status code and elapsed seconds."""
    global counter
    with concurrent.futures.ThreadPoolExecutor(max_workers=maxWorkers) as executor:
        #with concurrent.futures.ProcessPoolExecutor(max_workers=maxWorkers) as executor:
        loop = asyncio.get_event_loop()
        futures = [
            loop.run_in_executor(
                executor,
                requests.get,
                # numpy's random.choice picks a random article link per request
                str(host + random.choice(links))
            )
            for i in range(numberOfRequest)
        ]
        for response in await asyncio.gather(*futures):
            print(response.status_code, response.elapsed.total_seconds(), file=out)
            counter = counter + 1
experiment_start_time = time.time()
totalRequest = 0
# Keep issuing Poisson-sized batches until the time budget is exhausted.
while (time.time() - experiment_start_time) < runningDuration:
    numberOfRequest = random.poisson(lam=lambdaValue, size=1)[0]
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main(numberOfRequest))
    print('Current iteration number', counter)
    print('Number of request', numberOfRequest)
    generatedRequests.append(numberOfRequest)
    totalRequest += numberOfRequest
print('Total number of request:', totalRequest)
# Final summary goes to the log file.
print('Experiment finish time:', datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), file=out)
print ('Total number of iteration:', counter, file=out)
print('Total number of request:', totalRequest, file=out)
print('List of generated request numbers:', generatedRequests, file=out)
|
from django.core.management.base import BaseCommand
from django.db.models import Count, Avg, F
from annotation.models import Eficaz_prediction
import sys
###! Create pseudo-ROC curve?
class Command(BaseCommand):
    help = 'COMMAND BRIEF'

    def handle(self, *args, **options):
        """Process each input file given on the command line, writing a
        textual progress percentage to stdout as work completes.
        """
        ## BUG FIX: `input_files` was referenced but never defined, which
        ## raised NameError; positional arguments are the input files.
        input_files = list(args)
        num_args = len(input_files)
        ## Initiate counter
        num_tot = len(input_files)
        num_done = 0
        next_progress = 1
        sys.stdout.write("\r - %d %%" % num_done)
        sys.stdout.flush()
        ## BUG FIX: the counter code was not inside a loop, so it ran once
        ## regardless of the number of inputs.
        for _input_file in input_files:
            ## Counter
            num_done += 1
            if ((100 * num_done) / num_tot) > next_progress:
                sys.stdout.write("\r - %d %%" % next_progress)
                sys.stdout.flush()
                next_progress += 1
        ## Finish counter
        sys.stdout.write("\r - 100 %\n")
        sys.stdout.flush()
|
import pandas as pd
import os
# NOTE(review): machine-specific absolute paths; the single backslashes in
# "\D" and "\0" survive only because they are not recognised escapes --
# prefer raw strings or pathlib if this is reused.
os.chdir("C:\\Users\\arman\\OneDrive\\Desktop\\2020\DataCamp\\15 Merging_Data_Frames_Pandas\\01_Preparing_Data\\Summer Olympic medals")
os.getcwd()
os.listdir("C:\\Users\\arman\\OneDrive\\Desktop\\2020\DataCamp\\15 Merging_Data_Frames_Pandas\\01_Preparing_Data\\Summer Olympic medals")
# Make a copy of gold: medals
# NOTE(review): `gold`, `silver` and `bronze` DataFrames are assumed to be
# loaded elsewhere (DataCamp exercise context); this script fails standalone.
medals = gold.copy()
# Create list of new column labels: new_labels
new_labels = ['NOC', 'Country', 'Gold']
# Rename the columns of medals using new_labels
medals.columns = new_labels
# Add columns 'Silver' & 'Bronze' to medals
medals['Silver'] = silver['Total']
medals['Bronze'] = bronze['Total']
# Print the head of medals
print(medals.head())
import csv
import pandas as pd
import numpy as np
import sys
# NOTE(review): this assignment looks reversed -- restoring stdout is
# usually `sys.stdout = sys.__stdout__`; kept as-is pending confirmation.
sys.__stdout__ = sys.stdout

# Load the raw reviews; one row per review with a `business_id` column.
df = pd.read_csv('review.csv', delimiter=",", encoding='utf-8')
print(df.head())

# Collect the distinct review texts per business and keep only businesses
# with more than one distinct review.  (Dead `header_rows = []` assignment
# and the duplicated commented-out pipeline were removed.)
header_rows = ['business_id', 'reviews']
group = df.groupby('business_id')['text'].unique()
framed = group[group.apply(lambda x: len(x)>1)]
print(framed.head())
framed.to_csv('Sorted_reviews_final2.csv', header=header_rows)
print("Completed processing of the CSV file")
|
#! /usr/bin/env python3
''' Python Script to understand the Python topics Scopes, Closures and Decorators '''
# Decorator Application - Decorator Class
# - This is because we can an object can be made callable
def dec_fac(a, b):
    """Decorator factory: the returned decorator announces (a, b) on each
    call, then delegates to the wrapped function."""
    def decorator(func):
        def wrapper(*args, **kwargs):
            print('Decorated function called: a={}, b={}'.format(a, b))
            return func(*args, **kwargs)
        return wrapper
    return decorator
@dec_fac(10, 20)
def my_func(s):
    # After decoration, each call first prints the a/b banner from dec_fac.
    print('Hello {}'.format(s))

my_func('World')
# class MyClass:
# def __init__(self, a, b):
# self.a = a
# self.b = b
# def __call__(self, c):
# print('called a={}, b={}, c={}'.format(self.a, self.b, c))
# obj = MyClass(10, 20)
# obj(30)
# will give the output:
# called a=10, b=20, c=30
# Here we can make the class MyClass as a decorator factory and function __call__ as a decorator which means the callable
# object itself will be a decorator.
class MyClass:
    """Decorator implemented as a class: MyClass(a, b) acts as the factory
    and the instance's __call__ is the decorator itself."""

    def __init__(self, a, b):
        self.a = a
        self.b = b

    def __call__(self, fn):
        def inner(*args, **kwargs):
            # FIX: corrected "fucntion" typo in the printed message.
            print('Decorated function called a={}, b={}'.format(self.a, self.b))
            return fn(*args, **kwargs)
        return inner
@MyClass(10, 20)
def my_func(s):
    print('Hello {}'.format(s))

print('\n\n---- Decorator Application ----')
print('---- Decorator Class ----')
my_func('World')

# Decorator Application - Decorating Class
#---- Example-1 ----
from fractions import Fraction

f = Fraction(2, 3)
print('\n\n---- Decorating Class ----')
print('---- Example-1 ----')
print(f.denominator)
print(f.numerator)
# Fraction.is_integer = lambda self: self.denominator == 1
# This way, we can add the instance method dynamically at the run time. This is also called as Monkey-Patching.
# print(f.is_integer()) --> will give false.
# Now doing the above code (Monkey Patching) as an external function (decorator)
def dec_is_integer(cls):
    """Class decorator: attach an is_integer() predicate to `cls`.

    The predicate is True when the instance's denominator equals 1.
    """
    def is_integer(self):
        return self.denominator == 1
    cls.is_integer = is_integer
    return cls
# Rebinding is purely illustrative: the decorator mutates and returns the
# same Fraction class, so the pre-existing instance `f` gains is_integer() too.
Fraction = dec_is_integer(Fraction)
print(f.is_integer())
#---- Eaxmple-2 ----
from datetime import datetime
def info(self):
    """Return diagnostic strings for any object: timestamp, class name,
    id, then one line per instance attribute."""
    lines = [
        'time: {}'.format(datetime.now()),
        'Class: {}'.format(self.__class__.__name__),
        'id: {}'.format(hex(id(self))),
    ]
    for name, value in vars(self).items():
        lines.append('{}: {}'.format(name, value))
    return lines
def debug_info(cls):
    # Class decorator: expose the module-level info() helper as cls.debug().
    cls.debug = info
    return cls
@debug_info
class Person:
    """Sample class; @debug_info adds a debug() method listing attributes."""
    def __init__(self, name, birth_year):
        self.name = name
        self.birth_year = birth_year
    def say_hi(self):
        return 'Hello {}'.format(self.name)

p = Person('Silver', 1999)
print('---- Example-2 ----')
p.debug()  # NOTE(review): return value is discarded; likely meant print(p.debug())
# As from the above two example we can see that in first exmaple rather than using decorator, we can directly add the property,
# while when are needed to reuse it we can make a decorator like in example-2. Since, debug_info can be used for many different classes.
@debug_info
class Automobile:
    """Vehicle whose `speed` property is validated against top_speed."""
    def __init__(self, make, model, year, top_speed):
        self.make = make
        self.model = model
        self.year = year
        self.top_speed = top_speed
        self._speed = 0  # backing field for the `speed` property

    @property
    def speed(self):
        return self._speed

    @speed.setter
    def speed(self, new_speed):
        # Reject any speed above the vehicle's rated top speed.
        if new_speed > self.top_speed:
            raise ValueError('Speed cannot exceed top_speed.')
        else:
            self._speed = new_speed

favourite = Automobile('Ford', 'Model T', 1908, 45)
print(favourite.debug())
# favourite.speed = 50 --> will give you the ValueError - Speed cannot exceed top_speed.
favourite.speed = 40
print(favourite.speed) # Returning speed property that is in-turn the self._speed
#---- Example-3 ----
# - Monkey Patching the ordering method in our class with the decorator `total_ordering` that is provided by the Python
from math import sqrt
# complete_ordering - our equivalent of total_ordering decorator - but is not such a good python code to be used
# and is just for reference.
# def complete_ordering(cls):
# if '__eq__' in dir(cls) and '__lt__' in dir(cls):
# cls.__le__ = lambda self, other: self < other or self == other
# cls.__ge__ = lambda self, other: not(self < other) and not(self == other)
# cls.__gt__ = lambda self, other: not(self < other)
# return cls
from functools import total_ordering
@total_ordering
class Point:
    """2-D point ordered by distance from the origin.

    Only __lt__ is defined; total_ordering derives <=, > and >=.
    Equality falls back to object identity (no __eq__).
    """

    def __init__(self, x ,y):
        self.x = x
        self.y = y

    def __abs__(self):
        return sqrt(self.x ** 2 + self.y ** 2)

    def __repr__(self):
        return 'Point({}, {})'.format(self.x, self.y)

    def __lt__(self, other):
        # Guard clause: non-Points are never "greater" than self.
        if not isinstance(other, Point):
            return False
        return abs(self) < abs(other)
p1, p2, p3 = Point(2,3), Point(2,3), Point(0,0)
print('---- Example-3 ----')
print('Abs: ', abs(p1))
print('is')
print(p1 is p2)  # False: distinct objects
print(p2 is p3)
print('==')
print(p1 == p2)  # False: no __eq__, so identity comparison applies
print(p1 == p3)
print('<')
print(p2 < p1)
print(p3 < p1)
print('>=')
print(p1 >= p2)  # synthesised by total_ordering from __lt__
# For total_ordering to work, only one of >, <, >= or <= needs to be defined.
|
'''
LF2: Lambda proxy for API gateway to search photos based on user query
'''
import boto3
from requests_aws4auth import AWS4Auth
import json
import logging
import requests
import os
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)  # verbose: this Lambda logs full payloads

# Deployment configuration injected via Lambda environment variables.
lex_bot_name = os.environ.get('LEX_BOT_NAME')
elastic_search_host = os.environ.get('ELASTIC_SEARCH_HOST')
elastic_search_region = os.environ.get('ELASTIC_SEARCH_REGION')
elastic_search_index = os.environ.get('ELASTIC_SEARCH_INDEX')
# For safe slot extraction
def try_ex(func):
    """Call func() and swallow KeyError, returning None instead.

    Used for optional fields in nested event/response dicts.
    """
    try:
        result = func()
    except KeyError:
        return None
    return result
def lambda_handler(event, context):
    """LF2: disambiguate the user's photo-search query via Lex, then run a
    keyword search against the ElasticSearch photo index.

    Returns an API-Gateway-style dict: 400 when no query is present,
    otherwise 200 with {'results': [...]} in the body.
    """
    logger.debug('EVENT:')
    logger.debug(event)
    logger.debug(context)
    raw_query = try_ex(lambda: event['query'])
    # FIX: the old `logger.debug(event['query'])` raised KeyError *before*
    # the missing-query guard below could run; log the safe value instead.
    logger.debug(raw_query)
    if not raw_query:
        return {
            'statusCode': 400,
            'body': 'No query found in event'
        }
    # Use Lex to disambiguate query into keyword slots
    keywords = []
    client = boto3.client('lex-runtime')
    response = client.post_text(
        botName = lex_bot_name,
        botAlias = '$LATEST',
        userId = 'searchPhotosLambda',
        inputText = raw_query
    )
    logger.debug('LEX Response:')
    logger.debug(response)
    # FIX: guard against a missing/None 'slots' key instead of crashing.
    slots = try_ex(lambda: response['slots']) or {}
    for _, v in slots.items():
        if v: # ignore empty slots
            keywords.append(v)
    credentials = boto3.Session().get_credentials()
    awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, elastic_search_region, 'es', session_token=credentials.token)
    query = '{}/{}/_search'.format(elastic_search_host, elastic_search_index)
    headers = {'Content-Type': 'application/json'}
    # Boolean "should" query: match any keyword against the photo labels.
    prepared_q = []
    for k in keywords:
        prepared_q.append({"match": {"labels": k}})
    q = {"query": {"bool": {"should": prepared_q}}}
    r = requests.post(query, auth=awsauth, headers=headers, data=json.dumps(q))
    data = json.loads(r.content.decode('utf-8'))
    logger.debug('Elastic Search Result')
    logger.debug(data)
    # Extract images
    all_photos = []
    prepend_url = 'https://s3.amazonaws.com'
    # FIX: guard against a missing hits list instead of iterating None.
    hits = try_ex(lambda: data['hits']['hits']) or []
    for h in hits:
        photo = {}
        obj_bucket = try_ex(lambda: h['_source']['bucket'])
        obj_key = try_ex(lambda: h['_source']['objectKey'])
        full_photo_path = '/'.join([prepend_url, obj_bucket, obj_key])
        photo['url'] = full_photo_path
        photo['labels'] = try_ex(lambda: h['_source']['labels'])
        all_photos.append(photo)
    return {
        'statusCode': 200,
        'headers': {
            "Access-Control-Allow-Origin": "*",
            "Content-Type": "application/json"
        },
        'body': {'results': all_photos}
    }
# Initialize the batch size and number of epochs for training.
batch_size = 32
epochs = 40
# test-train split ratio
split = 0.25
# paths to save the extracted features
path_feature_train = 'output/features_train.npy'
path_feature_test = 'output/features_test.npy'
# class weights, this is because there is class
# imbalance between man and woman. For 1 image of man
# there are 1.66 image of woman
classWeights = [1.66, 1]
# Dropout param, decreasing this will increase the
# overfitting (gap between val_acc and acc), although val_acc increases
param_dropout = 0.9
|
from __future__ import annotations
from prettyqt import constants, core, eventfilters, widgets
# Short aliases for the Qt style enums used throughout the event filter.
CC = widgets.QStyle.ComplexControl
SC = widgets.QStyle.SubControl
class SliderMoveToMouseClickEventFilter(eventfilters.BaseEventFilter):
    """Event filter making a scrollbar jump straight to the clicked
    position (instead of paging) and follow left-button drags."""

    ID = "slider_move_to_mouse_click"

    def _move_to_mouse_position(self, scrollbar: widgets.QScrollBar, point: core.QPoint):
        """Map `point` onto a scrollbar value and apply it.

        Only acts when the hit lands on the groove's page-up/page-down
        regions -- not on the handle or the arrow buttons.
        """
        opt = widgets.StyleOptionSlider()
        scrollbar.initStyleOption(opt)
        control = scrollbar.style().hitTestComplexControl(
            CC.CC_ScrollBar, opt, point, scrollbar
        )
        if control not in {SC.SC_ScrollBarAddPage, SC.SC_ScrollBarSubPage}:
            return
        # scroll here
        gr = scrollbar.style().subControlRect(
            CC.CC_ScrollBar, opt, SC.SC_ScrollBarGroove, scrollbar
        )
        sr = scrollbar.style().subControlRect(
            CC.CC_ScrollBar, opt, SC.SC_ScrollBarSlider, scrollbar
        )
        if scrollbar.orientation() == constants.Orientation.Horizontal:
            pos = point.x()
            slider_length = sr.width()
            slider_min = gr.x()
            slider_max = gr.right() - slider_length + 1
            # Mirror direction for right-to-left layouts.
            if scrollbar.layoutDirection() == constants.LayoutDirection.RightToLeft:
                opt.upsideDown = not opt.upsideDown
        else:
            pos = point.y()
            slider_length = sr.height()
            slider_min = gr.y()
            slider_max = gr.bottom() - slider_length + 1
        # Center the handle on the click point before converting to a value.
        value = widgets.QStyle.sliderValueFromPosition(
            scrollbar.minimum(),
            scrollbar.maximum(),
            pos - slider_min - slider_length // 2,
            slider_max - slider_min,
            opt.upsideDown,
        )
        scrollbar.setValue(value)

    def eventFilter(self, source: widgets.QScrollBar, event: core.QEvent):
        """Handle left-button presses and drags; never consumes the event."""
        match event.type():
            case core.QEvent.Type.MouseMove:
                if event.buttons() & constants.MouseButton.LeftButton:
                    point = event.position().toPoint()
                    self._move_to_mouse_position(source, point)
            case core.QEvent.Type.MouseButtonPress:
                if event.button() == constants.MouseButton.LeftButton:
                    point = event.position().toPoint()
                    self._move_to_mouse_position(source, point)
        return False
if __name__ == "__main__":
    # Demo: attach the filter to a text editor's vertical scrollbar.
    app = widgets.app()
    widget = widgets.PlainTextEdit("gfdgdf\n" * 1000)
    eventfilter = SliderMoveToMouseClickEventFilter(widget.v_scrollbar)
    widget.v_scrollbar.installEventFilter(eventfilter)
    widget.show()
    app.exec()
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Matrix support plus matrix-related numerical computation modules
(optimization, linear algebra, integration, interpolation, curve fitting,
special functions, fast Fourier transforms, signal and image processing,
ordinary differential equation solvers, and more).
"""
|
import numpy as np
import cv2
import ccv
import cropper
import filefinder
import lbp
import tester
import classify
# define a main function
def main():
    """Train the classifier on precomputed features and run the test
    harness against it."""
    # Each row of out.txt: 128 feature values followed by the class label
    # in the final column.
    training = np.loadtxt("out.txt")
    #training = filefinder.getTrainingData()
    trainingdata = training[:,range(0,128)].astype(np.float32)
    size = len(trainingdata)
    responses = np.array(training[:,128]).reshape(size,1).astype(np.float32)
    cc = classify.classifier(trainingdata, responses)
    #tester.testme(knn)
    tester.testme(cc)
    print("end of program")

if __name__ == "__main__":
    main()
|
from django.contrib.auth.models import AbstractUser
from django.db import models
class Tag(models.Model):
    # NOTE(review): `auto_created=True` is an internal Field flag, not meant
    # for user-declared fields -- confirm before removing (migration impact).
    description = models.CharField(max_length=200, blank=True, null=True, default=None, auto_created=True)

    def __str__(self):
        return self.description if self.description is not None else '????????'

class CocktailTag(models.Model):
    # Explicit through-table for the Cocktail <-> Tag many-to-many.
    cocktail = models.ForeignKey('Cocktail', on_delete=models.CASCADE, blank=True, null=True)
    tag = models.ForeignKey(Tag, on_delete=models.CASCADE, blank=True, null=True, default=None)
class Cocktail(models.Model):
    title = models.CharField(max_length=200, default=None)
    description = models.TextField(blank=True, null=True, default=None)
    recipe = models.TextField(blank=True)
    # Tags attach via the explicit CocktailTag through model above.
    tags = models.ManyToManyField(Tag, related_name='cocktails', through=CocktailTag)

    def __str__(self):
        return self.title if self.title is not None else '????????'

class Unit(models.Model):
    # Measurement unit for an ingredient amount (free-text description).
    description = models.CharField(max_length=200, blank=True, null=True, default='(no title)')

    def __str__(self):
        return self.description if self.description is not None else '????????'

class Ingredient(models.Model):
    description = models.CharField(max_length=200, blank=True, null=True, default='(no title)')

    def __str__(self):
        return self.description if self.description is not None else '????????'
class CocktailIngredientUnit(models.Model):
    """Join row: an amount (`value` in `unit`) of one ingredient in a cocktail."""
    cocktail = models.ForeignKey(Cocktail, on_delete=models.CASCADE,
                                 default=None, null=True, related_name='ingredients')
    ingredient = models.ForeignKey(Ingredient, on_delete=models.CASCADE, default=None, null=True)
    value = models.FloatField(default=1.0, blank=False)
    unit = models.ForeignKey(Unit, on_delete=models.CASCADE, default=None, null=True)

    def __str__(self):
        return '{} {} {}'.format(self.value, self.unit, self.ingredient)
|
from __future__ import unicode_literals

import json
import os
import socket
import sys
import threading

import tornado.ioloop
import tornado.iostream
import tornado.web
import tornado.websocket
class NADClient(object):
    """Telnet-style (port 23) client for a NAD receiver built on a Tornado
    IOStream; each received line is forwarded to `receive_callback`.
    (Python 2.)"""

    def __init__(self, host, receive_callback):
        self.host = host
        self.port = 23  # NAD receivers speak their protocol over telnet
        self.receive_callback = receive_callback
        self.stream = None
        self.socket = None
        self.try_connect()

    def try_connect(self):
        print "trying to connect to receiver..."
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        self.stream = tornado.iostream.IOStream(self.socket)
        print 'Connecting to: %s' % self.host
        self.stream.connect((self.host, self.port), self.send_request)

    def send_request(self):
        # self.stream.write("\n")
        # Stream until close; handle_read receives each streaming chunk.
        self.stream.read_until_close(self.on_close, self.handle_read)

    def on_close(self, res):
        self.stream.close()

    def handle_connect(self):
        pass

    def handle_error(self):
        print "problem reaching server."
        self.try_connect()

    def handle_read(self, data):
        # Only the last line of each chunk is forwarded to the callback.
        data = data.splitlines()[-1].strip()
        self.receive_callback(data)

    def write_data(self, data):
        print 'Write', data
        self.stream.write(str(data))

    def close(self):
        print 'Stopped NAD listener...'
        self.stream.close()
class NADThread(threading.Thread):
    """Thread wrapper around NADClient with key=value command helpers."""

    def __init__(self, parent, ip_address):
        threading.Thread.__init__(self)
        self.client = NADClient(ip_address, parent.on_read)

    def run(self):
        # NOTE(review): no loop here -- the Tornado stream drives the I/O,
        # so this thread exits right after the print.
        print 'Starting NAD listner..'

    def stop(self):
        self.client.close()
        #self.join()

    def send_cmd(self, cmd):
        self.client.write_data(cmd)

    def send(self, key, value):
        # e.g. Main.Volume=-20
        self.send_cmd('%s=%s\n' % (key, str(value)))

    def ask_device(self, key):
        # e.g. Main.Power?
        self.send_cmd('%s?\n' % key)
class IndexHandler(tornado.web.RequestHandler):
    '''Serve index page'''

    def initialize(self, core):
        self.core = core

    @tornado.web.asynchronous
    def get(request):
        '''Index page'''
        # NOTE(review): the first parameter is conventionally `self`; this
        # works only because the instance is simply named `request` here.
        index_page = os.path.join(os.path.dirname(__file__), 'index.html')
        request.render(index_page)

    def data_received(self, data):
        pass
class WebSocketHandler(tornado.websocket.WebSocketHandler):
    """Bridges browser websocket clients to the NAD receiver thread."""

    def initialize(self, core, config):
        self.core = core
        self.ip_address = config.get('nadsettings')['ip_address']
        print self.ip_address
        # NOTE(review): these are per-connection attributes, so `clients`
        # only ever contains this handler and every connection spawns its
        # own NADThread -- probably intended to be class-level/shared.
        self.clients = []
        self.nadclient = None

    def open(self):
        print("open", "WebSocketHandler")
        self.clients.append(self)
        if self.nadclient is None:
            self.nadclient = NADThread(self, self.ip_address)
            self.nadclient.start()
        # Prime the UI with the receiver's current state.
        asks = ['Main.Volume', 'Main.Power', 'Main.Source']
        for ask in asks:
            self.nadclient.ask_device(ask)

    def on_read(self, message):
        # Receiver -> browser: parse the key=value line and push it as JSON.
        if message is None:
            return
        msg = ParentTest().process_data(message)
        if msg is None:
            return
        self.write_message(json.dumps(msg))

    def on_message(self, message):
        # Browser -> receiver: translate UI volume percentages to dB.
        msg_dict = json.loads(message)
        if msg_dict['type'] == 'Main.Volume':
            vol = msg_dict['val']
            db_vol = ParentTest().get_dbvol(float(vol))
            self.nadclient.send('Main.Volume', db_vol)
            return
        if msg_dict['type'] in ('Main.Power', 'Main.Source'):
            val = msg_dict['val']
            msgtype = msg_dict['type']
            self.nadclient.send(msgtype, val)

    def on_close(self):
        self.clients.remove(self)
        if self.nadclient is not None:
            self.nadclient.stop()
# Test class
class ParentTest(object):
_min_volume = -78
_max_volume = 0
def __init__(self):
#self.clientthread = NADThread(self)
self.clientthread = None
def start(self):
self.clientthread.start()
while True:
char = sys.stdin.read(1)
print 'You pressed %s' % char
if (char == 'q'):
self.clientthread.stop()
sys.exit()
if (char == 's'):
self._ask_device('Main.Power')
if (char == 'v'):
self._ask_device('Main.Volume')
if (char == 'd'):
self._dec_vol()
if (char == 'i'):
self._inc_vol()
if (char == 'o'):
self._send_cmd('Main.Power', 'on')
if (char == 'p'):
self._send_cmd('Main.Power', 'off')
def on_read(self, data):
print 'Received...'
self.process_data(data)
def _ask_device(self, key):
self.clientthread.send_cmd('%s?\n' % key)
def _send_cmd(self, key, value):
self.clientthread.send_cmd('%s=%s\n' % (key, value))
def _dec_vol(self):
self.clientthread.send_cmd('Main.Volume-\n')
def _inc_vol(self):
self.clientthread.send_cmd('Main.Volume+\n')
def process_data(self, data):
print data
if '=' in data:
ret = {}
key, value = data.split('=', 2)
if key == 'Main.Volume':
vol = self.get_volume(float(value.rstrip()))
ret['type'] = key
ret['val'] = str(int(vol))
if key in ('Main.Power', 'Main.Source'):
ret['type'] = key
ret['val'] = value
if len(ret) > 0:
return ret
return None
def get_volume(self, db):
norm = (self._min_volume - self._max_volume)
normdb = (db - self._max_volume)
percentage_volume = abs((-(normdb - norm) / (norm) ) * 100)
return percentage_volume
def set_volume(self, percent):
norm = (self._min_volume - self._max_volume)
db = ((-percent / 100) * norm) + (norm + self._max_volume)
def get_dbvol(self, percent):
norm = (self._min_volume - self._max_volume)
db = ((-percent / 100) * norm) + (norm + self._max_volume)
return int(db)
def sig_handler(sig, frame):
    # Hand shutdown off to the IOLoop thread (add_callback is signal-safe).
    print 'Caught signal: %s' % sig
    tornado.ioloop.IOLoop.instance().add_callback(shutdown)

def shutdown(self):
    # NOTE(review): declared with `self` but registered as a plain callback
    # above -- invoking it via add_callback(shutdown) would raise TypeError.
    self.stop();
# -- Here we can run the web extension using a stand along tornado server.
#app = tornado.web.Application([(r'/nadws', WebSocketHandler), (r'/', IndexHandler)], debug=True)
#app.listen(8080)
#tornado.ioloop.IOLoop.instance().start()
|
from rest_framework import generics
from ..serializers import ToastingSerializer
from ..models import Toasting
class ToastingListCreateView(generics.ListCreateAPIView):
    """GET: list all Toasting records; POST: create a new one."""
    queryset = Toasting.objects.all()
    serializer_class = ToastingSerializer
|
# Leetcode problem 13.
# Convert roman numerals to integers
def rom_to_int(num):
    """Convert a roman-numeral string to its integer value.

    A symbol smaller than its right-hand neighbour is subtracted
    (e.g. IV -> 4); otherwise it is added.
    """
    values = {'M': 1000, 'D': 500, 'C': 100, 'L': 50, 'X': 10, 'V': 5, 'I': 1}
    total = 0
    for idx, ch in enumerate(num):
        current = values[ch]
        nxt = values[num[idx + 1]] if idx + 1 < len(num) else 0
        if nxt > current:
            total -= current
        else:
            total += current
    return total
# NOTE(review): 'MCMDXXI' is not a well-formed roman numeral (CM before D);
# the converter still evaluates it positionally.
num_roman = 'MCMDXXI'
print(rom_to_int(num_roman))
#-*- coding: utf-8 -*-
from __future__ import print_function
import iterL1 as itr
import numpy as np
# Synthetic test for the L1-norm bootstrap estimator: the data are built
# from a sine-contaminated design (Gorig) but fitted with a plain linear
# design G = [x, 1].
x = np.arange(1000)/1000.0
Gorig = np.column_stack((x,np.sin(2*np.pi*x*1000.0/200.0)))
G = np.column_stack((x,np.ones(x.size)))
tval = np.array([5,0.5])
y = np.dot(Gorig,tval)
# m: estimated parameters, S: bootstrap spread (presumably covariance --
# verify against iterL1.L1error_BS).
m,S = itr.L1error_BS(G,y, ngroup=25, niter=40 ,scale=4)
print(m)
print(S)
############################################################
# Program is part of GIAnT v1.0 #
# Copyright 2012, by the California Institute of Technology#
# Contact: earthdef@gps.caltech.edu #
############################################################
|
from Artist import *
from User import *
from KNN import *
import random
import time
import numpy as np
import matplotlib.pyplot as plt
def readFile(filepath, filelist):
    """Read each tab-separated data file named in `filelist` under `filepath`.

    The first line of every file (the header) is skipped; remaining lines
    are split on tabs and rows with fewer than two fields are dropped.
    Returns one list of rows per input file.

    FIX: file handles are now closed deterministically via `with`
    (the original leaked one open handle per file).
    """
    data = []
    for filename in filelist:
        filedata = []
        with open(filepath + filename, "r") as f:
            # skip the header line
            f.readline()
            for line in f:
                linedata = line.replace('\n', '').split('\t')
                if len(linedata) > 1:
                    filedata.append(linedata)
        data.append(filedata)
    return data
def splitTrainSet(userManager, percentage, userList = []):
    """Pop a test set out of userManager (mutates it!) and hide each test
    user's single most-listened artist for later evaluation.

    Python 2: relies on dict.values()/keys() returning lists.
    NOTE(review): the mutable default `userList=[]` is safe only because
    the list is never mutated -- still worth replacing with None.
    """
    if len(userList) == 0:
        testUserIDList = random.sample(userManager, int(len(userManager)*percentage))
    else:
        testUserIDList = userList
    testUserMostFavourite = {}
    testUserSet = {}
    for userID in testUserIDList:
        testUser = userManager.pop(userID)
        testUserSet[userID] = testUser
        artists = testUser.ArtistList
        # Track the artist with the highest listen count ({-1: 0} sentinel).
        mostFavourite = {-1:0}
        for artistID, listenTime in artists.iteritems():
            if listenTime > mostFavourite.values()[0]:
                mostFavourite = {artistID: listenTime}
        testUserMostFavourite[userID] = mostFavourite
        # Hide the favourite from the test user's listening history.
        del testUser.ArtistList[mostFavourite.keys()[0]]
        testUserSet[userID] = testUser
        # UserManager[userID] = testUser
    return testUserSet, testUserIDList, testUserMostFavourite
def splitTrainSetWithoutRemoving(userManager, percentage, userList = []):
    """Pop `percentage` of the users (or exactly `userList` when given)
    out of userManager and return them as a test set, data intact."""
    if userList:
        testUserIDList = userList
    else:
        testUserIDList = random.sample(userManager, int(len(userManager)*percentage))
    testUserSet = {uid: userManager.pop(uid) for uid in testUserIDList}
    return testUserSet, testUserIDList
def removeMostFav(userManager):
    """Return a new manager of rebuilt Users, each with their most-listened
    artist removed (Python 2: dict.keys() returns a list).

    NOTE(review): if User.__init__ stores ArtistList by reference rather
    than copying, the `del` below also mutates the source user -- verify.
    """
    newUserManager = {}
    for userID in userManager:
        user = userManager[userID]
        # Rebuild the user so the original manager keys stay untouched.
        newUser = User(userID, user.ArtistList, user.FriendList, user.TagList, user.totalListenTime)
        mostFavourite = user.getMostFav()
        del newUser.ArtistList[mostFavourite.keys()[0]]
        newUserManager[userID] = newUser
    return newUserManager
def crossvalidation(userManager, artistManager, folders):
    """split data into folders and validate the performance"""
    userIDs = userManager.keys()
    # Randomly assign every user to one of `folders` buckets.
    userFolders = {}
    for i in range(folders):
        userFolders[i] = []
    for userID in userIDs:
        i = random.randrange(folders)
        userFolders[i].append(userID)
    for f in range(folders):
        # Hold out one bucket; splitTrainSet hides each user's favourite.
        testUserSet, testUserIDList, testUserMostFavourite = splitTrainSet(userManager, 1.0/folders, userFolders[f])
        knn = KNN(6)
        knn.training(userManager, artistManager)
        rightNum = 0
        totalNum = len(testUserIDList)
        for i in range(len(testUserIDList)):
            print i, totalNum,
            favOfOne = knn.testing(testUserSet[testUserIDList[i]], userManager, artistManager)
            print testUserIDList[i], testUserMostFavourite[testUserIDList[i]].keys()[0], favOfOne
            # Count a hit when the prediction matches the hidden favourite.
            if favOfOne == testUserMostFavourite[testUserIDList[i]].keys()[0]:
                rightNum += 1
        print "Folder", f, ":"
        print "Total:", totalNum
        print float(rightNum)/len(testUserIDList)
        # Return the held-out users to the manager for the next fold.
        for i in range(len(testUserIDList)):
            userManager[testUserIDList[i]] = testUserSet[testUserIDList[i]]
if __name__ == "__main__":
    # Load the HetRec 2011 Last.fm data set from disk.
    # filepath = "test-data/"
    filepath = "hetrec2011-lastfm-2k/"
    filelist = ["artists.dat", "tags.dat", "user_artists.dat", "user_friends.dat", "user_taggedartists.dat"]
    data = readFile(filepath, filelist)
    RemoveListened = True  # exclude already-listened artists from predictions
    Animate = True         # passed through to knn.testing
    #create Artist Manager
    ArtistManager = {}
    for artist in data[0]:
        # data[0]: artists.dat
        # artist = [id name url pictureURL]
        ArtistManager[int(artist[0])] = Artist(int(artist[0]),artist[1])
    for tag in data[4]:
        # data[4]: user_taggedartists.dat
        # tag = [userID artistID tagID day month year]
        if ArtistManager.has_key(int(tag[1])):
            ArtistManager[int(tag[1])].insertTag(int(tag[2]))
    # Normalise each artist's tag counts (Python 2 iteritems).
    for artistID, artist in ArtistManager.iteritems():
        artist.tagNormalize()
    # print ArtistManager[3]
    #create User Manager
    UserManager = {}
    for user in data[2]:
        # data[2]: user_artists.dat
        # user = [userID artistID weight]
        if not UserManager.has_key(int(user[0])):
            UserManager[int(user[0])] = User(int(user[0]))
        UserManager[int(user[0])].insertArt(int(user[1]),int(user[2]))
    for friend in data[3]:
        # data[3]: user_friends.dat
        # friend = [userID friendID]
        if UserManager.has_key(int(friend[0])):
            UserManager[int(friend[0])].insertFriend(int(friend[1]))
    for tag in data[4]:
        # data[4]: user_taggedartists.dat
        # tag = [userID artistID tagID day month year]
        if UserManager.has_key(int(tag[0])):
            UserManager[int(tag[0])].insertTag(int(tag[1]),int(tag[2]))
    # normalize the listen count
    for userID, user in UserManager.iteritems():
        user.normalizeListenRecord()
    # remove most fav of all, return a new userManager for train
    TrainUserManager = removeMostFav(UserManager)
    # 10 cross validation
    # crossvalidation(UserManager, ArtistManager, 10)
    # print UserManager
    # Single-user demo run: predict the favourite artist of user 5 only.
    theSameNum = 0
    inListenNum = 0
    users = UserManager.keys()  # NOTE(review): computed but unused below
    for user in [5]:
        testUserSet, testUserIDList, testUserMostFavourite = splitTrainSet(UserManager, 0, [user])
        # testUserSet, testUserIDList = splitTrainSetWithoutRemoving(TrainUserManager, 0, [user])
        knn = KNN(30)  # 30 nearest neighbours for the demo run
        knn.training(UserManager, ArtistManager)
        for i in range(len(testUserIDList)):
            testUserID = testUserIDList[i]
            favOfOne, neighbors= knn.testing(testUserSet[testUserID], UserManager, ArtistManager, RemoveListened, Animate)
            realfavOfOne = testUserMostFavourite[testUserID].keys()[0]
            if favOfOne == realfavOfOne:
                theSameNum += 1
            # if testUserSet[testUserID].ArtistList.has_key(favOfOne):
            # inListenNum += 1
            # recovery modified TrainUserManager
            # Restore the held-out user and re-insert the withheld
            # favourite artist into its listen record.
            UserManager[testUserID]=testUserSet[testUserID]
            key = testUserMostFavourite[testUserID].keys()[0]
            value = testUserMostFavourite[testUserID].values()[0]
            UserManager[testUserID].ArtistList[key] = value
            # print testUserSet[testUserIDList[i]]
            # print testUserMostFavourite[testUserIDList[i]], favOfOne, testUserSet[testUserIDList[i]].ArtistList.pop(favOfOne, "cannot match any one")
        print str(user), theSameNum, favOfOne
        # Optional dump of neighbour features/labels/distances for
        # offline analysis (disabled).
        # f = open("NeighborFeaturedUsersTag.txt", "w")
        # fc = open("NeighborUserLabel.txt","w")
        # fd = open("NeighborDistance.txt","w")
        # #userCounter = 0
        # for n in neighbors:
        #     userID = n.keys()[0]
        #     feature = knn.Nodes[userID]
        #     for i in range(1, 12649): #there are 12648 tags in total
        #         if feature.has_key(i):
        #             f.write('{0:.20f}'.format(feature[i])+" ")
        #         else:
        #             f.write("0.0 ")
        #     f.write("\n")
        #     # write the favorite artistID as label
        #     favDic = UserManager[userID].getMostFav()
        #     favID = favDic.keys()[0]
        #     fc.write(str(favID) + "\n")
        #     distance = n.values()[0]
        #     GDistance = 1.0* math.exp(-distance**2 / 0.002) / math.sqrt(3.14*0.002)
        #     fd.write(str(GDistance)+"\n")
        # f.close()
        # fc.close()
        # fd.close()
    # print 1.0*theSameNum/len(UserManager)
    # print 1.0*inListenNum/len(UserManager)
    # print favOfOne
|
import RPi.GPIO as GPIO
import time
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
from RpiMotorLib import rpiservolib
import schedule
import functools
#Protect against exceptions
def catch_exceptions(cancel_on_failure=False):
    """Decorator factory that stops a scheduled job from killing the loop.

    Any exception raised by the wrapped job is caught and its traceback
    printed.  When ``cancel_on_failure`` is true the wrapper returns
    ``schedule.CancelJob`` so the scheduler unregisters the failing job.

    :param cancel_on_failure: cancel the job after its first failure.
    :return: a decorator for job functions.
    """
    def catch_exceptions_decorator(job_func):
        @functools.wraps(job_func)
        def wrapper(*args, **kwargs):
            try:
                return job_func(*args, **kwargs)
            except Exception:
                # A bare ``except:`` would also swallow KeyboardInterrupt
                # and SystemExit, making the scheduler loop impossible to
                # stop with Ctrl-C; catch ordinary errors only.
                import traceback
                print(traceback.format_exc())
                if cancel_on_failure:
                    return schedule.CancelJob
        return wrapper
    return catch_exceptions_decorator
# https://github.com/gavinlyonsrepo/RpiMotorLib/blob/master/Documentation/Servo_RPI_GPIO.md
# SG90 servo driver; constructor arguments per the RpiMotorLib docs above.
myservotest = rpiservolib.SG90servo("servoone", 50, 3, 11)
servoPIN = 17            # GPIO pin carrying the servo signal
servoStartDegrees = 10   # last position the servo was moved to
degrees = 10             # most recent target position
#logfile = "/var/log/flomlog.txt"
# Calculate with 110degrees as middle flood (440 M3s),
# The 50 years flood is then at 175 degree (691 M3s)
# Define the job:
@catch_exceptions(cancel_on_failure=True)
def job():
    """Scrape the current discharge value from NVE and position the servo.

    Reads the "verdi=<number>" reading from the NVE plot page, maps it to
    a servo angle (110 degrees corresponds to 440 m3/s) and steps the
    servo there.  Updates module-level servoStartDegrees/degrees state.
    """
    global servoStartDegrees
    global degrees
    # Raw strings: "\s" in a plain literal is an invalid escape on
    # Python 3, and the dot must be escaped so it matches only a
    # literal decimal point rather than any character.
    r = re.compile(r"verdi=\s*")
    # Close the HTTP response deterministically instead of leaking it.
    with urlopen('https://www2.nve.no/h/hd/plotreal/Q/0208.00003.000/') as response:
        soup = BeautifulSoup(response.read(), 'html.parser')
    for tag in soup.find_all(text=re.compile(r"verdi=\s*\d+\.\d+")):
        newlist = list(filter(r.match, tag.string.split()))
        value = float(re.sub(r.pattern, '', newlist[0]))
        print(value)
        degrees = (value / 440) * 110
    #to move a servo on GPIO pin servpPIN from 10 degrees to degrees in 3 degree steps every two seconds, with an initial delay of one second and verbose output.
    myservotest.servo_move_step(servoPIN, servoStartDegrees, int(degrees), 2, 3, 1, True)
    print("Moved to", degrees, "degrees")
    servoStartDegrees = int(degrees)
    # f = open(logfile, "a")
    # f.write("value and Degrees")
    # f.write(value)
    # f.write(degrees)
    # f.close()
# Run the job every 10 minutes
schedule.every(10).minutes.do(job)
#schedule.every(10).seconds.do(job)
try:
    while True:
        schedule.run_pending()
        time.sleep(1)
finally:
    # Previously this line sat after the infinite loop and was
    # unreachable; the try/finally guarantees it now runs even when the
    # loop is interrupted (e.g. Ctrl-C).
    # good practise to cleanup GPIO at some point before exit
    GPIO.cleanup()
|
import cv2
import numpy as np
import imutils
class ImageFeatures:
    """Extracts colour-histogram and keypoint features from BGR images."""

    def __init__(self, bins):
        # Bin counts per HSV channel for the 3-D colour histogram.
        self.bins = bins

    def getFeatures(self, image):
        """Return a flat list of per-quadrant HSV histogram values."""
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        features = []
        height, width = hsv.shape[:2]
        midW, midH = int(width * 0.5), int(height * 0.5)
        # Quadrants: top-left, top-right, bottom-right, bottom-left.
        quadrants = [
            (0, midW, 0, midH),
            (midW, width, 0, midH),
            (midW, width, midH, height),
            (0, midW, midH, height),
        ]
        for (x0, x1, y0, y1) in quadrants:
            mask = np.zeros(hsv.shape[:2], dtype="uint8")
            cv2.rectangle(mask, (x0, y0), (x1, y1), 255, -1)
            features.extend(self.histogram(hsv, mask))
        return features

    def histogram(self, image, mask):
        """Compute a flattened, normalised 3-D HSV histogram under *mask*."""
        hist = cv2.calcHist([image], [0, 1, 2], mask, self.bins,
                            [0, 180, 0, 256, 0, 256])
        # cv2.normalize takes different arguments in OpenCV 2 vs 3+.
        if imutils.is_cv2():
            return cv2.normalize(hist).flatten()
        return cv2.normalize(hist, hist).flatten()

    def imageKeypoints(self, image):
        """Return SIFT keypoints and descriptors of the greyscale image."""
        grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        #sift
        sift = cv2.xfeatures2d.SIFT_create()
        return sift.detectAndCompute(grey, None)

    def orbKeypoints(self, image):
        '''
        ORB is an open source implementation. A better alternate to SIFT. It detencts less keypoints but
        given it is open source, so works okay.
        '''
        #print("orbKeypoints:image_path:%s"%image)
        grey = cv2.cvtColor(image.astype('uint8'), cv2.COLOR_BGR2GRAY)  # cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
        orb = cv2.ORB_create(nfeatures=2000)
        return orb.detectAndCompute(grey, None)
|
class KMP:
    """Knuth-Morris-Pratt substring search over any sequence.

    https://en.wikipedia.org/wiki/Knuth%E2%80%93Morris%E2%80%93Pratt_algorithm
    """

    def __init__(self, needle):
        """Precompute the partial-match (failure) table for *needle*.

        :param typing.Sequence needle: the pattern to search for
        """
        self._needle = needle
        # table[i] = length of the longest proper border of needle[:i-1]
        # shifted by the classic Wikipedia construction; table[0] == -1.
        table = [0] * (len(needle) + 2)
        table[0] = -1
        table[1] = 0
        pos, cnd = 2, 0
        while pos < len(needle):
            if needle[pos - 1] == needle[cnd]:
                cnd += 1
                table[pos] = cnd
                pos += 1
            elif cnd > 0:
                cnd = table[cnd]
            else:
                table[pos] = 0
                pos += 1
        self._kmp = table

    def index_of(self, haystack, m=0, i=0):
        """Return the first match position at or after *m*, else -1.

        The first *i* characters of the needle are assumed to have
        already matched, so only needle[i:] is compared.

        :param typing.Sequence haystack: the sequence searched
        :param int m: current candidate start position in haystack
        :param int i: current position within the needle
        :rtype: int
        """
        needle, table = self._needle, self._kmp
        limit = len(haystack)
        while m + i < limit:
            if haystack[m + i] != needle[i]:
                # Mismatch: shift the window using the failure table.
                m += i - table[i]
                if i > 0:
                    i = table[i]
            else:
                i += 1
                if i == len(needle):
                    return m
        return -1

    def search(self, haystack):
        """Return a boolean mask of all match start positions.

        ret[i] is True iff haystack[i:i+len(needle)] == needle.

        :param typing.Sequence haystack: the sequence searched
        :rtype: list of bool
        """
        needle_len = len(self._needle)
        found = [False] * len(haystack)
        m = i = 0
        while m + i < len(haystack):
            m = self.index_of(haystack, m=m, i=i)
            if m < 0:
                break
            found[m] = True
            # Resume after the match, reusing the needle's border so
            # overlapping matches are not missed.
            fallback = self._kmp[needle_len - 1]
            m += needle_len - fallback - 1
            i = fallback if fallback > 0 else 0
        return found
|
# Copyright (c) 2014-2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
# Stdlib imports
import atexit
import os
import re
import ssl
import time
import urllib
import netaddr
from collections import MutableMapping
from copy import copy,deepcopy
from functools import wraps
# Third party imports
import yaml
from netaddr import IPNetwork
from pyVim.connect import SmartConnect, SmartConnectNoSSL, Disconnect
from pyVmomi import vim, vmodl
import requests
# Cloudify imports
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
# This package imports
from vsphere_plugin_common.constants import (
DEFAULT_CONFIG_PATH,
IP,
NETWORKS,
NETWORK_ID,
NETWORK_MTU,
TASK_CHECK_SLEEP,
VSPHERE_SERVER_CLUSTER_NAME,
VSPHERE_SERVER_HYPERVISOR_HOSTNAME,
VSPHERE_RESOURCE_NAME,
NETWORK_CREATE_ON,
NETWORK_STATUS,
VSPHERE_SERVER_ID,
VSWITCH_NAME,
VSWITCH_PNIC,
)
from collections import namedtuple
from cloudify_vsphere.utils.feedback import logger, prepare_for_log, check_name_for_special_characters
# Failure-detection settings applied to NIC teaming policies.
NIC_TEAMING_FAILURE_CRITERIA_VALUE = {
    'checkSpeed': 'minimum',
    'speed': 10,
    'checkDuplex': False,
    'fullDuplex': False,
    'checkErrorPercent': False,
    'percentage': 0,
    'checkBeacon': False
}
# Hardware offload capabilities enabled by default.
DEFAULT_OFFLOAD_POLICY = {
    'csumOffload': True,
    'tcpSegmentation': True,
    'zeroCopyXmit': True
}
# Accepted NIC-teaming load-balancing modes.
LOAD_BALANCE = [
    'loadbalance_ip',
    'loadbalance_srcmac',
    'loadbalance_srcid',
    'failover_explicit'
]
# Expected value types for each failure-criteria key.
# NOTE: basestring makes this module Python 2 only.
NIC_TEAMING_FAILURE_CRITERIA = {
    'checkSpeed': basestring,
    'speed': int,
    'checkDuplex': bool,
    'fullDuplex': bool,
    'checkErrorPercent': bool,
    'percentage': int,
    'checkBeacon': bool
}
#esxi_ip = ctx.node.properties['connection_config'].get('esxi_ip')
# NOTE(review): hard-coded ESXi address, presumably a debugging shortcut
# for the commented-out property lookup above -- confirm before shipping.
esxi_ip = '192.168.4.103'
def get_ip_from_vsphere_nic_ips(nic, ignore_local=True):
    """Return the first usable IP address reported on *nic*, or None.

    Locally assigned addresses (IPv4 169.254.x.x and IPv6 fe80::) are
    skipped unless ignore_local is False, as they are assumed not to be
    routable.
    """
    for address in nic.ipAddress:
        link_local = (address.startswith('169.254.') or
                      address.lower().startswith('fe80::'))
        if link_local and ignore_local:
            # This is a locally assigned IPv4 or IPv6 address and thus we
            # will assume it is not routable
            logger().debug(
                'Found locally assigned IP {ip}. Skipping.'.format(ip=address))
            continue
        return address
    # No valid IP was found
    return None
def remove_runtime_properties(properties, context):
    """Delete each named runtime property from *context*, if present."""
    runtime = context.instance.runtime_properties
    for name in properties:
        if name in runtime:
            del runtime[name]
class Config(object):
    """Loads the vSphere connection configuration from the first
    available config file (file paths and env-var-named paths)."""

    # Required during vsphere manager bootstrap
    # Hard-coded to old path so old manager blueprints aren't broken
    CONNECTION_CONFIG_PATH_DEFAULT = '/root/connection_config.yaml'

    # Candidate locations, checked in order; a later existing candidate
    # wins.  Entries with 'env': True name an environment variable that
    # holds the path; 'warn': True marks deprecated locations.
    _path_options = [
        {'source': '/root/connection_config.yaml', 'warn': True},
        {'source': '~/connection_config.yaml', 'warn': True},
        {'source': DEFAULT_CONFIG_PATH, 'warn': False},
        {'env': True, 'source': 'CONNECTION_CONFIG_PATH', 'warn': True},
        {'env': True, 'source': 'CFY_VSPHERE_CONFIG_PATH', 'warn': True},
    ]

    def _find_config_file(self):
        """Return the selected config path, warning on deprecated ones."""
        selected = DEFAULT_CONFIG_PATH
        warnings = []
        for path in self._path_options:
            source = path['source']
            if path.get('env'):
                source = os.getenv(source)
            if source:
                source = os.path.expanduser(source)
                if os.path.isfile(source):
                    selected = source
                    if path['warn']:
                        warnings.append(path['source'])
        if warnings:
            logger().warn(
                "Deprecated configuration options were found: {}".format(
                    "; ".join(warnings)),
            )
        return selected

    def get(self):
        """Parse and return the configuration dict ({} if unreadable)."""
        cfg = {}
        config_path = self._find_config_file()
        try:
            with open(config_path) as f:
                # safe_load: a config file must not be able to construct
                # arbitrary Python objects (yaml.load without an explicit
                # Loader is unsafe on untrusted input).
                cfg = yaml.safe_load(f.read())
        except IOError:
            logger().warn(
                "Unable to read configuration file %s." % (config_path))
        return cfg
class _ContainerView(object):
def __init__(self, obj_type, service_instance):
self.si = service_instance
self.obj_type = obj_type
def __enter__(self):
container = self.si.content.rootFolder
self.view_ref = self.si.content.viewManager.CreateContainerView(
container=container,
type=self.obj_type,
recursive=True,
)
return self.view_ref
def __exit__(self, *args):
self.view_ref.Destroy()
class CustomValues(MutableMapping):
    """dict interface to ManagedObject customValue"""

    def __init__(self, client, thing):
        """
        client: a VsphereClient instance
        thing: a NamedTuple containing a ManagedObject-derived class as its
            `obj` attribute: as supplied by `client._get_obj_by_name`
        """
        self.client = client
        self.thing = thing

    def __getitem__(self, key):
        wanted = self._get_key_id(key)
        for entry in self.thing.obj.customValue:
            if entry.key == wanted:
                return entry.value
        raise KeyError(key)

    def __setitem__(self, key, value):
        # Ensure the custom field definition exists before assigning.
        self._get_key_id(key, create=True)
        return self.thing.obj.setCustomValue(key, value)

    def __delitem__(self, key):
        raise NonRecoverableError("Unable to unset custom values")

    def __iter__(self):
        # Yield the field *names* corresponding to each stored value.
        for entry in self.thing.obj.customValue:
            yield self._get_key_name(entry.key)

    def __len__(self):
        return len(self.thing.obj.customValue)

    def _get_key_id(self, k, create=False):
        """Resolve field name *k* to its numeric key, creating it if asked."""
        for key in self.client._get_custom_keys():
            if key.name == k:
                return key.key
        if create:
            try:
                key = (
                    self.client.si.content.customFieldsManager.
                    AddCustomFieldDef)(name=k)
            except vim.fault.DuplicateName:
                # Someone else created it concurrently: refresh and retry.
                self.client._get_custom_keys(use_cache=False)
                return self._get_key_id(k, create=create)
            return key.key
        raise KeyError(k)

    def _get_key_name(self, k):
        """Resolve numeric field key *k* back to its name."""
        for key in self.client._get_custom_keys():
            if key.key == k:
                return key.name
        raise ValueError(k)
class VsphereClient(object):
def __init__(self):
self._cache = {}
self._logger = logger()
def get(self, config=None, *args, **kw):
static_config = Config().get()
self.cfg = {}
self.cfg.update(static_config)
if config:
self.cfg.update(config)
ret = self.connect(self.cfg)
ret.format = 'yaml'
return ret
    def connect(self, cfg):
        """Open a pyVim connection described by *cfg* and return self.

        cfg must contain host/username/password/port and may contain
        certificate_path (verified TLS) or allow_insecure=True (no TLS
        verification); supplying both is an error.  Registers Disconnect
        to run at interpreter exit.

        Raises NonRecoverableError on conflicting security settings,
        certificate problems, or invalid credentials.
        """
        host = cfg['host']
        username = cfg['username']
        password = cfg['password']
        port = cfg['port']
        certificate_path = cfg.get('certificate_path')
        # Until the next major release this will have limited effect, but is
        # in place to allow a clear path to the next release for users
        allow_insecure = cfg.get('allow_insecure', False)
        ssl_context = None
        if certificate_path and allow_insecure:
            raise NonRecoverableError(
                'Cannot connect when certificate_path and allow_insecure '
                'are both set. Unable to determine whether connection should '
                'be secure or insecure.'
            )
        elif certificate_path:
            # Verified-TLS path: build an SSL context from the supplied
            # CA certificate.
            if not hasattr(ssl, '_create_default_https_context'):
                raise NonRecoverableError(
                    'Cannot create secure connection with this version of '
                    'python. This functionality requires at least python '
                    '2.7.9 and has been confirmed to work on at least 2.7.12.'
                )
            if not os.path.exists(certificate_path):
                raise NonRecoverableError(
                    'Certificate was not found in {path}'.format(
                        path=certificate_path,
                    )
                )
            elif not os.path.isfile(certificate_path):
                raise NonRecoverableError(
                    'Found directory at {path}, but the certificate_path '
                    'must be a file.'.format(
                        path=certificate_path,
                    )
                )
            try:
                # We want to load the cert into the existing default context
                # in case any other python modules have already defined their
                # default https context.
                ssl_context = ssl._create_default_https_context()
                if ssl_context.verify_mode == 0:
                    raise NonRecoverableError(
                        'Default SSL context is not set to verify. '
                        'Cannot use a certificate while other imported '
                        'modules are disabling verification on the default '
                        'SSL context.'
                    )
                ssl_context.load_verify_locations(certificate_path)
            except ssl.SSLError as err:
                if 'unknown error' in str(err).lower():
                    raise NonRecoverableError(
                        'Could not create SSL context with provided '
                        'certificate {path}. This problem may be caused by '
                        'the certificate not being in the correct format '
                        '(PEM).'.format(
                            host=host,
                            path=certificate_path,
                        )
                    )
                else:
                    raise
        elif not allow_insecure:
            # Neither a certificate nor an explicit opt-out: warn that a
            # certificate will become mandatory.
            self._logger.warn(
                'DEPRECATED: certificate_path was not supplied. '
                'A certificate will be required in the next major '
                'release of the plugin if allow_insecure is not set '
                'to true.'
            )
        try:
            if allow_insecure:
                self._logger.warn(
                    'SSL verification disabled for all legacy code. '
                    'Please note that this may result in other code '
                    'from the same blueprint running with reduced '
                    'security.'
                )
                self.si = SmartConnectNoSSL(host=host,
                                            user=username,
                                            pwd=password,
                                            port=int(port))
            else:
                self.si = SmartConnect(host=host,
                                       user=username,
                                       pwd=password,
                                       port=int(port),
                                       sslContext=ssl_context)
            # Make sure the session is torn down when the process exits.
            atexit.register(Disconnect, self.si)
            return self
        except vim.fault.InvalidLogin:
            raise NonRecoverableError(
                "Could not login to vSphere on {host} with provided "
                "credentials".format(host=host)
            )
        except vim.fault.HostConnectFault as err:
            if 'certificate verify failed' in err.msg:
                raise NonRecoverableError(
                    'Could not connect to vSphere on {host} with provided '
                    'certificate {path}. Certificate was not valid.'.format(
                        host=host,
                        path=certificate_path,
                    )
                )
            else:
                raise
def is_server_suspended(self, server):
return server.summary.runtime.powerState.lower() == "suspended"
def _convert_props_list_to_dict(self, props_list):
the_dict = {}
split_list = [
item.split('.', 1) for item in props_list
]
vals = [
item[0] for item in split_list
if len(item) == 1
]
keys = [
item for item in split_list
if len(item) > 1
]
the_dict['_values'] = set(vals)
for item in keys:
key_name = item[0]
sub_keys = item[1:]
dict_entry = the_dict.get(key_name, {'_values': set()})
update_dict = self._convert_props_list_to_dict(
sub_keys
)
the_dict[key_name] = self._merge_props_dicts(
dict_entry,
update_dict,
)
return the_dict
def _merge_props_dicts(self, dict1, dict2):
new_dict = {}
keys = set(dict1.keys() + dict2.keys())
keys.remove('_values')
new_dict['_values'] = dict1['_values'] | dict2['_values']
for key in keys:
new_dict[key] = self._merge_props_dicts(
dict1.get(key, {'_values': set()}),
dict2.get(key, {'_values': set()})
)
return new_dict
def _get_platform_sub_results(self, platform_results, target_key):
sub_results = {}
for key, value in platform_results.items():
key_components = key.split('.', 1)
if key_components[0] == target_key:
sub_results[key_components[1]] = value
return sub_results
    def _get_normalised_name(self, name, tolower=True):
        """
        Get the normalised form of a platform entity's name.

        URL-encoded characters (e.g. ``%2f``) are decoded; the result is
        lower-cased unless ``tolower`` is False.
        """
        # NOTE(review): urllib.unquote exists on Python 2 only
        # (urllib.parse.unquote on Python 3).
        name = urllib.unquote(name)
        return name.lower() if tolower else name
    def _make_cached_object(self, obj_name, props_dict, platform_results,
                            root_object=True, other_entity_mappings=None,
                            use_cache=True):
        """Build a namedtuple snapshot of one platform object.

        props_dict is the nested shape from _convert_props_list_to_dict;
        platform_results is one property-collector result dict.  Root
        objects additionally get 'id'/'obj' fields and have their
        cross-entity references resolved via other_entity_mappings
        ('static' lists must resolve completely, 'dynamic' lists may be
        partial, 'single' resolves one reference).  Returns a retry (via
        ctx.operation.retry) when a referenced entity vanished mid-build.
        """
        just_keys = props_dict.keys()
        # Discard the _values key if it is present
        # NOTE(review): relies on Python 2 where dict.keys() returns a
        # list; .remove() on a Python 3 view would fail.
        if '_values' in just_keys:
            just_keys.remove('_values')
        object_keys = copy(just_keys)
        object_keys.extend(props_dict.get('_values', []))
        if root_object:
            object_keys.extend(['id', 'obj'])
        object_keys = set(object_keys)
        obj = namedtuple(
            obj_name,
            object_keys,
        )
        args = {}
        # Leaf properties come straight from the collector results.
        for key in props_dict.get('_values', []):
            args[key] = platform_results[key]
        if root_object:
            args['id'] = platform_results['obj']._moId
            args['obj'] = platform_results['obj']
        if root_object and other_entity_mappings:
            for map_type in ('static', 'dynamic', 'single'):
                mappings = other_entity_mappings.get(map_type, {})
                for mapping, other_entities in mappings.items():
                    if map_type == 'single':
                        # Resolve a single managed-object reference by ID.
                        mapped = None
                        map_id = args[mapping]._moId
                        for entity in other_entities:
                            if entity.id == map_id:
                                mapped = entity
                                break
                    else:
                        # Resolve a list of references by ID.
                        mapping_ids = [
                            map_obj._moId for map_obj in args[mapping]
                        ]
                        mapped = [
                            other_entity for other_entity in other_entities
                            if other_entity.id in mapping_ids
                        ]
                        # 'static' mappings must resolve every reference.
                        if (
                            map_type == 'static' and
                            len(mapped) != len(args[mapping])
                        ):
                            mapped = None
                    if mapped is None:
                        return ctx.operation.retry(
                            'Platform {entity} configuration changed '
                            'while building {obj_name} cache.'.format(
                                entity=mapping,
                                obj_name=obj_name,
                            )
                        )
                    args[mapping] = mapped
        # Recurse into nested property groups (e.g. 'summary.*').
        for key in just_keys:
            sub_object_name = '{name}_{sub}'.format(
                name=obj_name,
                sub=key,
            )
            args[key] = self._make_cached_object(
                obj_name=sub_object_name,
                props_dict=props_dict[key],
                platform_results=self._get_platform_sub_results(
                    platform_results=platform_results,
                    target_key=key,
                ),
                root_object=False,
            )
        if 'name' in args.keys():
            args['name'] = self._get_normalised_name(args['name'], False)
        result = obj(
            **args
        )
        return result
    #zhutao
    def _get_entity(self, entity_name, props, vimtype, use_cache=False,
                    other_entity_mappings=None, skip_broken_objects=False):
        """Generic cached fetch of platform entities as namedtuples.

        Collects ``props`` for every object of ``vimtype`` and converts
        each result via _make_cached_object.  Results are memoised in
        self._cache under ``entity_name`` (returned directly when
        use_cache is true).  Objects missing required details raise
        NonRecoverableError unless skip_broken_objects is true, in which
        case they are logged and skipped.
        """
        if entity_name in self._cache and use_cache:
            return self._cache[entity_name]
        platform_results = self._collect_properties(
            vimtype,
            path_set=props,
        )
        props_dict = self._convert_props_list_to_dict(props)
        results = []
        for result in platform_results:
            try:
                results.append(
                    self._make_cached_object(
                        obj_name=entity_name,
                        props_dict=props_dict,
                        platform_results=result,
                        other_entity_mappings=other_entity_mappings,
                        use_cache=use_cache,
                    )
                )
            except KeyError as err:
                # A property we expected was absent from the collector
                # result (e.g. a VM still being cloned).
                message = (
                    'Could not retrieve all details for {type} object. '
                    '{err} was missing.'.format(
                        type=entity_name,
                        err=str(err)
                    )
                )
                if hasattr(result, 'name'):
                    message += (
                        ' Object name was {name}.'.format(name=result.name)
                    )
                if hasattr(result, '_moId'):
                    message += (
                        ' Object ID was {id}.'.format(id=result._moId)
                    )
                if skip_broken_objects:
                    self._logger.warn(message)
                else:
                    raise NonRecoverableError(message)
        self._cache[entity_name] = results
        return results
    def _build_resource_pool_object(self, base_pool_id, resource_pools):
        """Recursively build a nested resource-pool namedtuple.

        base_pool_id: _moId of the pool to build.
        resource_pools: raw property-collector result dicts for all pools.
        Returns a retry (via ctx.operation.retry) if the pool disappeared
        between collection and build.
        """
        rp_object = namedtuple(
            'resource_pool',
            ['name', 'resourcePool', 'id', 'obj'],
        )
        this_pool = None
        for pool in resource_pools:
            if pool['obj']._moId == base_pool_id:
                this_pool = pool
                break
        if this_pool is None:
            return ctx.operation.retry(
                'Resource pools changed while getting resource pool details.'
            )
        if 'name' in this_pool.keys():
            this_pool['name'] = self._get_normalised_name(this_pool['name'],
                                                          False)
        base_object = rp_object(
            name=this_pool['name'],
            id=this_pool['obj']._moId,
            resourcePool=[],
            obj=this_pool['obj'],
        )
        # Attach child pools by recursing on each child's ID.
        for item in this_pool['resourcePool']:
            base_object.resourcePool.append(self._build_resource_pool_object(
                base_pool_id=item._moId,
                resource_pools=resource_pools,
            ))
        return base_object
def _get_resource_pools(self, use_cache=True):
if 'resource_pool' in self._cache and use_cache:
return self._cache['resource_pool']
properties = [
'name',
'resourcePool',
]
results = self._collect_properties(
vim.ResourcePool,
path_set=properties,
)
resource_pools = []
for item in results:
resource_pools.append(self._build_resource_pool_object(
base_pool_id=item['obj']._moId,
resource_pools=results
))
self._cache['resource_pool'] = resource_pools
return resource_pools
def _get_vm_folders(self, use_cache=True):
properties = [
'name'
]
return self._get_entity(
entity_name='vm_folder',
props=properties,
vimtype=vim.Folder,
use_cache=use_cache,
)
def _get_clusters(self, use_cache=True):
properties = [
'name',
'resourcePool',
]
return self._get_entity(
entity_name='cluster',
props=properties,
vimtype=vim.ClusterComputeResource,
use_cache=use_cache,
other_entity_mappings={
'single': {
'resourcePool': self._get_resource_pools(
use_cache=use_cache,
),
},
},
)
def _get_datacenters(self, use_cache=True):
properties = [
'name',
'vmFolder',
]
return self._get_entity(
entity_name='datacenter',
props=properties,
vimtype=vim.Datacenter,
use_cache=use_cache,
)
def _get_datastores(self, use_cache=True):
properties = [
'name',
'overallStatus',
'summary.accessible',
'summary.freeSpace',
]
return self._get_entity(
entity_name='datastore',
props=properties,
vimtype=vim.Datastore,
use_cache=use_cache,
)
    def _get_connected_network_name(self, network):
        """Resolve the platform name of a network description.

        ``network`` is a dict with 'name' and an optional
        'from_relationship' flag.  When the flag is set, 'name' is
        treated as a node name: the network ID is read from that
        relationship target's runtime properties and resolved back to
        the real network name; otherwise 'name' is returned as-is.

        Raises NonRecoverableError when the relationship, the runtime
        property, or the network itself cannot be found.
        """
        name = None
        if network.get('from_relationship'):
            net_id = None
            found = False
            for relationship in ctx.instance.relationships:
                if relationship.target.node.name == network['name']:
                    props = relationship.target.instance.runtime_properties
                    net_id = props.get(NETWORK_ID)
                    found = True
                    break
            if not found:
                raise NonRecoverableError(
                    'Could not find any relationships to a node called '
                    '"{name}", so {prop} could not be retrieved.'.format(
                        name=network['name'],
                        prop=NETWORK_ID,
                    )
                )
            elif net_id is None:
                raise NonRecoverableError(
                    'Could not get a {prop} runtime property from '
                    'relationship to a node called "{name}".'.format(
                        name=network['name'],
                        prop=NETWORK_ID,
                    )
                )
            if isinstance(net_id, list):
                # We won't alert on switch_distributed mismatch here, as the
                # validation logic handles that
                # Standard port groups will have multiple IDs, but since we
                # use the name, just using the first one will give the right
                # name
                net_id = net_id[0]
            net = self._get_obj_by_id(
                vimtype=vim.Network,
                id=net_id,
            )
            if net is None:
                raise NonRecoverableError(
                    'Could not get network given network ID: {id}'.format(
                        id=net_id,
                    )
                )
            name = net.name
        else:
            name = network['name']
        return name
    #zhutao
    def _get_networks(self, use_cache=False):
        """Fetch all networks as namedtuples, memoised under 'network'.

        Standard port groups become ``network`` tuples; distributed port
        groups are detected by their ID prefix and upgraded to
        ``distributed_network`` tuples carrying 'key' and 'config'
        details from _get_extra_dv_port_group_details.
        """
        if 'network' in self._cache and use_cache:
            return self._cache['network']
        properties = [
            'name',
            'host',
        ]
        net_object = namedtuple(
            'network',
            ['name', 'id', 'host', 'obj'],
        )
        dvnet_object = namedtuple(
            'distributed_network',
            ['name', 'id', 'host', 'obj', 'key', 'config'],
        )
        # Minimal stand-in for a host: only its managed-object ID.
        host_stub = namedtuple(
            'host_stub',
            ['id'],
        )
        results = self._collect_properties(
            vim.Network,
            path_set=properties,
        )
        extra_dv_port_group_details = self._get_extra_dv_port_group_details(
            use_cache
        )
        networks = []
        for item in results:
            if 'name' in item.keys():
                item['name'] = self._get_normalised_name(item['name'], False)
            network = net_object(
                name=item['name'],
                id=item['obj']._moId,
                host=[host_stub(id=h._moId) for h in item['host']],
                obj=item['obj'],
            )
            # Distributed port groups get the richer tuple.
            if self._port_group_is_distributed(network):
                extras = extra_dv_port_group_details[item['obj']._moId]
                network = dvnet_object(
                    name=item['name'],
                    id=item['obj']._moId,
                    obj=item['obj'],
                    host=[host_stub(id=h._moId) for h in item['host']],
                    key=extras['key'],
                    config=extras['config'],
                )
            networks.append(network)
        self._cache['network'] = networks
        return networks
def _get_dv_networks(self, use_cache=True):
return [
network for network in self._get_networks(use_cache)
if self._port_group_is_distributed(network)
]
def _get_standard_networks(self, use_cache=False):
return [
network for network in self._get_networks(use_cache)
if not self._port_group_is_distributed(network)
]
    def _get_extra_dv_port_group_details(self, use_cache=True):
        """Collect per-DV-port-group extras, memoised under
        'dv_pg_extra_detail'.

        Returns a dict keyed by port-group _moId mapping to its 'key'
        and a 'config' namedtuple holding the resolved owning
        distributed virtual switch.  Returns a retry (via
        ctx.operation.retry) if a switch vanished mid-collection.
        """
        if 'dv_pg_extra_detail' in self._cache and use_cache:
            return self._cache['dv_pg_extra_detail']
        properties = [
            'key',
            'config.distributedVirtualSwitch',
        ]
        config_object = namedtuple(
            'dv_port_group_config',
            ['distributedVirtualSwitch'],
        )
        results = self._collect_properties(
            vim.dvs.DistributedVirtualPortgroup,
            path_set=properties,
        )
        dvswitches = self._get_dvswitches(use_cache)
        extra_details = {}
        for item in results:
            # Resolve the owning dvswitch from the cached switch list.
            dvswitch_id = item['config.distributedVirtualSwitch']._moId
            dvswitch = None
            for dvs in dvswitches:
                if dvswitch_id == dvs.id:
                    dvswitch = dvs
                    break
            if dvswitch is None:
                return ctx.operation.retry(
                    'DVswitches on platform changed while getting port '
                    'group details.'
                )
            extra_details[item['obj']._moId] = {
                'key': item['key'],
                'config': config_object(distributedVirtualSwitch=dvswitch),
            }
        self._cache['dv_pg_extra_detail'] = extra_details
        return extra_details
def _get_dvswitches(self, use_cache=True):
properties = [
'name',
'uuid',
]
return self._get_entity(
entity_name='dvswitch',
props=properties,
vimtype=vim.dvs.VmwareDistributedVirtualSwitch,
use_cache=use_cache,
)
def _get_vms(self, use_cache=True, skip_broken_vms=True):
properties = [
'name',
'summary',
'config.hardware.device',
'config.hardware.memoryMB',
'config.hardware.numCPU',
'datastore',
'guest.guestState',
'guest.net',
'network',
]
return self._get_entity(
entity_name='vm',
props=properties,
vimtype=vim.VirtualMachine,
use_cache=use_cache,
other_entity_mappings={
'static': {
'network': self._get_networks(use_cache=use_cache),
'datastore': self._get_datastores(use_cache=use_cache),
},
},
# VMs still being cloned won't return everything we need
skip_broken_objects=skip_broken_vms,
)
def _get_computes(self, use_cache=True):
properties = [
'name',
'resourcePool',
]
return self._get_entity(
entity_name='compute',
props=properties,
vimtype=vim.ComputeResource,
use_cache=use_cache,
other_entity_mappings={
'single': {
'resourcePool': self._get_resource_pools(
use_cache=use_cache,
),
},
},
)
    #zhutao
    def _get_hosts(self, use_cache=False):
        """Fetch ESXi hosts with hardware, network, VM and datastore
        details attached; broken host objects are skipped."""
        properties = [
            'name',
            'parent',
            'hardware.memorySize',
            'hardware.cpuInfo.numCpuThreads',
            'overallStatus',
            'network',
            'summary.runtime.connectionState',
            'summary.runtime.inMaintenanceMode',
            'vm',
            'datastore',
            'config.network',
            # 'config.network.vswitch',
            'configManager',
        ]
        # A host's parent can be either a cluster or a compute, so we handle
        # both here.
        return self._get_entity(
            entity_name='host',
            props=properties,
            vimtype=vim.HostSystem,
            use_cache=use_cache,
            other_entity_mappings={
                'single': {
                    'parent': (self._get_clusters(use_cache=use_cache) +
                               self._get_computes(use_cache=use_cache)),
                },
                'dynamic': {
                    'vm': self._get_vms(use_cache=use_cache),
                    'network': self._get_networks(use_cache=use_cache),
                },
                'static': {
                    'datastore': self._get_datastores(use_cache=use_cache),
                },
            },
            skip_broken_objects=True,
        )
    def _get_hosts_in_tree(self, host_folder):
        """Return the cached (Cloudify-style) host objects for every host
        found anywhere under *host_folder* (subfolders and clusters
        included)."""
        def get_vmware_hosts(tree_node):
            # Traverse the tree to find any hosts.
            hosts = []
            if hasattr(tree_node, "host"):
                # If we find hosts under this node we are done.
                hosts.extend(list(tree_node.host))
            elif hasattr(tree_node, "childEntity"):
                # If there are no hosts look under its children
                for entity in tree_node.childEntity:
                    hosts.extend(get_vmware_hosts(entity))
            return hosts
        # Get all of the hosts in this hosts folder, that includes looking
        # in subfolders and clusters.
        vmware_hosts = get_vmware_hosts(host_folder)
        # Cloudify uses a slightly different style of object to the raw VMWare
        # API. To convert one to the other look up object IDs and compare.
        vmware_host_ids = [host._GetMoId() for host in vmware_hosts]
        cloudify_host_dict = {cloudify_host.obj._GetMoId(): cloudify_host
                              for cloudify_host in self._get_hosts()}
        cloudify_hosts = [cloudify_host_dict[id] for id in vmware_host_ids]
        return cloudify_hosts
def _convert_vmware_port_group_to_cloudify(self, port_group):
port_group_id = port_group._moId
for cloudify_port_group in self._get_networks():
if cloudify_port_group.obj._moId == port_group_id:
break
else:
raise RuntimeError(
"Couldn't find cloudify representation of port group {name}"
.format(name=port_group.name))
return cloudify_port_group
def _get_getter_method(self, vimtype):
getter_method = {
vim.VirtualMachine: self._get_vms,
vim.ResourcePool: self._get_resource_pools,
vim.ClusterComputeResource: self._get_clusters,
vim.Datastore: self._get_datastores,
vim.Datacenter: self._get_datacenters,
vim.Network: self._get_networks,
vim.dvs.VmwareDistributedVirtualSwitch: self._get_dvswitches,
vim.DistributedVirtualSwitch: self._get_dvswitches,
vim.HostSystem: self._get_hosts,
vim.dvs.DistributedVirtualPortgroup: self._get_dv_networks,
vim.Folder: self._get_vm_folders,
}.get(vimtype)
if getter_method is None:
raise NonRecoverableError(
'Cannot retrieve objects for {vimtype}'.format(
vimtype=vimtype,
)
)
return getter_method
    def _collect_properties(self, obj_type, path_set=None):
        """
        Collect properties for managed objects from a view ref

        Check the vSphere API documentation for example on retrieving
        object properties:
            - http://goo.gl/erbFDz

        Args:
            si          (ServiceInstance): ServiceInstance connection
            view_ref (pyVmomi.vim.view.*):/ Starting point of inventory
                                            navigation
            obj_type      (pyVmomi.vim.*): Type of managed object
            path_set               (list): List of properties to retrieve

        Returns:
            A list of properties for the managed objects
        """
        # The container view is created and destroyed around the
        # collection so the server-side view object is not leaked.
        with _ContainerView([obj_type], self.si) as view_ref:
            collector = self.si.content.propertyCollector
            # Create object specification to define the starting point of
            # inventory navigation
            obj_spec = vmodl.query.PropertyCollector.ObjectSpec()
            obj_spec.obj = view_ref
            obj_spec.skip = True
            # Create a traversal specification to identify the path for
            # collection
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec()
            traversal_spec.name = 'traverseEntities'
            traversal_spec.path = 'view'
            traversal_spec.skip = False
            traversal_spec.type = view_ref.__class__
            obj_spec.selectSet = [traversal_spec]
            # Identify the properties to the retrieved
            property_spec = vmodl.query.PropertyCollector.PropertySpec()
            property_spec.type = obj_type
            if not path_set:
                property_spec.all = True
            property_spec.pathSet = path_set
            # Add the object and property specification to the
            # property filter specification
            filter_spec = vmodl.query.PropertyCollector.FilterSpec()
            filter_spec.objectSet = [obj_spec]
            filter_spec.propSet = [property_spec]
            # Retrieve properties
            props = collector.RetrieveContents([filter_spec])
            # Flatten each result into a plain dict of property name ->
            # value, with the managed object itself under 'obj'.
            data = []
            for obj in props:
                properties = {}
                for prop in obj.propSet:
                    properties[prop.name] = prop.val
                properties['obj'] = obj.obj
                data.append(properties)
            return data
def _get_obj_by_name(self, vimtype, name, use_cache=True):
obj = None
entities = self._get_getter_method(vimtype)(use_cache)
name = self._get_normalised_name(name)
for entity in entities:
if name == entity.name.lower():
obj = entity
break
return obj
def _get_obj_by_id(self, vimtype, id, use_cache=False):
obj = None
entities = self._get_getter_method(vimtype)(use_cache)
for entity in entities:
if entity.id == id:
obj = entity
break
return obj
def _wait_for_task(self, task):
while task.info.state in (
vim.TaskInfo.State.queued,
vim.TaskInfo.State.running,
):
time.sleep(TASK_CHECK_SLEEP)
self._logger.debug('Task state {state}'
.format(state=task.info.state))
if task.info.state != vim.TaskInfo.State.success:
raise NonRecoverableError(
"Error during executing task on vSphere: '{0}'"
.format(task.info.error))
def _port_group_is_distributed(self, port_group):
return port_group.id.startswith('dvportgroup')
    def get_vm_networks(self, vm):
        """
        Get details of every network interface on a VM.

        A list of dicts with the following network interface information
        will be returned:
        {
            'name': Name of the network,
            'distributed': True if the network is distributed, otherwise
                           False,
            'mac': The MAC address as provided by vsphere,
        }

        Raises NonRecoverableError when a NIC's network name cannot be
        determined.
        """
        nics = []
        self._logger.debug('Getting NIC list')
        # Any virtual device exposing a macAddress is a network adapter.
        for dev in vm.config.hardware.device:
            if hasattr(dev, 'macAddress'):
                nics.append(dev)
        self._logger.debug('Got NICs: {nics}'.format(nics=nics))
        networks = []
        for nic in nics:
            self._logger.debug('Checking details for NIC {nic}'
                               .format(nic=nic))
            # Distributed NICs are backed by a dvswitch port connection.
            distributed = hasattr(nic.backing, 'port') and isinstance(
                nic.backing.port,
                vim.dvs.PortConnection,
            )
            network_name = None
            if distributed:
                # Map the port group key back to a network name by
                # scanning the networks attached to this VM.
                mapping_id = nic.backing.port.portgroupKey
                self._logger.debug(
                    'Found NIC was on distributed port group with port group '
                    'key {key}'.format(key=mapping_id)
                )
                for network in vm.network:
                    if hasattr(network, 'key'):
                        self._logger.debug(
                            'Checking for match on network with key: '
                            '{key}'.format(key=network.key)
                        )
                        if mapping_id == network.key:
                            network_name = network.name
                            self._logger.debug(
                                'Found NIC was distributed and was on '
                                'network {network}'.format(
                                    network=network_name,
                                )
                            )
            else:
                # If not distributed, the port group name can be retrieved
                # directly
                network_name = nic.backing.deviceName
                self._logger.debug(
                    'Found NIC was on port group {network}'.format(
                        network=network_name,
                    )
                )
            if network_name is None:
                raise NonRecoverableError(
                    'Could not get network name for device with MAC address '
                    '{mac} on VM {vm}'.format(mac=nic.macAddress, vm=vm.name)
                )
            networks.append({
                'name': network_name,
                'distributed': distributed,
                'mac': nic.macAddress,
            })
        return networks
def _get_custom_keys(self, use_cache=True):
if not use_cache or 'custom_keys' not in self._cache:
self._cache['custom_keys'] = (
self.si.content.customFieldsManager.field
)
return self._cache['custom_keys']
def custom_values(self, thing):
return CustomValues(self, thing)
def add_custom_values(self, thing, attributes):
if attributes:
values = self.custom_values(thing)
values.update(attributes)
self._logger.debug('Added custom attributes')
class ServerClient(VsphereClient):
def _get_port_group_names(self):
all_port_groups = self._get_networks()
port_groups = []
distributed_port_groups = []
for port_group in all_port_groups:
if self._port_group_is_distributed(port_group):
distributed_port_groups.append(port_group.name.lower())
else:
port_groups.append(port_group.name.lower())
return port_groups, distributed_port_groups
def _validate_allowed(self, thing_type, allowed_things, existing_things):
"""
Validate that an allowed hosts, clusters, or datastores list is
valid.
"""
self._logger.debug(
'Checking allowed {thing}s list.'.format(thing=thing_type)
)
not_things = set(allowed_things).difference(set(existing_things))
if len(not_things) == len(allowed_things):
return (
'No allowed {thing}s exist. Allowed {thing}(s): {allow}. '
'Existing {thing}(s): {exist}.'.format(
allow=', '.join(allowed_things),
exist=', '.join(existing_things),
thing=thing_type,
)
)
elif len(not_things) > 0:
self._logger.warn(
'One or more specified allowed {thing}s do not exist: '
'{not_things}'.format(
thing=thing_type,
not_things=', '.join(not_things),
)
)
    def _validate_inputs(self,
                         allowed_hosts,
                         allowed_clusters,
                         allowed_datastores,
                         template_name,
                         datacenter_name,
                         resource_pool_name,
                         networks):
        """
        Make sure we can actually continue with the inputs given.
        If we can't, we want to report all of the issues at once.

        Checks: allowed host/cluster/datastore lists, template,
        resource pool, datacenter, and every requested network (with a
        special-cased hint when switch_distributed looks inverted).
        """
        self._logger.debug('Validating inputs for this platform.')
        issues = []
        hosts = self._get_hosts()
        host_names = [host.name for host in hosts]
        if allowed_hosts:
            error = self._validate_allowed('host', allowed_hosts, host_names)
            if error:
                issues.append(error)
        if allowed_clusters:
            cluster_list = self._get_clusters()
            cluster_names = [cluster.name for cluster in cluster_list]
            error = self._validate_allowed(
                'cluster',
                allowed_clusters,
                cluster_names,
            )
            if error:
                issues.append(error)
        if allowed_datastores:
            datastore_list = self._get_datastores()
            datastore_names = [datastore.name for datastore in datastore_list]
            error = self._validate_allowed(
                'datastore',
                allowed_datastores,
                datastore_names,
            )
            if error:
                issues.append(error)
        self._logger.debug('Checking template exists.')
        template_vm = self._get_obj_by_name(vim.VirtualMachine,
                                            template_name)
        if template_vm is None:
            issues.append("VM template {0} could not be found.".format(
                template_name
            ))
        self._logger.debug('Checking resource pool exists.')
        resource_pool = self._get_obj_by_name(
            vim.ResourcePool,
            resource_pool_name,
        )
        if resource_pool is None:
            issues.append("Resource pool {0} could not be found.".format(
                resource_pool_name,
            ))
        self._logger.debug('Checking datacenter exists.')
        datacenter = self._get_obj_by_name(vim.Datacenter,
                                           datacenter_name)
        if datacenter is None:
            issues.append("Datacenter {0} could not be found.".format(
                datacenter_name
            ))
        self._logger.debug(
            'Checking networks exist.'
        )
        port_groups, distributed_port_groups = self._get_port_group_names()
        for network in networks:
            try:
                network_name = self._get_connected_network_name(network)
            except NonRecoverableError as err:
                issues.append(str(err))
                continue
            network_name = self._get_normalised_name(network_name)
            switch_distributed = network['switch_distributed']
            list_distributed_networks = False
            list_networks = False
            # Check network exists and provide helpful message if it doesn't
            # Note that we special-case alerting if switch_distributed appears
            # to be set incorrectly.
            # Use lowercase name for comparison as vSphere appears to be case
            # insensitive for this.
            if switch_distributed:
                error_message = (
                    'Distributed network "{name}" not present on vSphere.'
                )
                if network_name not in distributed_port_groups:
                    if network_name in port_groups:
                        issues.append(
                            (error_message + ' However, this is present as a '
                             'standard network. You may need to set the '
                             'switch_distributed setting for this network to '
                             'false.').format(name=network_name)
                        )
                    else:
                        issues.append(error_message.format(name=network_name))
                        list_distributed_networks = True
            else:
                error_message = 'Network "{name}" not present on vSphere.'
                if network_name not in port_groups:
                    if network_name in distributed_port_groups:
                        issues.append(
                            (error_message + ' However, this is present as a '
                             'distributed network. You may need to set the '
                             'switch_distributed setting for this network to '
                             'true.').format(name=network_name)
                        )
                    else:
                        issues.append(error_message.format(name=network_name))
                        list_networks = True
            if list_distributed_networks:
                issues.append(
                    (' Available distributed networks '
                     'are: {nets}.').format(
                        name=network_name,
                        nets=', '.join(distributed_port_groups),
                    )
                )
            if list_networks:
                issues.append(
                    (' Available networks are: '
                     '{nets}.').format(
                        name=network_name,
                        nets=', '.join(port_groups),
                    )
                )
        if issues:
            issues.insert(0, 'Issues found while validating inputs:')
            message = ' '.join(issues)
            # NOTE(review): validation failures currently log the issues and
            # trigger an endless operation retry with an unrelated "port
            # group xxx" message; the intended hard failure is commented out
            # below. Confirm whether this debug behaviour is still wanted.
            ctx.logger.info('issues:%s'%str(issues))
            return ctx.operation.retry(
                'Waiting for port group xxx to be created on all '
                'hosts.'
            )
            # raise NonRecoverableError(message)
def _validate_windows_properties(
self,
custom_sysprep,
windows_organization,
windows_password,
):
issues = []
if windows_password == '':
# Avoid falsey comparison on blank password
windows_password = True
if windows_password == '':
# Avoid falsey comparison on blank password
windows_password = True
if custom_sysprep is not None:
if windows_password:
issues.append(
'custom_sysprep answers data has been provided, but a '
'windows_password was supplied. If using custom sysprep, '
'no other windows settings are usable.'
)
elif not windows_password and custom_sysprep is None:
if not windows_password:
issues.append(
'Windows password must be set when a custom sysprep is '
'not being performed. Please supply a windows_password '
'using either properties.windows_password or '
'properties.agent_config.password'
)
if len(windows_organization) == 0:
issues.append('windows_organization property must not be blank')
if len(windows_organization) > 64:
issues.append(
'windows_organization property must be 64 characters or less')
if issues:
issues.insert(0, 'Issues found while validating inputs:')
message = ' '.join(issues)
raise NonRecoverableError(message)
    def _add_network(self, network, datacenter):
        """
        Build the device and guest-customization specs for attaching one
        network to a VM being cloned.

        Args:
            network (dict): expects keys 'name', 'switch_distributed',
                'use_dhcp'; optionally 'mac_address'; and for static IP
                configuration 'network' (CIDR), 'gateway' and the IP key.
            datacenter: datacenter wrapper whose ``.obj.network`` is
                scanned for distributed port groups.

        Returns:
            (nicspec, guest_map): the VirtualDeviceSpec to add and the
            matching AdapterMapping for OS customization.

        Raises:
            NonRecoverableError: when the named network cannot be found.
        """
        network_name = network['name']
        normalised_network_name = self._get_normalised_name(network_name)
        switch_distributed = network['switch_distributed']
        mac_address = network.get('mac_address')
        use_dhcp = network['use_dhcp']
        if switch_distributed:
            # Distributed networks are matched by normalised name among
            # the datacenter's port groups.
            for port_group in datacenter.obj.network:
                # Make sure that we are comparing normalised network names.
                normalised_port_group_name = self._get_normalised_name(
                    port_group.name
                )
                if normalised_port_group_name == normalised_network_name:
                    network_obj = \
                        self._convert_vmware_port_group_to_cloudify(port_group)
                    break
            else:
                self._logger.warning(
                    "Network {name} couldn't be found. Only found {networks}."
                    .format(name=network_name, networks=repr([
                        net.name for net in datacenter.obj.network])))
                network_obj = None
        else:
            network_obj = self._get_obj_by_name(
                vim.Network,
                network_name,
            )
        if network_obj is None:
            raise NonRecoverableError(
                'Network {0} could not be found'.format(network_name))
        nicspec = vim.vm.device.VirtualDeviceSpec()
        # Info level as this is something that was requested in the
        # blueprint
        self._logger.info(
            'Adding network interface on {name}'.format(
                name=network_name))
        nicspec.operation = \
            vim.vm.device.VirtualDeviceSpec.Operation.add
        # NOTE(review): adapter type is hard-coded to VMXNET3 (the E1000
        # alternative is kept below) — confirm templates support VMXNET3.
        nicspec.device = vim.vm.device.VirtualVmxnet3()
        # nicspec.device = vim.vm.device.VirtualE1000()
        nicspec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
        nicspec.device.connectable.startConnected = True
        nicspec.device.connectable.allowGuestControl = True
        nicspec.device.connectable.connected = True
        if switch_distributed:
            # Distributed port groups are referenced by switch UUID plus
            # port group key rather than by name.
            info = vim.vm.device.VirtualEthernetCard\
                .DistributedVirtualPortBackingInfo()
            nicspec.device.backing = info
            nicspec.device.backing.port =\
                vim.dvs.PortConnection()
            nicspec.device.backing.port.switchUuid =\
                network_obj.config.distributedVirtualSwitch.uuid
            nicspec.device.backing.port.portgroupKey =\
                network_obj.key
        else:
            nicspec.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
            nicspec.device.backing.network = network_obj.obj
            nicspec.device.backing.deviceName = network_name
        if mac_address:
            nicspec.device.macAddress = mac_address
        if use_dhcp:
            guest_map = vim.vm.customization.AdapterMapping()
            guest_map.adapter = vim.vm.customization.IPSettings()
            guest_map.adapter.ip = vim.vm.customization.DhcpIpGenerator()
        else:
            # Static configuration: derive the netmask from the CIDR.
            nw = IPNetwork(network["network"])
            guest_map = vim.vm.customization.AdapterMapping()
            guest_map.adapter = vim.vm.customization.IPSettings()
            guest_map.adapter.ip = vim.vm.customization.FixedIp()
            guest_map.adapter.ip.ipAddress = network[IP]
            guest_map.adapter.gateway = network["gateway"]
            guest_map.adapter.subnetMask = str(nw.netmask)
        return nicspec, guest_map
def _get_nic_keys_for_remove(self, server):
# get nics keys in template before our changes,
# we must use keys instead of mac addresses, as macs will be changed
# after VM create
keys = []
for device in server.config.hardware.device:
# delete network interface
if hasattr(device, 'macAddress'):
keys.append(device.key)
return keys
    def _remove_nic_keys(self, server, keys):
        """
        Detach the network adapters with the given device keys from the
        VM, one reconfigure task per key.
        """
        self._logger.debug(
            'Removing network adapters {keys} from vm. '
            .format(keys=repr(keys)))
        # remove nics by key
        for key in keys:
            devices = []
            for device in server.config.hardware.device:
                # delete network interface
                if key == device.key:
                    nicspec = vim.vm.device.VirtualDeviceSpec()
                    nicspec.device = device
                    self._logger.debug(
                        'Removing network adapter {key} from vm. '
                        .format(key=device.key))
                    nicspec.operation = \
                        vim.vm.device.VirtualDeviceSpec.Operation.remove
                    devices.append(nicspec)
            if devices:
                # apply changes
                spec = vim.vm.ConfigSpec()
                spec.deviceChange = devices
                task = server.obj.ReconfigVM_Task(spec=spec)
                self._wait_for_task(task)
                # update server object
                # NOTE(review): this rebinds only the local name; the
                # caller's object is not refreshed — confirm intended.
                server = self._get_obj_by_id(
                    vim.VirtualMachine,
                    server.obj._moId,
                    use_cache=False,
                )
    def _update_vm(self, server, cdrom_image=None, remove_networks=False):
        """
        Build the device-change list for reconfiguring a VM: optionally
        attach ``cdrom_image`` and either remove or reconnect existing
        network adapters.

        Args:
            server: VM whose hardware device list is inspected.
            cdrom_image: datastore ISO path to mount, or None.
            remove_networks: when True, every NIC is marked for removal;
                when False, NICs are edited to connect at power-on.

        Returns:
            A list of vim.vm.device.VirtualDeviceSpec changes (possibly
            empty) for a ReconfigVM_Task or CloneSpec.

        Raises:
            NonRecoverableError: when a cdrom must be added but no IDE
                controller has a free slot.
        """
        # update vm with attach cdrom image and remove network adapters
        devices = []
        ide_controller = None
        cdrom_attached = False
        for device in server.config.hardware.device:
            # delete network interface
            if remove_networks and hasattr(device, 'macAddress'):
                nicspec = vim.vm.device.VirtualDeviceSpec()
                nicspec.device = device
                self._logger.warn(
                    'Removing network adapter {mac} from template. '
                    'Template should have no attached adapters.'
                    .format(mac=device.macAddress))
                nicspec.operation = \
                    vim.vm.device.VirtualDeviceSpec.Operation.remove
                devices.append(nicspec)
            elif not remove_networks and hasattr(device, 'macAddress'):
                # Keep the NIC but force it to be connected at power-on.
                nicspec = vim.vm.device.VirtualDeviceSpec()
                nicspec.device = device
                nicspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
                nicspec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
                nicspec.device.connectable.startConnected = True
                nicspec.device.connectable.allowGuestControl = True
                nicspec.device.connectable.connected = True
                nicspec.device.connectable.status = 'ok'
                self._logger.warn(
                    '1607 find network adapter {mac}' .format(mac=device.macAddress))
                devices.append(nicspec)
                ctx.logger.info('1607 devices = %s' % str(devices))
            # remove cdrom when we have cloudinit
            elif (
                isinstance(device, vim.vm.device.VirtualCdrom) and
                cdrom_image
            ):
                self._logger.warn(
                    'Edit cdrom from template. '
                    'Template should have no inserted cdroms.')
                cdrom_attached = True
                # skip if cdrom is already attached
                if isinstance(
                    device.backing, vim.vm.device.VirtualCdrom.IsoBackingInfo
                ):
                    if str(device.backing.fileName) == str(cdrom_image):
                        self._logger.info(
                            "Specified CD image is already mounted.")
                        continue
                # Re-point the existing cdrom at the requested ISO.
                cdrom = vim.vm.device.VirtualDeviceSpec()
                cdrom.device = device
                device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(
                    fileName=cdrom_image)
                cdrom.operation = \
                    vim.vm.device.VirtualDeviceSpec.Operation.edit
                connectable = vim.vm.device.VirtualDevice.ConnectInfo()
                connectable.allowGuestControl = True
                connectable.startConnected = True
                device.connectable = connectable
                devices.append(cdrom)
                ide_controller = device.controllerKey
            # ide controller
            elif isinstance(device, vim.vm.device.VirtualIDEController):
                # skip fully attached controllers (IDE holds two devices)
                if len(device.device) < 2:
                    ide_controller = device.key
        # attach cdrom
        if cdrom_image and not cdrom_attached:
            if not ide_controller:
                raise NonRecoverableError(
                    'IDE controller is required for attach cloudinit cdrom.')
            cdrom_device = vim.vm.device.VirtualDeviceSpec()
            cdrom_device.operation = \
                vim.vm.device.VirtualDeviceSpec.Operation.add
            connectable = vim.vm.device.VirtualDevice.ConnectInfo()
            connectable.allowGuestControl = True
            connectable.startConnected = True
            cdrom = vim.vm.device.VirtualCdrom()
            cdrom.controllerKey = ide_controller
            # Negative key asks vSphere to assign a new device key.
            cdrom.key = -1
            cdrom.connectable = connectable
            cdrom.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(
                fileName=cdrom_image)
            cdrom_device.device = cdrom
            devices.append(cdrom_device)
        return devices
def update_server(self, server, cdrom_image=None, extra_config=None):
# Attrach cdrom image to vm without change networks list
devices_changes = self._update_vm(server, cdrom_image=cdrom_image,
remove_networks=False)
if devices_changes or extra_config:
spec = vim.vm.ConfigSpec()
# changed devices
if devices_changes:
spec.deviceChange = devices_changes
# add extra config
if extra_config and isinstance(extra_config, dict):
self._logger.debug('Extra config: {config}'
.format(config=repr(extra_config)))
for k in extra_config:
spec.extraConfig.append(
vim.option.OptionValue(key=k, value=extra_config[k]))
task = server.obj.ReconfigVM_Task(spec=spec)
self._wait_for_task(task)
    def _get_virtual_hardware_version(self, vm):
        # See https://kb.vmware.com/s/article/1003746 for documentation on VM
        # hardware versions and ESXi version compatibility.
        # NOTE(review): the real lookup (commented out below) is bypassed
        # and a constant 13 is returned, which makes create_server's
        # hardware-upgrade check a no-op at the default minimal version.
        # Confirm whether this hard-coding is still intended.
        return 13
        # return int(vm.config.version.lstrip("vmx-"))
    def create_server(
            self,
            auto_placement,
            cpus,
            datacenter_name,
            memory,
            networks,
            resource_pool_name,
            template_name,
            vm_name,
            windows_password,
            windows_organization,
            windows_timezone,
            agent_config,
            custom_sysprep,
            custom_attributes,
            os_type='linux',
            domain=None,
            dns_servers=None,
            allowed_hosts=None,
            allowed_clusters=None,
            allowed_datastores=None,
            cdrom_image=None,
            vm_folder=None,
            extra_config=None,
            enable_start_vm=True,
            postpone_delete_networks=False,
            minimal_vm_version=13,
    ):
        """
        Clone ``template_name`` into a new VM named ``vm_name``.

        Workflow: ensure the resource pool exists (creating it when it
        does not), validate inputs, pick a host and datastore, build NIC
        and cdrom device changes, assemble the clone spec (with optional
        Linux/Windows OS customization), run the clone task, optionally
        upgrade the hardware version and remove postponed NICs, then
        record runtime properties on the Cloudify node instance.

        Returns the clone task's result (the created VM managed object).
        """
        self._logger.debug(
            "Entering create_server with parameters %s"
            % prepare_for_log(locals()))
        datacenter = self._get_obj_by_name(vim.Datacenter,
                                           datacenter_name)
        existing_pool_id = self._get_obj_by_name(
            vim.ResourcePool,
            resource_pool_name,
            False
        )
        if existing_pool_id is None:
            # Create the resource pool under the first compute resource
            # of the first datacenter, with unlimited expandable shares.
            self._logger.debug(
                "Entering create_resource_pool step")
            dc = self.si.content.rootFolder.childEntity[0]
            cr = dc.hostFolder.childEntity[0]
            rootResourcePool = cr.resourcePool
            configSpec = vim.ResourceConfigSpec()
            cpuAllocationInfo = vim.ResourceAllocationInfo()
            memAllocationInfo = vim.ResourceAllocationInfo()
            sharesInfo = vim.SharesInfo(level='normal')
            cpuAllocationInfo.reservation = 0
            cpuAllocationInfo.expandableReservation = True
            cpuAllocationInfo.shares = sharesInfo
            cpuAllocationInfo.limit = -1
            memAllocationInfo.reservation = 0
            memAllocationInfo.expandableReservation = True
            memAllocationInfo.shares = sharesInfo
            memAllocationInfo.limit = -1
            configSpec.cpuAllocation = cpuAllocationInfo
            configSpec.memoryAllocation = memAllocationInfo
            rp_obj = rootResourcePool.CreateResourcePool(resource_pool_name, configSpec)
            # NOTE(review): busy-poll without a sleep — consider a
            # time.sleep between iterations.
            while True:
                existing_p_id = self._get_obj_by_name(
                    vim.ResourcePool,
                    resource_pool_name,
                    False
                )
                if existing_p_id:
                    self._logger.debug(
                        "Resource_pool created successful!")
                    break
            # pool_id = self._get_obj_by_name(
            #     vim.ResourcePool,
            #     resource_pool_name,
            # )
            # if pool_id is not None:
            #     resource_pool_id = pool_id.id
            # else:
            #     raise NonRecoverableError(
            #         'Resource_pool create failed by this plugin.'
            #     )
        else:
            self._logger.debug(
                "Resource_pool exists!")
        self._validate_inputs(
            allowed_hosts=allowed_hosts,
            allowed_clusters=allowed_clusters,
            allowed_datastores=allowed_datastores,
            template_name=template_name,
            networks=networks,
            resource_pool_name=resource_pool_name,
            datacenter_name=datacenter_name,
        )
        # If cpus and memory are not specified, take values from the template.
        template_vm = self._get_obj_by_name(vim.VirtualMachine, template_name)
        if not cpus:
            cpus = template_vm.config.hardware.numCPU
        if not memory:
            memory = template_vm.config.hardware.memoryMB
        # Correct the network name for all networks from relationships
        for network in networks:
            network['name'] = self._get_connected_network_name(network)
        candidate_hosts = self.find_candidate_hosts(
            datacenter=datacenter,
            resource_pool=resource_pool_name,
            vm_cpus=cpus,
            vm_memory=memory,
            vm_networks=networks,
            allowed_hosts=allowed_hosts,
            allowed_clusters=allowed_clusters,
        )
        host, datastore = self.select_host_and_datastore(
            candidate_hosts=candidate_hosts,
            vm_memory=memory,
            template=template_vm,
            allowed_datastores=allowed_datastores,
        )
        ctx.instance.runtime_properties[
            VSPHERE_SERVER_HYPERVISOR_HOSTNAME] = host.name
        ctx.instance.runtime_properties[
            VSPHERE_SERVER_CLUSTER_NAME] = host.parent.name
        self._logger.debug(
            'Using host {host} and datastore {ds} for deployment.'.format(
                host=host.name,
                ds=datastore.name,
            )
        )
        adaptermaps = []
        resource_pool = self.get_resource_pool(
            host=host,
            resource_pool_name=resource_pool_name,
        )
        if not vm_folder:
            destfolder = datacenter.vmFolder
        else:
            folder = self._get_obj_by_name(vim.Folder, vm_folder)
            if not folder:
                raise NonRecoverableError(
                    'Could not use vm_folder "{name}" as no '
                    'vm folder by that name exists!'.format(
                        name=vm_folder,
                    )
                )
            destfolder = folder.obj
        relospec = vim.vm.RelocateSpec()
        relospec.datastore = datastore.obj
        relospec.pool = resource_pool.obj
        if not auto_placement:
            self._logger.warn(
                'Disabled autoplacement is not recomended for a cluster.'
            )
            relospec.host = host.obj
        # Get list of NIC MAC addresses for removal
        keys_for_remove = []
        if postpone_delete_networks and not enable_start_vm:
            keys_for_remove = self._get_nic_keys_for_remove(template_vm)
        if postpone_delete_networks and enable_start_vm:
            self._logger.info("Using postpone_delete_networks with "
                              "enable_start_vm is unsupported.")
        # attach cdrom image and remove all networks
        devices = self._update_vm(template_vm, cdrom_image=cdrom_image,
                                  remove_networks=not postpone_delete_networks)
        port_groups, distributed_port_groups = self._get_port_group_names()
        for network in networks:
            nicspec, guest_map = self._add_network(network, datacenter)
            devices.append(nicspec)
            adaptermaps.append(guest_map)
        vmconf = vim.vm.ConfigSpec()
        vmconf.numCPUs = cpus
        vmconf.memoryMB = memory
        vmconf.cpuHotAddEnabled = True
        vmconf.memoryHotAddEnabled = True
        vmconf.cpuHotRemoveEnabled = True
        vmconf.deviceChange = devices
        clonespec = vim.vm.CloneSpec()
        clonespec.location = relospec
        clonespec.config = vmconf
        clonespec.powerOn = enable_start_vm
        clonespec.template = False
        # add extra config
        if extra_config and isinstance(extra_config, dict):
            self._logger.debug('Extra config: {config}'
                               .format(config=repr(extra_config)))
            for k in extra_config:
                clonespec.extraConfig.append(
                    vim.option.OptionValue(key=k, value=extra_config[k]))
        if adaptermaps:
            self._logger.debug(
                'Preparing OS customization spec for {server}'.format(
                    server=vm_name,
                )
            )
            customspec = vim.vm.customization.Specification()
            customspec.nicSettingMap = adaptermaps
            if os_type is None or os_type == 'linux':
                ident = vim.vm.customization.LinuxPrep()
                if domain:
                    ident.domain = domain
                ident.hostName = vim.vm.customization.FixedName()
                ident.hostName.name = vm_name
            elif os_type == 'windows':
                if not windows_password:
                    if not agent_config:
                        agent_config = {}
                    windows_password = agent_config.get('password')
                self._validate_windows_properties(
                    custom_sysprep,
                    windows_organization,
                    windows_password,
                )
                if custom_sysprep is not None:
                    ident = vim.vm.customization.SysprepText()
                    ident.value = custom_sysprep
                else:
                    # We use GMT without daylight savings if no timezone is
                    # supplied, as this is as close to UTC as we can do
                    if not windows_timezone:
                        windows_timezone = 90
                    ident = vim.vm.customization.Sysprep()
                    ident.userData = vim.vm.customization.UserData()
                    ident.guiUnattended = vim.vm.customization.GuiUnattended()
                    ident.identification = (
                        vim.vm.customization.Identification()
                    )
                    # Configure userData
                    ident.userData.computerName = (
                        vim.vm.customization.FixedName()
                    )
                    ident.userData.computerName.name = vm_name
                    # Without these vars, customization is silently skipped
                    # but deployment 'succeeds'
                    ident.userData.fullName = vm_name
                    ident.userData.orgName = windows_organization
                    ident.userData.productId = ""
                    # Configure guiUnattended
                    ident.guiUnattended.autoLogon = False
                    ident.guiUnattended.password = (
                        vim.vm.customization.Password()
                    )
                    ident.guiUnattended.password.plainText = True
                    ident.guiUnattended.password.value = windows_password
                    ident.guiUnattended.timeZone = windows_timezone
                # Adding windows options
                options = vim.vm.customization.WinOptions()
                options.changeSID = True
                options.deleteAccounts = False
                customspec.options = options
            elif os_type == 'solaris':
                ident = None
                self._logger.info(
                    'Customization of the Solaris OS is unsupported by '
                    ' vSphere. Guest additions are required/supported.')
            else:
                ident = None
                self._logger.info(
                    'os_type {os_type} was specified, but only "windows", '
                    '"solaris" and "linux" are supported. Customization is '
                    'unsupported.'
                    .format(os_type=os_type)
                )
            if ident:
                customspec.identity = ident
                globalip = vim.vm.customization.GlobalIPSettings()
                if dns_servers:
                    globalip.dnsServerList = dns_servers
                customspec.globalIPSettings = globalip
                clonespec.customization = customspec
        self._logger.info(
            'Cloning {server} from {template}.'.format(
                server=vm_name, template=template_name))
        self._logger.debug('Cloning with clonespec: {spec}'
                           .format(spec=repr(clonespec)))
        task = template_vm.obj.Clone(folder=destfolder,
                                     name=vm_name,
                                     spec=clonespec)
        try:
            self._logger.debug(
                "Task info: {task}".format(task=repr(task)))
            if enable_start_vm:
                self._logger.info('VM created in running state')
                self._wait_vm_running(task, adaptermaps, os_type == "other")
            else:
                self._logger.info('VM created in stopped state')
                self._wait_for_task(task)
        # NOTE(review): 'except task.info.error' is not an exception
        # class; when task.info.error is None/an instance this except
        # clause itself raises TypeError. Confirm intended handling.
        except task.info.error:
            raise NonRecoverableError(
                "Error during executing VM creation task. VM name: \'{0}\'."
                .format(vm_name))
        # VM object created. Now perform final post-creation tasks
        vm = task.info.result
        self._logger.info('VM version: vmx-{old}/vmx-{new}'.format(
            old=self._get_virtual_hardware_version(vm),
            new=minimal_vm_version))
        if self._get_virtual_hardware_version(vm) < minimal_vm_version:
            if enable_start_vm:
                self._logger.info(
                    "Use VM hardware update with `enable_start_vm` is "
                    "unsupported.")
            else:
                self._logger.info("Going to update VM hardware version.")
                try:
                    self._wait_for_task(vm.UpgradeVM_Task(
                        "vmx-{version}".format(version=minimal_vm_version)))
                except NonRecoverableError as e:
                    raise NonRecoverableError(
                        "Could not upgrade the VM to a {version} hardware "
                        "version: {e}"
                        .format(e=str(e), version=minimal_vm_version)
                    )
        # remove nic's by mac
        if keys_for_remove:
            vm = self._get_obj_by_id(
                vim.VirtualMachine,
                task.info.result._moId,
                use_cache=False,
            )
            self._remove_nic_keys(vm, keys_for_remove)
        # get new state of vm
        vm = self._get_obj_by_id(
            vim.VirtualMachine,
            task.info.result._moId,
            use_cache=False,
        )
        ctx.instance.runtime_properties[VSPHERE_SERVER_ID] = vm.obj._moId
        ctx.instance.runtime_properties['name'] = vm_name
        ctx.instance.runtime_properties[NETWORKS] = \
            self.get_vm_networks(vm)
        self._logger.debug(
            'Updated runtime properties with network information')
        self.add_custom_values(vm, custom_attributes or {})
        return task.info.result
def suspend_server(self, server):
if self.is_server_suspended(server.obj):
self._logger.info("Server '{}' already suspended."
.format(server.name))
return
if self.is_server_poweredoff(server):
self._logger.info("Server '{}' is powered off so will not be "
"suspended.".format(server.name))
return
self._logger.debug("Entering server suspend procedure.")
task = server.obj.Suspend()
self._wait_for_task(task)
self._logger.debug("Server is suspended.")
def start_server(self, server):
if self.is_server_poweredon(server):
self._logger.info("Server '{}' already running"
.format(server.name))
return
self._logger.debug("Entering server start procedure.")
task = server.obj.PowerOn()
self._wait_for_task(task)
self._logger.debug("Server is now running.")
def shutdown_server_guest(
self, server,
timeout=TASK_CHECK_SLEEP,
max_wait_time=300,
):
if self.is_server_poweredoff(server):
self._logger.info("Server '{}' already stopped"
.format(server.name))
return
self._logger.debug("Entering server shutdown procedure.")
server.obj.ShutdownGuest()
for _ in range(max_wait_time // timeout):
time.sleep(timeout)
if self.is_server_poweredoff(server):
break
else:
raise NonRecoverableError(
"Server still running after {time}s timeout.".format(
time=max_wait_time,
))
self._logger.debug("Server is now shut down.")
def stop_server(self, server):
if self.is_server_poweredoff(server):
self._logger.info("Server '{}' already stopped"
.format(server.name))
return
self._logger.debug("Entering stop server procedure.")
task = server.obj.PowerOff()
self._wait_for_task(task)
self._logger.debug("Server is now stopped.")
def backup_server(self, server, snapshot_name, description):
if server.obj.snapshot:
snapshot = self.get_snapshot_by_name(
server.obj.snapshot.rootSnapshotList, snapshot_name)
if snapshot:
raise NonRecoverableError(
"Snapshot {snapshot_name} already exists."
.format(snapshot_name=snapshot_name,))
task = server.obj.CreateSnapshot(
snapshot_name, description=description,
memory=False, quiesce=False)
self._wait_for_task(task)
def get_snapshot_by_name(self, snapshots, snapshot_name):
for snapshot in snapshots:
if snapshot.name == snapshot_name:
return snapshot
else:
subsnapshot = self.get_snapshot_by_name(
snapshot.childSnapshotList, snapshot_name)
if subsnapshot:
return subsnapshot
return False
def restore_server(self, server, snapshot_name):
if server.obj.snapshot:
snapshot = self.get_snapshot_by_name(
server.obj.snapshot.rootSnapshotList, snapshot_name)
else:
snapshot = None
if not snapshot:
raise NonRecoverableError(
"No snapshots found with name: {snapshot_name}."
.format(snapshot_name=snapshot_name,))
task = snapshot.snapshot.RevertToSnapshot_Task()
self._wait_for_task(task)
def remove_backup(self, server, snapshot_name):
if server.obj.snapshot:
snapshot = self.get_snapshot_by_name(
server.obj.snapshot.rootSnapshotList, snapshot_name)
else:
snapshot = None
if not snapshot:
raise NonRecoverableError(
"No snapshots found with name: {snapshot_name}."
.format(snapshot_name=snapshot_name,))
if snapshot.childSnapshotList:
subsnapshots = [snap.name for snap in snapshot.childSnapshotList]
raise NonRecoverableError(
"Sub snapshots {subsnapshots} found for {snapshot_name}. "
"You should remove subsnaphots before remove current."
.format(snapshot_name=snapshot_name,
subsnapshots=repr(subsnapshots)))
task = snapshot.snapshot.RemoveSnapshot_Task(True)
self._wait_for_task(task)
def reset_server(self, server):
if self.is_server_poweredoff(server):
self._logger.info(
"Server '{}' currently stopped, starting.".format(server.name))
return self.start_server(server)
self._logger.debug("Entering stop server procedure.")
task = server.obj.Reset()
self._wait_for_task(task)
self._logger.debug("Server has been reset")
def reboot_server(
self, server,
timeout=TASK_CHECK_SLEEP,
max_wait_time=300,
):
if self.is_server_poweredoff(server):
self._logger.info(
"Server '{}' currently stopped, starting.".format(server.name))
return self.start_server(server)
self._logger.debug("Entering reboot server procedure.")
start_bootTime = server.obj.runtime.bootTime
server.obj.RebootGuest()
for _ in range(max_wait_time // timeout):
time.sleep(timeout)
if server.obj.runtime.bootTime > start_bootTime:
break
else:
raise NonRecoverableError(
"Server still running after {time}s timeout.".format(
time=max_wait_time,
))
self._logger.debug("Server has been rebooted")
def is_server_poweredoff(self, server):
return server.obj.summary.runtime.powerState.lower() == "poweredoff"
def is_server_poweredon(self, server):
return server.obj.summary.runtime.powerState.lower() == "poweredon"
def is_server_guest_running(self, server):
return server.obj.guest.guestState == "running"
def delete_server(self, server):
self._logger.debug("Entering server delete procedure.")
if self.is_server_poweredon(server):
self.stop_server(server)
task = server.obj.Destroy()
self._wait_for_task(task)
self._logger.debug("Server is now deleted.")
def get_server_by_name(self, name):
return self._get_obj_by_name(vim.VirtualMachine, name)
def get_server_by_id(self, id):
return self._get_obj_by_id(vim.VirtualMachine, id)
    def find_candidate_hosts(self,
                             resource_pool,
                             vm_cpus,
                             vm_memory,
                             vm_networks,
                             allowed_hosts=None,
                             allowed_clusters=None,
                             datacenter=None):
        """Find hosts suitable for deploying a VM, rated best-first.

        Filters hosts by health, optional host/cluster whitelists,
        resource pool availability and required networks, then rates
        every surviving host by CPU-thread and memory usage ratios.

        :param resource_pool: name of the resource pool the VM requires.
        :param vm_cpus: number of virtual CPUs the VM will be given.
        :param vm_memory: VM memory in MB.
        :param vm_networks: list of dicts with 'name' and
            'switch_distributed' keys describing required networks.
        :param allowed_hosts: optional whitelist of host names.
        :param allowed_clusters: optional whitelist of cluster names.
        :param datacenter: optional datacenter to restrict the search to.
        :return: list of (host, cpu_ratio, memory_ratio) tuples sorted
            most-suitable first.
        :raises NonRecoverableError: when no usable host is found.
        """
        self._logger.debug('Finding suitable hosts for deployment.')
        # Find the hosts in the correct datacenter
        if datacenter:
            hosts = self._get_hosts_in_tree(datacenter.obj.hostFolder)
        else:
            hosts = self._get_hosts()
        host_names = [host.name for host in hosts]
        self._logger.debug(
            'Found hosts: {hosts}'.format(
                hosts=', '.join(host_names),
            )
        )
        if allowed_hosts:
            hosts = [host for host in hosts if host.name in allowed_hosts]
            self._logger.debug(
                'Filtered list of hosts to be considered: {hosts}'.format(
                    hosts=', '.join([host.name for host in hosts]),
                )
            )
        if allowed_clusters:
            cluster_list = self._get_clusters()
            cluster_names = [cluster.name for cluster in cluster_list]
            # NOTE(review): this set is only used for the log line below;
            # ``union`` means it can contain clusters that do not exist.
            # An intersection may have been intended — confirm.
            valid_clusters = set(allowed_clusters).union(set(cluster_names))
            self._logger.debug(
                'Only hosts on the following clusters will be used: '
                '{clusters}'.format(
                    clusters=', '.join(valid_clusters),
                )
            )
        candidate_hosts = []
        for host in hosts:
            # Skip unhealthy / disconnected / maintenance-mode hosts.
            if not self.host_is_usable(host):
                self._logger.warn(
                    'Host {host} not usable due to health status.'.format(
                        host=host.name,
                    )
                )
                continue
            if allowed_clusters:
                cluster = self.get_host_cluster_membership(host)
                if cluster not in allowed_clusters:
                    if cluster:
                        self._logger.warn(
                            'Host {host} is in cluster {cluster}, '
                            'which is not an allowed cluster.'.format(
                                host=host.name,
                                cluster=cluster,
                            )
                        )
                    else:
                        self._logger.warn(
                            'Host {host} is not in a cluster, '
                            'and allowed clusters have been set.'.format(
                                host=host.name,
                            )
                        )
                    continue
            memory_weight = self.host_memory_usage_ratio(host, vm_memory)
            # A negative memory weight only warns — the host stays a
            # candidate and is merely penalised in the sort below.
            if memory_weight < 0:
                self._logger.warn(
                    'Host {host} will not have enough free memory if all VMs '
                    'are powered on.'.format(
                        host=host.name,
                    )
                )
            resource_pools = self.get_host_resource_pools(host)
            resource_pools = [pool.name for pool in resource_pools]
            if resource_pool not in resource_pools:
                self._logger.warn(
                    'Host {host} does not have resource pool {rp}.'.format(
                        host=host.name,
                        rp=resource_pool,
                    )
                )
                continue
            # Networks are compared as (normalised name, distributed?)
            # pairs so standard and distributed nets never collide.
            host_nets = set([
                (
                    self._get_normalised_name(network['name']),
                    network['switch_distributed'],
                )
                for network in self.get_host_networks(host)
            ])
            vm_nets = set([
                (
                    self._get_normalised_name(network['name']),
                    network['switch_distributed'],
                )
                for network in vm_networks
            ])
            nets_not_on_host = vm_nets.difference(host_nets)
            if nets_not_on_host:
                message = 'Host {host} does not have all required networks. '
                missing_standard_nets = ', '.join([
                    net[0] for net in nets_not_on_host
                    if not net[1]
                ])
                missing_distributed_nets = ', '.join([
                    net[0] for net in nets_not_on_host
                    if net[1]
                ])
                if missing_standard_nets:
                    message += 'Missing standard networks: {nets}. '
                if missing_distributed_nets:
                    message += 'Missing distributed networks: {dnets}. '
                self._logger.warn(
                    message.format(
                        host=host.name,
                        nets=missing_standard_nets,
                        dnets=missing_distributed_nets,
                    )
                )
                continue
            self._logger.debug(
                'Host {host} is a candidate for deployment.'.format(
                    host=host.name,
                )
            )
            candidate_hosts.append((
                host,
                self.host_cpu_thread_usage_ratio(host, vm_cpus),
                memory_weight,
            ))
        # Sort hosts based on the best processor ratio after deployment
        if candidate_hosts:
            self._logger.debug(
                'Host CPU ratios: {ratios}'.format(
                    ratios=', '.join([
                        '{hostname}: {ratio} {mem_ratio}'.format(
                            hostname=c[0].name,
                            ratio=c[1],
                            mem_ratio=c[2],
                        ) for c in candidate_hosts
                    ])
                )
            )
            candidate_hosts.sort(
                reverse=True,
                key=lambda host_rating: host_rating[1] * host_rating[2]
                # If more ratios are added, take care that they are proper ratios
                # (i.e. > 0), because memory ([2]) isn't, and 2 negatives would
                # cause badly ordered candidates.
            )
            # Redundant re-check (candidate_hosts is already truthy here).
            if candidate_hosts:
                return candidate_hosts
        else:
            # Note: the 'memory' kwarg below is unused by the format string.
            message = (
                "No healthy hosts could be found with resource pool {pool}, "
                "and all required networks."
            ).format(pool=resource_pool, memory=vm_memory)
            if allowed_hosts:
                message += " Only these hosts were allowed: {hosts}".format(
                    hosts=', '.join(allowed_hosts)
                )
            if allowed_clusters:
                message += (
                    " Only hosts in these clusters were allowed: {clusters}"
                ).format(
                    clusters=', '.join(allowed_clusters)
                )
            raise NonRecoverableError(message)
def get_resource_pool(self, host, resource_pool_name):
"""
Get the correct resource pool object from the given host.
"""
resource_pools = self.get_host_resource_pools(host)
for resource_pool in resource_pools:
if resource_pool.name == resource_pool_name:
return resource_pool
# If we get here, we somehow selected a host without the right
# resource pool. This should not be able to happen.
raise NonRecoverableError(
'Resource pool {rp} not found on host {host}. '
'Pools found were: {pools}'.format(
rp=resource_pool_name,
host=host.name,
pools=', '.join([p.name for p in resource_pools]),
)
)
    def select_host_and_datastore(self,
                                  candidate_hosts,
                                  vm_memory,
                                  template,
                                  allowed_datastores=None):
        """
        Select which host and datastore to use.
        This will assume that the hosts are sorted from most desirable to
        least desirable.

        :param candidate_hosts: (host, cpu_ratio, memory_ratio) tuples,
            best candidate first (see find_candidate_hosts).
        :param vm_memory: VM memory in MB.
        :param template: template VM used to weight datastore free space.
        :param allowed_datastores: optional whitelist of datastore names.
        :return: (host, datastore) pair.
        :raises NonRecoverableError: when no datastore has enough space.
        """
        self._logger.debug('Selecting best host and datastore.')
        best_host = None
        best_datastore = None
        best_datastore_weighting = None
        if allowed_datastores:
            datastore_list = self._get_datastores()
            datastore_names = [datastore.name for datastore in datastore_list]
            # NOTE(review): this set is only used for the log line below;
            # ``union`` can include datastores that do not exist — an
            # intersection may have been intended. Confirm.
            valid_datastores = set(allowed_datastores).union(
                set(datastore_names)
            )
            self._logger.debug(
                'Only the following datastores will be used: '
                '{datastores}'.format(
                    datastores=', '.join(valid_datastores),
                )
            )
        for host in candidate_hosts:
            # Each candidate is a (host, cpu_ratio, mem_ratio) tuple.
            host = host[0]
            self._logger.debug('Considering host {host}'
                               .format(host=host.name))
            datastores = host.datastore
            self._logger.debug(
                'Host {host} has datastores: {ds}'.format(
                    host=host.name,
                    ds=', '.join([ds.name for ds in datastores]),
                )
            )
            if allowed_datastores:
                self._logger.debug(
                    'Checking only allowed datastores: {allow}'.format(
                        allow=', '.join(allowed_datastores),
                    )
                )
                datastores = [
                    ds for ds in datastores
                    if ds.name in allowed_datastores
                ]
                if len(datastores) == 0:
                    self._logger.warn(
                        'Host {host} had no allowed datastores.'.format(
                            host=host.name,
                        )
                    )
                    continue
            self._logger.debug(
                'Filtering for healthy datastores on host {host}'.format(
                    host=host.name,
                )
            )
            healthy_datastores = []
            for datastore in datastores:
                if self.datastore_is_usable(datastore):
                    self._logger.debug(
                        'Datastore {ds} on host {host} is healthy.'.format(
                            ds=datastore.name,
                            host=host.name,
                        )
                    )
                    healthy_datastores.append(datastore)
                else:
                    self._logger.warn(
                        'Excluding datastore {ds} on host {host} as it is '
                        'not healthy.'.format(
                            ds=datastore.name,
                            host=host.name,
                        )
                    )
            # No 'continue' here: an empty healthy list simply yields no
            # candidate datastores below, so the host is skipped naturally.
            if len(healthy_datastores) == 0:
                self._logger.warn(
                    'Host {host} has no usable datastores.'.format(
                        host=host.name,
                    )
                )
            candidate_datastores = []
            for datastore in healthy_datastores:
                weighting = self.calculate_datastore_weighting(
                    datastore=datastore,
                    vm_memory=vm_memory,
                    template=template,
                )
                if weighting is not None:
                    self._logger.debug(
                        'Datastore {ds} on host {host} has suitability '
                        '{weight}'.format(
                            ds=datastore.name,
                            weight=weighting,
                            host=host.name,
                        )
                    )
                    candidate_datastores.append((datastore, weighting))
                else:
                    self._logger.warn(
                        'Datastore {ds} on host {host} does not have enough '
                        'free space.'.format(
                            ds=datastore.name,
                            host=host.name,
                        )
                    )
            if candidate_datastores:
                candidate_host = host
                candidate_datastore, candidate_datastore_weighting = max(
                    candidate_datastores,
                    key=lambda datastore: datastore[1],
                )
                if best_datastore is None:
                    best_host = candidate_host
                    best_datastore = candidate_datastore
                    best_datastore_weighting = candidate_datastore_weighting
                else:
                    if best_datastore_weighting < 0:
                        # Use the most desirable host unless it can't house
                        # the VM's maximum space usage (assuming the entire
                        # virtual disk is filled up), and unless this
                        # datastore can.
                        if candidate_datastore_weighting >= 0:
                            best_host = candidate_host
                            best_datastore = candidate_datastore
                            best_datastore_weighting = (
                                candidate_datastore_weighting
                            )
                if candidate_host == best_host and (
                    candidate_datastore == best_datastore
                ):
                    self._logger.debug(
                        'Host {host} and datastore {datastore} are current '
                        'best candidate. Best datastore weighting '
                        '{weight}.'.format(
                            host=best_host.name,
                            datastore=best_datastore.name,
                            weight=best_datastore_weighting,
                        )
                    )
        if best_host is not None:
            return best_host, best_datastore
        else:
            message = 'No datastores found with enough space.'
            if allowed_datastores:
                message += ' Only these datastores were allowed: {ds}'
                message = message.format(ds=', '.join(allowed_datastores))
            message += ' Only the suitable candidate hosts were checked: '
            message += '{hosts}'.format(hosts=', '.join(
                [candidate[0].name for candidate in candidate_hosts]
            ))
            raise NonRecoverableError(message)
def get_host_free_memory(self, host):
"""
Get the amount of unallocated memory on a host.
"""
total_memory = host.hardware.memorySize // 1024 // 1024
used_memory = 0
for vm in host.vm:
if not vm.summary.config.template:
try:
used_memory += int(vm.summary.config.memorySizeMB)
except StandardError:
self._logger.warning(
"Incorrect value for memorySizeMB. It is {0} but "
"integer value is expected"
.format(vm.summary.config.memorySizeMB))
return total_memory - used_memory
def host_cpu_thread_usage_ratio(self, host, vm_cpus):
"""
Check the usage ratio of actual CPU threads to assigned threads.
This should give a higher rating to those hosts with less threads
assigned compared to their total available CPU threads.
This is used rather than a simple absolute number of cores
remaining to avoid allocating to a less sensible host if, for
example, there are two hypervisors, one with 12 CPU threads and
one with 6, both of which have 4 more virtual CPUs assigned than
actual CPU threads. In this case, both would be rated at -4, but
the actual impact on the one with 12 threads would be lower.
"""
total_threads = host.hardware.cpuInfo.numCpuThreads
total_assigned = vm_cpus
for vm in host.vm:
try:
total_assigned += int(vm.summary.config.numCpu)
except StandardError:
self._logger.warning("Incorrect value for numCpu. It is "
"{0} but integer value is expected"
.format(vm.summary.config.numCpu))
return total_threads / total_assigned
def host_memory_usage_ratio(self, host, new_mem):
"""
Return the proporiton of resulting memory overcommit if a VM with
new_mem is added to this host.
"""
free_memory = self.get_host_free_memory(host)
free_memory_after = free_memory - new_mem
weight = free_memory_after / (host.hardware.memorySize // 1024 // 1024)
return weight
def datastore_is_usable(self, datastore):
"""
Return True if this datastore is usable for deployments,
based on its health.
Return False otherwise.
"""
return datastore.overallStatus in (
vim.ManagedEntity.Status.green,
vim.ManagedEntity.Status.yellow,
) and datastore.summary.accessible
    def calculate_datastore_weighting(self,
                                      datastore,
                                      vm_memory,
                                      template):
        """
        Determine how suitable this datastore is for this deployment.
        Returns None if it is not suitable. Otherwise, returns a weighting
        where higher is better.
        """
        # We assign memory in MB, but free space is in B
        vm_memory = vm_memory * 1024 * 1024
        free_space = datastore.summary.freeSpace
        minimum_disk = template.summary.storage.committed
        maximum_disk = template.summary.storage.uncommitted
        minimum_used = minimum_disk + vm_memory
        maximum_used = minimum_used + maximum_disk
        # NOTE(review): the original space check (commented out below) was
        # deliberately disabled, so minimum_used/maximum_used are computed
        # but unused and the weighting is simply the raw free space — any
        # datastore with non-negative free space is accepted. Confirm this
        # relaxation is still intended before re-enabling the check.
        #zhutao
        if free_space < 0:
            return None
        else:
            return free_space
        #if free_space - minimum_used < 0:
        #    return None
        #else:
        #    return free_space - maximum_used
def recurse_resource_pools(self, resource_pool):
"""
Recursively get all child resource pools given a resource pool.
Return a list of all resource pools found.
"""
resource_pool_names = []
for pool in resource_pool.resourcePool:
resource_pool_names.append(pool)
resource_pool_names.extend(self.recurse_resource_pools(pool))
return resource_pool_names
def get_host_networks(self, host):
"""
Get all networks attached to this host.
Returns a list of dicts in the form:
{
'name': <name of network>,
'switch_distributed': <whether net is distributed>,
}
"""
nets = [
{
'name': net.name,
'switch_distributed': self._port_group_is_distributed(net),
}
for net in host.network
]
return nets
def get_host_resource_pools(self, host):
"""
Get all resource pools available on this host.
This will work for hosts inside and outside clusters.
A list of resource pools will be returned, e.g.
['Resources', 'myresourcepool', 'anotherone']
"""
base_resource_pool = host.parent.resourcePool
resource_pools = [base_resource_pool]
child_resource_pools = self.recurse_resource_pools(base_resource_pool)
resource_pools.extend(child_resource_pools)
return resource_pools
def get_host_cluster_membership(self, host):
"""
Return the name of the cluster this host is part of,
or None if it is not part of a cluster.
"""
if (
isinstance(host.parent, vim.ClusterComputeResource) or
isinstance(host.parent.obj, vim.ClusterComputeResource)
):
return host.parent.name
else:
return None
def host_is_usable(self, host):
"""
Return True if this host is usable for deployments,
based on its health.
Return False otherwise.
"""
healthy_state = host.overallStatus in (
vim.ManagedEntity.Status.green,
vim.ManagedEntity.Status.yellow,
)
connected = host.summary.runtime.connectionState == 'connected'
maintenance = host.summary.runtime.inMaintenanceMode
if healthy_state and connected and not maintenance:
# TODO: Check license state (will be yellow for bad license)
return True
else:
return False
    def resize_server(self, server, cpus=None, memory=None):
        """Reconfigure a VM's CPU count and/or memory size in place.

        :param server: VM wrapper exposing ``.obj`` and ``.name``.
        :param cpus: new CPU count (>= 1), or None to leave unchanged.
        :param memory: new memory in MB (>= 512 and a multiple of 128),
            or None to leave unchanged.
        :raises NonRecoverableError: on invalid values or when the
            reconfigure task fails.
        """
        self._logger.debug("Entering resize reconfiguration.")
        config = vim.vm.ConfigSpec()
        if cpus is not None:
            try:
                cpus = int(cpus)
            except (ValueError, TypeError) as e:
                raise NonRecoverableError(
                    "Invalid cpus value: {}".format(e))
            if cpus < 1:
                raise NonRecoverableError(
                    "cpus must be at least 1. Is {}".format(cpus))
            config.numCPUs = cpus
        if memory is not None:
            try:
                memory = int(memory)
            except (ValueError, TypeError) as e:
                raise NonRecoverableError(
                    "Invalid memory value: {}".format(e))
            if memory < 512:
                raise NonRecoverableError(
                    "Memory must be at least 512MB. Is {}".format(memory))
            # Memory not divisible by 128 is rejected up front rather
            # than letting vSphere fail the task later.
            if memory % 128:
                raise NonRecoverableError(
                    "Memory must be an integer multiple of 128. Is {}".format(
                        memory))
            config.memoryMB = memory
        task = server.obj.Reconfigure(spec=config)
        try:
            self._wait_for_task(task)
        except NonRecoverableError as e:
            # Memory hot-add failures match on the vCenter fault text;
            # see the KB link for the likely cause.
            if 'configSpec.memoryMB' in e.args[0]:
                raise NonRecoverableError(
                    "Memory error resizing Server. May be caused by "
                    "https://kb.vmware.com/kb/2008405 . If so the Server may "
                    "be resized while it is switched off.",
                    e,
                )
            raise
        self._logger.debug(
            "Server '%s' resized with new number of "
            "CPUs: %s and RAM: %s." % (server.name, cpus, memory))
    def get_server_ip(self, vm, network_name, ignore_local=True):
        """Return an IP address of *vm* on the named network.

        Guest NICs that are not attached to a vSphere network are skipped.
        Returns None implicitly when no matching network with at least one
        address is found.

        :param vm: virtual machine object with ``guest.net`` populated.
        :param network_name: network to look on; compared lowercased
            against ``_get_normalised_name`` output — presumably that
            helper lowercases too, confirm.
        :param ignore_local: forwarded to get_ip_from_vsphere_nic_ips;
            assumed to skip link-local addresses — TODO confirm.
        """
        self._logger.debug(
            'Getting server IP from {network}.'.format(
                network=network_name,
            )
        )
        for network in vm.guest.net:
            if not network.network:
                self._logger.warn(
                    'Ignoring device with MAC {mac} as it is not on a '
                    'vSphere network.'.format(
                        mac=network.macAddress,
                    )
                )
                continue
            if (
                network.network and
                network_name.lower() == self._get_normalised_name(
                    network.network) and
                len(network.ipAddress) > 0
            ):
                ip_address = get_ip_from_vsphere_nic_ips(network, ignore_local)
                # This should be debug, but left as info until CFY-4867 makes
                # logs more visible
                self._logger.info(
                    'Found {ip} from device with MAC {mac}'.format(
                        ip=ip_address,
                        mac=network.macAddress,
                    )
                )
                return ip_address
def _task_guest_state_is_running(self, task):
try:
self._logger.debug("VM state: {state}".format(
state=task.info.result.guest.guestState))
return task.info.result.guest.guestState == "running"
except vmodl.fault.ManagedObjectNotFound:
raise NonRecoverableError(
'Server failed to enter running state, task has been deleted '
'by vCenter after failing.'
)
def _task_guest_has_networks(self, task, adaptermaps):
# We should possibly be checking that it has the number of networks
# expected here, but investigation will be required to confirm this
# behaves as expected (and the VM state check later handles it anyway)
if len(adaptermaps) == 0:
return True
else:
if len(task.info.result.guest.net) > 0:
return True
else:
return False
    def _wait_vm_running(self, task, adaptermaps, other=False):
        """Wait for a deploy task's VM to be running and have networks.

        NOTE(review): ``other`` is unconditionally overwritten to True
        below, so every call returns early and the guest-state/network
        polling never runs — the ``other`` parameter is effectively dead.
        Confirm whether this hard-coded skip is still intended.
        """
        # skip guests check for other
        other = True
        if other:
            self._logger.info("Skip guest checks for other os")
            return
        # wait for task finish
        self._wait_for_task(task)
        # check VM state
        while not self._task_guest_state_is_running(task):
            time.sleep(TASK_CHECK_SLEEP)
        # check guest networks
        if not self._task_guest_has_networks(task, adaptermaps):
            time.sleep(TASK_CHECK_SLEEP)
class NetworkClient(VsphereClient):
def get_host_list(self, force_refresh=True):
# Each invocation of this takes up to a few seconds, so try to avoid
# calling it too frequently by caching
if hasattr(self, 'host_list') and not force_refresh:
return self.host_list
self.host_list = self._get_hosts()
return self.host_list
def delete_port_group(self, name):
self._logger.debug("Deleting port group {name}.".format(name=name))
for host in self.get_host_list():
host.configManager.networkSystem.RemovePortGroup(name)
self._logger.debug("Port group {name} was deleted.".format(name=name))
    def get_vswitches(self):
        """Collect vswitch names across all hosts.

        NOTE(review): the comment below states only vswitches present on
        EVERY host should be listed, but the accumulation uses ``union``
        (present on ANY host). An intersection may have been intended;
        however this fork creates port groups on a single esxi host, which
        union permits — confirm which behaviour callers rely on before
        changing it.
        """
        self._logger.debug('Getting list of vswitches')
        # We only want to list vswitches that are on all hosts, as we will try
        # to create port groups on the same vswitch on every host.
        vswitches = set()
        for host in self._get_hosts(use_cache=False):
            conf = host.config
            current_host_vswitches = set()
            for vswitch in conf.network.vswitch:
                current_host_vswitches.add(vswitch.name)
            if len(vswitches) == 0:
                vswitches = current_host_vswitches
            else:
                vswitches = vswitches.union(current_host_vswitches)
        self._logger.debug('Found vswitches: {vswitches}'
                           .format(vswitches=vswitches))
        return vswitches
def get_vswitch_mtu(self, vswitch_name):
mtu = -1
for host in self._get_hosts():
conf = host.config
for vswitch in conf.network.vswitch:
if vswitch_name == vswitch.name:
if mtu == -1:
mtu = vswitch.mtu
elif mtu > vswitch.mtu:
mtu = vswitch.mtu
return mtu
def get_dvswitches(self):
self._logger.debug('Getting list of dvswitches')
# This does not currently address multiple datacenters (indeed,
# much of this code will probably have issues in such an environment).
dvswitches = self._get_dvswitches()
dvswitches = [dvswitch.name for dvswitch in dvswitches]
self._logger.debug('Found dvswitches: {dvswitches}'
.format(dvswitches=dvswitches))
return dvswitches
    def create_port_group(self, port_group_name, vlan_id, vswitch_name,esxi_ip=''):
        """Create a port group on *vswitch_name*, driven by a retryable
        state machine stored in ctx runtime properties.

        NETWORK_STATUS moves preparing -> creating -> created; the list of
        hosts already handled is kept in NETWORK_CREATE_ON so Cloudify
        retries are idempotent.

        NOTE(review): creation is restricted to the single host matching
        ``esxi_ip``, but completion is gated on
        ``port_group_is_on_all_hosts`` — if the group only ever exists on
        one host this operation may retry indefinitely. Confirm.

        :param port_group_name: name for the new port group.
        :param vlan_id: VLAN id to assign.
        :param vswitch_name: vswitch to attach the port group to.
        :param esxi_ip: name of the only host to create the group on.
        :raises NonRecoverableError: when the vswitch name is invalid.
        """
        self._logger.debug("Entering create port procedure.")
        runtime_properties = ctx.instance.runtime_properties
        if NETWORK_STATUS not in runtime_properties.keys():
            runtime_properties[NETWORK_STATUS] = 'preparing'
        vswitches = self.get_vswitches()
        if runtime_properties[NETWORK_STATUS] == 'preparing':
            if vswitch_name not in vswitches:
                if len(vswitches) == 0:
                    raise NonRecoverableError(
                        'No valid vswitches found. '
                        'Every physical host in the datacenter must have the '
                        'same named vswitches available when not using '
                        'distributed vswitches.'
                    )
                else:
                    raise NonRecoverableError(
                        '{vswitch} was not a valid vswitch name. The valid '
                        'vswitches are: {vswitches}'.format(
                            vswitch=vswitch_name,
                            vswitches=', '.join(vswitches),
                        )
                    )
        # update mtu
        ctx.instance.runtime_properties[NETWORK_MTU] = self.get_vswitch_mtu(
            vswitch_name)
        if runtime_properties[NETWORK_STATUS] in ('preparing', 'creating'):
            runtime_properties[NETWORK_STATUS] = 'creating'
            if NETWORK_CREATE_ON not in runtime_properties.keys():
                runtime_properties[NETWORK_CREATE_ON] = []
            # hosts = [
            #     #zhutao
            #     host for host in self.get_host_list(force_refresh=True)
            #     if host.name not in runtime_properties[NETWORK_CREATE_ON]
            # ]
            hosts = self.get_host_list()
            for host in hosts:
                # Only the host whose name matches esxi_ip is touched.
                if host.name == esxi_ip:
                    ctx.logger.info('host.name2970 = :%s' % str(host.name))
                    network_system = host.configManager.networkSystem
                    specification = vim.host.PortGroup.Specification()
                    specification.name = port_group_name
                    specification.vlanId = vlan_id
                    specification.vswitchName = vswitch_name
                    vswitch = network_system.networkConfig.vswitch[0]
                    for v_switch in network_system.networkConfig.vswitch:
                        if vswitch_name == v_switch.name:
                            vswitch = v_switch
                            break
                    # Inherit the vswitch policy, then open up the
                    # security settings for this port group.
                    network_policy = deepcopy(vswitch.spec.policy)
                    network_policy.security = vim.host.NetworkPolicy.SecurityPolicy()
                    network_policy.security.allowPromiscuous = True
                    network_policy.security.macChanges = True
                    network_policy.security.forgedTransmits = True
                    specification.policy = network_policy
                    self._logger.debug(
                        'Adding port group {group_name} to vSwitch '
                        '{vswitch_name} on host {host_name}'.format(
                            group_name=port_group_name,
                            vswitch_name=vswitch_name,
                            host_name=host.name,
                        )
                    )
                    self._logger.debug("create port group \n\n"
                                       "spec parameters is {arg} \n\n"
                                       "".format(arg=str(specification)))
                    try:
                        network_system.AddPortGroup(specification)
                    except vim.fault.AlreadyExists:
                        # We tried to create it on a previous pass, but didn't see
                        # any confirmation (e.g. due to a problem communicating
                        # with the vCenter)
                        # However, we shouldn't have reached this point if it
                        # existed before we tried to create it anywhere, so it
                        # should be safe to proceed.
                        pass
                    runtime_properties[NETWORK_CREATE_ON].append(host.name)
        if self.port_group_is_on_all_hosts(port_group_name):
            runtime_properties[NETWORK_STATUS] = 'created'
        else:
            return ctx.operation.retry(
                'Waiting for port group {name} to be created on all '
                'hosts.'.format(
                    name=port_group_name,
                )
            )
def is_pnic_used_by_vswitch(self,
name,
vswitch='',
hosts=[],
esxi_ip=''):
"""
judge physical nic used by virtual switch or not.
if vswitch name is given,
then ignore pnic in this vswitch.
:param name: pnic name
:param vswitch: a vswitch name.
:param hosts:
:return:
"""
exist = False
pnics = []
hostnames = []
for host in hosts:
if host.name == esxi_ip:
ctx.logger.info('host = %s' % str(host))
hostnames.append(host.name)
for vs, host in self.get_vswitch_by_name(esxi_ip=esxi_ip):
if (hostnames and
host.name not in hostnames):
continue
if (vswitch != '' and
vs.name == vswitch):
continue
for pnic in vs.pnic:
if pnic not in pnics:
pnics.append(pnic)
for pnic in pnics:
if name == pnic.rsplit('-', 1)[1]:
exist = True
break
return exist
def get_vswitch_by_name(self,
name='',
hosts=[],
use_cache=False,esxi_ip=''):
"""
hosts :get vswitchs from hosts by name.
:param name: vswitch name.
if name == '' means get all vswitch
:param hosts: a list of host object
:return: a set of vswitchs and the host
where the vswitch is located
"""
ctx.logger.debug("Getting vswitch by name")
result = []
if hosts == []:
hosts = self.get_host_list(
force_refresh=not use_cache
)
if hosts == []:
raise NonRecoverableError(
"Get vswitch by name failed: "
"no avaliable host to get vswitch"
)
for host in hosts:
if host.name == esxi_ip:
ctx.logger.info('host.name3090 = %s' % (str(host.name)))
vswitchs = host.configManager.networkSystem.networkInfo.vswitch
for vswitch in vswitchs:
if (name == '' or
name.lower() == vswitch.name.lower()):
ctx.logger.debug(
"Vswitch (s) info: \n %s." % "".join(
"%s: %s \n" % item
for item in vars(vswitch).items()
)
)
result.append((vswitch, host))
return result
    def create_vswitch(self, vswitch_name, bondbridge,esxi_ip=''):
        """Create a standard vswitch named *vswitch_name* on host *esxi_ip*.

        :param vswitch_name: name for the new vswitch (URL-encoded names
            are unquoted before lookup).
        :param bondbridge: comma-separated physical nic names to uplink.
        :param esxi_ip: name of the host to create the vswitch on.
        :raises NonRecoverableError: if a vswitch by that name exists, no
            nics were supplied, or a nic is already used by a vswitch.
        """
        check_name_for_special_characters(vswitch_name)
        mtu = 1500
        # NOTE(review): urllib.unquote is Python-2-only (urllib.parse on
        # Python 3) — confirm the target interpreter for this module.
        existing_id = self.get_vswitch_by_name(
            name=urllib.unquote(vswitch_name),esxi_ip=esxi_ip
        )
        # if use_existing_resource:
        #     if not existing_id:
        #         raise NonRecoverableError(
        #             'Could not use existing virtual switch "{name}" '
        #             'as no virtual switch by that name exists!'
        #             ''.format(name=vswitch_name)
        #         )
        if existing_id:
            raise NonRecoverableError(
                'Could not create new virtual switch "{name}" '
                'as a virtual switch by that name already exists'
                ''.format(
                    name=vswitch_name,
                )
            )
        nics = bondbridge.split(',')
        if nics[0] == '':
            raise NonRecoverableError(
                "Create virtual switch failed :"
                "Lack of necessary parameters nicdevice"
            )
        else:
            nicdevice = nics
        # Every requested uplink nic must be currently unused.
        for nic in nicdevice:
            if self.is_pnic_used_by_vswitch(
                urllib.unquote(nic),esxi_ip=esxi_ip
            ):
                raise NonRecoverableError(
                    "Create virtual switch failed :"
                    "physical nic {name} used by vswitch"
                    "".format(name=nic)
                )
        #
        # model = bondbridge.get('linkdiscovermodel', 'listen')
        # if model not in LINK_DISCOVERY_PROTOCOL_OPERATION:
        #     raise NonRecoverableError(
        #         "The content of operation "
        #         "cannot be identified."
        #         "operation is {ops}, "
        #         "But available value is {link}"
        #         "".format(ops=model,
        #                   link = str(LINK_DISCOVERY_PROTOCOL_OPERATION))
        #     )
        # protocol = bondbridge.get('linkdiscoverprotocol', 'cdp')
        # if protocol not in LINK_DISCOVERY_PROTOCOL_PROTOCOL:
        #     raise NonRecoverableError(
        #         "The content of protocol "
        #         "cannot be identified."
        #         "operation is {pro}, "
        #         "But available value is {link}"
        #         "".format(pro=protocol,
        #                   link=str(LINK_DISCOVERY_PROTOCOL_PROTOCOL))
        #     )
        # Policy dicts are left empty: the vswitch inherits defaults.
        security = {}
        # for key,value in SECURITY_POLICY_CONVERT.items():
        #     if key in securitypolicy:
        #         security.setdefault(value, securitypolicy[key])
        nicteaming = {}
        nicteaming['activeNic'] = []
        # for key,value in NIC_TEAMING_POLICY_CONVERT.items():
        #     if (key in nicteamingpolicy and
        #             isinstance(nicteamingpolicy[key], bool)):
        #         nicteaming.setdefault(value, nicteamingpolicy[key])
        #
        #     elif(key == 'loadbalance' and
        #          nicteamingpolicy[key] in LOAD_BALANCE):
        #         nicteaming.setdefault(value, nicteamingpolicy[key])
        #
        #     elif key == 'rollingorder':
        #         nicteaming.setdefault(value,nicteamingpolicy.get(key, False))
        #
        #     elif key in ['activenic', 'standbynic']:
        #         nic = nicteamingpolicy.get(key, '').split(',')
        #         if nic[0] == '':
        #             nicteaming.setdefault(value, [])
        #         else:
        #             nicteaming.setdefault(value, nic)
        # All uplink nics are active when none were chosen explicitly.
        if nicteaming['activeNic'] == []:
            nicteaming['activeNic'] = nicdevice
        shaping = {}
        # if ('trafficshaping' in trafficshapingpolicy and
        #         isinstance(trafficshapingpolicy['trafficshaping'], bool)):
        #     shaping['enabled'] = trafficshapingpolicy['trafficshaping']
        #
        # if shaping.get('enabled', False):
        #     shaping['averageBandwidth'] = \
        #         trafficshapingpolicy.get('averagebandwidth', 100000)
        #     shaping['peakBandwidth'] = \
        #         trafficshapingpolicy.get('peakbandwidth', 100000)
        #     shaping['burstSize'] = \
        #         trafficshapingpolicy.get('burstsize', 102400)
        ctx.logger.debug(
            'Creating called {name} and mtu {mtu} on '
            '{nics}'.format(
                name=vswitch_name,
                mtu=mtu,
                nics=str(nicdevice),
            )
        )
        newvswitchs = self.create_standard_vswitch(
            mtu=mtu,
            name=vswitch_name,
            nicdevice=nicdevice,
            operation='listen',
            protocol='cdp',
            numports=1024,
            securitypolicy=security,
            shapingpolicy=shaping,
            nicteamingpolicy=nicteaming,
            esxi_ip=esxi_ip
        )
        nics = []
        for vswitch, host in newvswitchs:
            nics.extend(vswitch.pnic)
        runtime_properties = ctx.instance.runtime_properties
        runtime_properties[VSWITCH_NAME] = vswitch_name
        runtime_properties[VSWITCH_PNIC] = nics
        ctx.logger.info('Successfully created virtual switch: {name}'
                        ''.format(name=vswitch_name))
def get_host_by_nics_name(self, nicdevices,esxi_ip=''):
"""
Determine whether more than one nic devices
exists at the same time in a host
:param nicdevices: a list of nic device's name
:return:a list of hosts object
"""
hosts = []
for host in self.get_host_list():
if host.name == esxi_ip:
#ctx.logger.info('host.name3257 = %s' % str(host.name))
nicnames = []
for nic in host.config.network.pnic:
nicnames.append(nic.device)
for nicname in nicdevices:
if nicname not in nicnames:
break
else:
hosts.append(host)
return hosts
def _create_bond_bridge(self,
nicdevice,
bondbridge=None,
interval=1,
protocol='cdp',
operation='listen'
):
"""
:param nicdevice: a list of physical nic name
:param bondbridge:
:param interval:
:param protocol: available value: 'cdp' 'lldp'
:param operation: available value:
'advertise' 'listen' 'both' 'down'
:return:
"""
if not bondbridge:
bondbridge = \
vim.host.VirtualSwitch.BondBridge()
bondbridge.nicDevice = nicdevice
if not bondbridge.beacon:
bondbridge.beacon = \
vim.host.VirtualSwitch.BeaconConfig()
bondbridge.beacon.interval = interval
if not bondbridge.linkDiscoveryProtocolConfig:
bondbridge.linkDiscoveryProtocolConfig = \
vim.host.LinkDiscoveryProtocolConfig()
bondbridge.linkDiscoveryProtocolConfig.protocol = protocol
bondbridge.linkDiscoveryProtocolConfig.operation = operation
return bondbridge
    def _create_network_policy(self,
                               netpolicy=None,
                               securitypolicy ={},
                               offloadpolicy = {},
                               shapingpolicy = {},
                               nicteamingpolicy={}):
        """
        The default is to inherit from vswitch
        :param securitypolicy = {
                   allowPromiscuous = true/false/None
                   macChanges = true/false/None,
                   forgedTransmits = true/false/None
               },
               securitypolicy is {} means inherit from vswitch
        :param nicteamingpolicy = {
                   policy = 'loadbalance_ip'/
                            'loadbalance_srcmac'/
                            'loadbalance_srcid'/
                            'failover_explicit',
                   reversePolicy = true,
                   notifySwitches = true,
                   rollingOrder = false,
                   checkSpeed = 'minimum',
                   speed = 10,
                   checkDuplex = false,
                   fullDuplex = false,
                   checkErrorPercent = false,
                   percentage = 0,
                   checkBeacon = false(link)/true(beaconing)
                   activeNic = (str) [
                       'vmnic2'
                   ],
                   standbyNic = (str) []
               },
               nicteamingpolicy is {} means inherit from vswitch
        :param offloadpolicy = {
                   csumOffload = <unset>,
                   tcpSegmentation = <unset>,
                   zeroCopyXmit = <unset>
               }
               offloadpolicy is {} means inherit from vswitch
        :param shapingpolicy = {
                   enabled = <unset>,
                   averageBandwidth = <unset>Kb/s,
                   peakBandwidth = <unset>Kb/s,
                   burstSize = <unset>KB
               }
               shapingpolicy is {} means inherit from vswitch
        :return:

        NOTE(review): the dict parameters use mutable default arguments
        ({}); they are only read here, so this is harmless today, but the
        defaults would be shared across calls if anything ever mutated
        them — consider switching to None defaults.
        """
        if not netpolicy:
            netpolicy = vim.host.NetworkPolicy()
        #config security policy
        # An empty securitypolicy leaves .security unset (inherited).
        if (not netpolicy.security and
                securitypolicy != {}):
            netpolicy.security = \
                vim.host.NetworkPolicy.SecurityPolicy()
        for key, value in securitypolicy.items():
            if (hasattr(netpolicy.security, key) and
                    isinstance(value, bool)):
                setattr(netpolicy.security, key, value)
            else:
                raise NonRecoverableError(
                    "There is no key-value pair "
                    "in securityPolicy={all}. "
                    "key={key}, value={val}"
                    "".format(all=str(securitypolicy),
                              key=key,
                              val=value
                              )
                )
        #config nic teaming policy
        if (not netpolicy.nicTeaming and
                nicteamingpolicy != {}):
            netpolicy.nicTeaming = \
                vim.host.NetworkPolicy.NicTeamingPolicy()
        # failureCriteria and nicOrder sub-objects are created lazily,
        # only when the caller supplies keys that need them.
        if (hasattr(netpolicy.nicTeaming, 'failureCriteria') and
                not netpolicy.nicTeaming.failureCriteria ):
            netpolicy.nicTeaming.failureCriteria = \
                vim.host.NetworkPolicy.NicFailureCriteria()
        if (hasattr(netpolicy.nicTeaming, 'nicOrder') and
                not netpolicy.nicTeaming.nicOrder and
                ('activeNic' in nicteamingpolicy or
                 'standbyNic' in nicteamingpolicy)):
            netpolicy.nicTeaming.nicOrder = \
                vim.host.NetworkPolicy.NicOrderPolicy()
        for key, value in nicteamingpolicy.items():
            if (key == 'policy' and
                    value in LOAD_BALANCE):
                netpolicy.nicTeaming.policy = value
            elif (key in NIC_TEAMING_FAILURE_CRITERIA.keys() and
                  isinstance(value, NIC_TEAMING_FAILURE_CRITERIA[key])):
                setattr(netpolicy.nicTeaming.failureCriteria, key, value)
            elif (key in ['activeNic', 'standbyNic'] and
                  isinstance(value, list)):
                setattr(netpolicy.nicTeaming.nicOrder, key, value)
            elif (hasattr(netpolicy.nicTeaming, key) and
                  isinstance(value, bool)):
                setattr(netpolicy.nicTeaming, key, value)
            else:
                raise NonRecoverableError(
                    "There is no key-value pair "
                    "in nicteamingpolicy={all}. "
                    "key={key}, value={val}"
                    "".format(all=str(nicteamingpolicy),
                              key=key,
                              val=value)
                )
        #config offload policy
        if (not netpolicy.offloadPolicy and
                offloadpolicy != {}):
            netpolicy.offloadPolicy = \
                vim.host.NetOffloadCapabilities()
        for key, value in offloadpolicy.items():
            if (hasattr(netpolicy.offloadPolicy, key) and
                    isinstance(value, bool)):
                setattr(netpolicy.offloadPolicy,
                        key, value)
            else:
                raise NonRecoverableError(
                    "There is no key-value pair "
                    "in offloadpolicy={all}. "
                    "key={key}, value={val}"
                    "".format(all=str(offloadpolicy),
                              key=key,
                              val=value)
                )
        #config shaping policy
        if (not netpolicy.shapingPolicy and
                "enabled" in shapingpolicy):
            netpolicy.shapingPolicy = \
                vim.host.NetworkPolicy.TrafficShapingPolicy()
        if shapingpolicy.get('enabled'):
            for key, value in shapingpolicy.items():
                if hasattr(netpolicy.shapingPolicy, key):
                    setattr(netpolicy.shapingPolicy,
                            key, value)
                else:
                    raise NonRecoverableError(
                        "There is no key-value pair"
                        "in shapingpolicy={all}."
                        "key={key}, value={val}"
                        "".format(all=str(shapingpolicy),
                                  key=key,
                                  val=value)
                    )
        elif shapingpolicy.get('enabled') == False:
            netpolicy.shapingPolicy.enabled = False
        return netpolicy
def create_standard_vswitch(self,
                            mtu,
                            name,
                            protocol,
                            operation,
                            nicdevice,
                            numports,
                            securitypolicy,
                            shapingpolicy,
                            nicteamingpolicy,
                            esxi_ip=''):
    """
    Create a standard vswitch on the host whose name equals esxi_ip.

    :param name: a new standard vswitch name
    :param nicdevice: a list of uplink nic name
    :param mtu: MTU for the new vswitch
    :param numports: number of ports on the new vswitch
    :param operation: 'advertise',
                      'listen',
                      'both',
                      'down'
    :param protocol: 'cdp' or 'lldp'
    :param securitypolicy = {
            allowPromiscuous = true/false/None,
            macChanges = true/false/None,
            forgedTransmits = true/false/None
        },
    :param nicteamingpolicy = {
            policy = 'loadbalance_ip'/
                     'loadbalance_srcmac'/
                     'loadbalance_srcid'/
                     'failover_explicit',
            notifySwitches = true,
            reversePolicy = true,
            rollingOrder = false,
            checkBeacon = false(link)/true(beaconing)
            activeNic = (str) [
                'vmnic2'
            ],
            standbyNic = (str) []
        },
    :param shapingpolicy = {
            enabled = <unset>,
            averageBandwidth = <unset>Kb/s,
            peakBandwidth = <unset>Kb/s,
            burstSize = <unset>KB
        }
    :param esxi_ip: name/IP of the single host the vswitch is created on
    :return: vswitch entries found after creation (result of
        get_vswitch_by_name with use_cache=False)
    :raises NonRecoverableError: when no host carries the requested
        physical nics, or when AddVirtualSwitch fails for any reason
        other than the vswitch already existing
    """
    ctx.logger.debug("Entering create vswitch procedure.")
    # Only referenced by the commented-out bookkeeping below; kept for
    # when that code is re-enabled.
    runtime_properties = ctx.instance.runtime_properties
    #check nic devices exists or not
    hosts = self.get_host_by_nics_name(
        nicdevices=nicdevice,esxi_ip=esxi_ip
    )
    if hosts == []:
        raise NonRecoverableError(
            "The appropriate host creation "
            "Vswitch could not be found. "
            "There may not be a Vswtich "
            "corresponding physical network port."
            "physical nic is {nic}".format(nic=str(nicdevice))
        )
    spec = vim.host.VirtualSwitch.Specification()
    spec.mtu = mtu
    spec.numPorts = numports
    # The bond bridge ties the vswitch to its physical uplinks and the
    # link-discovery (cdp/lldp) settings.
    spec.bridge = self._create_bond_bridge(
        nicdevice=nicdevice,
        operation=operation,
        protocol=protocol
    )
    # These properties are not open to users,
    # but they are necessary for creating vswitch.
    # nicteamingpolicy.update(NIC_TEAMING_FAILURE_CRITERIA_VALUE)
    #
    # spec.policy = self._create_network_policy(
    #     securitypolicy=securitypolicy,
    #     shapingpolicy=shapingpolicy,
    #     nicteamingpolicy=nicteamingpolicy,
    #     offloadpolicy=DEFAULT_OFFLOAD_POLICY
    # )
    ctx.logger.debug("create vswitch \n\n"
                     "spec parameters is {arg} \n\n"
                     "".format(arg=str(spec)))
    for host in hosts:
        # Only act on the host explicitly requested via esxi_ip.
        if host.name == esxi_ip:
            ctx.logger.info('host.name3572 = %s' % str(host.name))
            # if ('created_on' in runtime_properties and
            #         host.name not in runtime_properties['created_on']):
            #     continue
            networksys = host.configManager.networkSystem
            try:
                networksys.AddVirtualSwitch(
                    vswitchName=name,
                    spec=spec
                )
                networksys.Refresh()
            # A vswitch with this name already existing is treated as
            # success (idempotent create).
            except vim.fault.AlreadyExists :
                pass
            except BaseException as e:
                raise NonRecoverableError(
                    "Create Standard Virtual Switch failed:\n"
                    "message is {mes}\n".format(
                        mes=str(e)
                    )
                )
    # Check if vswitch is created on
    # the corresponding host as required.
    newvswitchs = self.get_vswitch_by_name(
        name=name,
        use_cache=False,
        esxi_ip=esxi_ip
    )
    # hostsuuid = []
    # for vswitch, host in newvswitchs:
    #     hostsuuid.append(host.hardware.systemInfo.uuid)
    #
    # for host in hosts:
    #     if host.hardware.systemInfo.uuid not in hostsuuid:
    #         ctx.logger.warn(
    #             "Create Virtual Switch {vs} "
    #             "in host {host} failed".format(
    #                 vs=name,
    #                 host=host.name
    #             )
    #         )
    #         continue
    #
    #     if runtime_properties.get('created_on') is None:
    #         runtime_properties['created_on'] = [host.name]
    #
    #     elif host.name not in runtime_properties['created_on']:
    #         runtime_properties['created_on'].append(host.name)
    return newvswitchs
def port_group_is_on_all_hosts(self, port_group_name, distributed=False):
    """Return True when the named port group is attached to every host.

    :param port_group_name: name of the (distributed) port group
    :param distributed: look at distributed port groups when True,
        standard ones otherwise
    """
    group_count, host_count = self._get_port_group_host_count(
        port_group_name,
        distributed,
    )
    return group_count == host_count
def _get_port_group_host_count(self, port_group_name, distributed=False):
hosts = self.get_host_list()
host_count = len(hosts)
port_groups = self._get_networks()
if distributed:
port_groups = [
pg
for pg in port_groups
if self._port_group_is_distributed(pg)
]
else:
port_groups = [
pg
for pg in port_groups
if not self._port_group_is_distributed(pg)
]
# Observed to create multiple port groups in some circumstances,
# but with different amounts of attached hosts
port_groups = [pg for pg in port_groups if pg.name == port_group_name]
port_group_counts = [len(pg.host) for pg in port_groups]
port_group_count = sum(port_group_counts)
self._logger.debug(
'{type} group {name} found on {port_group_count} out of '
'{host_count} hosts.'.format(
type='Distributed port' if distributed else 'Port',
name=port_group_name,
port_group_count=port_group_count,
host_count=host_count,
)
)
return port_group_count, host_count
def get_port_group_by_name(self, name, esxi_ip=''):
    """Return port groups whose name matches *name* (case-insensitive)
    on the host whose name equals *esxi_ip*.

    :param name: port group name to look for
    :param esxi_ip: host name/IP whose network system is searched
    :returns: list of matching port group objects (possibly empty)
    """
    self._logger.debug("Getting port group by name.")
    wanted = name.lower()
    matches = []
    for host in self.get_host_list():
        if host.name != esxi_ip:
            continue
        network_system = host.configManager.networkSystem
        for port_group in network_system.networkInfo.portgroup:
            if port_group.spec.name.lower() == wanted:
                self._logger.debug(
                    "Port group(s) info: \n%s." % "".join(
                        "%s: %s" % item
                        for item in
                        vars(port_group).items()))
                matches.append(port_group)
    return matches
def create_dv_port_group(self, port_group_name, vlan_id, vswitch_name):
    """Create an 'earlyBinding' distributed port group on an existing
    distributed virtual switch and record the switch MTU.

    :param port_group_name: name for the new distributed port group
    :param vlan_id: VLAN id applied to the group's default port config
    :param vswitch_name: name of an existing distributed virtual switch
    :raises NonRecoverableError: when no dvswitches exist or the named
        dvswitch is not among them
    """
    self._logger.debug("Creating dv port group.")
    dvswitches = self.get_dvswitches()
    if vswitch_name not in dvswitches:
        if len(dvswitches) == 0:
            raise NonRecoverableError(
                'No valid dvswitches found. '
                'A distributed virtual switch must exist for distributed '
                'port groups to be used.'
            )
        else:
            raise NonRecoverableError(
                '{dvswitch} was not a valid dvswitch name. The valid '
                'dvswitches are: {dvswitches}'.format(
                    dvswitch=vswitch_name,
                    dvswitches=', '.join(dvswitches),
                )
            )
    dv_port_group_type = 'earlyBinding'
    # Single lookup: the original fetched the same dvswitch twice with
    # identical _get_obj_by_name calls.
    dvswitch = self._get_obj_by_name(
        vim.DistributedVirtualSwitch,
        vswitch_name,
    )
    self._logger.debug("Distributed vSwitch info: {dvswitch}"
                       .format(dvswitch=dvswitch))
    # update mtu: expose the switch MTU to other operations via runtime
    # properties.
    ctx.instance.runtime_properties[
        NETWORK_MTU] = dvswitch.obj.config.maxMtu
    vlan_spec = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec(
        vlanId=vlan_id)
    port_settings = \
        vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy(
            vlan=vlan_spec)
    specification = vim.dvs.DistributedVirtualPortgroup.ConfigSpec(
        name=port_group_name,
        defaultPortConfig=port_settings,
        type=dv_port_group_type)
    self._logger.debug(
        'Adding distributed port group {group_name} to dvSwitch '
        '{dvswitch_name}'.format(
            group_name=port_group_name,
            dvswitch_name=vswitch_name,
        )
    )
    task = dvswitch.obj.AddPortgroup(specification)
    self._wait_for_task(task)
    self._logger.debug("Port created.")
def delete_dv_port_group(self, name):
    """Destroy the distributed port group called *name* and wait for
    the destroy task to complete.

    :param name: name of the distributed port group to delete
    """
    self._logger.debug("Deleting dv port group {name}.".format(name=name))
    port_group = self._get_obj_by_name(
        vim.dvs.DistributedVirtualPortgroup,
        name,
    )
    self._wait_for_task(port_group.obj.Destroy())
    self._logger.debug("Port deleted.")
def get_network_cidr(self, name, switch_distributed):
    """Derive the CIDR of a network from the IP pools associated with it.

    Scans every datacenter's IP pools; the first pool associated with a
    network of the given name and kind wins.

    :param name: network name to look up
    :param switch_distributed: match only distributed port groups when
        True, only standard networks when False
    :returns: CIDR string, or "0.0.0.0/0" when no pool references the
        network
    """
    # search in all datacenters
    for datacenter in self.si.content.rootFolder.childEntity:
        # select all ipppols
        for pool in self.si.content.ipPoolManager.QueryIpPools(dc=datacenter):
            # check network associations pools
            for association in pool.networkAssociation:
                # check network type
                is_distributed = isinstance(
                    association.network,
                    vim.dvs.DistributedVirtualPortgroup)
                if (
                    association.networkName == name and
                    is_distributed == switch_distributed
                ):
                    # convert network information to CIDR
                    return str(netaddr.IPNetwork(
                        '{network}/{netmask}'
                        .format(network=pool.ipv4Config.subnetAddress,
                                netmask=pool.ipv4Config.netmask)))
    # We dont have any ipppols related to network
    return "0.0.0.0/0"
def get_network_mtu(self, name, switch_distributed):
    """Return the MTU of the named network.

    For a distributed network this is the owning dvSwitch's maxMtu.
    For a standard network it is the smallest MTU among the vswitches
    that carry the port group, or -1 when no vswitch carries it.

    :param name: network/port group name
    :param switch_distributed: True for a distributed port group
    :raises NonRecoverableError: when the distributed port group cannot
        be found
    """
    if switch_distributed:
        # select virtual port group
        dv_port_group = self._get_obj_by_name(
            vim.dvs.DistributedVirtualPortgroup,
            name,
        )
        if not dv_port_group:
            raise NonRecoverableError(
                "Unable to get DistributedVirtualPortgroup: {name}"
                .format(name=repr(name)))
        # get assigned VirtualSwith
        dvSwitch = dv_port_group.config.distributedVirtualSwitch
        return dvSwitch.obj.config.maxMtu
    # Vswitches reference their port groups by a string key of this form.
    port_name = "key-vim.host.PortGroup-{name}".format(name=name)
    mtu = -1
    for host in self.get_host_list():
        for vswitch in host.config.network.vswitch:
            # check that we have linked network in portgroup(str list)
            if port_name in vswitch.portgroup:
                # track the smallest MTU among carrying vswitches
                if mtu == -1 or vswitch.mtu < mtu:
                    mtu = vswitch.mtu
    return mtu
def create_ippool(self, datacenter_name, ippool, networks):
    """Create an IP pool on a datacenter and associate networks with it.

    :param datacenter_name: datacenter on which to create the pool
    :param ippool: dict with keys 'name', 'subnet', 'netmask',
        'gateway', 'range' and optional 'dhcp' (default False) and
        'enabled' (default True)
    :param networks: network node instances; each provides
        runtime_properties with 'network_name' and optionally
        'switch_distributed'
    :returns: the id of the created IP pool
    :raises NonRecoverableError: when the datacenter cannot be found
    """
    # create ip pool only on specific datacenter
    dc = self._get_obj_by_name(vim.Datacenter, datacenter_name)
    if not dc:
        raise NonRecoverableError(
            "Unable to get datacenter: {datacenter}"
            .format(datacenter=repr(datacenter_name)))
    pool = vim.vApp.IpPool(name=ippool['name'])
    pool.ipv4Config = vim.vApp.IpPool.IpPoolConfigInfo()
    pool.ipv4Config.subnetAddress = ippool['subnet']
    pool.ipv4Config.netmask = ippool['netmask']
    pool.ipv4Config.gateway = ippool['gateway']
    pool.ipv4Config.range = ippool['range']
    pool.ipv4Config.dhcpServerAvailable = ippool.get('dhcp', False)
    pool.ipv4Config.ipPoolEnabled = ippool.get('enabled', True)
    # add networks to pool
    for network in networks:
        network_name = network.runtime_properties["network_name"]
        self._logger.debug("Attach network {network} to {pool}."
                           .format(network=network_name,
                                   pool=ippool['name']))
        if network.runtime_properties.get("switch_distributed"):
            # search vim.dvs.DistributedVirtualPortgroup
            dv_port_group = self._get_obj_by_name(
                vim.dvs.DistributedVirtualPortgroup,
                network_name,
            )
            pool.networkAssociation.insert(0, vim.vApp.IpPool.Association(
                network=dv_port_group.obj))
        else:
            # Collect all standard networks. FIX: the original rebound
            # the *networks* parameter here, shadowing the sequence
            # being iterated; use a distinct local name instead.
            standard_networks = [
                net for net in self._collect_properties(
                    vim.Network, path_set=["name"],
                ) if not net['obj']._moId.startswith('dvportgroup')]
            # attach all networks with provided name
            for net in standard_networks:
                if net[VSPHERE_RESOURCE_NAME] == network_name:
                    pool.networkAssociation.insert(
                        0, vim.vApp.IpPool.Association(network=net['obj']))
    return self.si.content.ipPoolManager.CreateIpPool(dc=dc.obj, pool=pool)
def delete_ippool(self, datacenter_name, ippool_id):
    """Force-destroy the IP pool *ippool_id* on the named datacenter.

    :param datacenter_name: datacenter holding the pool
    :param ippool_id: id of the pool to destroy
    :raises NonRecoverableError: when the datacenter cannot be found
    """
    datacenter = self._get_obj_by_name(vim.Datacenter, datacenter_name)
    if not datacenter:
        raise NonRecoverableError(
            "Unable to get datacenter: {datacenter}"
            .format(datacenter=repr(datacenter_name)))
    self.si.content.ipPoolManager.DestroyIpPool(
        dc=datacenter.obj, id=ippool_id, force=True)
class RawVolumeClient(VsphereClient):
    # Client for raw datastore file operations: deletion through the
    # vSphere FileManager and upload through the datastore HTTP
    # '/folder' endpoint.

    def delete_file(self, datacenter_name=None, datastorepath=None,
                    datacenter_id=None):
        """Delete a file from a datastore.

        :param datacenter_name: datacenter name (used when no id given)
        :param datastorepath: datastore path of the file to delete
        :param datacenter_id: datacenter id; takes precedence over name
        :raises NonRecoverableError: when the datacenter is not found
        """
        if datacenter_id:
            dc = self._get_obj_by_id(vim.Datacenter, datacenter_id)
        else:
            dc = self._get_obj_by_name(vim.Datacenter, datacenter_name)
        if not dc:
            raise NonRecoverableError(
                "Unable to get datacenter: {datacenter_name}/{datacenter_id}"
                .format(datacenter_name=repr(datacenter_name),
                        datacenter_id=repr(datacenter_id)))
        self.si.content.fileManager.DeleteFile(datastorepath, dc.obj)

    def upload_file(self, datacenter_name, allowed_datastores,
                    allowed_datastore_ids, remote_file, data, host,
                    port):
        """Upload *data* to *remote_file* on a permitted datastore.

        Datastore selection: if both allow-lists are empty, the first
        known datastore is used; otherwise ids are checked first, then
        names.

        :param datacenter_name: datacenter to upload into
        :param allowed_datastores: datastore names allowed to receive
            the file
        :param allowed_datastore_ids: datastore ids allowed (checked
            before names)
        :param remote_file: target path relative to the datastore
        :param data: raw content handed to requests.put
        :param host: host serving the datastore HTTP endpoint
        :param port: port of the datastore HTTP endpoint
        :returns: tuple (datacenter id, '[datastore] remote_file')
        :raises NonRecoverableError: when the datacenter or an allowed
            datastore cannot be found
        """
        dc = self._get_obj_by_name(vim.Datacenter, datacenter_name)
        if not dc:
            raise NonRecoverableError(
                "Unable to get datacenter: {datacenter}"
                .format(datacenter=repr(datacenter_name)))
        self._logger.debug(
            "Will check storage with IDs: {ids}; and names: {names}"
            .format(ids=repr(allowed_datastore_ids),
                    names=repr(allowed_datastores)))
        datastores = self._get_datastores()
        ds = None
        if not allowed_datastores and not allowed_datastore_ids and datastores:
            # No restriction given: fall back to the first datastore.
            ds = datastores[0]
        else:
            # select by datastore ids
            if allowed_datastore_ids:
                for datastore in datastores:
                    if datastore.id in allowed_datastore_ids:
                        ds = datastore
                        break
            # select by datastore names
            if not ds and allowed_datastores:
                for datastore in datastores:
                    if datastore.name in allowed_datastores:
                        ds = datastore
                        break
        if not ds:
            raise NonRecoverableError(
                "Unable to get datastore {allowed} in {available}"
                .format(allowed=repr(allowed_datastores),
                        available=repr([datastore.name
                                        for datastore in datastores])))
        params = {"dsName": ds.name,
                  "dcPath": dc.name}
        http_url = (
            "https://" + host + ":" + str(port) + "/folder/" + remote_file
        )
        # Get the cookie built from the current session
        client_cookie = self.si._stub.cookie
        # Break apart the cookie into it's component parts - This is more than
        # is needed, but a good example of how to break apart the cookie
        # anyways. The verbosity makes it clear what is happening.
        cookie_name = client_cookie.split("=", 1)[0]
        cookie_value = client_cookie.split("=", 1)[1].split(";", 1)[0]
        cookie_path = client_cookie.split("=", 1)[1].split(";", 1)[1].split(
            ";", 1)[0].lstrip()
        cookie_text = " " + cookie_value + "; $" + cookie_path
        # Make a cookie
        cookie = dict()
        cookie[cookie_name] = cookie_text
        # NOTE(review): verify=False disables TLS certificate checking -
        # presumably to tolerate self-signed vCenter certificates;
        # confirm this is intended before shipping.
        response = requests.put(
            http_url,
            params=params,
            data=data,
            headers={'Content-Type': 'application/octet-stream'},
            cookies=cookie,
            verify=False)
        response.raise_for_status()
        return dc.id, "[{datastore}] {file_name}".format(
            datastore=ds.name, file_name=remote_file)
class StorageClient(VsphereClient):
    # Client for managing virtual disks (VMDK files) attached to
    # existing virtual machines.

    def create_storage(self, vm_id, storage_size, parent_key, mode,
                       thin_provision=False):
        """Create a new virtual disk and attach it to the VM.

        :param vm_id: vSphere id of the target virtual machine
        :param storage_size: disk size in GB
        :param parent_key: device key of the SCSI controller to attach
            to; a negative value means "use the single existing
            controller"
        :param mode: disk mode stored on the disk backing
        :param thin_provision: create the disk thin-provisioned if True
        :returns: tuple (VMDK file name, SCSI id formatted 'bus:unit')
        :raises NonRecoverableError: on a suspended VM, an unparsable
            existing VMDK name, a missing/ambiguous SCSI controller, or
            a full controller
        """
        self._logger.debug("Entering create storage procedure.")
        vm = self._get_obj_by_id(vim.VirtualMachine, vm_id)
        self._logger.debug("VM info: \n{}".format(vm))
        if self.is_server_suspended(vm):
            raise NonRecoverableError(
                'Error during trying to create storage:'
                ' invalid VM state - \'suspended\''
            )
        devices = []
        virtual_device_spec = vim.vm.device.VirtualDeviceSpec()
        virtual_device_spec.operation =\
            vim.vm.device.VirtualDeviceSpec.Operation.add
        virtual_device_spec.fileOperation =\
            vim.vm.device.VirtualDeviceSpec.FileOperation.create
        virtual_device_spec.device = vim.vm.device.VirtualDisk()
        # storage_size is in GB: set both the KB and byte capacities.
        virtual_device_spec.device.capacityInKB = storage_size * 1024 * 1024
        virtual_device_spec.device.capacityInBytes =\
            storage_size * 1024 * 1024 * 1024
        virtual_device_spec.device.backing =\
            vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
        virtual_device_spec.device.backing.diskMode = mode
        virtual_device_spec.device.backing.thinProvisioned = thin_provision
        # New disk lands on the same datastore as the VM's first one.
        virtual_device_spec.device.backing.datastore = vm.datastore[0].obj
        vm_devices = vm.config.hardware.device
        vm_disk_filename = None
        vm_disk_filename_increment = 0
        vm_disk_filename_cur = None
        for vm_device in vm_devices:
            # Search all virtual disks
            if isinstance(vm_device, vim.vm.device.VirtualDisk):
                # Generate filename (add increment to VMDK base name)
                vm_disk_filename_cur = vm_device.backing.fileName
                p = re.compile('^(\\[.*\\]\\s+.*\\/.*)\\.vmdk$')
                m = p.match(vm_disk_filename_cur)
                if vm_disk_filename is None:
                    # First disk seen: take '[datastore] dir/name' as
                    # the base for the new disk's file name.
                    vm_disk_filename = m.group(1)
                p = re.compile('^(.*)_([0-9]+)\\.vmdk$')
                m = p.match(vm_disk_filename_cur)
                if m:
                    if m.group(2) is not None:
                        # Track the highest '_N' suffix already in use.
                        increment = int(m.group(2))
                        vm_disk_filename = m.group(1)
                        if increment > vm_disk_filename_increment:
                            vm_disk_filename_increment = increment
        # Exit error if VMDK filename undefined
        if vm_disk_filename is None:
            raise NonRecoverableError(
                'Error during trying to create storage:'
                ' Invalid VMDK name - \'{0}\''.format(vm_disk_filename_cur)
            )
        # Set target VMDK filename
        vm_disk_filename =\
            vm_disk_filename +\
            "_" + str(vm_disk_filename_increment + 1) +\
            ".vmdk"
        # Search virtual SCSI controller
        controller = None
        num_controller = 0
        controller_types = (
            vim.vm.device.VirtualBusLogicController,
            vim.vm.device.VirtualLsiLogicController,
            vim.vm.device.VirtualLsiLogicSASController,
            vim.vm.device.ParaVirtualSCSIController)
        for vm_device in vm_devices:
            if isinstance(vm_device, controller_types):
                if parent_key < 0:
                    # No explicit controller requested: count them all;
                    # more than one is ambiguous and rejected below.
                    num_controller += 1
                    controller = vm_device
                else:
                    if parent_key == vm_device.key:
                        num_controller = 1
                        controller = vm_device
                        break
        if num_controller != 1:
            raise NonRecoverableError(
                'Error during trying to create storage: '
                'SCSI controller cannot be found or is present more than '
                'once.'
            )
        controller_key = controller.key
        # Set new unit number (7 cannot be used, and limit is 15)
        unit_number = None
        vm_vdisk_number = len(controller.device)
        if vm_vdisk_number < 7:
            unit_number = vm_vdisk_number
        elif vm_vdisk_number == 15:
            raise NonRecoverableError(
                'Error during trying to create storage: one SCSI controller '
                'cannot have more than 15 virtual disks.'
            )
        else:
            # NOTE(review): this offsets by one for any count >= 7, not
            # only to skip the reserved unit 7 - confirm intent.
            unit_number = vm_vdisk_number + 1
        virtual_device_spec.device.backing.fileName = vm_disk_filename
        virtual_device_spec.device.controllerKey = controller_key
        virtual_device_spec.device.unitNumber = unit_number
        devices.append(virtual_device_spec)
        config_spec = vim.vm.ConfigSpec()
        config_spec.deviceChange = devices
        task = vm.obj.Reconfigure(spec=config_spec)
        self._logger.debug("Task info: \n%s." % prepare_for_log(vars(task)))
        self._wait_for_task(task)
        # Get the SCSI bus and unit IDs
        scsi_controllers = []
        disks = []
        # Use the device list from the platform rather than the cache because
        # we just created a disk so it won't be in the cache
        for device in vm.obj.config.hardware.device:
            if isinstance(device, vim.vm.device.VirtualSCSIController):
                scsi_controllers.append(device)
            elif isinstance(device, vim.vm.device.VirtualDisk):
                disks.append(device)
        # Find the disk we just created
        for disk in disks:
            if disk.backing.fileName == vm_disk_filename:
                unit = disk.unitNumber
                bus_id = None
                for controller in scsi_controllers:
                    if controller.key == disk.controllerKey:
                        bus_id = controller.busNumber
                        break
                # We found the right disk, we can't do any better than this
                break
        # NOTE(review): if the new disk were absent from the device list,
        # 'bus_id'/'unit' would be unbound here and this would raise
        # NameError rather than the intended error - confirm whether
        # that can happen.
        if bus_id is None:
            raise NonRecoverableError(
                'Could not find SCSI bus ID for disk with filename: '
                '{file}'.format(file=vm_disk_filename)
            )
        else:
            # Give the SCSI ID in the usual format, e.g. 0:1
            scsi_id = ':'.join((str(bus_id), str(unit)))
        return vm_disk_filename, scsi_id

    def delete_storage(self, vm_id, storage_file_name):
        """Detach and destroy the virtual disk backed by the named file.

        :param vm_id: vSphere id of the virtual machine
        :param storage_file_name: backing fileName of the disk to delete
        :raises NonRecoverableError: on a suspended VM or when no disk
            has that backing file
        """
        self._logger.debug("Entering delete storage procedure.")
        vm = self._get_obj_by_id(vim.VirtualMachine, vm_id)
        self._logger.debug("VM info: \n{}".format(vm))
        if self.is_server_suspended(vm):
            raise NonRecoverableError(
                "Error during trying to delete storage: invalid VM state - "
                "'suspended'"
            )
        virtual_device_spec = vim.vm.device.VirtualDeviceSpec()
        virtual_device_spec.operation =\
            vim.vm.device.VirtualDeviceSpec.Operation.remove
        virtual_device_spec.fileOperation =\
            vim.vm.device.VirtualDeviceSpec.FileOperation.destroy
        devices = []
        device_to_delete = None
        for device in vm.config.hardware.device:
            if isinstance(device, vim.vm.device.VirtualDisk)\
                    and device.backing.fileName == storage_file_name:
                device_to_delete = device
        if device_to_delete is None:
            raise NonRecoverableError(
                'Error during trying to delete storage: storage not found')
        virtual_device_spec.device = device_to_delete
        devices.append(virtual_device_spec)
        config_spec = vim.vm.ConfigSpec()
        config_spec.deviceChange = devices
        task = vm.obj.Reconfigure(spec=config_spec)
        self._logger.debug("Task info: \n%s." % prepare_for_log(vars(task)))
        self._wait_for_task(task)

    def get_storage(self, vm_id, storage_file_name):
        """Return the virtual disk device backed by the named file.

        :param vm_id: vSphere id of the virtual machine
        :param storage_file_name: backing fileName to look for
        :returns: the matching VirtualDisk device, or None
        """
        self._logger.debug("Entering get storage procedure.")
        vm = self._get_obj_by_id(vim.VirtualMachine, vm_id)
        self._logger.debug("VM info: \n{}".format(vm))
        if vm:
            for device in vm.config.hardware.device:
                if isinstance(device, vim.vm.device.VirtualDisk)\
                        and device.backing.fileName == storage_file_name:
                    self._logger.debug(
                        "Device info: \n%s." % prepare_for_log(vars(device))
                    )
                    return device
        return None

    def resize_storage(self, vm_id, storage_filename, storage_size):
        """Resize an attached virtual disk to *storage_size* GB.

        :param vm_id: vSphere id of the virtual machine
        :param storage_filename: backing fileName of the disk to resize
        :param storage_size: new size in GB
        :raises NonRecoverableError: on a suspended VM or when no disk
            has that backing file
        """
        self._logger.debug("Entering resize storage procedure.")
        vm = self._get_obj_by_id(vim.VirtualMachine, vm_id)
        self._logger.debug("VM info: \n{}".format(vm))
        if self.is_server_suspended(vm):
            raise NonRecoverableError(
                'Error during trying to resize storage: invalid VM state'
                ' - \'suspended\'')
        disk_to_resize = None
        devices = vm.config.hardware.device
        for device in devices:
            if (isinstance(device, vim.vm.device.VirtualDisk) and
                    device.backing.fileName == storage_filename):
                disk_to_resize = device
        if disk_to_resize is None:
            raise NonRecoverableError(
                'Error during trying to resize storage: storage not found')
        updated_devices = []
        virtual_device_spec = vim.vm.device.VirtualDeviceSpec()
        virtual_device_spec.operation =\
            vim.vm.device.VirtualDeviceSpec.Operation.edit
        virtual_device_spec.device = disk_to_resize
        # storage_size is in GB: set both the KB and byte capacities.
        virtual_device_spec.device.capacityInKB = storage_size * 1024 * 1024
        virtual_device_spec.device.capacityInBytes =\
            storage_size * 1024 * 1024 * 1024
        updated_devices.append(virtual_device_spec)
        config_spec = vim.vm.ConfigSpec()
        config_spec.deviceChange = updated_devices
        task = vm.obj.Reconfigure(spec=config_spec)
        self._logger.debug("VM info: \n{}".format(vm))
        self._wait_for_task(task)
        self._logger.debug("Storage resized to a new size %s." % storage_size)
class ControllerClient(VsphereClient):
    # Client for attaching/detaching virtual controllers (SCSI adapters,
    # ethernet cards) on existing virtual machines.

    def detach_controller(self, vm_id, bus_key):
        """Remove the device with key *bus_key* from the VM.

        :param vm_id: vSphere id of the virtual machine
        :param bus_key: device key of the controller to remove
        :raises NonRecoverableError: if vm_id or bus_key is falsy
        """
        if not vm_id:
            raise NonRecoverableError("VM is not defined")
        if not bus_key:
            raise NonRecoverableError("Device Key is not defined")
        vm = self._get_obj_by_id(vim.VirtualMachine, vm_id)
        config_spec = vim.vm.device.VirtualDeviceSpec()
        config_spec.operation =\
            vim.vm.device.VirtualDeviceSpec.Operation.remove
        for dev in vm.config.hardware.device:
            if hasattr(dev, "key"):
                if dev.key == bus_key:
                    config_spec.device = dev
                    break
        else:
            # for/else: no device with that key exists - nothing to do.
            self._logger.debug("Controller is not defined {}".format(bus_key))
            return
        spec = vim.vm.ConfigSpec()
        spec.deviceChange = [config_spec]
        task = vm.obj.ReconfigVM_Task(spec=spec)
        self._wait_for_task(task)

    def attach_controller(self, vm_id, dev_spec, controller_type):
        """Attach a new controller device and report its identifiers.

        :param vm_id: vSphere id of the virtual machine
        :param dev_spec: VirtualDeviceSpec describing the device to add
        :param controller_type: vim device class used to recognise the
            newly attached controller
        :returns: dict with 'busKey' (and 'busNumber' when the device
            exposes one)
        :raises NonRecoverableError: if vm_id is falsy or the new device
            cannot be identified after reconfiguration
        """
        if not vm_id:
            raise NonRecoverableError("VM is not defined")
        vm = self._get_obj_by_id(vim.VirtualMachine, vm_id)
        # Snapshot existing controller keys so the new one can be
        # spotted after the reconfigure.
        known_keys = []
        for dev in vm.config.hardware.device:
            if isinstance(dev, controller_type):
                known_keys.append(dev.key)
        spec = vim.vm.ConfigSpec()
        spec.deviceChange = [dev_spec]
        task = vm.obj.ReconfigVM_Task(spec=spec)
        self._wait_for_task(task)
        # Re-read the VM bypassing the cache to see the new device.
        vm = self._get_obj_by_id(vim.VirtualMachine, vm_id, use_cache=False)
        controller_properties = {}
        for dev in vm.config.hardware.device:
            if isinstance(dev, controller_type):
                if dev.key not in known_keys:
                    if hasattr(dev, "busNumber"):
                        controller_properties['busNumber'] = dev.busNumber
                    controller_properties['busKey'] = dev.key
                    break
        else:
            # for/else: loop ended without break - no new device found.
            raise NonRecoverableError(
                'Have not found key for new added device')
        return controller_properties

    def generate_scsi_card(self, scsi_properties, vm_id):
        """Build a VirtualDeviceSpec for a new SCSI controller.

        :param scsi_properties: dict with required key 'label' and
            optional keys 'busNumber', 'adapterType' ('lsilogic',
            'lsilogic_sas', anything else -> paravirtual),
            'scsiCtlrUnitNumber', 'sharedBus' ('virtualSharing',
            'physicalSharing', other -> noSharing), 'hotAddRemove'
        :param vm_id: vSphere id of the virtual machine
        :returns: tuple (scsi_spec, controller_type)
        :raises NonRecoverableError: if vm_id is falsy
        """
        if not vm_id:
            raise NonRecoverableError("VM is not defined")
        vm = self._get_obj_by_id(vim.VirtualMachine, vm_id)
        bus_number = scsi_properties.get("busNumber", 0)
        adapter_type = scsi_properties.get('adapterType')
        scsi_controller_label = scsi_properties['label']
        unitNumber = scsi_properties.get("scsiCtlrUnitNumber", -1)
        sharedBus = scsi_properties.get("sharedBus")
        scsi_spec = vim.vm.device.VirtualDeviceSpec()
        # Map adapter type to the vim controller class; paravirtual is
        # the default.
        if adapter_type == "lsilogic":
            summary = "LSI Logic"
            controller_type = vim.vm.device.VirtualLsiLogicController
        elif adapter_type == "lsilogic_sas":
            summary = "LSI Logic Sas"
            controller_type = vim.vm.device.VirtualLsiLogicSASController
        else:
            summary = "VMware paravirtual SCSI"
            controller_type = vim.vm.device.ParaVirtualSCSIController
        # NOTE(review): this takes the highest busNumber of any existing
        # device without adding 1, so it can collide with an existing
        # bus - confirm the intent.
        for dev in vm.config.hardware.device:
            if hasattr(dev, "busNumber"):
                if bus_number < dev.busNumber:
                    bus_number = dev.busNumber
        scsi_spec.device = controller_type()
        scsi_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        scsi_spec.device.busNumber = bus_number
        scsi_spec.device.deviceInfo = vim.Description()
        scsi_spec.device.deviceInfo.label = scsi_controller_label
        scsi_spec.device.deviceInfo.summary = summary
        if int(unitNumber) >= 0:
            scsi_spec.device.scsiCtlrUnitNumber = int(unitNumber)
        if 'hotAddRemove' in scsi_properties:
            scsi_spec.device.hotAddRemove = scsi_properties['hotAddRemove']
        sharingType = vim.vm.device.VirtualSCSIController.Sharing
        if sharedBus == "virtualSharing":
            # Virtual disks can be shared between virtual machines on the
            # same server
            scsi_spec.device.sharedBus = sharingType.virtualSharing
        elif sharedBus == "physicalSharing":
            # Virtual disks can be shared between virtual machines on
            # any server
            scsi_spec.device.sharedBus = sharingType.physicalSharing
        else:
            # Virtual disks cannot be shared between virtual machines
            scsi_spec.device.sharedBus = sharingType.noSharing
        return scsi_spec, controller_type

    def generate_ethernet_card(self, ethernet_card_properties):
        """Build a VirtualDeviceSpec for a new virtual ethernet card.

        :param ethernet_card_properties: dict with the network name
            under the VSPHERE_RESOURCE_NAME key plus optional keys:
            switch_distributed, adapter_type (default "Vmxnet3"),
            start_connected, allow_guest_control, network_connected,
            wake_on_lan_enabled (all default True),
            address_type (default 'assigned'), mac_address
        :returns: tuple (nicspec, controller_type)
        :raises NonRecoverableError: when the network cannot be found
        """
        network_name = ethernet_card_properties[VSPHERE_RESOURCE_NAME]
        switch_distributed = ethernet_card_properties.get('switch_distributed')
        adapter_type = ethernet_card_properties.get('adapter_type', "Vmxnet3")
        start_connected = ethernet_card_properties.get('start_connected', True)
        allow_guest_control = ethernet_card_properties.get(
            'allow_guest_control', True)
        network_connected = ethernet_card_properties.get(
            'network_connected', True)
        wake_on_lan_enabled = ethernet_card_properties.get(
            'wake_on_lan_enabled', True)
        address_type = ethernet_card_properties.get('address_type', 'assigned')
        mac_address = ethernet_card_properties.get('mac_address')
        # A NIC that starts disconnected must not be flagged
        # start_connected.
        if not network_connected and start_connected:
            self._logger.debug(
                "Network created unconnected so disable start_connected")
            start_connected = False
        if switch_distributed:
            network_obj = self._get_obj_by_name(
                vim.dvs.DistributedVirtualPortgroup,
                network_name,
            )
        else:
            network_obj = self._get_obj_by_name(
                vim.Network,
                network_name,
            )
        if network_obj is None:
            raise NonRecoverableError(
                'Network {0} could not be found'.format(network_name))
        nicspec = vim.vm.device.VirtualDeviceSpec()
        # Info level as this is something that was requested in the
        # blueprint
        self._logger.info('Adding network interface on {name}'
                          .format(name=network_name))
        nicspec.operation = \
            vim.vm.device.VirtualDeviceSpec.Operation.add
        # Map adapter type to the vim NIC class; Vmxnet3 is the default.
        if adapter_type == "E1000e":
            controller_type = vim.vm.device.VirtualE1000e
        elif adapter_type == "E1000":
            controller_type = vim.vm.device.VirtualE1000
        elif adapter_type == "Sriov":
            controller_type = vim.vm.device.VirtualSriovEthernetCard
        elif adapter_type == "Vmxnet2":
            controller_type = vim.vm.device.VirtualVmxnet2
        else:
            controller_type = vim.vm.device.VirtualVmxnet3
        nicspec.device = controller_type()
        if switch_distributed:
            # Distributed networks attach via a port connection on the
            # owning dvSwitch.
            info = vim.vm.device.VirtualEthernetCard\
                .DistributedVirtualPortBackingInfo()
            nicspec.device.backing = info
            nicspec.device.backing.port =\
                vim.dvs.PortConnection()
            nicspec.device.backing.port.switchUuid =\
                network_obj.config.distributedVirtualSwitch.uuid
            nicspec.device.backing.port.portgroupKey =\
                network_obj.key
        else:
            nicspec.device.backing = \
                vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
            nicspec.device.backing.network = network_obj.obj
            nicspec.device.backing.deviceName = network_name
        nicspec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
        nicspec.device.connectable.startConnected = start_connected
        nicspec.device.connectable.allowGuestControl = allow_guest_control
        nicspec.device.connectable.connected = network_connected
        nicspec.device.wakeOnLanEnabled = wake_on_lan_enabled
        nicspec.device.addressType = address_type
        if mac_address:
            nicspec.device.macAddress = mac_address
        return nicspec, controller_type
def _with_client(client_name, client):
def decorator(f):
@wraps(f)
def wrapper(connection_config, *args, **kwargs):
kwargs[client_name] = client().get(config=connection_config)
if not hasattr(f, '__wrapped__'):
# don't pass connection_config to the real operation
kwargs.pop('connection_config', None)
return f(*args, **kwargs)
wrapper.__wrapped__ = f
return wrapper
return decorator
# Ready-made decorators that inject each concrete client type into an
# operation's kwargs under the corresponding keyword.
with_server_client = _with_client('server_client', ServerClient)
with_network_client = _with_client('network_client', NetworkClient)
with_storage_client = _with_client('storage_client', StorageClient)
with_rawvolume_client = _with_client('rawvolume_client', RawVolumeClient)
|
"""
.. module:: gene_cross_reference_file_generators
:platform: any
:synopsis: Module that generates the Gene Cross Reference files
.. moduleauthor:: AGR consortium
"""
import os
import logging
import csv
import json
from upload import upload
from headers import create_header
from validators import json_validator
logger = logging.getLogger(name=__name__)
class GeneCrossReferenceFileGenerator:
    """Generates the AGR Gene Cross Reference files.

    Writes a TSV and a JSON rendering of the supplied cross references,
    optionally validates the JSON against its schema, and optionally
    uploads both files to the FMS.
    """

    def __init__(self, gene_cross_references, generated_files_folder, config_info):
        """
        :param gene_cross_references: iterable of cross-reference records
            (mappings with GeneID, GlobalCrossReferenceID,
            CrossReferenceCompleteURL, ResourceDescriptorPage, TaxonID)
        :param generated_files_folder: directory for the generated files
        :param config_info: configuration object exposing a ``config``
            mapping (used for RELEASE_VERSION)
        """
        self.gene_cross_references = gene_cross_references
        self.config_info = config_info
        self.generated_files_folder = generated_files_folder

    @classmethod
    def _generate_header(cls, config_info, taxon_ids, data_format):
        """Build the standard AGR file header.

        :param config_info: configuration object
        :param taxon_ids: set of taxon IDs present in the data
        :param data_format: 'tsv' or 'json'
        :return: header produced by headers.create_header
        """
        return create_header('Gene Cross Reference',
                             config_info.config['RELEASE_VERSION'],
                             data_format=data_format,
                             config_info=config_info,
                             taxon_ids=taxon_ids)

    def generate_file(self, upload_flag=False, validate_flag=False):
        """Write the TSV and JSON files; optionally validate and upload.

        :param upload_flag: submit both files to the FMS when True
        :param validate_flag: run the JSON schema validator when True
        """
        tsv_filename = 'agr-gene-cross-references-' + self.config_info.config['RELEASE_VERSION'] + '.tsv'
        json_filename = 'agr-gene-cross-references-json-' + self.config_info.config['RELEASE_VERSION'] + '.json'
        output_filepath = os.path.join(self.generated_files_folder, tsv_filename)
        output_filepath_json = os.path.join(self.generated_files_folder, json_filename)
        columns = ['GeneID',
                   'GlobalCrossReferenceID',
                   'CrossReferenceCompleteURL',
                   'ResourceDescriptorPage',
                   'TaxonID']
        listofxrefs = []
        taxon_ids = set()
        rows = []
        for data in self.gene_cross_references:
            listofxrefs.append(data)
            # Copy exactly the TSV columns out of each record.
            row = {column: data[column] for column in columns}
            taxon_ids.add(data['TaxonID'])
            rows.append(row)
        # Context manager so the handle is closed even when a write
        # raises (the original opened the file early and leaked the
        # handle on any exception).
        with open(output_filepath, 'w') as gene_cross_reference_file:
            gene_cross_reference_file.write(self._generate_header(self.config_info, taxon_ids, 'tsv'))
            tsv_writer = csv.DictWriter(gene_cross_reference_file, delimiter='\t', fieldnames=columns, lineterminator="\n")
            tsv_writer.writeheader()
            tsv_writer.writerows(rows)
        with open(output_filepath_json, 'w') as outfile:
            contents = {'metadata': self._generate_header(self.config_info, taxon_ids, 'json'),
                        'data': listofxrefs}
            json.dump(contents, outfile)
        if validate_flag:
            json_validator.JsonValidator(output_filepath_json, 'gene-cross-references').validateJSON()
        if upload_flag:
            logger.info("Submitting to FMS")
            process_name = "1"
            logger.info("uploading TSV version of the gene cross references file.")
            upload.upload_process(process_name, tsv_filename, self.generated_files_folder, 'GENECROSSREFERENCE',
                                  'COMBINED', self.config_info)
            logger.info("uploading JSON version of the gene cross references file.")
            upload.upload_process(process_name, json_filename, self.generated_files_folder, 'GENECROSSREFERENCEJSON',
                                  'COMBINED', self.config_info)
|
# CS121 Linear regression
# General purpose model representation and selection code
#NAME: ALEISTER MONTFORT
#CNETID: 12174240
import numpy as np
import matplotlib.pylab as plt
import math
from asserts import assert_Xy, assert_Xbeta
#from dataset import DataSet
#############################
# #
# Our code: DO NOT MODIFY #
# #
#############################
def prepend_ones_column(A):
    """
    Add a ones column to the left side of an array
    """
    intercept_col = np.ones((A.shape[0], 1))
    return np.hstack((intercept_col, A))
def linear_regression(X, y):
    """
    Compute linear regression. Finds model, beta, that minimizes
    X*beta - y in a least squared sense.

    Inputs:
      X: (2D Numpy array of floats) predictor/independent variables
      y: (1D Numpy array) dependent variable

    Returns: Numpy array beta, which is used only by apply_beta

    Examples
    --------
    >>> X = np.array([[5, 2], [3, 2], [6, 2.1], [7, 3]]) # predictors
    >>> y = np.array([5, 2, 6, 6]) # dependent
    >>> beta = linear_regression(X, y) # compute the coefficients
    >>> beta
    array([ 1.20104895,  1.41083916, -1.6958042 ])
    >>> apply_beta(beta, X) # apply the function defined by beta
    array([ 4.86363636,  2.04195804,  6.1048951 ,  5.98951049])
    """
    assert_Xy(X, y, fname='linear_regression')
    X_with_ones = prepend_ones_column(X)
    # Do actual computation. rcond=None selects numpy's modern default
    # cutoff for small singular values and avoids the FutureWarning that
    # lstsq emits when rcond is left unspecified.
    beta = np.linalg.lstsq(X_with_ones, y, rcond=None)[0]
    return beta
def apply_beta(beta, X):
    '''
    Apply beta, the function generated by linear_regression, to the
    specified values

    Inputs:
      beta: beta as returned by linear_regression
      X: (2D Numpy array of floats) predictor/independent variables

    Returns:
      result of applying beta to the data, as an array.

    Given:
      beta = array([B0, B1, B2,...BK])
      X = array([[x11, x12, ..., x0K],
                 [x21, x22, ..., x1K],
                 ...
                 [xN1, xN2, ..., xNK]])
    result will be:
      array([B0+B1*x11+B2*x12+...+BK*x1K,
             B0+B1*x21+B2*x22+...+BK*x2K,
             ...
             B0+B1*xN1+B2*xN2+...+BK*xNK])
    '''
    assert_Xbeta(X, beta, fname='apply_beta')

    # Augment with the intercept column, then evaluate X*beta.
    X_augmented = prepend_ones_column(X)
    return np.dot(X_augmented, beta)
###############
# #
# Your code #
# #
###############
class DataSet(object):
    '''
    Class for representing a data set loaded from a directory that
    contains data.csv and parameters.json.
    '''

    def __init__(self, dir_path):
        '''
        Constructor

        Inputs:
            dir_path: (string) path to the directory that contains the
              file
        '''
        self.dir_path = dir_path

        labels_and_data = load_numpy_array(dir_path, "data.csv")
        params = load_json_file(dir_path, "parameters.json")

        # Metadata describing which columns play which role.
        self.name = params["name"]
        self.predictor_vars = params["predictor_vars"]
        self.dependent_var = params["dependent_var"]
        self.labels = labels_and_data[0]

        # Split the rows into training and testing portions using the
        # fraction and seed supplied by parameters.json.
        train, test = train_test_split(
            labels_and_data[1],
            test_size=None,
            train_size=params["training_fraction"],
            random_state=params["seed"])
        self.training_data = train
        self.test_data = test
####################################
class Model(object):
    '''
    A linear regression model fit on a dataset's training split,
    restricted to a chosen subset of predictor columns.
    '''

    def __init__(self, dataset, pred_vars):
        '''
        Construct a data structure to hold the model.

        Inputs:
            dataset: a dataset instance
            pred_vars: a list of the indices for the columns used in
              the model.
        '''
        self.dataset = dataset
        self.pred_vars = pred_vars

        # Slice out the predictor and dependent columns for both splits.
        data_t = dataset.training_data
        data_test = dataset.test_data
        X = data_t[:, pred_vars]
        y = data_t[:, dataset.dependent_var]
        X_test = data_test[:, pred_vars]
        y_test = data_test[:, dataset.dependent_var]

        # Model parameters and fitted values for each split.
        # NOTE(review): test_betas re-fits the model on the test split;
        # the usual procedure would evaluate the training betas on the
        # test data -- kept as-is to preserve existing behavior.
        self.betas = linear_regression(X, y)
        self.test_betas = linear_regression(X_test, y_test)
        self.yhat = apply_beta(self.betas, X)
        self.yhat_test = apply_beta(self.test_betas, X_test)

        # Coefficients of determination (R^2) for both splits, plus the
        # adjusted R^2 which penalizes additional predictors.
        self.rsquare = 1 - ((var(y - self.yhat)) / var(y))
        self.adjrsquare = self.rsquare - (1 - self.rsquare) * ((len(self.betas) - 1)
            / (len(self.yhat) - ((len(self.betas) - 2))))
        self.test_rsq = 1 - ((var(y_test - self.yhat_test) / var(y_test)))
        self.labels = dataset.labels

    def __repr__(self):
        '''
        Represents the object Model

        Returns: a string "<dep>~ B0 + B1*lab1 + ..." followed by the
        model's training R^2 on a second line.
        '''
        # Intercept term first, using the dependent variable's label.
        output = (self.labels[-1]) + "~ " + str(round(self.betas[0], 6))
        # Pair each slope coefficient with its predictor's label.
        # (The original code imported itertools here but never used it,
        # and wrapped the slopes in a needless one-element list.)
        pred_labels = [self.labels[i] for i in self.pred_vars]
        for coef, label in zip(self.betas[1:], pred_labels):
            output = output + " + " + str(round(coef, 6)) + "*" + label
        return output + "\n" + "R2:" + str(self.rsquare)
def var(y):
    '''
    Auxiliar function

    Returns the (population) variance of an array

    Inputs: array
    Returns: Float
    '''
    deviations = y - np.mean(y)
    return np.mean(deviations ** 2)
#######################################
###############TASKS##################
def task_1a(data):
    '''
    Computes univariate models for all k predictor variables in the dataset

    Inputs: Dataset with training portion of data
    Returns: List with k Models along with their R2 value
    '''
    # One single-predictor model per column index.
    return [Model(data, [pred]) for pred in data.predictor_vars]
def task_1b(data):
    '''
    Computes a single model that uses all of the dataset's predictor variables
    to predict the dependent variable.

    Input: Dataset with training portion of data
    Returns: Representation object of a multivariate model along with its R2
    value.
    '''
    # Pass a fresh copy so the dataset's own list is never aliased.
    return Model(data, list(data.predictor_vars))
def task2(data, n = 2):
    '''
    Tests all possible bivariate models (K = 2) and determines the one with
    the highest R2 value.

    Inputs: data: Dataset with training portion of data
            n: size of the predictor combinations to test (default 2)
    Returns: the fitted Model with the highest R2 value.
    '''
    from itertools import combinations

    # Fit one model per combination of n predictors, keeping the fitted
    # model itself so the winner does not have to be re-fit afterwards
    # (the original built the best model a second time).
    fitted = [Model(data, pair)
              for pair in combinations(data.predictor_vars, n)]
    return max(fitted, key=lambda m: m.rsquare)
def task3(data):
    '''
    Implements a heuristic known as Backward elimination. It starts with a set
    that contains all potential predictor variables and then repeatedly
    eliminates variables until the set contains K variables. At each step, the
    algorithm identifies the variable in the model that when subtracted,
    yields the model with the best R2 value and eliminates it from the set.

    Inputs: DataSet
    Returns: A list with the best model for K independent variables
    '''
    remaining = data.predictor_vars
    models_per_size = []
    while len(remaining) > 1:
        # Score every subset obtained by dropping one variable.  The
        # subset keeps the original rotated order (tail + head) so the
        # surviving variable list matches the original implementation.
        scored = []
        for idx in range(len(remaining)):
            subset = remaining[idx + 1:] + remaining[:idx]
            scored.append((subset, Model(data, subset).rsquare))
        best_subset = max(scored, key=lambda pair: pair[1])[0]
        # Store the winning model (with sorted columns) for this size.
        models_per_size.append(Model(data, sorted(best_subset)))
        remaining = best_subset
    # Reverse so the univariate model comes first.
    models_per_size.reverse()
    return models_per_size
def task4(data):
    '''
    Runs the backward elimination heuristic (as in task3) and selects the
    candidate model with the largest adjusted R2.

    Inputs: DataSet
    Output: tuple of (the chosen model, "Adjusted R2:", its adjusted R2
    value).
    '''
    remaining = data.predictor_vars
    candidates = []
    # Backward elimination: repeatedly drop the variable whose removal
    # yields the best R2, keeping the fitted model at every size.  The
    # original version refit each candidate twice and contained a dead
    # `model.adjrsquare` statement plus an unused sorted re-fit; those
    # are removed here -- the returned model is unchanged.
    while len(remaining) > 1:
        scored = []
        for idx in range(len(remaining)):
            subset = remaining[idx + 1:] + remaining[:idx]
            scored.append((subset, Model(data, subset)))
        best_subset, best_fit = max(scored, key=lambda pair: pair[1].rsquare)
        candidates.append(best_fit)
        remaining = best_subset
    # Among the per-size winners, pick the largest adjusted R2.
    best_model = max(candidates, key=lambda m: m.adjrsquare)
    return best_model, "Adjusted R2:", best_model.adjrsquare
def task5(data):
    '''
    Evaluates the model selected in Task 4 with the dataset in data_test.

    Input: Dataset
    Output: tuple of (the model chosen in Task 4, "Testing R2:", its R2
    value computed using the testing data).
    '''
    remaining = data.predictor_vars
    candidates = []
    # Same backward-elimination walk as task4 (the duplication the
    # original author flagged); only the return statement differs.
    # The dead `model.adjrsquare` statement and redundant re-fits of
    # each candidate have been removed -- the returned model is
    # unchanged.
    while len(remaining) > 1:
        scored = []
        for idx in range(len(remaining)):
            subset = remaining[idx + 1:] + remaining[:idx]
            scored.append((subset, Model(data, subset)))
        best_subset, best_fit = max(scored, key=lambda pair: pair[1].rsquare)
        candidates.append(best_fit)
        remaining = best_subset
    # Select by adjusted R2, then report the R2 on the testing split.
    best_model = max(candidates, key=lambda m: m.adjrsquare)
    return best_model, "Testing R2:", best_model.test_rsq
|
# coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Config Parser."""
from unittest import mock
from absl.testing import parameterized
from util import app_util
from util import config_parser
# Test case uses the base64 representation of this config:
# {
# "color_terms" : {
# "イエロー": "イエロー",
# "オレンジ": "オレンジ",
# "アプリコット": "オレンジ",
# }
# }
# Injected below (via mock.patch.dict) as a Drive config override so the
# parser's override code path can be exercised without real files.
_BASE_64_CONFIG = 'ewogICJjb2xvcl90ZXJtcyIgOiB7CiAgICAi44Kk44Ko44Ot44O8IjogIuOCpOOCqOODreODvCIsCiAgICAi44Kq44Os44Oz44K4IjogIuOCquODrOODs+OCuCIsCiAgICAi44Ki44OX44Oq44Kz44OD44OIIjogIuOCquODrOODs+OCuCIsCiAgfQp9'
@mock.patch.dict(
    'flask.current_app.config', {
        'DRIVE_CONFIG_OVERRIDES': {
            'color_optimizer_config_override': _BASE_64_CONFIG
        }
    })
class ConfigParserTest(parameterized.TestCase):
  """Unit tests for config_parser.get_config_contents."""

  def setUp(self):
    super().setUp()
    # Tests need an active Flask app context so the patched
    # current_app.config above is visible to the parser.
    app_util.setup_test_app()

  @parameterized.named_parameters([{
      'testcase_name': 'override',
      'config_override_key': 'color_optimizer_config_override',
      'config_filename': 'color_optimizer_config_test',
  }])
  def test_get_config_contents_creates_dict_from_valid_override(
      self, config_override_key, config_filename):
    """A matching override key decodes the patched base64 config."""
    config = config_parser.get_config_contents(config_override_key,
                                               config_filename)
    self.assertEqual(config['color_terms']['アプリコット'], 'オレンジ')

  @parameterized.named_parameters([{
      'testcase_name': 'filename',
      'config_override_key': 'unknown',
      'config_filename': 'color_optimizer_config_test',
  }])
  def test_get_config_contents_creates_dict_from_valid_file(
      self, config_override_key, config_filename):
    """With no matching override, the named config file is loaded."""
    config = config_parser.get_config_contents(config_override_key,
                                               config_filename)
    self.assertEqual(config['color_terms']['apricot'], 'orange')

  @parameterized.named_parameters([{
      'testcase_name': 'filename',
      'config_override_key': 'unknown',
      'config_filename': 'unknown',
  }])
  def test_get_config_contents_returns_empty_dict_when_no_config_matches(
      self, config_override_key, config_filename):
    """Neither an override nor a file match yields an empty dict."""
    config = config_parser.get_config_contents(config_override_key,
                                               config_filename)
    self.assertEqual(config, {})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.